From 7a6f1a445066c7c6720cd0583cf1cf56f6646955 Mon Sep 17 00:00:00 2001 From: "Felipe R. Monteiro" Date: Thu, 8 Aug 2024 21:07:32 -0400 Subject: [PATCH 1/7] Bump Kani version to 0.54.0 (#3430) ## 0.54.0 ### Major Changes * We added support for slices in the `#[kani::modifies(...)]` clauses when using function contracts. * We introduce an `#[safety_constraint(...)]` attribute helper for the `Arbitrary` and `Invariant` macros. * We enabled support for concrete playback for harnesses that contain stubs or function contracts. * We added support for log2*, log10*, powif*, fma*, and sqrt* intrinsics. ### Breaking Changes * The `-Z ptr-to-ref-cast-checks` option has been removed, and pointer validity checks when casting raw pointers to references are now run by default. ## What's Changed * Make Kani reject mutable pointer casts if padding is incompatible and memory initialization is checked by @artemagvanian in https://github.com/model-checking/kani/pull/3332 * Fix visibility of some Kani intrinsics by @artemagvanian in https://github.com/model-checking/kani/pull/3323 * Function Contracts: Modify Slices by @pi314mm in https://github.com/model-checking/kani/pull/3295 * Support for disabling automatically generated pointer checks to avoid reinstrumentation by @artemagvanian in https://github.com/model-checking/kani/pull/3344 * Add support for global transformations by @artemagvanian in https://github.com/model-checking/kani/pull/3348 * Enable an `#[safety_constraint(...)]` attribute helper for the `Arbitrary` and `Invariant` macros by @adpaco-aws in https://github.com/model-checking/kani/pull/3283 * Fix contract handling of promoted constants and constant static by @celinval in https://github.com/model-checking/kani/pull/3305 * Bump CBMC Viewer to 3.9 by @tautschnig in https://github.com/model-checking/kani/pull/3373 * Update to CBMC version 6.1.1 by @tautschnig in https://github.com/model-checking/kani/pull/2995 * Define a struct-level `#[safety_constraint(...)]` attribute 
by @adpaco-aws in https://github.com/model-checking/kani/pull/3270 * Enable concrete playback for contract and stubs by @celinval in https://github.com/model-checking/kani/pull/3389 * Add code scanner tool by @celinval in https://github.com/model-checking/kani/pull/3120 * Enable contracts in associated functions by @celinval in https://github.com/model-checking/kani/pull/3363 * Enable log2*, log10* intrinsics by @tautschnig in https://github.com/model-checking/kani/pull/3001 * Enable powif* intrinsics by @tautschnig in https://github.com/model-checking/kani/pull/2999 * Enable fma* intrinsics by @tautschnig in https://github.com/model-checking/kani/pull/3002 * Enable sqrt* intrinsics by @tautschnig in https://github.com/model-checking/kani/pull/3000 * Remove assigns clause for ZST pointers by @carolynzech in https://github.com/model-checking/kani/pull/3417 * Instrumentation for delayed UB stemming from uninitialized memory by @artemagvanian in https://github.com/model-checking/kani/pull/3374 * Unify kani library and kani core logic by @jaisnan in https://github.com/model-checking/kani/pull/3333 * Stabilize pointer-to-reference cast validity checks by @artemagvanian in https://github.com/model-checking/kani/pull/3426 * Rust toolchain upgraded to `nightly-2024-08-07` by @jaisnan @qinheping @tautschnig @feliperodri ## New Contributors * @carolynzech made their first contribution in https://github.com/model-checking/kani/pull/3387 **Full Changelog**: https://github.com/model-checking/kani/compare/kani-0.53.0...kani-0.54.0 By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 and MIT licenses. Signed-off-by: Felipe R. 
Monteiro --- CHANGELOG.md | 40 ++++++++++++++++++++++++++++++++++ Cargo.lock | 20 ++++++++--------- Cargo.toml | 2 +- cprover_bindings/Cargo.toml | 2 +- kani-compiler/Cargo.toml | 2 +- kani-driver/Cargo.toml | 2 +- kani_metadata/Cargo.toml | 2 +- library/kani/Cargo.toml | 2 +- library/kani_core/Cargo.toml | 2 +- library/kani_macros/Cargo.toml | 2 +- library/std/Cargo.toml | 2 +- tools/build-kani/Cargo.toml | 2 +- 12 files changed, 60 insertions(+), 20 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 47ae290853a3..d36def1a45f1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,46 @@ This file contains notable changes (e.g. breaking changes, major changes, etc.) This file was introduced starting Kani 0.23.0, so it only contains changes from version 0.23.0 onwards. +## [0.54.0] + +### Major Changes +* We added support for slices in the `#[kani::modifies(...)]` clauses when using function contracts. +* We introduce an `#[safety_constraint(...)]` attribute helper for the `Arbitrary` and `Invariant` macros. +* We enabled support for concrete playback for harnesses that contain stubs or function contracts. +* We added support for log2*, log10*, powif*, fma*, and sqrt* intrinsics. + +### Breaking Changes +* The `-Z ptr-to-ref-cast-checks` option has been removed, and pointer validity checks when casting raw pointers to references are now run by default. 
+ +## What's Changed +* Make Kani reject mutable pointer casts if padding is incompatible and memory initialization is checked by @artemagvanian in https://github.com/model-checking/kani/pull/3332 +* Fix visibility of some Kani intrinsics by @artemagvanian in https://github.com/model-checking/kani/pull/3323 +* Function Contracts: Modify Slices by @pi314mm in https://github.com/model-checking/kani/pull/3295 +* Support for disabling automatically generated pointer checks to avoid reinstrumentation by @artemagvanian in https://github.com/model-checking/kani/pull/3344 +* Add support for global transformations by @artemagvanian in https://github.com/model-checking/kani/pull/3348 +* Enable an `#[safety_constraint(...)]` attribute helper for the `Arbitrary` and `Invariant` macros by @adpaco-aws in https://github.com/model-checking/kani/pull/3283 +* Fix contract handling of promoted constants and constant static by @celinval in https://github.com/model-checking/kani/pull/3305 +* Bump CBMC Viewer to 3.9 by @tautschnig in https://github.com/model-checking/kani/pull/3373 +* Update to CBMC version 6.1.1 by @tautschnig in https://github.com/model-checking/kani/pull/2995 +* Define a struct-level `#[safety_constraint(...)]` attribute by @adpaco-aws in https://github.com/model-checking/kani/pull/3270 +* Enable concrete playback for contract and stubs by @celinval in https://github.com/model-checking/kani/pull/3389 +* Add code scanner tool by @celinval in https://github.com/model-checking/kani/pull/3120 +* Enable contracts in associated functions by @celinval in https://github.com/model-checking/kani/pull/3363 +* Enable log2*, log10* intrinsics by @tautschnig in https://github.com/model-checking/kani/pull/3001 +* Enable powif* intrinsics by @tautschnig in https://github.com/model-checking/kani/pull/2999 +* Enable fma* intrinsics by @tautschnig in https://github.com/model-checking/kani/pull/3002 +* Enable sqrt* intrinsics by @tautschnig in 
https://github.com/model-checking/kani/pull/3000 +* Remove assigns clause for ZST pointers by @carolynzech in https://github.com/model-checking/kani/pull/3417 +* Instrumentation for delayed UB stemming from uninitialized memory by @artemagvanian in https://github.com/model-checking/kani/pull/3374 +* Unify kani library and kani core logic by @jaisnan in https://github.com/model-checking/kani/pull/3333 +* Stabilize pointer-to-reference cast validity checks by @artemagvanian in https://github.com/model-checking/kani/pull/3426 +* Rust toolchain upgraded to `nightly-2024-08-07` by @jaisnan @qinheping @tautschnig @feliperodri + +## New Contributors +* @carolynzech made their first contribution in https://github.com/model-checking/kani/pull/3387 + +**Full Changelog**: https://github.com/model-checking/kani/compare/kani-0.53.0...kani-0.54.0 + ## [0.53.0] ### Major Changes diff --git a/Cargo.lock b/Cargo.lock index 52250d2468c8..12c28c49c1bf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -92,7 +92,7 @@ checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" [[package]] name = "build-kani" -version = "0.53.0" +version = "0.54.0" dependencies = [ "anyhow", "cargo_metadata", @@ -234,7 +234,7 @@ dependencies = [ [[package]] name = "cprover_bindings" -version = "0.53.0" +version = "0.54.0" dependencies = [ "lazy_static", "linear-map", @@ -432,7 +432,7 @@ checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "kani" -version = "0.53.0" +version = "0.54.0" dependencies = [ "kani_core", "kani_macros", @@ -440,7 +440,7 @@ dependencies = [ [[package]] name = "kani-compiler" -version = "0.53.0" +version = "0.54.0" dependencies = [ "clap", "cprover_bindings", @@ -461,7 +461,7 @@ dependencies = [ [[package]] name = "kani-driver" -version = "0.53.0" +version = "0.54.0" dependencies = [ "anyhow", "cargo_metadata", @@ -489,7 +489,7 @@ dependencies = [ [[package]] name = "kani-verifier" -version = "0.53.0" +version = "0.54.0" 
dependencies = [ "anyhow", "home", @@ -498,14 +498,14 @@ dependencies = [ [[package]] name = "kani_core" -version = "0.53.0" +version = "0.54.0" dependencies = [ "kani_macros", ] [[package]] name = "kani_macros" -version = "0.53.0" +version = "0.54.0" dependencies = [ "proc-macro-error", "proc-macro2", @@ -515,7 +515,7 @@ dependencies = [ [[package]] name = "kani_metadata" -version = "0.53.0" +version = "0.54.0" dependencies = [ "clap", "cprover_bindings", @@ -1034,7 +1034,7 @@ checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "std" -version = "0.53.0" +version = "0.54.0" dependencies = [ "kani", ] diff --git a/Cargo.toml b/Cargo.toml index 68b5bcc20ff3..f2301983fcb4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,7 @@ [package] name = "kani-verifier" -version = "0.53.0" +version = "0.54.0" edition = "2021" description = "A bit-precise model checker for Rust." readme = "README.md" diff --git a/cprover_bindings/Cargo.toml b/cprover_bindings/Cargo.toml index aced9e5b9b65..c53ffb207fcf 100644 --- a/cprover_bindings/Cargo.toml +++ b/cprover_bindings/Cargo.toml @@ -3,7 +3,7 @@ [package] name = "cprover_bindings" -version = "0.53.0" +version = "0.54.0" edition = "2021" license = "MIT OR Apache-2.0" publish = false diff --git a/kani-compiler/Cargo.toml b/kani-compiler/Cargo.toml index 23389c156302..24ed00f54338 100644 --- a/kani-compiler/Cargo.toml +++ b/kani-compiler/Cargo.toml @@ -3,7 +3,7 @@ [package] name = "kani-compiler" -version = "0.53.0" +version = "0.54.0" edition = "2021" license = "MIT OR Apache-2.0" publish = false diff --git a/kani-driver/Cargo.toml b/kani-driver/Cargo.toml index d58d686d7d43..c57ec8e8e2f2 100644 --- a/kani-driver/Cargo.toml +++ b/kani-driver/Cargo.toml @@ -3,7 +3,7 @@ [package] name = "kani-driver" -version = "0.53.0" +version = "0.54.0" edition = "2021" description = "Build a project with Kani and run all proof harnesses" license = "MIT OR Apache-2.0" diff --git a/kani_metadata/Cargo.toml 
b/kani_metadata/Cargo.toml index 816752a58e03..de91900d6d9c 100644 --- a/kani_metadata/Cargo.toml +++ b/kani_metadata/Cargo.toml @@ -3,7 +3,7 @@ [package] name = "kani_metadata" -version = "0.53.0" +version = "0.54.0" edition = "2021" license = "MIT OR Apache-2.0" publish = false diff --git a/library/kani/Cargo.toml b/library/kani/Cargo.toml index 1fba7875672a..7d7ced8ee0b7 100644 --- a/library/kani/Cargo.toml +++ b/library/kani/Cargo.toml @@ -3,7 +3,7 @@ [package] name = "kani" -version = "0.53.0" +version = "0.54.0" edition = "2021" license = "MIT OR Apache-2.0" publish = false diff --git a/library/kani_core/Cargo.toml b/library/kani_core/Cargo.toml index 5388dcfb9427..8928992c3f16 100644 --- a/library/kani_core/Cargo.toml +++ b/library/kani_core/Cargo.toml @@ -3,7 +3,7 @@ [package] name = "kani_core" -version = "0.53.0" +version = "0.54.0" edition = "2021" license = "MIT OR Apache-2.0" publish = false diff --git a/library/kani_macros/Cargo.toml b/library/kani_macros/Cargo.toml index 42eb37a56584..475e2978df91 100644 --- a/library/kani_macros/Cargo.toml +++ b/library/kani_macros/Cargo.toml @@ -3,7 +3,7 @@ [package] name = "kani_macros" -version = "0.53.0" +version = "0.54.0" edition = "2021" license = "MIT OR Apache-2.0" publish = false diff --git a/library/std/Cargo.toml b/library/std/Cargo.toml index ae70767f6781..9f0d09b0d7bb 100644 --- a/library/std/Cargo.toml +++ b/library/std/Cargo.toml @@ -5,7 +5,7 @@ # Note: this package is intentionally named std to make sure the names of # standard library symbols are preserved name = "std" -version = "0.53.0" +version = "0.54.0" edition = "2021" license = "MIT OR Apache-2.0" publish = false diff --git a/tools/build-kani/Cargo.toml b/tools/build-kani/Cargo.toml index 7c8e6eef122a..cd2985e4ad68 100644 --- a/tools/build-kani/Cargo.toml +++ b/tools/build-kani/Cargo.toml @@ -3,7 +3,7 @@ [package] name = "build-kani" -version = "0.53.0" +version = "0.54.0" edition = "2021" description = "Builds Kani, Sysroot and release 
bundle." license = "MIT OR Apache-2.0" From bec5fd1a2298f7f455c559ddfc938fd7a24e9b4d Mon Sep 17 00:00:00 2001 From: Michael Tautschnig Date: Fri, 9 Aug 2024 16:37:57 +0200 Subject: [PATCH 2/7] Update CBMC build instructions for Amazon Linux 2 (#3431) The default compiler on Amazon Linux 2 is GCC 7, which is not sufficiently recent for building CBMC v6+. Install and use GCC 10, adjust warnings to cope with the flex version provided by Amazon Linux 2, and handle the non-default std::filesystem support. Furthermore, apply a workaround until https://github.com/diffblue/cbmc/issues/8357 has been fixed on the CBMC side. By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 and MIT licenses. --- scripts/setup/al2/install_cbmc.sh | 79 +++++++++++++++++++++++++++++-- scripts/setup/al2/install_deps.sh | 1 + 2 files changed, 77 insertions(+), 3 deletions(-) diff --git a/scripts/setup/al2/install_cbmc.sh b/scripts/setup/al2/install_cbmc.sh index 34abcc52db93..de4b5215e083 100755 --- a/scripts/setup/al2/install_cbmc.sh +++ b/scripts/setup/al2/install_cbmc.sh @@ -21,10 +21,83 @@ git clone \ pushd "${WORK_DIR}" -mkdir build -git submodule update --init +# apply workaround for https://github.com/diffblue/cbmc/issues/8357 until it is +# properly fixed in CBMC +cat > varargs.patch << "EOF" +--- a/src/ansi-c/library/stdio.c ++++ b/src/ansi-c/library/stdio.c +@@ -1135,7 +1135,7 @@ int vfscanf(FILE *restrict stream, const char *restrict format, va_list arg) -cmake3 -S . 
-Bbuild -DWITH_JBMC=OFF -Dsat_impl="minisat2;cadical" + (void)*format; + while((__CPROVER_size_t)__CPROVER_POINTER_OFFSET(*(void **)&arg) < +- __CPROVER_OBJECT_SIZE(arg)) ++ __CPROVER_OBJECT_SIZE(*(void **)&arg)) + { + void *a = va_arg(arg, void *); + __CPROVER_havoc_object(a); +@@ -1233,7 +1233,7 @@ int __stdio_common_vfscanf( + + (void)*format; + while((__CPROVER_size_t)__CPROVER_POINTER_OFFSET(*(void **)&args) < +- __CPROVER_OBJECT_SIZE(args)) ++ __CPROVER_OBJECT_SIZE(*(void **)&args)) + { + void *a = va_arg(args, void *); + __CPROVER_havoc_object(a); +@@ -1312,7 +1312,7 @@ __CPROVER_HIDE:; + (void)*s; + (void)*format; + while((__CPROVER_size_t)__CPROVER_POINTER_OFFSET(*(void **)&arg) < +- __CPROVER_OBJECT_SIZE(arg)) ++ __CPROVER_OBJECT_SIZE(*(void **)&arg)) + { + void *a = va_arg(arg, void *); + __CPROVER_havoc_object(a); +@@ -1388,7 +1388,7 @@ int __stdio_common_vsscanf( + (void)*s; + (void)*format; + while((__CPROVER_size_t)__CPROVER_POINTER_OFFSET(*(void **)&args) < +- __CPROVER_OBJECT_SIZE(args)) ++ __CPROVER_OBJECT_SIZE(*(void **)&args)) + { + void *a = va_arg(args, void *); + __CPROVER_havoc_object(a); +@@ -1774,12 +1774,12 @@ int vsnprintf(char *str, size_t size, const char *fmt, va_list ap) + (void)*fmt; + + while((__CPROVER_size_t)__CPROVER_POINTER_OFFSET(*(void **)&ap) < +- __CPROVER_OBJECT_SIZE(ap)) ++ __CPROVER_OBJECT_SIZE(*(void **)&ap)) + + { + (void)va_arg(ap, int); + __CPROVER_precondition( +- __CPROVER_POINTER_OBJECT(str) != __CPROVER_POINTER_OBJECT(ap), ++ __CPROVER_POINTER_OBJECT(str) != __CPROVER_POINTER_OBJECT(*(void **)&ap), + "vsnprintf object overlap"); + } + +@@ -1822,12 +1822,12 @@ int __builtin___vsnprintf_chk( + (void)*fmt; + + while((__CPROVER_size_t)__CPROVER_POINTER_OFFSET(*(void **)&ap) < +- __CPROVER_OBJECT_SIZE(ap)) ++ __CPROVER_OBJECT_SIZE(*(void **)&ap)) + + { + (void)va_arg(ap, int); + __CPROVER_precondition( +- __CPROVER_POINTER_OBJECT(str) != __CPROVER_POINTER_OBJECT(ap), ++ __CPROVER_POINTER_OBJECT(str) != 
__CPROVER_POINTER_OBJECT(*(void **)&ap), + "vsnprintf object overlap"); + } + +EOF + +cmake3 -S . -Bbuild -DWITH_JBMC=OFF -Dsat_impl="minisat2;cadical" \ + -DCMAKE_C_COMPILER=gcc10-cc -DCMAKE_CXX_COMPILER=gcc10-c++ \ + -DCMAKE_CXX_STANDARD_LIBRARIES=-lstdc++fs \ + -DCMAKE_CXX_FLAGS=-Wno-error=register cmake3 --build build -- -j$(nproc) sudo make -C build install diff --git a/scripts/setup/al2/install_deps.sh b/scripts/setup/al2/install_deps.sh index 0010aa58bab4..ce901ab8afa5 100755 --- a/scripts/setup/al2/install_deps.sh +++ b/scripts/setup/al2/install_deps.sh @@ -11,6 +11,7 @@ set -eu DEPS=( cmake cmake3 + gcc10-c++ git openssl-devel python3-pip From 6553afa4dc854478da1570517f522776ae0601e2 Mon Sep 17 00:00:00 2001 From: Artem Agvanian Date: Fri, 9 Aug 2024 11:47:40 -0700 Subject: [PATCH 3/7] Handle intrinsics systematically (#3422) Since compiler intrinsics are handled both in codegen and instrumentation, it would make sense to have a single source of truth with regard to which intrinsics we support and update it systematically. This PR introduces an `Intrinsic` enum, which allows to parse intrinsic name into one of the predefined variants we support or a catch-all `Unsupported` variant. By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 and MIT licenses. 
--- .../codegen/intrinsic.rs | 414 ++++----- kani-compiler/src/intrinsics.rs | 798 ++++++++++++++++++ .../points_to/points_to_analysis.rs | 411 +++++---- .../delayed_ub/initial_target_visitor.rs | 37 +- .../check_uninit/ptr_uninit/uninit_visitor.rs | 378 ++++----- kani-compiler/src/main.rs | 1 + 6 files changed, 1393 insertions(+), 646 deletions(-) create mode 100644 kani-compiler/src/intrinsics.rs diff --git a/kani-compiler/src/codegen_cprover_gotoc/codegen/intrinsic.rs b/kani-compiler/src/codegen_cprover_gotoc/codegen/intrinsic.rs index c4d5396f1fe8..421d79e943c4 100644 --- a/kani-compiler/src/codegen_cprover_gotoc/codegen/intrinsic.rs +++ b/kani-compiler/src/codegen_cprover_gotoc/codegen/intrinsic.rs @@ -5,6 +5,7 @@ use super::typ; use super::{bb_label, PropertyClass}; use crate::codegen_cprover_gotoc::codegen::ty_stable::pointee_type_stable; use crate::codegen_cprover_gotoc::{utils, GotocCtx}; +use crate::intrinsics::Intrinsic; use crate::unwrap_or_return_codegen_unimplemented_stmt; use cbmc::goto_program::{ ArithmeticOverflowResult, BinaryOperator, BuiltinFn, Expr, Location, Stmt, Type, @@ -114,7 +115,7 @@ impl<'tcx> GotocCtx<'tcx> { span: Span, ) -> Stmt { let intrinsic_name = instance.intrinsic_name().unwrap(); - let intrinsic = intrinsic_name.as_str(); + let intrinsic_str = intrinsic_name.as_str(); let loc = self.codegen_span_stable(span); debug!(?instance, "codegen_intrinsic"); debug!(?fargs, "codegen_intrinsic"); @@ -163,7 +164,7 @@ impl<'tcx> GotocCtx<'tcx> { let div_overflow_check = self.codegen_assert_assume( div_does_not_overflow, PropertyClass::ArithmeticOverflow, - format!("attempt to compute {} which would overflow", intrinsic).as_str(), + format!("attempt to compute {} which would overflow", intrinsic_str).as_str(), loc, ); let res = a.$f(b); @@ -257,7 +258,7 @@ impl<'tcx> GotocCtx<'tcx> { macro_rules! 
codegen_atomic_binop { ($op: ident) => {{ let loc = self.codegen_span_stable(span); - self.store_concurrent_construct(intrinsic, loc); + self.store_concurrent_construct(intrinsic_str, loc); let var1_ref = fargs.remove(0); let var1 = var1_ref.dereference(); let (tmp, decl_stmt) = @@ -280,7 +281,7 @@ impl<'tcx> GotocCtx<'tcx> { macro_rules! unstable_codegen { ($($tt:tt)*) => {{ let expr = self.codegen_unimplemented_expr( - &format!("'{}' intrinsic", intrinsic), + &format!("'{}' intrinsic", intrinsic_str), cbmc_ret_ty, loc, "https://github.com/model-checking/kani/issues/new/choose", @@ -289,342 +290,273 @@ impl<'tcx> GotocCtx<'tcx> { }}; } - if let Some(stripped) = intrinsic.strip_prefix("simd_shuffle") { - assert!(fargs.len() == 3, "`simd_shuffle` had unexpected arguments {fargs:?}"); - let n: u64 = self.simd_shuffle_length(stripped, farg_types, span); - return self.codegen_intrinsic_simd_shuffle(fargs, place, farg_types, ret_ty, n, span); - } + let intrinsic = Intrinsic::from_instance(&instance); match intrinsic { - "add_with_overflow" => { + Intrinsic::AddWithOverflow => { self.codegen_op_with_overflow(BinaryOperator::OverflowResultPlus, fargs, place, loc) } - "arith_offset" => self.codegen_offset(intrinsic, instance, fargs, place, loc), - "assert_inhabited" => self.codegen_assert_intrinsic(instance, intrinsic, span), - "assert_mem_uninitialized_valid" => { - self.codegen_assert_intrinsic(instance, intrinsic, span) + Intrinsic::ArithOffset => { + self.codegen_offset(intrinsic_str, instance, fargs, place, loc) + } + Intrinsic::AssertInhabited => { + self.codegen_assert_intrinsic(instance, intrinsic_str, span) + } + Intrinsic::AssertMemUninitializedValid => { + self.codegen_assert_intrinsic(instance, intrinsic_str, span) + } + Intrinsic::AssertZeroValid => { + self.codegen_assert_intrinsic(instance, intrinsic_str, span) } - "assert_zero_valid" => self.codegen_assert_intrinsic(instance, intrinsic, span), // https://doc.rust-lang.org/core/intrinsics/fn.assume.html // 
Informs the optimizer that a condition is always true. // If the condition is false, the behavior is undefined. - "assume" => self.codegen_assert_assume( + Intrinsic::Assume => self.codegen_assert_assume( fargs.remove(0).cast_to(Type::bool()), PropertyClass::Assume, "assumption failed", loc, ), - "atomic_and_seqcst" => codegen_atomic_binop!(bitand), - "atomic_and_acquire" => codegen_atomic_binop!(bitand), - "atomic_and_acqrel" => codegen_atomic_binop!(bitand), - "atomic_and_release" => codegen_atomic_binop!(bitand), - "atomic_and_relaxed" => codegen_atomic_binop!(bitand), - name if name.starts_with("atomic_cxchg") => { - self.codegen_atomic_cxchg(intrinsic, fargs, place, loc) + Intrinsic::AtomicAnd(_) => codegen_atomic_binop!(bitand), + Intrinsic::AtomicCxchg(_) | Intrinsic::AtomicCxchgWeak(_) => { + self.codegen_atomic_cxchg(intrinsic_str, fargs, place, loc) } - "atomic_fence_seqcst" => self.codegen_atomic_noop(intrinsic, loc), - "atomic_fence_acquire" => self.codegen_atomic_noop(intrinsic, loc), - "atomic_fence_acqrel" => self.codegen_atomic_noop(intrinsic, loc), - "atomic_fence_release" => self.codegen_atomic_noop(intrinsic, loc), - "atomic_load_seqcst" => self.codegen_atomic_load(intrinsic, fargs, place, loc), - "atomic_load_acquire" => self.codegen_atomic_load(intrinsic, fargs, place, loc), - "atomic_load_relaxed" => self.codegen_atomic_load(intrinsic, fargs, place, loc), - "atomic_load_unordered" => self.codegen_atomic_load(intrinsic, fargs, place, loc), - "atomic_max_seqcst" => codegen_atomic_binop!(max), - "atomic_max_acquire" => codegen_atomic_binop!(max), - "atomic_max_acqrel" => codegen_atomic_binop!(max), - "atomic_max_release" => codegen_atomic_binop!(max), - "atomic_max_relaxed" => codegen_atomic_binop!(max), - "atomic_min_seqcst" => codegen_atomic_binop!(min), - "atomic_min_acquire" => codegen_atomic_binop!(min), - "atomic_min_acqrel" => codegen_atomic_binop!(min), - "atomic_min_release" => codegen_atomic_binop!(min), - "atomic_min_relaxed" => 
codegen_atomic_binop!(min), - "atomic_nand_seqcst" => codegen_atomic_binop!(bitnand), - "atomic_nand_acquire" => codegen_atomic_binop!(bitnand), - "atomic_nand_acqrel" => codegen_atomic_binop!(bitnand), - "atomic_nand_release" => codegen_atomic_binop!(bitnand), - "atomic_nand_relaxed" => codegen_atomic_binop!(bitnand), - "atomic_or_seqcst" => codegen_atomic_binop!(bitor), - "atomic_or_acquire" => codegen_atomic_binop!(bitor), - "atomic_or_acqrel" => codegen_atomic_binop!(bitor), - "atomic_or_release" => codegen_atomic_binop!(bitor), - "atomic_or_relaxed" => codegen_atomic_binop!(bitor), - "atomic_singlethreadfence_seqcst" => self.codegen_atomic_noop(intrinsic, loc), - "atomic_singlethreadfence_acquire" => self.codegen_atomic_noop(intrinsic, loc), - "atomic_singlethreadfence_acqrel" => self.codegen_atomic_noop(intrinsic, loc), - "atomic_singlethreadfence_release" => self.codegen_atomic_noop(intrinsic, loc), - "atomic_store_seqcst" => self.codegen_atomic_store(intrinsic, fargs, place, loc), - "atomic_store_release" => self.codegen_atomic_store(intrinsic, fargs, place, loc), - "atomic_store_relaxed" => self.codegen_atomic_store(intrinsic, fargs, place, loc), - "atomic_store_unordered" => self.codegen_atomic_store(intrinsic, fargs, place, loc), - "atomic_umax_seqcst" => codegen_atomic_binop!(max), - "atomic_umax_acquire" => codegen_atomic_binop!(max), - "atomic_umax_acqrel" => codegen_atomic_binop!(max), - "atomic_umax_release" => codegen_atomic_binop!(max), - "atomic_umax_relaxed" => codegen_atomic_binop!(max), - "atomic_umin_seqcst" => codegen_atomic_binop!(min), - "atomic_umin_acquire" => codegen_atomic_binop!(min), - "atomic_umin_acqrel" => codegen_atomic_binop!(min), - "atomic_umin_release" => codegen_atomic_binop!(min), - "atomic_umin_relaxed" => codegen_atomic_binop!(min), - "atomic_xadd_seqcst" => codegen_atomic_binop!(plus), - "atomic_xadd_acquire" => codegen_atomic_binop!(plus), - "atomic_xadd_acqrel" => codegen_atomic_binop!(plus), - "atomic_xadd_release" => 
codegen_atomic_binop!(plus), - "atomic_xadd_relaxed" => codegen_atomic_binop!(plus), - "atomic_xchg_seqcst" => self.codegen_atomic_store(intrinsic, fargs, place, loc), - "atomic_xchg_acquire" => self.codegen_atomic_store(intrinsic, fargs, place, loc), - "atomic_xchg_acqrel" => self.codegen_atomic_store(intrinsic, fargs, place, loc), - "atomic_xchg_release" => self.codegen_atomic_store(intrinsic, fargs, place, loc), - "atomic_xchg_relaxed" => self.codegen_atomic_store(intrinsic, fargs, place, loc), - "atomic_xor_seqcst" => codegen_atomic_binop!(bitxor), - "atomic_xor_acquire" => codegen_atomic_binop!(bitxor), - "atomic_xor_acqrel" => codegen_atomic_binop!(bitxor), - "atomic_xor_release" => codegen_atomic_binop!(bitxor), - "atomic_xor_relaxed" => codegen_atomic_binop!(bitxor), - "atomic_xsub_seqcst" => codegen_atomic_binop!(sub), - "atomic_xsub_acquire" => codegen_atomic_binop!(sub), - "atomic_xsub_acqrel" => codegen_atomic_binop!(sub), - "atomic_xsub_release" => codegen_atomic_binop!(sub), - "atomic_xsub_relaxed" => codegen_atomic_binop!(sub), - "bitreverse" => { + + Intrinsic::AtomicFence(_) => self.codegen_atomic_noop(intrinsic_str, loc), + Intrinsic::AtomicLoad(_) => self.codegen_atomic_load(intrinsic_str, fargs, place, loc), + Intrinsic::AtomicMax(_) => codegen_atomic_binop!(max), + Intrinsic::AtomicMin(_) => codegen_atomic_binop!(min), + Intrinsic::AtomicNand(_) => codegen_atomic_binop!(bitnand), + Intrinsic::AtomicOr(_) => codegen_atomic_binop!(bitor), + Intrinsic::AtomicSingleThreadFence(_) => self.codegen_atomic_noop(intrinsic_str, loc), + Intrinsic::AtomicStore(_) => { + self.codegen_atomic_store(intrinsic_str, fargs, place, loc) + } + Intrinsic::AtomicUmax(_) => codegen_atomic_binop!(max), + Intrinsic::AtomicUmin(_) => codegen_atomic_binop!(min), + Intrinsic::AtomicXadd(_) => codegen_atomic_binop!(plus), + Intrinsic::AtomicXchg(_) => self.codegen_atomic_store(intrinsic_str, fargs, place, loc), + Intrinsic::AtomicXor(_) => codegen_atomic_binop!(bitxor), + 
Intrinsic::AtomicXsub(_) => codegen_atomic_binop!(sub), + Intrinsic::Bitreverse => { self.codegen_expr_to_place_stable(place, fargs.remove(0).bitreverse(), loc) } // black_box is an identity function that hints to the compiler // to be maximally pessimistic to limit optimizations - "black_box" => self.codegen_expr_to_place_stable(place, fargs.remove(0), loc), - "breakpoint" => Stmt::skip(loc), - "bswap" => self.codegen_expr_to_place_stable(place, fargs.remove(0).bswap(), loc), - "caller_location" => self.codegen_unimplemented_stmt( - intrinsic, - loc, - "https://github.com/model-checking/kani/issues/374", - ), - "catch_unwind" => self.codegen_unimplemented_stmt( - intrinsic, - loc, - "https://github.com/model-checking/kani/issues/267", - ), - "ceilf32" => codegen_simple_intrinsic!(Ceilf), - "ceilf64" => codegen_simple_intrinsic!(Ceil), - "compare_bytes" => self.codegen_compare_bytes(fargs, place, loc), - "copy" => self.codegen_copy(intrinsic, false, fargs, farg_types, Some(place), loc), - "copy_nonoverlapping" => unreachable!( - "Expected `core::intrinsics::unreachable` to be handled by `StatementKind::CopyNonOverlapping`" - ), - "copysignf32" => codegen_simple_intrinsic!(Copysignf), - "copysignf64" => codegen_simple_intrinsic!(Copysign), - "cosf32" => codegen_simple_intrinsic!(Cosf), - "cosf64" => codegen_simple_intrinsic!(Cos), - "ctlz" => codegen_count_intrinsic!(ctlz, true), - "ctlz_nonzero" => codegen_count_intrinsic!(ctlz, false), - "ctpop" => self.codegen_ctpop(place, span, fargs.remove(0), farg_types[0]), - "cttz" => codegen_count_intrinsic!(cttz, true), - "cttz_nonzero" => codegen_count_intrinsic!(cttz, false), - "discriminant_value" => { + Intrinsic::BlackBox => self.codegen_expr_to_place_stable(place, fargs.remove(0), loc), + Intrinsic::Breakpoint => Stmt::skip(loc), + Intrinsic::Bswap => { + self.codegen_expr_to_place_stable(place, fargs.remove(0).bswap(), loc) + } + Intrinsic::CeilF32 => codegen_simple_intrinsic!(Ceilf), + Intrinsic::CeilF64 => 
codegen_simple_intrinsic!(Ceil), + Intrinsic::CompareBytes => self.codegen_compare_bytes(fargs, place, loc), + Intrinsic::Copy => { + self.codegen_copy(intrinsic_str, false, fargs, farg_types, Some(place), loc) + } + Intrinsic::CopySignF32 => codegen_simple_intrinsic!(Copysignf), + Intrinsic::CopySignF64 => codegen_simple_intrinsic!(Copysign), + Intrinsic::CosF32 => codegen_simple_intrinsic!(Cosf), + Intrinsic::CosF64 => codegen_simple_intrinsic!(Cos), + Intrinsic::Ctlz => codegen_count_intrinsic!(ctlz, true), + Intrinsic::CtlzNonZero => codegen_count_intrinsic!(ctlz, false), + Intrinsic::Ctpop => self.codegen_ctpop(place, span, fargs.remove(0), farg_types[0]), + Intrinsic::Cttz => codegen_count_intrinsic!(cttz, true), + Intrinsic::CttzNonZero => codegen_count_intrinsic!(cttz, false), + Intrinsic::DiscriminantValue => { let sig = instance.ty().kind().fn_sig().unwrap().skip_binder(); let ty = pointee_type_stable(sig.inputs()[0]).unwrap(); let e = self.codegen_get_discriminant(fargs.remove(0).dereference(), ty, ret_ty); self.codegen_expr_to_place_stable(place, e, loc) } - "exact_div" => self.codegen_exact_div(fargs, place, loc), - "exp2f32" => codegen_simple_intrinsic!(Exp2f), - "exp2f64" => codegen_simple_intrinsic!(Exp2), - "expf32" => codegen_simple_intrinsic!(Expf), - "expf64" => codegen_simple_intrinsic!(Exp), - "fabsf32" => codegen_simple_intrinsic!(Fabsf), - "fabsf64" => codegen_simple_intrinsic!(Fabs), - "fadd_fast" => { + Intrinsic::ExactDiv => self.codegen_exact_div(fargs, place, loc), + Intrinsic::Exp2F32 => codegen_simple_intrinsic!(Exp2f), + Intrinsic::Exp2F64 => codegen_simple_intrinsic!(Exp2), + Intrinsic::ExpF32 => codegen_simple_intrinsic!(Expf), + Intrinsic::ExpF64 => codegen_simple_intrinsic!(Exp), + Intrinsic::FabsF32 => codegen_simple_intrinsic!(Fabsf), + Intrinsic::FabsF64 => codegen_simple_intrinsic!(Fabs), + Intrinsic::FaddFast => { let fargs_clone = fargs.clone(); let binop_stmt = codegen_intrinsic_binop!(plus); - 
self.add_finite_args_checks(intrinsic, fargs_clone, binop_stmt, span) + self.add_finite_args_checks(intrinsic_str, fargs_clone, binop_stmt, span) } - "fdiv_fast" => { + Intrinsic::FdivFast => { let fargs_clone = fargs.clone(); let binop_stmt = codegen_intrinsic_binop!(div); - self.add_finite_args_checks(intrinsic, fargs_clone, binop_stmt, span) + self.add_finite_args_checks(intrinsic_str, fargs_clone, binop_stmt, span) } - "floorf32" => codegen_simple_intrinsic!(Floorf), - "floorf64" => codegen_simple_intrinsic!(Floor), - "fmaf32" => codegen_simple_intrinsic!(Fmaf), - "fmaf64" => codegen_simple_intrinsic!(Fma), - "fmul_fast" => { + Intrinsic::FloorF32 => codegen_simple_intrinsic!(Floorf), + Intrinsic::FloorF64 => codegen_simple_intrinsic!(Floor), + Intrinsic::FmafF32 => codegen_simple_intrinsic!(Fmaf), + Intrinsic::FmafF64 => codegen_simple_intrinsic!(Fma), + Intrinsic::FmulFast => { let fargs_clone = fargs.clone(); let binop_stmt = codegen_intrinsic_binop!(mul); - self.add_finite_args_checks(intrinsic, fargs_clone, binop_stmt, span) + self.add_finite_args_checks(intrinsic_str, fargs_clone, binop_stmt, span) } - "forget" => Stmt::skip(loc), - "fsub_fast" => { + Intrinsic::Forget => Stmt::skip(loc), + Intrinsic::FsubFast => { let fargs_clone = fargs.clone(); let binop_stmt = codegen_intrinsic_binop!(sub); - self.add_finite_args_checks(intrinsic, fargs_clone, binop_stmt, span) + self.add_finite_args_checks(intrinsic_str, fargs_clone, binop_stmt, span) } - "is_val_statically_known" => { + Intrinsic::IsValStaticallyKnown => { // Returning false is sound according do this intrinsic's documentation: // https://doc.rust-lang.org/nightly/std/intrinsics/fn.is_val_statically_known.html self.codegen_expr_to_place_stable(place, Expr::c_false(), loc) } - "likely" => self.codegen_expr_to_place_stable(place, fargs.remove(0), loc), - "log10f32" => codegen_simple_intrinsic!(Log10f), - "log10f64" => codegen_simple_intrinsic!(Log10), - "log2f32" => codegen_simple_intrinsic!(Log2f), - 
"log2f64" => codegen_simple_intrinsic!(Log2), - "logf32" => codegen_simple_intrinsic!(Logf), - "logf64" => codegen_simple_intrinsic!(Log), - "maxnumf32" => codegen_simple_intrinsic!(Fmaxf), - "maxnumf64" => codegen_simple_intrinsic!(Fmax), - "min_align_of" => codegen_intrinsic_const!(), - "min_align_of_val" => codegen_size_align!(align), - "minnumf32" => codegen_simple_intrinsic!(Fminf), - "minnumf64" => codegen_simple_intrinsic!(Fmin), - "mul_with_overflow" => { + Intrinsic::Likely => self.codegen_expr_to_place_stable(place, fargs.remove(0), loc), + Intrinsic::Log10F32 => codegen_simple_intrinsic!(Log10f), + Intrinsic::Log10F64 => codegen_simple_intrinsic!(Log10), + Intrinsic::Log2F32 => codegen_simple_intrinsic!(Log2f), + Intrinsic::Log2F64 => codegen_simple_intrinsic!(Log2), + Intrinsic::LogF32 => codegen_simple_intrinsic!(Logf), + Intrinsic::LogF64 => codegen_simple_intrinsic!(Log), + Intrinsic::MaxNumF32 => codegen_simple_intrinsic!(Fmaxf), + Intrinsic::MaxNumF64 => codegen_simple_intrinsic!(Fmax), + Intrinsic::MinAlignOf => codegen_intrinsic_const!(), + Intrinsic::MinAlignOfVal => codegen_size_align!(align), + Intrinsic::MinNumF32 => codegen_simple_intrinsic!(Fminf), + Intrinsic::MinNumF64 => codegen_simple_intrinsic!(Fmin), + Intrinsic::MulWithOverflow => { self.codegen_op_with_overflow(BinaryOperator::OverflowResultMult, fargs, place, loc) } - "nearbyintf32" => codegen_simple_intrinsic!(Nearbyintf), - "nearbyintf64" => codegen_simple_intrinsic!(Nearbyint), - "needs_drop" => codegen_intrinsic_const!(), - // As of https://github.com/rust-lang/rust/pull/110822 the `offset` intrinsic is lowered to `mir::BinOp::Offset` - "offset" => unreachable!( - "Expected `core::intrinsics::unreachable` to be handled by `BinOp::OffSet`" - ), - "powf32" => codegen_simple_intrinsic!(Powf), - "powf64" => codegen_simple_intrinsic!(Pow), - "powif32" => codegen_simple_intrinsic!(Powif), - "powif64" => codegen_simple_intrinsic!(Powi), - "pref_align_of" => codegen_intrinsic_const!(), 
- "ptr_guaranteed_cmp" => self.codegen_ptr_guaranteed_cmp(fargs, place, loc), - "ptr_offset_from" => self.codegen_ptr_offset_from(fargs, place, loc), - "ptr_offset_from_unsigned" => self.codegen_ptr_offset_from_unsigned(fargs, place, loc), - "raw_eq" => self.codegen_intrinsic_raw_eq(instance, fargs, place, loc), - "retag_box_to_raw" => self.codegen_retag_box_to_raw(fargs, place, loc), - "rintf32" => codegen_simple_intrinsic!(Rintf), - "rintf64" => codegen_simple_intrinsic!(Rint), - "rotate_left" => codegen_intrinsic_binop!(rol), - "rotate_right" => codegen_intrinsic_binop!(ror), - "roundf32" => codegen_simple_intrinsic!(Roundf), - "roundf64" => codegen_simple_intrinsic!(Round), - "saturating_add" => codegen_intrinsic_binop_with_mm!(saturating_add), - "saturating_sub" => codegen_intrinsic_binop_with_mm!(saturating_sub), - "sinf32" => codegen_simple_intrinsic!(Sinf), - "sinf64" => codegen_simple_intrinsic!(Sin), - "simd_add" => self.codegen_simd_op_with_overflow( + Intrinsic::NearbyIntF32 => codegen_simple_intrinsic!(Nearbyintf), + Intrinsic::NearbyIntF64 => codegen_simple_intrinsic!(Nearbyint), + Intrinsic::NeedsDrop => codegen_intrinsic_const!(), + Intrinsic::PowF32 => codegen_simple_intrinsic!(Powf), + Intrinsic::PowF64 => codegen_simple_intrinsic!(Pow), + Intrinsic::PowIF32 => codegen_simple_intrinsic!(Powif), + Intrinsic::PowIF64 => codegen_simple_intrinsic!(Powi), + Intrinsic::PrefAlignOf => codegen_intrinsic_const!(), + Intrinsic::PtrGuaranteedCmp => self.codegen_ptr_guaranteed_cmp(fargs, place, loc), + Intrinsic::PtrOffsetFrom => self.codegen_ptr_offset_from(fargs, place, loc), + Intrinsic::PtrOffsetFromUnsigned => { + self.codegen_ptr_offset_from_unsigned(fargs, place, loc) + } + Intrinsic::RawEq => self.codegen_intrinsic_raw_eq(instance, fargs, place, loc), + Intrinsic::RetagBoxToRaw => self.codegen_retag_box_to_raw(fargs, place, loc), + Intrinsic::RintF32 => codegen_simple_intrinsic!(Rintf), + Intrinsic::RintF64 => codegen_simple_intrinsic!(Rint), + 
Intrinsic::RotateLeft => codegen_intrinsic_binop!(rol), + Intrinsic::RotateRight => codegen_intrinsic_binop!(ror), + Intrinsic::RoundF32 => codegen_simple_intrinsic!(Roundf), + Intrinsic::RoundF64 => codegen_simple_intrinsic!(Round), + Intrinsic::SaturatingAdd => codegen_intrinsic_binop_with_mm!(saturating_add), + Intrinsic::SaturatingSub => codegen_intrinsic_binop_with_mm!(saturating_sub), + Intrinsic::SinF32 => codegen_simple_intrinsic!(Sinf), + Intrinsic::SinF64 => codegen_simple_intrinsic!(Sin), + Intrinsic::SimdAdd => self.codegen_simd_op_with_overflow( Expr::plus, Expr::add_overflow_p, fargs, - intrinsic, + intrinsic_str, place, loc, ), - "simd_and" => codegen_intrinsic_binop!(bitand), + Intrinsic::SimdAnd => codegen_intrinsic_binop!(bitand), // TODO: `simd_rem` doesn't check for overflow cases for floating point operands. // - "simd_div" | "simd_rem" => { - self.codegen_simd_div_with_overflow(fargs, intrinsic, place, loc) + Intrinsic::SimdDiv | Intrinsic::SimdRem => { + self.codegen_simd_div_with_overflow(fargs, intrinsic_str, place, loc) } - "simd_eq" => { + Intrinsic::SimdEq => { self.codegen_simd_cmp(Expr::vector_eq, fargs, place, span, farg_types, ret_ty) } - "simd_extract" => { + Intrinsic::SimdExtract => { self.codegen_intrinsic_simd_extract(fargs, place, farg_types, ret_ty, span) } - "simd_ge" => { + Intrinsic::SimdGe => { self.codegen_simd_cmp(Expr::vector_ge, fargs, place, span, farg_types, ret_ty) } - "simd_gt" => { + Intrinsic::SimdGt => { self.codegen_simd_cmp(Expr::vector_gt, fargs, place, span, farg_types, ret_ty) } - "simd_insert" => { + Intrinsic::SimdInsert => { self.codegen_intrinsic_simd_insert(fargs, place, cbmc_ret_ty, farg_types, span, loc) } - "simd_le" => { + Intrinsic::SimdLe => { self.codegen_simd_cmp(Expr::vector_le, fargs, place, span, farg_types, ret_ty) } - "simd_lt" => { + Intrinsic::SimdLt => { self.codegen_simd_cmp(Expr::vector_lt, fargs, place, span, farg_types, ret_ty) } - "simd_mul" => self.codegen_simd_op_with_overflow( + 
Intrinsic::SimdMul => self.codegen_simd_op_with_overflow( Expr::mul, Expr::mul_overflow_p, fargs, - intrinsic, + intrinsic_str, place, loc, ), - "simd_ne" => { + Intrinsic::SimdNe => { self.codegen_simd_cmp(Expr::vector_neq, fargs, place, span, farg_types, ret_ty) } - "simd_or" => codegen_intrinsic_binop!(bitor), - "simd_shl" | "simd_shr" => { - self.codegen_simd_shift_with_distance_check(fargs, intrinsic, place, loc) + Intrinsic::SimdOr => codegen_intrinsic_binop!(bitor), + Intrinsic::SimdShl | Intrinsic::SimdShr => { + self.codegen_simd_shift_with_distance_check(fargs, intrinsic_str, place, loc) + } + Intrinsic::SimdShuffle(stripped) => { + let n: u64 = self.simd_shuffle_length(stripped.as_str(), farg_types, span); + self.codegen_intrinsic_simd_shuffle(fargs, place, farg_types, ret_ty, n, span) } - // "simd_shuffle#" => handled in an `if` preceding this match - "simd_sub" => self.codegen_simd_op_with_overflow( + Intrinsic::SimdSub => self.codegen_simd_op_with_overflow( Expr::sub, Expr::sub_overflow_p, fargs, - intrinsic, + intrinsic_str, place, loc, ), - "simd_xor" => codegen_intrinsic_binop!(bitxor), - "size_of" => unreachable!(), - "size_of_val" => codegen_size_align!(size), - "sqrtf32" => codegen_simple_intrinsic!(Sqrtf), - "sqrtf64" => codegen_simple_intrinsic!(Sqrt), - "sub_with_overflow" => self.codegen_op_with_overflow( + Intrinsic::SimdXor => codegen_intrinsic_binop!(bitxor), + Intrinsic::SizeOfVal => codegen_size_align!(size), + Intrinsic::SqrtF32 => codegen_simple_intrinsic!(Sqrtf), + Intrinsic::SqrtF64 => codegen_simple_intrinsic!(Sqrt), + Intrinsic::SubWithOverflow => self.codegen_op_with_overflow( BinaryOperator::OverflowResultMinus, fargs, place, loc, ), - "transmute" => self.codegen_intrinsic_transmute(fargs, ret_ty, place, loc), - "truncf32" => codegen_simple_intrinsic!(Truncf), - "truncf64" => codegen_simple_intrinsic!(Trunc), - "type_id" => codegen_intrinsic_const!(), - "type_name" => codegen_intrinsic_const!(), - "typed_swap" => 
self.codegen_swap(fargs, farg_types, loc), - "unaligned_volatile_load" => { + Intrinsic::Transmute => self.codegen_intrinsic_transmute(fargs, ret_ty, place, loc), + Intrinsic::TruncF32 => codegen_simple_intrinsic!(Truncf), + Intrinsic::TruncF64 => codegen_simple_intrinsic!(Trunc), + Intrinsic::TypeId => codegen_intrinsic_const!(), + Intrinsic::TypeName => codegen_intrinsic_const!(), + Intrinsic::TypedSwap => self.codegen_swap(fargs, farg_types, loc), + Intrinsic::UnalignedVolatileLoad => { unstable_codegen!(self.codegen_expr_to_place_stable( place, fargs.remove(0).dereference(), loc )) } - "unchecked_add" | "unchecked_mul" | "unchecked_shl" | "unchecked_shr" - | "unchecked_sub" => { - unreachable!("Expected intrinsic `{intrinsic}` to be lowered before codegen") - } - "unchecked_div" => codegen_op_with_div_overflow_check!(div), - "unchecked_rem" => codegen_op_with_div_overflow_check!(rem), - "unlikely" => self.codegen_expr_to_place_stable(place, fargs.remove(0), loc), - "unreachable" => unreachable!( - "Expected `std::intrinsics::unreachable` to be handled by `TerminatorKind::Unreachable`" - ), - "volatile_copy_memory" => unstable_codegen!(codegen_intrinsic_copy!(Memmove)), - "volatile_copy_nonoverlapping_memory" => { + Intrinsic::UncheckedDiv => codegen_op_with_div_overflow_check!(div), + Intrinsic::UncheckedRem => codegen_op_with_div_overflow_check!(rem), + Intrinsic::Unlikely => self.codegen_expr_to_place_stable(place, fargs.remove(0), loc), + Intrinsic::VolatileCopyMemory => unstable_codegen!(codegen_intrinsic_copy!(Memmove)), + Intrinsic::VolatileCopyNonOverlappingMemory => { unstable_codegen!(codegen_intrinsic_copy!(Memcpy)) } - "volatile_load" => self.codegen_volatile_load(fargs, farg_types, place, loc), - "volatile_store" => { + Intrinsic::VolatileLoad => self.codegen_volatile_load(fargs, farg_types, place, loc), + Intrinsic::VolatileStore => { assert!(self.place_ty_stable(place).kind().is_unit()); self.codegen_volatile_store(fargs, farg_types, loc) } - 
"vtable_size" => self.vtable_info(VTableInfo::Size, fargs, place, loc), - "vtable_align" => self.vtable_info(VTableInfo::Align, fargs, place, loc), - "wrapping_add" => codegen_wrapping_op!(plus), - "wrapping_mul" => codegen_wrapping_op!(mul), - "wrapping_sub" => codegen_wrapping_op!(sub), - "write_bytes" => { + Intrinsic::VtableSize => self.vtable_info(VTableInfo::Size, fargs, place, loc), + Intrinsic::VtableAlign => self.vtable_info(VTableInfo::Align, fargs, place, loc), + Intrinsic::WrappingAdd => codegen_wrapping_op!(plus), + Intrinsic::WrappingMul => codegen_wrapping_op!(mul), + Intrinsic::WrappingSub => codegen_wrapping_op!(sub), + Intrinsic::WriteBytes => { assert!(self.place_ty_stable(place).kind().is_unit()); self.codegen_write_bytes(fargs, farg_types, loc) } // Unimplemented - _ => self.codegen_unimplemented_stmt( - intrinsic, - loc, - "https://github.com/model-checking/kani/issues/new/choose", - ), + Intrinsic::Unimplemented { name, issue_link } => { + self.codegen_unimplemented_stmt(&name, loc, &issue_link) + } } } diff --git a/kani-compiler/src/intrinsics.rs b/kani-compiler/src/intrinsics.rs new file mode 100644 index 000000000000..9485d8525410 --- /dev/null +++ b/kani-compiler/src/intrinsics.rs @@ -0,0 +1,798 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT + +//! Single source of truth about which intrinsics we support. + +use stable_mir::{ + mir::{mono::Instance, Mutability}, + ty::{FloatTy, IntTy, RigidTy, TyKind, UintTy}, +}; + +// Enumeration of all intrinsics we support right now, with the last option being a catch-all. This +// way, adding an intrinsic would highlight all places where they are used. 
+#[allow(unused)] +#[derive(Clone, Debug)] +pub enum Intrinsic { + AddWithOverflow, + ArithOffset, + AssertInhabited, + AssertMemUninitializedValid, + AssertZeroValid, + Assume, + AtomicAnd(String), + AtomicCxchg(String), + AtomicCxchgWeak(String), + AtomicFence(String), + AtomicLoad(String), + AtomicMax(String), + AtomicMin(String), + AtomicNand(String), + AtomicOr(String), + AtomicSingleThreadFence(String), + AtomicStore(String), + AtomicUmax(String), + AtomicUmin(String), + AtomicXadd(String), + AtomicXchg(String), + AtomicXor(String), + AtomicXsub(String), + Bitreverse, + BlackBox, + Breakpoint, + Bswap, + CeilF32, + CeilF64, + CompareBytes, + Copy, + CopySignF32, + CopySignF64, + CosF32, + CosF64, + Ctlz, + CtlzNonZero, + Ctpop, + Cttz, + CttzNonZero, + DiscriminantValue, + ExactDiv, + Exp2F32, + Exp2F64, + ExpF32, + ExpF64, + FabsF32, + FabsF64, + FaddFast, + FdivFast, + FloorF32, + FloorF64, + FmafF32, + FmafF64, + FmulFast, + Forget, + FsubFast, + IsValStaticallyKnown, + Likely, + Log10F32, + Log10F64, + Log2F32, + Log2F64, + LogF32, + LogF64, + MaxNumF32, + MaxNumF64, + MinAlignOf, + MinAlignOfVal, + MinNumF32, + MinNumF64, + MulWithOverflow, + NearbyIntF32, + NearbyIntF64, + NeedsDrop, + PowF32, + PowF64, + PowIF32, + PowIF64, + PrefAlignOf, + PtrGuaranteedCmp, + PtrOffsetFrom, + PtrOffsetFromUnsigned, + RawEq, + RetagBoxToRaw, + RintF32, + RintF64, + RotateLeft, + RotateRight, + RoundF32, + RoundF64, + SaturatingAdd, + SaturatingSub, + SinF32, + SinF64, + SimdAdd, + SimdAnd, + SimdDiv, + SimdRem, + SimdEq, + SimdExtract, + SimdGe, + SimdGt, + SimdInsert, + SimdLe, + SimdLt, + SimdMul, + SimdNe, + SimdOr, + SimdShl, + SimdShr, + SimdShuffle(String), + SimdSub, + SimdXor, + SizeOfVal, + SqrtF32, + SqrtF64, + SubWithOverflow, + Transmute, + TruncF32, + TruncF64, + TypeId, + TypeName, + TypedSwap, + UnalignedVolatileLoad, + UncheckedDiv, + UncheckedRem, + Unlikely, + VolatileCopyMemory, + VolatileCopyNonOverlappingMemory, + VolatileLoad, + VolatileStore, + 
VtableSize, + VtableAlign, + WrappingAdd, + WrappingMul, + WrappingSub, + WriteBytes, + Unimplemented { name: String, issue_link: String }, +} + +/// Assert that top-level types of a function signature match the given patterns. +macro_rules! assert_sig_matches { + ($sig:expr, $($input_type:pat),* => $output_type:pat) => { + let inputs = $sig.inputs(); + let output = $sig.output(); + #[allow(unused_mut)] + let mut index = 0; + $( + #[allow(unused_assignments)] + { + assert!(matches!(inputs[index].kind(), TyKind::RigidTy($input_type))); + index += 1; + } + )* + assert!(inputs.len() == index); + assert!(matches!(output.kind(), TyKind::RigidTy($output_type))); + } +} + +impl Intrinsic { + /// Create an intrinsic enum from a given intrinsic instance, shallowly validating the argument types. + pub fn from_instance(intrinsic_instance: &Instance) -> Self { + let intrinsic_str = intrinsic_instance.intrinsic_name().unwrap(); + let sig = intrinsic_instance.ty().kind().fn_sig().unwrap().skip_binder(); + match intrinsic_str.as_str() { + "add_with_overflow" => { + assert_sig_matches!(sig, _, _ => RigidTy::Tuple(_)); + Self::AddWithOverflow + } + "arith_offset" => { + assert_sig_matches!(sig, + RigidTy::RawPtr(_, Mutability::Not), + RigidTy::Int(IntTy::Isize) + => RigidTy::RawPtr(_, Mutability::Not)); + Self::ArithOffset + } + "assert_inhabited" => { + assert_sig_matches!(sig, => RigidTy::Tuple(_)); + Self::AssertInhabited + } + "assert_mem_uninitialized_valid" => { + assert_sig_matches!(sig, => RigidTy::Tuple(_)); + Self::AssertMemUninitializedValid + } + "assert_zero_valid" => { + assert_sig_matches!(sig, => RigidTy::Tuple(_)); + Self::AssertZeroValid + } + "assume" => { + assert_sig_matches!(sig, RigidTy::Bool => RigidTy::Tuple(_)); + Self::Assume + } + "bitreverse" => { + assert_sig_matches!(sig, _ => _); + Self::Bitreverse + } + "black_box" => { + assert_sig_matches!(sig, _ => _); + Self::BlackBox + } + "breakpoint" => { + assert_sig_matches!(sig, => RigidTy::Tuple(_)); + 
Self::Breakpoint + } + "bswap" => { + assert_sig_matches!(sig, _ => _); + Self::Bswap + } + "caller_location" => { + assert_sig_matches!(sig, => RigidTy::Ref(_, _, Mutability::Not)); + Self::Unimplemented { + name: intrinsic_str, + issue_link: "https://github.com/model-checking/kani/issues/374".into(), + } + } + "catch_unwind" => { + assert_sig_matches!(sig, RigidTy::FnPtr(_), RigidTy::RawPtr(_, Mutability::Mut), RigidTy::FnPtr(_) => RigidTy::Int(IntTy::I32)); + Self::Unimplemented { + name: intrinsic_str, + issue_link: "https://github.com/model-checking/kani/issues/267".into(), + } + } + "compare_bytes" => { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Not), RigidTy::RawPtr(_, Mutability::Not), RigidTy::Uint(UintTy::Usize) => RigidTy::Int(IntTy::I32)); + Self::CompareBytes + } + "copy" => { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Not), RigidTy::RawPtr(_, Mutability::Mut), RigidTy::Uint(UintTy::Usize) => RigidTy::Tuple(_)); + Self::Copy + } + "copy_nonoverlapping" => unreachable!( + "Expected `core::intrinsics::unreachable` to be handled by `StatementKind::CopyNonOverlapping`" + ), + "ctlz" => { + assert_sig_matches!(sig, _ => RigidTy::Uint(UintTy::U32)); + Self::Ctlz + } + "ctlz_nonzero" => { + assert_sig_matches!(sig, _ => RigidTy::Uint(UintTy::U32)); + Self::CtlzNonZero + } + "ctpop" => { + assert_sig_matches!(sig, _ => RigidTy::Uint(UintTy::U32)); + Self::Ctpop + } + "cttz" => { + assert_sig_matches!(sig, _ => RigidTy::Uint(UintTy::U32)); + Self::Cttz + } + "cttz_nonzero" => { + assert_sig_matches!(sig, _ => RigidTy::Uint(UintTy::U32)); + Self::CttzNonZero + } + "discriminant_value" => { + assert_sig_matches!(sig, RigidTy::Ref(_, _, Mutability::Not) => _); + Self::DiscriminantValue + } + "exact_div" => { + assert_sig_matches!(sig, _, _ => _); + Self::ExactDiv + } + "fadd_fast" => { + assert_sig_matches!(sig, _, _ => _); + Self::FaddFast + } + "fdiv_fast" => { + assert_sig_matches!(sig, _, _ => _); + Self::FdivFast + } + 
"fmul_fast" => { + assert_sig_matches!(sig, _, _ => _); + Self::FmulFast + } + "forget" => { + assert_sig_matches!(sig, _ => RigidTy::Tuple(_)); + Self::Forget + } + "fsub_fast" => { + assert_sig_matches!(sig, _, _ => _); + Self::FsubFast + } + "is_val_statically_known" => { + assert_sig_matches!(sig, _ => RigidTy::Bool); + Self::IsValStaticallyKnown + } + "likely" => { + assert_sig_matches!(sig, RigidTy::Bool => RigidTy::Bool); + Self::Likely + } + "min_align_of" => { + assert_sig_matches!(sig, => RigidTy::Uint(UintTy::Usize)); + Self::MinAlignOf + } + "min_align_of_val" => { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Not) => RigidTy::Uint(UintTy::Usize)); + Self::MinAlignOfVal + } + "mul_with_overflow" => { + assert_sig_matches!(sig, _, _ => RigidTy::Tuple(_)); + Self::MulWithOverflow + } + "needs_drop" => { + assert_sig_matches!(sig, => RigidTy::Bool); + Self::NeedsDrop + } + // As of https://github.com/rust-lang/rust/pull/110822 the `offset` intrinsic is lowered to `mir::BinOp::Offset` + "offset" => unreachable!( + "Expected `core::intrinsics::unreachable` to be handled by `BinOp::OffSet`" + ), + "pref_align_of" => { + assert_sig_matches!(sig, => RigidTy::Uint(UintTy::Usize)); + Self::PrefAlignOf + } + "ptr_guaranteed_cmp" => { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Not), RigidTy::RawPtr(_, Mutability::Not) => RigidTy::Uint(UintTy::U8)); + Self::PtrGuaranteedCmp + } + "ptr_offset_from" => { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Not), RigidTy::RawPtr(_, Mutability::Not) => RigidTy::Int(IntTy::Isize)); + Self::PtrOffsetFrom + } + "ptr_offset_from_unsigned" => { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Not), RigidTy::RawPtr(_, Mutability::Not) => RigidTy::Uint(UintTy::Usize)); + Self::PtrOffsetFromUnsigned + } + "raw_eq" => { + assert_sig_matches!(sig, RigidTy::Ref(_, _, Mutability::Not), RigidTy::Ref(_, _, Mutability::Not) => RigidTy::Bool); + Self::RawEq + } + "rotate_left" => { + 
assert_sig_matches!(sig, _, RigidTy::Uint(UintTy::U32) => _); + Self::RotateLeft + } + "rotate_right" => { + assert_sig_matches!(sig, _, RigidTy::Uint(UintTy::U32) => _); + Self::RotateRight + } + "saturating_add" => { + assert_sig_matches!(sig, _, _ => _); + Self::SaturatingAdd + } + "saturating_sub" => { + assert_sig_matches!(sig, _, _ => _); + Self::SaturatingSub + } + "size_of" => unreachable!(), + "size_of_val" => { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Not) => RigidTy::Uint(UintTy::Usize)); + Self::SizeOfVal + } + "sub_with_overflow" => { + assert_sig_matches!(sig, _, _ => RigidTy::Tuple(_)); + Self::SubWithOverflow + } + "transmute" => { + assert_sig_matches!(sig, _ => _); + Self::Transmute + } + "type_id" => { + assert_sig_matches!(sig, => RigidTy::Uint(UintTy::U128)); + Self::TypeId + } + "type_name" => { + assert_sig_matches!(sig, => RigidTy::Ref(_, _, Mutability::Not)); + Self::TypeName + } + "typed_swap" => { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Mut), RigidTy::RawPtr(_, Mutability::Mut) => RigidTy::Tuple(_)); + Self::TypedSwap + } + "unaligned_volatile_load" => { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Not) => _); + Self::UnalignedVolatileLoad + } + "unchecked_add" | "unchecked_mul" | "unchecked_shl" | "unchecked_shr" + | "unchecked_sub" => { + unreachable!("Expected intrinsic `{intrinsic_str}` to be lowered before codegen") + } + "unchecked_div" => { + assert_sig_matches!(sig, _, _ => _); + Self::UncheckedDiv + } + "unchecked_rem" => { + assert_sig_matches!(sig, _, _ => _); + Self::UncheckedRem + } + "unlikely" => { + assert_sig_matches!(sig, RigidTy::Bool => RigidTy::Bool); + Self::Unlikely + } + "unreachable" => unreachable!( + "Expected `std::intrinsics::unreachable` to be handled by `TerminatorKind::Unreachable`" + ), + "volatile_copy_memory" => { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Mut), RigidTy::RawPtr(_, Mutability::Not), RigidTy::Uint(UintTy::Usize) => 
RigidTy::Tuple(_)); + Self::VolatileCopyMemory + } + "volatile_copy_nonoverlapping_memory" => { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Mut), RigidTy::RawPtr(_, Mutability::Not), RigidTy::Uint(UintTy::Usize) => RigidTy::Tuple(_)); + Self::VolatileCopyNonOverlappingMemory + } + "volatile_load" => { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Not) => _); + Self::VolatileLoad + } + "volatile_store" => { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Mut), _ => RigidTy::Tuple(_)); + Self::VolatileStore + } + "vtable_size" => { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Not) => RigidTy::Uint(UintTy::Usize)); + Self::VtableSize + } + "vtable_align" => { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Not) => RigidTy::Uint(UintTy::Usize)); + Self::VtableAlign + } + "wrapping_add" => { + assert_sig_matches!(sig, _, _ => _); + Self::WrappingAdd + } + "wrapping_mul" => { + assert_sig_matches!(sig, _, _ => _); + Self::WrappingMul + } + "wrapping_sub" => { + assert_sig_matches!(sig, _, _ => _); + Self::WrappingSub + } + "write_bytes" => { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Mut), RigidTy::Uint(UintTy::U8), RigidTy::Uint(UintTy::Usize) => RigidTy::Tuple(_)); + Self::WriteBytes + } + _ => try_match_atomic(intrinsic_instance) + .or_else(|| try_match_simd(intrinsic_instance)) + .or_else(|| try_match_f32(intrinsic_instance)) + .or_else(|| try_match_f64(intrinsic_instance)) + .unwrap_or(Self::Unimplemented { + name: intrinsic_str, + issue_link: "https://github.com/model-checking/kani/issues/new/choose".into(), + }), + } + } +} + +/// Match atomic intrinsics by instance, returning an instance of the intrinsics enum if the match +/// is successful. 
+fn try_match_atomic(intrinsic_instance: &Instance) -> Option { + let intrinsic_str = intrinsic_instance.intrinsic_name().unwrap(); + let sig = intrinsic_instance.ty().kind().fn_sig().unwrap().skip_binder(); + if let Some(suffix) = intrinsic_str.strip_prefix("atomic_and_") { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Mut), _ => _); + Some(Intrinsic::AtomicAnd(suffix.into())) + } else if let Some(suffix) = intrinsic_str.strip_prefix("atomic_cxchgweak_") { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Mut), _, _ => RigidTy::Tuple(_)); + Some(Intrinsic::AtomicCxchgWeak(suffix.into())) + } else if let Some(suffix) = intrinsic_str.strip_prefix("atomic_cxchg_") { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Mut), _, _ => RigidTy::Tuple(_)); + Some(Intrinsic::AtomicCxchg(suffix.into())) + } else if let Some(suffix) = intrinsic_str.strip_prefix("atomic_fence_") { + assert_sig_matches!(sig, => RigidTy::Tuple(_)); + Some(Intrinsic::AtomicFence(suffix.into())) + } else if let Some(suffix) = intrinsic_str.strip_prefix("atomic_load_") { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Not) => _); + Some(Intrinsic::AtomicLoad(suffix.into())) + } else if let Some(suffix) = intrinsic_str.strip_prefix("atomic_max_") { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Mut), _ => _); + Some(Intrinsic::AtomicMax(suffix.into())) + } else if let Some(suffix) = intrinsic_str.strip_prefix("atomic_min_") { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Mut), _ => _); + Some(Intrinsic::AtomicMin(suffix.into())) + } else if let Some(suffix) = intrinsic_str.strip_prefix("atomic_nand_") { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Mut), _ => _); + Some(Intrinsic::AtomicNand(suffix.into())) + } else if let Some(suffix) = intrinsic_str.strip_prefix("atomic_or_") { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Mut), _ => _); + Some(Intrinsic::AtomicOr(suffix.into())) + } else if let Some(suffix) = 
intrinsic_str.strip_prefix("atomic_singlethreadfence_") { + assert_sig_matches!(sig, => RigidTy::Tuple(_)); + Some(Intrinsic::AtomicSingleThreadFence(suffix.into())) + } else if let Some(suffix) = intrinsic_str.strip_prefix("atomic_store_") { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Mut), _ => RigidTy::Tuple(_)); + Some(Intrinsic::AtomicStore(suffix.into())) + } else if let Some(suffix) = intrinsic_str.strip_prefix("atomic_umax_") { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Mut), _ => _); + Some(Intrinsic::AtomicUmax(suffix.into())) + } else if let Some(suffix) = intrinsic_str.strip_prefix("atomic_umin_") { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Mut), _ => _); + Some(Intrinsic::AtomicUmin(suffix.into())) + } else if let Some(suffix) = intrinsic_str.strip_prefix("atomic_xadd_") { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Mut), _ => _); + Some(Intrinsic::AtomicXadd(suffix.into())) + } else if let Some(suffix) = intrinsic_str.strip_prefix("atomic_xchg_") { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Mut), _ => _); + Some(Intrinsic::AtomicXchg(suffix.into())) + } else if let Some(suffix) = intrinsic_str.strip_prefix("atomic_xor_") { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Mut), _ => _); + Some(Intrinsic::AtomicXor(suffix.into())) + } else if let Some(suffix) = intrinsic_str.strip_prefix("atomic_xsub_") { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Mut), _ => _); + Some(Intrinsic::AtomicXsub(suffix.into())) + } else { + None + } +} + +/// Match SIMD intrinsics by instance, returning an instance of the intrinsics enum if the match +/// is successful. 
+fn try_match_simd(intrinsic_instance: &Instance) -> Option { + let intrinsic_str = intrinsic_instance.intrinsic_name().unwrap(); + let sig = intrinsic_instance.ty().kind().fn_sig().unwrap().skip_binder(); + match intrinsic_str.as_str() { + "simd_add" => { + assert_sig_matches!(sig, _, _ => _); + Some(Intrinsic::SimdAdd) + } + "simd_and" => { + assert_sig_matches!(sig, _, _ => _); + Some(Intrinsic::SimdAnd) + } + "simd_div" => { + assert_sig_matches!(sig, _, _ => _); + Some(Intrinsic::SimdDiv) + } + "simd_rem" => { + assert_sig_matches!(sig, _, _ => _); + Some(Intrinsic::SimdRem) + } + "simd_eq" => { + assert_sig_matches!(sig, _, _ => _); + Some(Intrinsic::SimdEq) + } + "simd_extract" => { + assert_sig_matches!(sig, _, RigidTy::Uint(UintTy::U32) => _); + Some(Intrinsic::SimdExtract) + } + "simd_ge" => { + assert_sig_matches!(sig, _, _ => _); + Some(Intrinsic::SimdGe) + } + "simd_gt" => { + assert_sig_matches!(sig, _, _ => _); + Some(Intrinsic::SimdGt) + } + "simd_insert" => { + assert_sig_matches!(sig, _, RigidTy::Uint(UintTy::U32), _ => _); + Some(Intrinsic::SimdInsert) + } + "simd_le" => { + assert_sig_matches!(sig, _, _ => _); + Some(Intrinsic::SimdLe) + } + "simd_lt" => { + assert_sig_matches!(sig, _, _ => _); + Some(Intrinsic::SimdLt) + } + "simd_mul" => { + assert_sig_matches!(sig, _, _ => _); + Some(Intrinsic::SimdMul) + } + "simd_ne" => { + assert_sig_matches!(sig, _, _ => _); + Some(Intrinsic::SimdNe) + } + "simd_or" => { + assert_sig_matches!(sig, _, _ => _); + Some(Intrinsic::SimdOr) + } + "simd_shl" => { + assert_sig_matches!(sig, _, _ => _); + Some(Intrinsic::SimdShl) + } + "simd_shr" => { + assert_sig_matches!(sig, _, _ => _); + Some(Intrinsic::SimdShr) + } + "simd_sub" => { + assert_sig_matches!(sig, _, _ => _); + Some(Intrinsic::SimdSub) + } + "simd_xor" => { + assert_sig_matches!(sig, _, _ => _); + Some(Intrinsic::SimdXor) + } + name => { + if let Some(suffix) = name.strip_prefix("simd_shuffle") { + assert_sig_matches!(sig, _, _, _ => _); + 
Some(Intrinsic::SimdShuffle(suffix.into())) + } else { + None + } + } + } +} + +/// Match f32 arithmetic intrinsics by instance, returning an instance of the intrinsics enum if the match +/// is successful. +fn try_match_f32(intrinsic_instance: &Instance) -> Option { + let intrinsic_str = intrinsic_instance.intrinsic_name().unwrap(); + let sig = intrinsic_instance.ty().kind().fn_sig().unwrap().skip_binder(); + match intrinsic_str.as_str() { + "ceilf32" => { + assert_sig_matches!(sig, RigidTy::Float(FloatTy::F32) => RigidTy::Float(FloatTy::F32)); + Some(Intrinsic::CeilF32) + } + "copysignf32" => { + assert_sig_matches!(sig, RigidTy::Float(FloatTy::F32), RigidTy::Float(FloatTy::F32) => RigidTy::Float(FloatTy::F32)); + Some(Intrinsic::CopySignF32) + } + "cosf32" => { + assert_sig_matches!(sig, RigidTy::Float(FloatTy::F32) => RigidTy::Float(FloatTy::F32)); + Some(Intrinsic::CosF32) + } + "exp2f32" => { + assert_sig_matches!(sig, RigidTy::Float(FloatTy::F32) => RigidTy::Float(FloatTy::F32)); + Some(Intrinsic::Exp2F32) + } + "expf32" => { + assert_sig_matches!(sig, RigidTy::Float(FloatTy::F32) => RigidTy::Float(FloatTy::F32)); + Some(Intrinsic::ExpF32) + } + "fabsf32" => { + assert_sig_matches!(sig, RigidTy::Float(FloatTy::F32) => RigidTy::Float(FloatTy::F32)); + Some(Intrinsic::FabsF32) + } + "floorf32" => { + assert_sig_matches!(sig, RigidTy::Float(FloatTy::F32) => RigidTy::Float(FloatTy::F32)); + Some(Intrinsic::FloorF32) + } + "fmaf32" => { + assert_sig_matches!(sig, RigidTy::Float(FloatTy::F32), RigidTy::Float(FloatTy::F32), RigidTy::Float(FloatTy::F32) => RigidTy::Float(FloatTy::F32)); + Some(Intrinsic::FmafF32) + } + "log10f32" => { + assert_sig_matches!(sig, RigidTy::Float(FloatTy::F32) => RigidTy::Float(FloatTy::F32)); + Some(Intrinsic::Log10F32) + } + "log2f32" => { + assert_sig_matches!(sig, RigidTy::Float(FloatTy::F32) => RigidTy::Float(FloatTy::F32)); + Some(Intrinsic::Log2F32) + } + "logf32" => { + assert_sig_matches!(sig, RigidTy::Float(FloatTy::F32) => 
RigidTy::Float(FloatTy::F32)); + Some(Intrinsic::LogF32) + } + "maxnumf32" => { + assert_sig_matches!(sig, RigidTy::Float(FloatTy::F32), RigidTy::Float(FloatTy::F32) => RigidTy::Float(FloatTy::F32)); + Some(Intrinsic::MaxNumF32) + } + "minnumf32" => { + assert_sig_matches!(sig, RigidTy::Float(FloatTy::F32), RigidTy::Float(FloatTy::F32) => RigidTy::Float(FloatTy::F32)); + Some(Intrinsic::MinNumF32) + } + "nearbyintf32" => { + assert_sig_matches!(sig, RigidTy::Float(FloatTy::F32) => RigidTy::Float(FloatTy::F32)); + Some(Intrinsic::NearbyIntF32) + } + "powf32" => { + assert_sig_matches!(sig, RigidTy::Float(FloatTy::F32), RigidTy::Float(FloatTy::F32) => RigidTy::Float(FloatTy::F32)); + Some(Intrinsic::PowF32) + } + "powif32" => { + assert_sig_matches!(sig, RigidTy::Float(FloatTy::F32), RigidTy::Int(IntTy::I32) => RigidTy::Float(FloatTy::F32)); + Some(Intrinsic::PowIF32) + } + "rintf32" => { + assert_sig_matches!(sig, RigidTy::Float(FloatTy::F32) => RigidTy::Float(FloatTy::F32)); + Some(Intrinsic::RintF32) + } + "roundf32" => { + assert_sig_matches!(sig, RigidTy::Float(FloatTy::F32) => RigidTy::Float(FloatTy::F32)); + Some(Intrinsic::RoundF32) + } + "sinf32" => { + assert_sig_matches!(sig, RigidTy::Float(FloatTy::F32) => RigidTy::Float(FloatTy::F32)); + Some(Intrinsic::SinF32) + } + "sqrtf32" => { + assert_sig_matches!(sig, RigidTy::Float(FloatTy::F32) => RigidTy::Float(FloatTy::F32)); + Some(Intrinsic::SqrtF32) + } + "truncf32" => { + assert_sig_matches!(sig, RigidTy::Float(FloatTy::F32) => RigidTy::Float(FloatTy::F32)); + Some(Intrinsic::TruncF32) + } + _ => None, + } +} + +/// Match f64 arithmetic intrinsics by instance, returning an instance of the intrinsics enum if the match +/// is successful. 
+fn try_match_f64(intrinsic_instance: &Instance) -> Option { + let intrinsic_str = intrinsic_instance.intrinsic_name().unwrap(); + let sig = intrinsic_instance.ty().kind().fn_sig().unwrap().skip_binder(); + match intrinsic_str.as_str() { + "ceilf64" => { + assert_sig_matches!(sig, RigidTy::Float(FloatTy::F64) => RigidTy::Float(FloatTy::F64)); + Some(Intrinsic::CeilF64) + } + "copysignf64" => { + assert_sig_matches!(sig, RigidTy::Float(FloatTy::F64), RigidTy::Float(FloatTy::F64) => RigidTy::Float(FloatTy::F64)); + Some(Intrinsic::CopySignF64) + } + "cosf64" => { + assert_sig_matches!(sig, RigidTy::Float(FloatTy::F64) => RigidTy::Float(FloatTy::F64)); + Some(Intrinsic::CosF64) + } + "exp2f64" => { + assert_sig_matches!(sig, RigidTy::Float(FloatTy::F64) => RigidTy::Float(FloatTy::F64)); + Some(Intrinsic::Exp2F64) + } + "expf64" => { + assert_sig_matches!(sig, RigidTy::Float(FloatTy::F64) => RigidTy::Float(FloatTy::F64)); + Some(Intrinsic::ExpF64) + } + "fabsf64" => { + assert_sig_matches!(sig, RigidTy::Float(FloatTy::F64) => RigidTy::Float(FloatTy::F64)); + Some(Intrinsic::FabsF64) + } + "floorf64" => { + assert_sig_matches!(sig, RigidTy::Float(FloatTy::F64) => RigidTy::Float(FloatTy::F64)); + Some(Intrinsic::FloorF64) + } + "fmaf64" => { + assert_sig_matches!(sig, RigidTy::Float(FloatTy::F64), RigidTy::Float(FloatTy::F64), RigidTy::Float(FloatTy::F64) => RigidTy::Float(FloatTy::F64)); + Some(Intrinsic::FmafF64) + } + "log10f64" => { + assert_sig_matches!(sig, RigidTy::Float(FloatTy::F64) => RigidTy::Float(FloatTy::F64)); + Some(Intrinsic::Log10F64) + } + "log2f64" => { + assert_sig_matches!(sig, RigidTy::Float(FloatTy::F64) => RigidTy::Float(FloatTy::F64)); + Some(Intrinsic::Log2F64) + } + "logf64" => { + assert_sig_matches!(sig, RigidTy::Float(FloatTy::F64) => RigidTy::Float(FloatTy::F64)); + Some(Intrinsic::LogF64) + } + "maxnumf64" => { + assert_sig_matches!(sig, RigidTy::Float(FloatTy::F64), RigidTy::Float(FloatTy::F64) => RigidTy::Float(FloatTy::F64)); + 
Some(Intrinsic::MaxNumF64) + } + "minnumf64" => { + assert_sig_matches!(sig, RigidTy::Float(FloatTy::F64), RigidTy::Float(FloatTy::F64) => RigidTy::Float(FloatTy::F64)); + Some(Intrinsic::MinNumF64) + } + "nearbyintf64" => { + assert_sig_matches!(sig, RigidTy::Float(FloatTy::F64) => RigidTy::Float(FloatTy::F64)); + Some(Intrinsic::NearbyIntF64) + } + "powf64" => { + assert_sig_matches!(sig, RigidTy::Float(FloatTy::F64), RigidTy::Float(FloatTy::F64) => RigidTy::Float(FloatTy::F64)); + Some(Intrinsic::PowF64) + } + "powif64" => { + assert_sig_matches!(sig, RigidTy::Float(FloatTy::F64), RigidTy::Int(IntTy::I32) => RigidTy::Float(FloatTy::F64)); + Some(Intrinsic::PowIF64) + } + "rintf64" => { + assert_sig_matches!(sig, RigidTy::Float(FloatTy::F64) => RigidTy::Float(FloatTy::F64)); + Some(Intrinsic::RintF64) + } + "roundf64" => { + assert_sig_matches!(sig, RigidTy::Float(FloatTy::F64) => RigidTy::Float(FloatTy::F64)); + Some(Intrinsic::RoundF64) + } + "sinf64" => { + assert_sig_matches!(sig, RigidTy::Float(FloatTy::F64) => RigidTy::Float(FloatTy::F64)); + Some(Intrinsic::SinF64) + } + "sqrtf64" => { + assert_sig_matches!(sig, RigidTy::Float(FloatTy::F64) => RigidTy::Float(FloatTy::F64)); + Some(Intrinsic::SqrtF64) + } + "truncf64" => { + assert_sig_matches!(sig, RigidTy::Float(FloatTy::F64) => RigidTy::Float(FloatTy::F64)); + Some(Intrinsic::TruncF64) + } + _ => None, + } +} diff --git a/kani-compiler/src/kani_middle/points_to/points_to_analysis.rs b/kani-compiler/src/kani_middle/points_to/points_to_analysis.rs index 640318ccb584..15593549b690 100644 --- a/kani-compiler/src/kani_middle/points_to/points_to_analysis.rs +++ b/kani-compiler/src/kani_middle/points_to/points_to_analysis.rs @@ -24,12 +24,14 @@ //! Currently, the analysis is not field-sensitive: e.g., if a field of a place aliases to some //! other place, we treat it as if the place itself aliases to another place. 
-use crate::kani_middle::{ - points_to::{MemLoc, PointsToGraph}, - reachability::CallGraph, - transform::RustcInternalMir, +use crate::{ + intrinsics::Intrinsic, + kani_middle::{ + points_to::{MemLoc, PointsToGraph}, + reachability::CallGraph, + transform::RustcInternalMir, + }, }; -use rustc_ast::Mutability; use rustc_middle::{ mir::{ BasicBlock, BinOp, Body, CallReturnPlaces, Location, NonDivergingIntrinsic, Operand, Place, @@ -202,161 +204,117 @@ impl<'a, 'tcx> Analysis<'tcx> for PointsToAnalysis<'a, 'tcx> { match instance.def { // Intrinsics could introduce aliasing edges we care about, so need to handle them. InstanceKind::Intrinsic(def_id) => { - match self.tcx.intrinsic(def_id).unwrap().name.to_string().as_str() { - name if name.starts_with("atomic") => { - match name { - // All `atomic_cxchg` intrinsics take `dst, old, src` as arguments. - // This is equivalent to `destination = *dst; *dst = src`. - name if name.starts_with("atomic_cxchg") => { - assert_eq!( - args.len(), - 3, - "Unexpected number of arguments for `{name}`" - ); - assert!(matches!( - args[0].node.ty(self.body, self.tcx).kind(), - TyKind::RawPtr(_, Mutability::Mut) - )); - let src_set = - self.successors_for_operand(state, args[2].node.clone()); - let dst_set = - self.successors_for_deref(state, args[0].node.clone()); - let destination_set = - state.resolve_place(*destination, self.instance); - state.extend(&destination_set, &state.successors(&dst_set)); - state.extend(&dst_set, &src_set); - } - // All `atomic_load` intrinsics take `src` as an argument. - // This is equivalent to `destination = *src`. 
- name if name.starts_with("atomic_load") => { - assert_eq!( - args.len(), - 1, - "Unexpected number of arguments for `{name}`" - ); - assert!(matches!( - args[0].node.ty(self.body, self.tcx).kind(), - TyKind::RawPtr(_, Mutability::Not) - )); - let src_set = - self.successors_for_deref(state, args[0].node.clone()); - let destination_set = - state.resolve_place(*destination, self.instance); - state.extend(&destination_set, &state.successors(&src_set)); - } - // All `atomic_store` intrinsics take `dst, val` as arguments. - // This is equivalent to `*dst = val`. - name if name.starts_with("atomic_store") => { - assert_eq!( - args.len(), - 2, - "Unexpected number of arguments for `{name}`" - ); - assert!(matches!( - args[0].node.ty(self.body, self.tcx).kind(), - TyKind::RawPtr(_, Mutability::Mut) - )); - let dst_set = - self.successors_for_deref(state, args[0].node.clone()); - let val_set = - self.successors_for_operand(state, args[1].node.clone()); - state.extend(&dst_set, &val_set); - } - // All other `atomic` intrinsics take `dst, src` as arguments. - // This is equivalent to `destination = *dst; *dst = src`. - _ => { - assert_eq!( - args.len(), - 2, - "Unexpected number of arguments for `{name}`" - ); - assert!(matches!( - args[0].node.ty(self.body, self.tcx).kind(), - TyKind::RawPtr(_, Mutability::Mut) - )); - let src_set = - self.successors_for_operand(state, args[1].node.clone()); - let dst_set = - self.successors_for_deref(state, args[0].node.clone()); - let destination_set = - state.resolve_place(*destination, self.instance); - state.extend(&destination_set, &state.successors(&dst_set)); - state.extend(&dst_set, &src_set); - } - }; - } - // Similar to `copy_nonoverlapping`, argument order is `src`, `dst`, `count`. 
- "copy" => { - assert_eq!(args.len(), 3, "Unexpected number of arguments for `copy`"); - assert!(matches!( - args[0].node.ty(self.body, self.tcx).kind(), - TyKind::RawPtr(_, Mutability::Not) - )); - assert!(matches!( - args[1].node.ty(self.body, self.tcx).kind(), - TyKind::RawPtr(_, Mutability::Mut) - )); - self.apply_copy_effect( - state, - args[0].node.clone(), - args[1].node.clone(), - ); - } - // Similar to `copy_nonoverlapping`, argument order is `dst`, `src`, `count`. - "volatile_copy_memory" | "volatile_copy_nonoverlapping_memory" => { - assert_eq!(args.len(), 3, "Unexpected number of arguments for `copy`"); - assert!(matches!( - args[0].node.ty(self.body, self.tcx).kind(), - TyKind::RawPtr(_, Mutability::Mut) - )); - assert!(matches!( - args[1].node.ty(self.body, self.tcx).kind(), - TyKind::RawPtr(_, Mutability::Not) - )); - self.apply_copy_effect( - state, - args[1].node.clone(), - args[0].node.clone(), - ); - } - // Semantically equivalent to dest = *a - "volatile_load" | "unaligned_volatile_load" => { - assert_eq!( - args.len(), - 1, - "Unexpected number of arguments for `volatile_load`" - ); - assert!(matches!( - args[0].node.ty(self.body, self.tcx).kind(), - TyKind::RawPtr(_, Mutability::Not) - )); - // Destination of the return value. - let lvalue_set = state.resolve_place(*destination, self.instance); - let rvalue_set = self.successors_for_deref(state, args[0].node.clone()); - state.extend(&lvalue_set, &state.successors(&rvalue_set)); - } - // Semantically equivalent *a = b. 
- "volatile_store" | "unaligned_volatile_store" => { - assert_eq!( - args.len(), - 2, - "Unexpected number of arguments for `volatile_store`" - ); - assert!(matches!( - args[0].node.ty(self.body, self.tcx).kind(), - TyKind::RawPtr(_, Mutability::Mut) - )); - let lvalue_set = self.successors_for_deref(state, args[0].node.clone()); - let rvalue_set = - self.successors_for_operand(state, args[1].node.clone()); - state.extend(&lvalue_set, &rvalue_set); - } - _ => { - // TODO: this probably does not handle all relevant intrinsics, so more - // need to be added. For more information, see: - // https://github.com/model-checking/kani/issues/3300 - if self.tcx.is_mir_available(def_id) { - self.apply_regular_call_effect(state, instance, args, destination); + // Check if the intrinsic has a body we can analyze. + if self.tcx.is_mir_available(def_id) { + self.apply_regular_call_effect(state, instance, args, destination); + } else { + // Check all of the other intrinsics. + match Intrinsic::from_instance(&rustc_internal::stable(instance)) { + intrinsic if is_identity_aliasing_intrinsic(intrinsic.clone()) => { + // Treat the intrinsic as an aggregate, taking a union of all of the + // arguments' aliases. + let destination_set = + state.resolve_place(*destination, self.instance); + let operands_set = args + .into_iter() + .flat_map(|operand| { + self.successors_for_operand(state, operand.node.clone()) + }) + .collect(); + state.extend(&destination_set, &operands_set); + } + // All `atomic_cxchg` intrinsics take `dst, old, src` as arguments. + // This is equivalent to `destination = *dst; *dst = src`. 
+ Intrinsic::AtomicCxchg(_) | Intrinsic::AtomicCxchgWeak(_) => { + let src_set = + self.successors_for_operand(state, args[2].node.clone()); + let dst_set = + self.successors_for_deref(state, args[0].node.clone()); + let destination_set = + state.resolve_place(*destination, self.instance); + state.extend(&destination_set, &state.successors(&dst_set)); + state.extend(&dst_set, &src_set); + } + // All `atomic_load` intrinsics take `src` as an argument. + // This is equivalent to `destination = *src`. + Intrinsic::AtomicLoad(_) => { + let src_set = + self.successors_for_deref(state, args[0].node.clone()); + let destination_set = + state.resolve_place(*destination, self.instance); + state.extend(&destination_set, &state.successors(&src_set)); + } + // All `atomic_store` intrinsics take `dst, val` as arguments. + // This is equivalent to `*dst = val`. + Intrinsic::AtomicStore(_) => { + let dst_set = + self.successors_for_deref(state, args[0].node.clone()); + let val_set = + self.successors_for_operand(state, args[1].node.clone()); + state.extend(&dst_set, &val_set); + } + // All other `atomic` intrinsics take `dst, src` as arguments. + // This is equivalent to `destination = *dst; *dst = src`. + Intrinsic::AtomicAnd(_) + | Intrinsic::AtomicMax(_) + | Intrinsic::AtomicMin(_) + | Intrinsic::AtomicNand(_) + | Intrinsic::AtomicOr(_) + | Intrinsic::AtomicUmax(_) + | Intrinsic::AtomicUmin(_) + | Intrinsic::AtomicXadd(_) + | Intrinsic::AtomicXchg(_) + | Intrinsic::AtomicXor(_) + | Intrinsic::AtomicXsub(_) => { + let src_set = + self.successors_for_operand(state, args[1].node.clone()); + let dst_set = + self.successors_for_deref(state, args[0].node.clone()); + let destination_set = + state.resolve_place(*destination, self.instance); + state.extend(&destination_set, &state.successors(&dst_set)); + state.extend(&dst_set, &src_set); + } + // Similar to `copy_nonoverlapping`, argument order is `src`, `dst`, `count`. 
+ Intrinsic::Copy => { + self.apply_copy_effect( + state, + args[0].node.clone(), + args[1].node.clone(), + ); + } + // Similar to `copy_nonoverlapping`, argument order is `dst`, `src`, `count`. + Intrinsic::VolatileCopyMemory + | Intrinsic::VolatileCopyNonOverlappingMemory => { + self.apply_copy_effect( + state, + args[1].node.clone(), + args[0].node.clone(), + ); + } + // Semantically equivalent to dest = *a + Intrinsic::VolatileLoad | Intrinsic::UnalignedVolatileLoad => { + // Destination of the return value. + let lvalue_set = state.resolve_place(*destination, self.instance); + let rvalue_set = + self.successors_for_deref(state, args[0].node.clone()); + state.extend(&lvalue_set, &state.successors(&rvalue_set)); + } + // Semantically equivalent *a = b. + Intrinsic::VolatileStore => { + let lvalue_set = + self.successors_for_deref(state, args[0].node.clone()); + let rvalue_set = + self.successors_for_operand(state, args[1].node.clone()); + state.extend(&lvalue_set, &rvalue_set); + } + Intrinsic::Unimplemented { .. } => { + // This will be taken care of at the codegen level. + } + intrinsic => { + unimplemented!( + "Kani does not support reasoning about aliasing in presence of intrinsic `{intrinsic:?}`. For more information about the state of uninitialized memory checks implementation, see: https://github.com/model-checking/kani/issues/3300." + ); } } } @@ -652,3 +610,136 @@ impl<'a, 'tcx> PointsToAnalysis<'a, 'tcx> { } } } + +/// Determines if the intrinsic does not influence aliasing beyond being treated as an identity +/// function (i.e. propagate aliasing without changes). 
+fn is_identity_aliasing_intrinsic(intrinsic: Intrinsic) -> bool { + match intrinsic { + Intrinsic::AddWithOverflow + | Intrinsic::ArithOffset + | Intrinsic::AssertInhabited + | Intrinsic::AssertMemUninitializedValid + | Intrinsic::AssertZeroValid + | Intrinsic::Assume + | Intrinsic::Bitreverse + | Intrinsic::BlackBox + | Intrinsic::Breakpoint + | Intrinsic::Bswap + | Intrinsic::CeilF32 + | Intrinsic::CeilF64 + | Intrinsic::CompareBytes + | Intrinsic::CopySignF32 + | Intrinsic::CopySignF64 + | Intrinsic::CosF32 + | Intrinsic::CosF64 + | Intrinsic::Ctlz + | Intrinsic::CtlzNonZero + | Intrinsic::Ctpop + | Intrinsic::Cttz + | Intrinsic::CttzNonZero + | Intrinsic::DiscriminantValue + | Intrinsic::ExactDiv + | Intrinsic::Exp2F32 + | Intrinsic::Exp2F64 + | Intrinsic::ExpF32 + | Intrinsic::ExpF64 + | Intrinsic::FabsF32 + | Intrinsic::FabsF64 + | Intrinsic::FaddFast + | Intrinsic::FdivFast + | Intrinsic::FloorF32 + | Intrinsic::FloorF64 + | Intrinsic::FmafF32 + | Intrinsic::FmafF64 + | Intrinsic::FmulFast + | Intrinsic::Forget + | Intrinsic::FsubFast + | Intrinsic::IsValStaticallyKnown + | Intrinsic::Likely + | Intrinsic::Log10F32 + | Intrinsic::Log10F64 + | Intrinsic::Log2F32 + | Intrinsic::Log2F64 + | Intrinsic::LogF32 + | Intrinsic::LogF64 + | Intrinsic::MaxNumF32 + | Intrinsic::MaxNumF64 + | Intrinsic::MinAlignOf + | Intrinsic::MinAlignOfVal + | Intrinsic::MinNumF32 + | Intrinsic::MinNumF64 + | Intrinsic::MulWithOverflow + | Intrinsic::NearbyIntF32 + | Intrinsic::NearbyIntF64 + | Intrinsic::NeedsDrop + | Intrinsic::PowF32 + | Intrinsic::PowF64 + | Intrinsic::PowIF32 + | Intrinsic::PowIF64 + | Intrinsic::PrefAlignOf + | Intrinsic::PtrGuaranteedCmp + | Intrinsic::PtrOffsetFrom + | Intrinsic::PtrOffsetFromUnsigned + | Intrinsic::RawEq + | Intrinsic::RintF32 + | Intrinsic::RintF64 + | Intrinsic::RotateLeft + | Intrinsic::RotateRight + | Intrinsic::RoundF32 + | Intrinsic::RoundF64 + | Intrinsic::SaturatingAdd + | Intrinsic::SaturatingSub + | Intrinsic::SinF32 + | 
Intrinsic::SinF64 + | Intrinsic::SizeOfVal + | Intrinsic::SqrtF32 + | Intrinsic::SqrtF64 + | Intrinsic::SubWithOverflow + | Intrinsic::TruncF32 + | Intrinsic::TruncF64 + | Intrinsic::TypeId + | Intrinsic::TypeName + | Intrinsic::UncheckedDiv + | Intrinsic::UncheckedRem + | Intrinsic::Unlikely + | Intrinsic::VtableSize + | Intrinsic::VtableAlign + | Intrinsic::WrappingAdd + | Intrinsic::WrappingMul + | Intrinsic::WrappingSub + | Intrinsic::WriteBytes => { + /* Intrinsics that do not interact with aliasing beyond propagating it. */ + true + } + Intrinsic::SimdAdd + | Intrinsic::SimdAnd + | Intrinsic::SimdDiv + | Intrinsic::SimdRem + | Intrinsic::SimdEq + | Intrinsic::SimdExtract + | Intrinsic::SimdGe + | Intrinsic::SimdGt + | Intrinsic::SimdInsert + | Intrinsic::SimdLe + | Intrinsic::SimdLt + | Intrinsic::SimdMul + | Intrinsic::SimdNe + | Intrinsic::SimdOr + | Intrinsic::SimdShl + | Intrinsic::SimdShr + | Intrinsic::SimdShuffle(_) + | Intrinsic::SimdSub + | Intrinsic::SimdXor => { + /* SIMD operations */ + true + } + Intrinsic::AtomicFence(_) | Intrinsic::AtomicSingleThreadFence(_) => { + /* Atomic fences */ + true + } + _ => { + /* Everything else */ + false + } + } +} diff --git a/kani-compiler/src/kani_middle/transform/check_uninit/delayed_ub/initial_target_visitor.rs b/kani-compiler/src/kani_middle/transform/check_uninit/delayed_ub/initial_target_visitor.rs index 11ac412703ae..35d0e7041704 100644 --- a/kani-compiler/src/kani_middle/transform/check_uninit/delayed_ub/initial_target_visitor.rs +++ b/kani-compiler/src/kani_middle/transform/check_uninit/delayed_ub/initial_target_visitor.rs @@ -4,14 +4,17 @@ //! This module contains the visitor responsible for collecting initial analysis targets for delayed //! UB instrumentation. 
-use crate::kani_middle::transform::check_uninit::ty_layout::tys_layout_equal_to_size; +use crate::{ + intrinsics::Intrinsic, + kani_middle::transform::check_uninit::ty_layout::tys_layout_equal_to_size, +}; use stable_mir::{ mir::{ alloc::GlobalAlloc, mono::{Instance, InstanceKind, StaticDef}, visit::Location, - Body, CastKind, LocalDecl, MirVisitor, Mutability, NonDivergingIntrinsic, Operand, Place, - Rvalue, Statement, StatementKind, Terminator, TerminatorKind, + Body, CastKind, LocalDecl, MirVisitor, NonDivergingIntrinsic, Operand, Place, Rvalue, + Statement, StatementKind, Terminator, TerminatorKind, }, ty::{ConstantKind, RigidTy, TyKind}, }; @@ -101,34 +104,12 @@ impl MirVisitor for InitialTargetVisitor { if let TerminatorKind::Call { func, args, .. } = &term.kind { let instance = try_resolve_instance(self.body.locals(), func).unwrap(); if instance.kind == InstanceKind::Intrinsic { - match instance.intrinsic_name().unwrap().as_str() { - "copy" => { - assert_eq!(args.len(), 3, "Unexpected number of arguments for `copy`"); - assert!(matches!( - args[0].ty(self.body.locals()).unwrap().kind(), - TyKind::RigidTy(RigidTy::RawPtr(_, Mutability::Not)) - )); - assert!(matches!( - args[1].ty(self.body.locals()).unwrap().kind(), - TyKind::RigidTy(RigidTy::RawPtr(_, Mutability::Mut)) - )); + match Intrinsic::from_instance(&instance) { + Intrinsic::Copy => { // Here, `dst` is the second argument. self.push_operand(&args[1]); } - "volatile_copy_memory" | "volatile_copy_nonoverlapping_memory" => { - assert_eq!( - args.len(), - 3, - "Unexpected number of arguments for `volatile_copy`" - ); - assert!(matches!( - args[0].ty(self.body.locals()).unwrap().kind(), - TyKind::RigidTy(RigidTy::RawPtr(_, Mutability::Mut)) - )); - assert!(matches!( - args[1].ty(self.body.locals()).unwrap().kind(), - TyKind::RigidTy(RigidTy::RawPtr(_, Mutability::Not)) - )); + Intrinsic::VolatileCopyMemory | Intrinsic::VolatileCopyNonOverlappingMemory => { // Here, `dst` is the first argument. 
self.push_operand(&args[0]); } diff --git a/kani-compiler/src/kani_middle/transform/check_uninit/ptr_uninit/uninit_visitor.rs b/kani-compiler/src/kani_middle/transform/check_uninit/ptr_uninit/uninit_visitor.rs index 837e14abc886..f682f93a261e 100644 --- a/kani-compiler/src/kani_middle/transform/check_uninit/ptr_uninit/uninit_visitor.rs +++ b/kani-compiler/src/kani_middle/transform/check_uninit/ptr_uninit/uninit_visitor.rs @@ -3,12 +3,15 @@ // //! Visitor that collects all instructions relevant to uninitialized memory access. -use crate::kani_middle::transform::{ - body::{InsertPosition, MutableBody, SourceInstruction}, - check_uninit::{ - relevant_instruction::{InitRelevantInstruction, MemoryInitOp}, - ty_layout::tys_layout_compatible_to_size, - TargetFinder, +use crate::{ + intrinsics::Intrinsic, + kani_middle::transform::{ + body::{InsertPosition, MutableBody, SourceInstruction}, + check_uninit::{ + relevant_instruction::{InitRelevantInstruction, MemoryInitOp}, + ty_layout::tys_layout_compatible_to_size, + TargetFinder, + }, }, }; use stable_mir::{ @@ -16,8 +19,8 @@ use stable_mir::{ alloc::GlobalAlloc, mono::{Instance, InstanceKind}, visit::{Location, PlaceContext}, - BasicBlockIdx, CastKind, LocalDecl, MirVisitor, Mutability, NonDivergingIntrinsic, Operand, - Place, PointerCoercion, ProjectionElem, Rvalue, Statement, StatementKind, Terminator, + BasicBlockIdx, CastKind, LocalDecl, MirVisitor, NonDivergingIntrinsic, Operand, Place, + PointerCoercion, ProjectionElem, Rvalue, Statement, StatementKind, Terminator, TerminatorKind, }, ty::{ConstantKind, RigidTy, TyKind}, @@ -182,46 +185,30 @@ impl MirVisitor for CheckUninitVisitor { }; match instance.kind { InstanceKind::Intrinsic => { - match instance.intrinsic_name().unwrap().as_str() { - intrinsic_name if can_skip_intrinsic(intrinsic_name) => { + match Intrinsic::from_instance(&instance) { + intrinsic_name if can_skip_intrinsic(intrinsic_name.clone()) => { /* Intrinsics that can be safely skipped */ } - name if 
name.starts_with("atomic") => { - let num_args = match name { - // All `atomic_cxchg` intrinsics take `dst, old, src` as arguments. - name if name.starts_with("atomic_cxchg") => 3, - // All `atomic_load` intrinsics take `src` as an argument. - name if name.starts_with("atomic_load") => 1, - // All other `atomic` intrinsics take `dst, src` as arguments. - _ => 2, - }; - assert_eq!( - args.len(), - num_args, - "Unexpected number of arguments for `{name}`" - ); - assert!(matches!( - args[0].ty(&self.locals).unwrap().kind(), - TyKind::RigidTy(RigidTy::RawPtr(..)) - )); + Intrinsic::AtomicAnd(_) + | Intrinsic::AtomicCxchg(_) + | Intrinsic::AtomicCxchgWeak(_) + | Intrinsic::AtomicLoad(_) + | Intrinsic::AtomicMax(_) + | Intrinsic::AtomicMin(_) + | Intrinsic::AtomicNand(_) + | Intrinsic::AtomicOr(_) + | Intrinsic::AtomicStore(_) + | Intrinsic::AtomicUmax(_) + | Intrinsic::AtomicUmin(_) + | Intrinsic::AtomicXadd(_) + | Intrinsic::AtomicXchg(_) + | Intrinsic::AtomicXor(_) + | Intrinsic::AtomicXsub(_) => { self.push_target(MemoryInitOp::Check { operand: args[0].clone(), }); } - "compare_bytes" => { - assert_eq!( - args.len(), - 3, - "Unexpected number of arguments for `compare_bytes`" - ); - assert!(matches!( - args[0].ty(&self.locals).unwrap().kind(), - TyKind::RigidTy(RigidTy::RawPtr(_, Mutability::Not)) - )); - assert!(matches!( - args[1].ty(&self.locals).unwrap().kind(), - TyKind::RigidTy(RigidTy::RawPtr(_, Mutability::Not)) - )); + Intrinsic::CompareBytes => { self.push_target(MemoryInitOp::CheckSliceChunk { operand: args[0].clone(), count: args[2].clone(), @@ -231,22 +218,7 @@ impl MirVisitor for CheckUninitVisitor { count: args[2].clone(), }); } - "copy" - | "volatile_copy_memory" - | "volatile_copy_nonoverlapping_memory" => { - assert_eq!( - args.len(), - 3, - "Unexpected number of arguments for `copy`" - ); - assert!(matches!( - args[0].ty(&self.locals).unwrap().kind(), - TyKind::RigidTy(RigidTy::RawPtr(_, Mutability::Not)) - )); - assert!(matches!( - 
args[1].ty(&self.locals).unwrap().kind(), - TyKind::RigidTy(RigidTy::RawPtr(_, Mutability::Mut)) - )); + Intrinsic::Copy => { self.push_target(MemoryInitOp::CheckSliceChunk { operand: args[0].clone(), count: args[2].clone(), @@ -258,20 +230,20 @@ impl MirVisitor for CheckUninitVisitor { position: InsertPosition::After, }); } - "typed_swap" => { - assert_eq!( - args.len(), - 2, - "Unexpected number of arguments for `typed_swap`" - ); - assert!(matches!( - args[0].ty(&self.locals).unwrap().kind(), - TyKind::RigidTy(RigidTy::RawPtr(_, Mutability::Mut)) - )); - assert!(matches!( - args[1].ty(&self.locals).unwrap().kind(), - TyKind::RigidTy(RigidTy::RawPtr(_, Mutability::Mut)) - )); + Intrinsic::VolatileCopyMemory + | Intrinsic::VolatileCopyNonOverlappingMemory => { + self.push_target(MemoryInitOp::CheckSliceChunk { + operand: args[1].clone(), + count: args[2].clone(), + }); + self.push_target(MemoryInitOp::SetSliceChunk { + operand: args[0].clone(), + count: args[2].clone(), + value: true, + position: InsertPosition::After, + }); + } + Intrinsic::TypedSwap => { self.push_target(MemoryInitOp::Check { operand: args[0].clone(), }); @@ -279,46 +251,19 @@ impl MirVisitor for CheckUninitVisitor { operand: args[1].clone(), }); } - "volatile_load" | "unaligned_volatile_load" => { - assert_eq!( - args.len(), - 1, - "Unexpected number of arguments for `volatile_load`" - ); - assert!(matches!( - args[0].ty(&self.locals).unwrap().kind(), - TyKind::RigidTy(RigidTy::RawPtr(_, Mutability::Not)) - )); + Intrinsic::VolatileLoad | Intrinsic::UnalignedVolatileLoad => { self.push_target(MemoryInitOp::Check { operand: args[0].clone(), }); } - "volatile_store" => { - assert_eq!( - args.len(), - 2, - "Unexpected number of arguments for `volatile_store`" - ); - assert!(matches!( - args[0].ty(&self.locals).unwrap().kind(), - TyKind::RigidTy(RigidTy::RawPtr(_, Mutability::Mut)) - )); + Intrinsic::VolatileStore => { self.push_target(MemoryInitOp::Set { operand: args[0].clone(), value: true, 
position: InsertPosition::After, }); } - "write_bytes" => { - assert_eq!( - args.len(), - 3, - "Unexpected number of arguments for `write_bytes`" - ); - assert!(matches!( - args[0].ty(&self.locals).unwrap().kind(), - TyKind::RigidTy(RigidTy::RawPtr(_, Mutability::Mut)) - )); + Intrinsic::WriteBytes => { self.push_target(MemoryInitOp::SetSliceChunk { operand: args[0].clone(), count: args[2].clone(), @@ -328,7 +273,7 @@ impl MirVisitor for CheckUninitVisitor { } intrinsic => { self.push_target(MemoryInitOp::Unsupported { - reason: format!("Kani does not support reasoning about memory initialization of intrinsic `{intrinsic}`."), + reason: format!("Kani does not support reasoning about memory initialization of intrinsic `{intrinsic:?}`."), }); } } @@ -519,132 +464,131 @@ impl MirVisitor for CheckUninitVisitor { /// Determines if the intrinsic has no memory initialization related function and hence can be /// safely skipped. -fn can_skip_intrinsic(intrinsic_name: &str) -> bool { - match intrinsic_name { - "add_with_overflow" - | "arith_offset" - | "assert_inhabited" - | "assert_mem_uninitialized_valid" - | "assert_zero_valid" - | "assume" - | "bitreverse" - | "black_box" - | "breakpoint" - | "bswap" - | "caller_location" - | "ceilf32" - | "ceilf64" - | "copysignf32" - | "copysignf64" - | "cosf32" - | "cosf64" - | "ctlz" - | "ctlz_nonzero" - | "ctpop" - | "cttz" - | "cttz_nonzero" - | "discriminant_value" - | "exact_div" - | "exp2f32" - | "exp2f64" - | "expf32" - | "expf64" - | "fabsf32" - | "fabsf64" - | "fadd_fast" - | "fdiv_fast" - | "floorf32" - | "floorf64" - | "fmaf32" - | "fmaf64" - | "fmul_fast" - | "forget" - | "fsub_fast" - | "is_val_statically_known" - | "likely" - | "log10f32" - | "log10f64" - | "log2f32" - | "log2f64" - | "logf32" - | "logf64" - | "maxnumf32" - | "maxnumf64" - | "min_align_of" - | "min_align_of_val" - | "minnumf32" - | "minnumf64" - | "mul_with_overflow" - | "nearbyintf32" - | "nearbyintf64" - | "needs_drop" - | "powf32" - | "powf64" - | 
"powif32" - | "powif64" - | "pref_align_of" - | "raw_eq" - | "rintf32" - | "rintf64" - | "rotate_left" - | "rotate_right" - | "roundf32" - | "roundf64" - | "saturating_add" - | "saturating_sub" - | "sinf32" - | "sinf64" - | "sqrtf32" - | "sqrtf64" - | "sub_with_overflow" - | "truncf32" - | "truncf64" - | "type_id" - | "type_name" - | "unchecked_div" - | "unchecked_rem" - | "unlikely" - | "vtable_size" - | "vtable_align" - | "wrapping_add" - | "wrapping_mul" - | "wrapping_sub" => { +fn can_skip_intrinsic(intrinsic: Intrinsic) -> bool { + match intrinsic { + Intrinsic::AddWithOverflow + | Intrinsic::ArithOffset + | Intrinsic::AssertInhabited + | Intrinsic::AssertMemUninitializedValid + | Intrinsic::AssertZeroValid + | Intrinsic::Assume + | Intrinsic::Bitreverse + | Intrinsic::BlackBox + | Intrinsic::Breakpoint + | Intrinsic::Bswap + | Intrinsic::CeilF32 + | Intrinsic::CeilF64 + | Intrinsic::CopySignF32 + | Intrinsic::CopySignF64 + | Intrinsic::CosF32 + | Intrinsic::CosF64 + | Intrinsic::Ctlz + | Intrinsic::CtlzNonZero + | Intrinsic::Ctpop + | Intrinsic::Cttz + | Intrinsic::CttzNonZero + | Intrinsic::DiscriminantValue + | Intrinsic::ExactDiv + | Intrinsic::Exp2F32 + | Intrinsic::Exp2F64 + | Intrinsic::ExpF32 + | Intrinsic::ExpF64 + | Intrinsic::FabsF32 + | Intrinsic::FabsF64 + | Intrinsic::FaddFast + | Intrinsic::FdivFast + | Intrinsic::FloorF32 + | Intrinsic::FloorF64 + | Intrinsic::FmafF32 + | Intrinsic::FmafF64 + | Intrinsic::FmulFast + | Intrinsic::Forget + | Intrinsic::FsubFast + | Intrinsic::IsValStaticallyKnown + | Intrinsic::Likely + | Intrinsic::Log10F32 + | Intrinsic::Log10F64 + | Intrinsic::Log2F32 + | Intrinsic::Log2F64 + | Intrinsic::LogF32 + | Intrinsic::LogF64 + | Intrinsic::MaxNumF32 + | Intrinsic::MaxNumF64 + | Intrinsic::MinAlignOf + | Intrinsic::MinAlignOfVal + | Intrinsic::MinNumF32 + | Intrinsic::MinNumF64 + | Intrinsic::MulWithOverflow + | Intrinsic::NearbyIntF32 + | Intrinsic::NearbyIntF64 + | Intrinsic::NeedsDrop + | Intrinsic::PowF32 + | 
Intrinsic::PowF64 + | Intrinsic::PowIF32 + | Intrinsic::PowIF64 + | Intrinsic::PrefAlignOf + | Intrinsic::RawEq + | Intrinsic::RintF32 + | Intrinsic::RintF64 + | Intrinsic::RotateLeft + | Intrinsic::RotateRight + | Intrinsic::RoundF32 + | Intrinsic::RoundF64 + | Intrinsic::SaturatingAdd + | Intrinsic::SaturatingSub + | Intrinsic::SinF32 + | Intrinsic::SinF64 + | Intrinsic::SqrtF32 + | Intrinsic::SqrtF64 + | Intrinsic::SubWithOverflow + | Intrinsic::TruncF32 + | Intrinsic::TruncF64 + | Intrinsic::TypeId + | Intrinsic::TypeName + | Intrinsic::UncheckedDiv + | Intrinsic::UncheckedRem + | Intrinsic::Unlikely + | Intrinsic::VtableSize + | Intrinsic::VtableAlign + | Intrinsic::WrappingAdd + | Intrinsic::WrappingMul + | Intrinsic::WrappingSub => { /* Intrinsics that do not interact with memory initialization. */ true } - "ptr_guaranteed_cmp" | "ptr_offset_from" | "ptr_offset_from_unsigned" | "size_of_val" => { + Intrinsic::PtrGuaranteedCmp + | Intrinsic::PtrOffsetFrom + | Intrinsic::PtrOffsetFromUnsigned + | Intrinsic::SizeOfVal => { /* AFAICS from the documentation, none of those require the pointer arguments to be actually initialized. 
*/ true } - name if name.starts_with("simd") => { + Intrinsic::SimdAdd + | Intrinsic::SimdAnd + | Intrinsic::SimdDiv + | Intrinsic::SimdRem + | Intrinsic::SimdEq + | Intrinsic::SimdExtract + | Intrinsic::SimdGe + | Intrinsic::SimdGt + | Intrinsic::SimdInsert + | Intrinsic::SimdLe + | Intrinsic::SimdLt + | Intrinsic::SimdMul + | Intrinsic::SimdNe + | Intrinsic::SimdOr + | Intrinsic::SimdShl + | Intrinsic::SimdShr + | Intrinsic::SimdShuffle(_) + | Intrinsic::SimdSub + | Intrinsic::SimdXor => { /* SIMD operations */ true } - name if name.starts_with("atomic_fence") - || name.starts_with("atomic_singlethreadfence") => - { + Intrinsic::AtomicFence(_) | Intrinsic::AtomicSingleThreadFence(_) => { /* Atomic fences */ true } - "copy_nonoverlapping" => unreachable!( - "Expected `core::intrinsics::unreachable` to be handled by `StatementKind::CopyNonOverlapping`" - ), - "offset" => unreachable!( - "Expected `core::intrinsics::unreachable` to be handled by `BinOp::OffSet`" - ), - "unreachable" => unreachable!( - "Expected `std::intrinsics::unreachable` to be handled by `TerminatorKind::Unreachable`" - ), - "transmute" | "transmute_copy" | "unchecked_add" | "unchecked_mul" | "unchecked_shl" - | "size_of" | "unchecked_shr" | "unchecked_sub" => { - unreachable!("Expected intrinsic to be lowered before codegen") - } - "catch_unwind" => { - unimplemented!("") - } - "retag_box_to_raw" => { - unreachable!("This was removed in the latest Rust version.") - } _ => { /* Everything else */ false diff --git a/kani-compiler/src/main.rs b/kani-compiler/src/main.rs index 7f1fb144a09b..e47483fb4fa5 100644 --- a/kani-compiler/src/main.rs +++ b/kani-compiler/src/main.rs @@ -39,6 +39,7 @@ extern crate tempfile; mod args; #[cfg(feature = "cprover")] mod codegen_cprover_gotoc; +mod intrinsics; mod kani_compiler; mod kani_middle; mod kani_queries; From a55834b8715a0ef1a9b51c7d15f0e72d28b028b2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: 
Mon, 12 Aug 2024 17:44:16 +0000 Subject: [PATCH 4/7] Bump tests/perf/s2n-quic from `445f73b` to `ab9723a` (#3434) Bumps [tests/perf/s2n-quic](https://github.com/aws/s2n-quic) from `445f73b` to `ab9723a`.
Commits

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- tests/perf/s2n-quic | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/perf/s2n-quic b/tests/perf/s2n-quic index 445f73b27eae..ab9723a772f0 160000 --- a/tests/perf/s2n-quic +++ b/tests/perf/s2n-quic @@ -1 +1 @@ -Subproject commit 445f73b27eae529bb895a7678968e4c0c215ef8a +Subproject commit ab9723a772f03a9793c9863e73c9a48fab3c5235 From 952e75389251532259b266d9b4c27756f08b222d Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 12 Aug 2024 14:12:09 -0400 Subject: [PATCH 5/7] Automatic cargo update to 2024-08-12 (#3433) Dependency upgrade resulting from `cargo update`. Co-authored-by: tautschnig <1144736+tautschnig@users.noreply.github.com> --- Cargo.lock | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 12c28c49c1bf..1e8e743d5e56 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -146,9 +146,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "clap" -version = "4.5.13" +version = "4.5.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fbb260a053428790f3de475e304ff84cdbc4face759ea7a3e64c1edd938a7fc" +checksum = "11d8838454fda655dafd3accb2b6e2bea645b9e4078abe84a22ceb947235c5cc" dependencies = [ "clap_builder", "clap_derive", @@ -156,9 +156,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.13" +version = "4.5.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64b17d7ea74e9f833c7dbf2cbe4fb12ff26783eda4782a8975b72f895c9b4d99" +checksum = "216aec2b177652e3846684cbfe25c9964d18ec45234f0f5da5157b207ed1aab6" dependencies = [ "anstream", "anstyle", @@ -175,7 +175,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ 
-510,7 +510,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -950,29 +950,29 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.204" +version = "1.0.206" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc76f558e0cbb2a839d37354c575f1dc3fdc6546b5be373ba43d95f231bf7c12" +checksum = "5b3e4cd94123dd520a128bcd11e34d9e9e423e7e3e50425cb1b4b1e3549d0284" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.204" +version = "1.0.206" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222" +checksum = "fabfb6138d2383ea8208cf98ccf69cdfb1aff4088460681d84189aa259762f97" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] name = "serde_json" -version = "1.0.122" +version = "1.0.124" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "784b6203951c57ff748476b126ccb5e8e2959a5c19e5c617ab1956be3dbc68da" +checksum = "66ad62847a56b3dba58cc891acd13884b9c61138d330c0d7b6181713d4fce38d" dependencies = [ "itoa", "memchr", @@ -1072,7 +1072,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -1087,9 +1087,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.72" +version = "2.0.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc4b9b9bf2add8093d3f2c0204471e951b2285580335de42f9d2534f3ae7a8af" +checksum = "1fceb41e3d546d0bd83421d3409b1460cc7444cd389341a4c880fe7a042cb3d7" dependencies = [ "proc-macro2", "quote", @@ -1126,7 +1126,7 @@ checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -1192,7 +1192,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ 
"proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -1467,5 +1467,5 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] From e2a209bcb847803e8a5abb20ef7002b109ed0bf6 Mon Sep 17 00:00:00 2001 From: Michael Tautschnig Date: Mon, 12 Aug 2024 23:27:29 +0200 Subject: [PATCH 6/7] Actually apply CBMC patch (#3436) The patch introduced in #3431 not only needs to be created, but also needs to be applied. By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 and MIT licenses. --- scripts/setup/al2/install_cbmc.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/setup/al2/install_cbmc.sh b/scripts/setup/al2/install_cbmc.sh index de4b5215e083..3bac22ace3db 100755 --- a/scripts/setup/al2/install_cbmc.sh +++ b/scripts/setup/al2/install_cbmc.sh @@ -93,6 +93,7 @@ cat > varargs.patch << "EOF" } EOF +patch -p1 < varargs.patch cmake3 -S . -Bbuild -DWITH_JBMC=OFF -Dsat_impl="minisat2;cadical" \ -DCMAKE_C_COMPILER=gcc10-cc -DCMAKE_CXX_COMPILER=gcc10-c++ \ From f27a5ed1426c705ec6faa181f080579b90983436 Mon Sep 17 00:00:00 2001 From: "Celina G. Val" Date: Thu, 15 Aug 2024 07:18:06 -0700 Subject: [PATCH 7/7] Add test related to issue 3432 (#3439) In some cases, Kani would report a spurious counter example for cases where a match arm contained more than one pattern. This was fixed by changing how we handle storage lifecycle in #2995. This PR is only adding the related test to the regression. Resolves #3432 By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 and MIT licenses. 
--- tests/kani/Match/match_pattern.rs | 57 +++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) create mode 100644 tests/kani/Match/match_pattern.rs diff --git a/tests/kani/Match/match_pattern.rs b/tests/kani/Match/match_pattern.rs new file mode 100644 index 000000000000..1b8689aee881 --- /dev/null +++ b/tests/kani/Match/match_pattern.rs @@ -0,0 +1,57 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT +//! Test that Kani can correctly handle match patterns joined with the `|` operator. +//! It contains two equivalent methods that only differ by grouping match patterns. +//! Kani used to only be able to verify one as reported in: +//! + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[cfg_attr(kani, derive(kani::Arbitrary))] +pub enum AbstractInt { + Bottom = 0, + Zero = 1, + Top = 2, +} + +impl AbstractInt { + /// Code with exhaustive match expression where each arm contains one pattern. + pub fn merge(self, other: Self) -> Self { + use AbstractInt::*; + match (self, other) { + (Bottom, x) => x, + (x, Bottom) => x, + (Zero, Zero) => Zero, + (Top, _) => Top, + (_, Top) => Top, + } + } + + /// Code with exhaustive match expression where an arm may contain multiple patterns. + pub fn merge_joined(self, other: Self) -> Self { + use AbstractInt::*; + match (self, other) { + (Bottom, x) | (x, Bottom) => x, + (Zero, Zero) => Zero, + (Top, _) | (_, Top) => Top, + } + } +} + +#[cfg(kani)] +mod test { + use super::*; + + #[kani::proof] + fn merge_with_bottom() { + let x: AbstractInt = kani::any(); + assert!(x.merge(AbstractInt::Bottom) == x); + assert!(AbstractInt::Bottom.merge(x) == x) + } + + #[kani::proof] + fn check_equivalence() { + let x: AbstractInt = kani::any(); + let y: AbstractInt = kani::any(); + assert_eq!(x.merge(y), x.merge_joined(y)); + } +}