diff --git a/.cargo/config.toml b/.cargo/config.toml index 6d501b0c3154..9bb4e0808345 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -4,7 +4,7 @@ # Command aliases [alias] # Build kani with development configuration. -build-dev = "run -p build-kani -- build-dev" +build-dev = "run --target-dir target/tools -p build-kani -- build-dev" # Build kani release bundle. bundle = "run -p build-kani -- bundle" diff --git a/.github/actions/setup/action.yml b/.github/actions/setup/action.yml index be51fa56865c..97189d72d5f1 100644 --- a/.github/actions/setup/action.yml +++ b/.github/actions/setup/action.yml @@ -5,22 +5,22 @@ inputs: os: description: In which Operating System is this running required: true - install_cbmc: - description: Whether to install CBMC + kani_dir: + description: Path to Kani's root directory required: false - default: 'true' + default: '.' runs: using: composite steps: - name: Install dependencies - run: ./scripts/setup/${{ inputs.os }}/install_deps.sh + run: cd ${{ inputs.kani_dir }} && ./scripts/setup/${{ inputs.os }}/install_deps.sh shell: bash - name: Install Rust toolchain - run: ./scripts/setup/install_rustup.sh + run: cd ${{ inputs.kani_dir }} && ./scripts/setup/install_rustup.sh shell: bash - name: Update submodules run: | - git submodule update --init --depth 1 + cd ${{ inputs.kani_dir }} && git submodule update --init --depth 1 shell: bash diff --git a/.github/workflows/cbmc-latest.yml b/.github/workflows/cbmc-latest.yml index 1c81834748d2..9191e8250550 100644 --- a/.github/workflows/cbmc-latest.yml +++ b/.github/workflows/cbmc-latest.yml @@ -21,40 +21,21 @@ jobs: matrix: os: [macos-11, ubuntu-18.04, ubuntu-20.04, ubuntu-22.04] steps: - - name: Checkout CBMC under "cbmc" + - name: Checkout Kani under "kani" uses: actions/checkout@v3 with: - repository: diffblue/cbmc - path: cbmc - - - name: Build CBMC - run: | - cd cbmc - cmake -S . 
-Bbuild -DWITH_JBMC=OFF - cmake --build build -- -j 4 - sudo cmake --build build --target install - # Cleanup cbmc directory - cd .. - rm -rf cbmc - - - name: Checkout Kani - uses: actions/checkout@v3 + path: kani - name: Setup Kani Dependencies - uses: ./.github/actions/setup + uses: ./kani/.github/actions/setup with: os: ${{ matrix.os }} - install_cbmc: 'false' + kani_dir: 'kani' - name: Build Kani + working-directory: ./kani run: cargo build-dev - - name: Execute Kani regressions - run: ./scripts/kani-regression.sh - - perf: - runs-on: ubuntu-20.04 - steps: - name: Checkout CBMC under "cbmc" uses: actions/checkout@v3 with: @@ -62,26 +43,49 @@ jobs: path: cbmc - name: Build CBMC + working-directory: ./cbmc run: | - cd cbmc - cmake -S . -Bbuild -DWITH_JBMC=OFF - cmake --build build -- -j 4 - sudo cmake --build build --target install - # Cleanup cbmc directory - cd .. - rm -rf cbmc + make -C src minisat2-download cadical-download + make -C src -j4 MINISAT2=../../minisat-2.2.1 CADICAL=../../cadical + # Prepend the bin directory to $PATH + echo "${GITHUB_WORKSPACE}/cbmc/build/bin" >> $GITHUB_PATH - - name: Checkout Kani + - name: Execute Kani regressions + working-directory: ./kani + run: ./scripts/kani-regression.sh + + perf: + runs-on: ubuntu-20.04 + steps: + - name: Checkout Kani under "kani" uses: actions/checkout@v3 + with: + path: kani - name: Setup Kani Dependencies - uses: ./.github/actions/setup + uses: ./kani/.github/actions/setup with: os: ubuntu-20.04 - install_cbmc: 'false' + kani_dir: 'kani' - name: Build Kani using release mode + working-directory: ./kani run: cargo build-dev -- --release + - name: Checkout CBMC under "cbmc" + uses: actions/checkout@v3 + with: + repository: diffblue/cbmc + path: cbmc + + - name: Build CBMC + working-directory: ./cbmc + run: | + cmake -S . 
-Bbuild -DWITH_JBMC=OFF + cmake --build build -- -j 4 + # Prepend the bin directory to $PATH + echo "${GITHUB_WORKSPACE}/cbmc/build/bin" >> $GITHUB_PATH + - name: Execute Kani performance tests + working-directory: ./kani run: ./scripts/kani-perf.sh diff --git a/.github/workflows/kani.yml b/.github/workflows/kani.yml index 6e145939b74b..feecccf84ebc 100644 --- a/.github/workflows/kani.yml +++ b/.github/workflows/kani.yml @@ -33,6 +33,25 @@ jobs: - name: Execute Kani regression run: ./scripts/kani-regression.sh + write-json-symtab-regression: + runs-on: ubuntu-20.04 + steps: + - name: Checkout Kani + uses: actions/checkout@v3 + + - name: Setup Kani Dependencies + uses: ./.github/actions/setup + with: + os: ubuntu-20.04 + + - name: Build Kani + run: cargo build-dev + + - name: Execute Kani regression + env: + KANI_ENABLE_WRITE_JSON_SYMTAB: 1 + run: ./scripts/kani-regression.sh + experimental-features-regression: runs-on: ubuntu-20.04 env: diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 000000000000..ed16c5f372e5 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,26 @@ +# Changelog + +This file contains notable changes (e.g. breaking changes, major changes, etc.) in Kani releases. + +This file was introduced starting Kani 0.23.0, so it only contains changes from version 0.23.0 onwards. + +## [0.23.0] + +### Breaking Changes + +- Remove the second parameter in the `kani::any_where` function by @zhassan-aws in #2257 +We removed the second parameter in the `kani::any_where` function (`_msg: &'static str`) to make the function more ergonomic to use. +We suggest moving the explanation for why the assumption is introduced into a comment. 
+For example, the following code: +```rust + let len: usize = kani::any_where(|x| *x < 5, "Restrict the length to a value less than 5"); +``` +should be replaced by: +```rust + // Restrict the length to a value less than 5 + let len: usize = kani::any_where(|x| *x < 5); +``` + +### Major Changes + +- Enable the build cache to avoid recompiling crates that haven't changed, and introduce `--force-build` option to compile all crates from scratch by @celinval in #2232. diff --git a/Cargo.lock b/Cargo.lock index d19981bfe96b..b959a1e46c4f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -25,9 +25,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf6ccdb167abbf410dcb915cabd428929d7f6a04980b54a11f26a39f1c7f7107" +checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" dependencies = [ "cfg-if", "once_cell", @@ -45,9 +45,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.68" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cb2f989d18dd141ab8ae82f64d1a8cdd37e0840f73a406896cf5e99502fab61" +checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800" [[package]] name = "ar" @@ -93,7 +93,7 @@ dependencies = [ [[package]] name = "build-kani" -version = "0.19.0" +version = "0.23.0" dependencies = [ "anyhow", "cargo_metadata", @@ -103,9 +103,9 @@ dependencies = [ [[package]] name = "camino" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c77df041dc383319cc661b428b6961a005db4d6808d5e12536931b1ca9556055" +checksum = "6031a462f977dd38968b6f23378356512feeace69cef817e1a4475108093cec3" dependencies = [ "serde", ] @@ -121,9 +121,9 @@ dependencies = [ [[package]] name = "cargo_metadata" -version = "0.15.2" +version = "0.15.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "982a0cf6a99c350d7246035613882e376d58cebe571785abc5da4f648d53ac0a" +checksum = "08a1ec454bc3eead8719cb56e15dbbfecdbc14e4b3a3ae4936cc6e31f5fc0d07" dependencies = [ "camino", "cargo-platform", @@ -135,9 +135,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.78" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a20104e2335ce8a659d6dd92a51a767a0c062599c73b343fd152cb401e828c3d" +checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" [[package]] name = "cfg-if" @@ -147,9 +147,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "clap" -version = "4.0.32" +version = "4.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7db700bc935f9e43e88d00b0850dae18a63773cfbec6d8e070fccf7fef89a39" +checksum = "c3d7ae14b20b94cb02149ed21a86c423859cbe18dc7ed69845cace50e52b40a5" dependencies = [ "bitflags", "clap_derive", @@ -162,9 +162,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.0.21" +version = "4.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0177313f9f02afc995627906bbd8967e2be069f5261954222dac78290c2b9014" +checksum = "44bec8e5c9d09e439c4335b1af0abaab56dcf3b94999a936e1bb47b9134288f0" dependencies = [ "heck", "proc-macro-error", @@ -175,9 +175,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.3.0" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d4198f73e42b4936b35b5bb248d81d2b595ecb170da0bac7655c54eedfa8da8" +checksum = "350b9cf31731f9957399229e9b2adc51eeabdfbe9d71d9a0552275fd12710d09" dependencies = [ "os_str_bytes", ] @@ -213,23 +213,24 @@ dependencies = [ [[package]] name = "console" -version = "0.15.4" +version = "0.15.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9b6515d269224923b26b5febea2ed42b2d5f2ce37284a4dd670fedd6cb8347a" +checksum = 
"c3d79fbe8970a77e3e34151cc13d3b3e248aa0faaecb9f6091fa07ebefe5ad60" dependencies = [ "encode_unicode", "lazy_static", "libc", "unicode-width", - "windows-sys", + "windows-sys 0.42.0", ] [[package]] name = "cprover_bindings" -version = "0.19.0" +version = "0.23.0" dependencies = [ "lazy_static", "linear-map", + "memuse", "num", "num-traits", "serde", @@ -249,9 +250,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.6" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" +checksum = "cf2b3e8478797446514c91ef04bafcb59faba183e621ad488df88983cc14128c" dependencies = [ "cfg-if", "crossbeam-utils", @@ -259,9 +260,9 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc" +checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" dependencies = [ "cfg-if", "crossbeam-epoch", @@ -270,9 +271,9 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.13" +version = "0.9.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01a9af1f4c2ef74bb8aa1f7e19706bc72d03598c8a570bb5de72243c7a9d9d5a" +checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695" dependencies = [ "autocfg", "cfg-if", @@ -283,9 +284,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.14" +version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f" +checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" dependencies = [ "cfg-if", ] @@ -317,9 +318,9 @@ dependencies = [ [[package]] name = "either" -version = "1.8.0" +version = "1.8.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" +checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" [[package]] name = "encode_unicode" @@ -391,18 +392,18 @@ checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" [[package]] name = "hashbrown" -version = "0.13.1" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33ff8ae62cd3a9102e5637afc8452c55acf3844001bd5374e0b0bd7b6616c038" +checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" dependencies = [ - "ahash 0.8.2", + "ahash 0.8.3", ] [[package]] name = "heck" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "hermit-abi" @@ -422,6 +423,12 @@ dependencies = [ "libc", ] +[[package]] +name = "hermit-abi" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286" + [[package]] name = "home" version = "0.5.4" @@ -443,49 +450,58 @@ dependencies = [ [[package]] name = "io-lifetimes" -version = "1.0.3" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46112a93252b123d31a119a8d1a1ac19deac4fac6e0e8b0df58f0d4e5870e63c" +checksum = "1abeb7a0dd0f8181267ff8adc397075586500b81b28a73e8a0208b00fc170fb3" dependencies = [ "libc", - "windows-sys", + "windows-sys 0.45.0", ] [[package]] name = "is-terminal" -version = "0.4.2" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28dfb6c8100ccc63462345b67d1bbc3679177c75ee4bf59bf29c8b1d110b8189" +checksum = 
"21b6b32576413a8e69b90e952e4a026476040d81017b80445deda5f2d3921857" dependencies = [ - "hermit-abi 0.2.6", + "hermit-abi 0.3.1", "io-lifetimes", "rustix", - "windows-sys", + "windows-sys 0.45.0", +] + +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", ] [[package]] name = "itoa" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440" +checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" [[package]] name = "kani" -version = "0.19.0" +version = "0.23.0" dependencies = [ "kani_macros", ] [[package]] name = "kani-compiler" -version = "0.19.0" +version = "0.23.0" dependencies = [ "ar", "atty", - "bitflags", "clap", "cprover_bindings", "home", + "itertools", "kani_metadata", "kani_queries", "lazy_static", @@ -506,7 +522,7 @@ dependencies = [ [[package]] name = "kani-driver" -version = "0.19.0" +version = "0.23.0" dependencies = [ "anyhow", "atty", @@ -518,20 +534,24 @@ dependencies = [ "kani_metadata", "once_cell", "pathdiff", + "rand", "rayon", "regex", "rustc-demangle", "serde", "serde_json", + "strum", + "strum_macros", "toml", "tracing", "tracing-subscriber", "tracing-tree", + "which", ] [[package]] name = "kani-verifier" -version = "0.19.0" +version = "0.23.0" dependencies = [ "anyhow", "home", @@ -540,7 +560,7 @@ dependencies = [ [[package]] name = "kani_macros" -version = "0.19.0" +version = "0.23.0" dependencies = [ "proc-macro-error", "proc-macro2", @@ -550,15 +570,17 @@ dependencies = [ [[package]] name = "kani_metadata" -version = "0.19.0" +version = "0.23.0" dependencies = [ "cprover_bindings", "serde", + "strum", + "strum_macros", ] [[package]] name = "kani_queries" -version = "0.19.0" +version = "0.23.0" dependencies = [ "strum", 
"strum_macros", @@ -629,23 +651,29 @@ checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "memoffset" -version = "0.7.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4" +checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1" dependencies = [ "autocfg", ] +[[package]] +name = "memuse" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2145869435ace5ea6ea3d35f59be559317ec9a0d04e1812d5f185a87b6d36f1a" + [[package]] name = "mio" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de" +checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" dependencies = [ "libc", "log", "wasi", - "windows-sys", + "windows-sys 0.45.0", ] [[package]] @@ -685,9 +713,9 @@ dependencies = [ [[package]] name = "num-complex" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ae39348c8bc5fbd7f40c727a9925f03517afd2ab27d46702108b6a7e5414c19" +checksum = "02e0d21255c828d6f128a1e41534206671e8c3ea0c62f32291e808dc82cff17d" dependencies = [ "num-traits", ] @@ -746,27 +774,27 @@ dependencies = [ [[package]] name = "object" -version = "0.30.1" +version = "0.30.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d864c91689fdc196779b98dba0aceac6118594c2df6ee5d943eb6a8df4d107a" +checksum = "ea86265d3d3dcb6a27fc51bd29a4bf387fae9d2986b823079d4986af253eb439" dependencies = [ "crc32fast", - "hashbrown 0.13.1", + "hashbrown 0.13.2", "indexmap", "memchr", ] [[package]] name = "once_cell" -version = "1.17.0" +version = "1.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6f61fba1741ea2b3d6a1e3178721804bb716a68a6aeba1149b5d52e3d464ea66" +checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" [[package]] name = "os_info" -version = "3.5.1" +version = "3.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4750134fb6a5d49afc80777394ad5d95b04bc12068c6abb92fae8f43817270f" +checksum = "5c424bc68d15e0778838ac013b5b3449544d8133633d8016319e7e05a820b8c0" dependencies = [ "log", "winapi", @@ -796,15 +824,15 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.5" +version = "0.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ff9f3fef3968a3ec5945535ed654cb38ff72d7495a25619e2247fb15a2ed9ba" +checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" dependencies = [ "cfg-if", "libc", "redox_syscall", "smallvec", - "windows-sys", + "windows-sys 0.45.0", ] [[package]] @@ -819,6 +847,12 @@ version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" +[[package]] +name = "ppv-lite86" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" + [[package]] name = "proc-macro-error" version = "1.0.4" @@ -845,9 +879,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.49" +version = "1.0.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57a8eca9f9c4ffde41714334dee777596264c7825420f521abc92b5b5deb63a5" +checksum = "5d727cae5b39d21da60fa540906919ad737832fe0b1c165da3a34d6548c849d6" dependencies = [ "unicode-ident", ] @@ -872,11 +906,41 @@ dependencies = [ "proc-macro2", ] +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] + [[package]] name = "rayon" -version = "1.6.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db3a213adf02b3bcfd2d3846bb41cb22857d131789e01df434fb7e7bc0759b7" +checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b" dependencies = [ "either", "rayon-core", @@ -884,9 +948,9 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.10.1" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cac410af5d00ab6884528b4ab69d1e8e146e8d471201800fa1b4524126de6ad3" +checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" dependencies = [ "crossbeam-channel", "crossbeam-deque", @@ -944,29 +1008,29 @@ dependencies = [ [[package]] name = "rustix" -version = "0.36.6" +version = "0.36.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4feacf7db682c6c329c4ede12649cd36ecab0f3be5b7d74e6a20304725db4549" +checksum = "fd5c6ff11fecd55b40746d1995a02f2eb375bf8c00d192d521ee09f42bef37bc" dependencies = [ "bitflags", "errno", "io-lifetimes", "libc", "linux-raw-sys", - "windows-sys", + "windows-sys 0.45.0", ] [[package]] name = "rustversion" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5583e89e108996506031660fe09baa5011b9dd0341b89029313006d1fb508d70" +checksum = "4f3208ce4d8448b3f3e7d168a73f5e0c43a61e32930de3bceeccedb388b6bf06" [[package]] name = "ryu" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b4b9743ed687d4b4bcedf9ff5eaa7398495ae14e61cba0a295704edbc7decde" +checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" [[package]] name = "same-file" @@ -1014,15 +1078,24 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.91" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877c235533714907a8c2464236f5c4b2a17262ef1bd71f38f35ea592c8da6883" +checksum = "1c533a59c9d8a93a09c6ab31f0fd5e5f4dd1b8fc9434804029839884765d04ea" dependencies = [ "itoa", "ryu", "serde", ] +[[package]] +name = "serde_spanned" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0efd8caf556a6cebd3b285caf480045fcc1ac04f6bd786b09a6f11af30c4fcf4" +dependencies = [ + "serde", +] + [[package]] name = "serde_test" version = "1.0.152" @@ -1034,9 +1107,9 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.9.16" +version = "0.9.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92b5b431e8907b50339b51223b97d102db8d987ced36f6e4d03621db9316c834" +checksum = "f82e6c8c047aa50a7328632d067bcae6ef38772a79e28daf32f735e0e4f3dd10" dependencies = [ "indexmap", "itoa", @@ -1062,9 +1135,9 @@ checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" [[package]] name = "signal-hook" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a253b5e89e2698464fc26b545c9edceb338e18a89effeeecfea192c3025be29d" +checksum = "732768f1176d21d09e076c23a93123d40bba92d50c4058da34d45c8de8e682b9" dependencies = [ "libc", "signal-hook-registry", @@ -1083,9 +1156,9 @@ dependencies = 
[ [[package]] name = "signal-hook-registry" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" +checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" dependencies = [ "libc", ] @@ -1098,7 +1171,7 @@ checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" [[package]] name = "std" -version = "0.19.0" +version = "0.23.0" dependencies = [ "kani", ] @@ -1141,9 +1214,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.107" +version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f4064b5b16e03ae50984a5a8ed5d4f8803e6bc1fd170a3cda91a1be4b18e3f5" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2", "quote", @@ -1152,27 +1225,27 @@ dependencies = [ [[package]] name = "termcolor" -version = "1.1.3" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" +checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" dependencies = [ "winapi-util", ] [[package]] name = "thiserror" -version = "1.0.38" +version = "1.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a9cd18aa97d5c45c6603caea1da6628790b37f7a34b6ca89522331c5180fed0" +checksum = "a5ab016db510546d856297882807df8da66a16fb8c4101cb8b30054b0d5b2d9c" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.38" +version = "1.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fb327af4685e4d03fa8cbcf1716380da910eeb2bb8be417e7f9fd3fb164f36f" +checksum = "5420d42e90af0c38c3290abcca25b9b3bdf379fc9f55c528f53a269d9c9a267e" dependencies = [ "proc-macro2", "quote", @@ -1181,20 +1254,46 @@ dependencies = [ [[package]] 
name = "thread_local" -version = "1.1.4" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" +checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" dependencies = [ + "cfg-if", "once_cell", ] [[package]] name = "toml" -version = "0.5.10" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1333c76748e868a4d9d1017b5ab53171dfd095f70c712fdb4653a406547f598f" +checksum = "f7afcae9e3f0fe2c370fd4657108972cbb2fa9db1b9f84849cefd80741b01cb6" dependencies = [ "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + +[[package]] +name = "toml_datetime" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ab8ed2edee10b50132aed5f331333428b011c99402b5a534154ed15746f9622" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.19.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a1eb0622d28f4b9c90adc4ea4b2b46b47663fde9ac5fafcb14a1369d5508825" +dependencies = [ + "indexmap", + "serde", + "serde_spanned", + "toml_datetime", + "winnow", ] [[package]] @@ -1297,9 +1396,9 @@ dependencies = [ [[package]] name = "unicode-ident" -version = "1.0.6" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc" +checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" [[package]] name = "unicode-width" @@ -1309,9 +1408,9 @@ checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" [[package]] name = "unsafe-libyaml" -version = "0.2.5" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc7ed8ba44ca06be78ea1ad2c3682a43349126c8818054231ee6f4748012aed2" +checksum = 
"ad2024452afd3874bf539695e04af6732ba06517424dbf958fdb16a01f3bef6c" [[package]] name = "valuable" @@ -1353,9 +1452,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "which" -version = "4.3.0" +version = "4.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c831fbbee9e129a8cf93e7747a82da9d95ba8e16621cae60ec2cdc849bacb7b" +checksum = "2441c784c52b289a054b7201fc93253e288f094e2f4be9058343127c4226a269" dependencies = [ "either", "libc", @@ -1408,44 +1507,77 @@ dependencies = [ "windows_x86_64_msvc", ] +[[package]] +name = "windows-sys" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.42.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e2522491fbfcd58cc84d47aeb2958948c4b8982e9a2d8a2a35bbaed431390e7" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + [[package]] name = "windows_aarch64_gnullvm" -version = "0.42.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d2aa71f6f0cbe00ae5167d90ef3cfe66527d6f613ca78ac8024c3ccab9a19e" +checksum = "8c9864e83243fdec7fc9c5444389dcbbfd258f745e7853198f365e3c4968a608" [[package]] name = "windows_aarch64_msvc" -version = "0.42.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd0f252f5a35cac83d6311b2e795981f5ee6e67eb1f9a7f64eb4500fbc4dcdb4" +checksum = "4c8b1b673ffc16c47a9ff48570a9d85e25d265735c503681332589af6253c6c7" [[package]] name = "windows_i686_gnu" -version = "0.42.0" +version = "0.42.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbeae19f6716841636c28d695375df17562ca208b2b7d0dc47635a50ae6c5de7" +checksum = "de3887528ad530ba7bdbb1faa8275ec7a1155a45ffa57c37993960277145d640" [[package]] name = "windows_i686_msvc" -version = "0.42.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84c12f65daa39dd2babe6e442988fc329d6243fdce47d7d2d155b8d874862246" +checksum = "bf4d1122317eddd6ff351aa852118a2418ad4214e6613a50e0191f7004372605" [[package]] name = "windows_x86_64_gnu" -version = "0.42.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf7b1b21b5362cbc318f686150e5bcea75ecedc74dd157d874d754a2ca44b0ed" +checksum = "c1040f221285e17ebccbc2591ffdc2d44ee1f9186324dd3e84e99ac68d699c45" [[package]] name = "windows_x86_64_gnullvm" -version = "0.42.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09d525d2ba30eeb3297665bd434a54297e4170c7f1a44cad4ef58095b4cd2028" +checksum = "628bfdf232daa22b0d64fdb62b09fcc36bb01f05a3939e20ab73aaf9470d0463" [[package]] name = "windows_x86_64_msvc" -version = "0.42.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f40009d85759725a34da6d89a94e63d7bdc50a862acf0dbc7c8e488f1edcb6f5" +checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd" + +[[package]] +name = "winnow" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee7b2c67f962bf5042bfd8b6a916178df33a26eec343ae064cb8e069f638fa6f" +dependencies = [ + "memchr", +] diff --git a/Cargo.toml b/Cargo.toml index 477aa79faa71..6c54db93faa1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,7 @@ [package] name = "kani-verifier" -version = "0.19.0" +version = "0.23.0" edition = "2021" description = "A bit-precise model checker for Rust." 
readme = "README.md" @@ -14,7 +14,7 @@ repository = "https://github.com/model-checking/kani" documentation = "https://model-checking.github.io/kani/" homepage = "https://github.com/model-checking/kani" # N.B. Cargo.* is included automatically: -include = ["/src", "/build.rs", "/rust-toolchain.toml", "/LICENSE-*", "/README.md"] +include = ["/src", "/build.rs", "/rust-toolchain.toml", "/LICENSE-*", "/README.md", "/CHANGELOG.md"] [dependencies] anyhow = "1" @@ -66,4 +66,5 @@ exclude = [ "tests/cargo-ui", "tests/slow", "tests/assess-scan-test-scaffold", + "tests/script-based-pre", ] diff --git a/cprover_bindings/Cargo.toml b/cprover_bindings/Cargo.toml index 9688d9d1f5d9..618e96be367b 100644 --- a/cprover_bindings/Cargo.toml +++ b/cprover_bindings/Cargo.toml @@ -3,7 +3,7 @@ [package] name = "cprover_bindings" -version = "0.19.0" +version = "0.23.0" edition = "2021" license = "MIT OR Apache-2.0" publish = false @@ -23,3 +23,4 @@ linear-map = {version = "1.2", features = ["serde_impl"]} [dev-dependencies] serde_test = "1" +memuse = "0.2.1" diff --git a/cprover_bindings/src/irep/goto_binary_serde.rs b/cprover_bindings/src/irep/goto_binary_serde.rs new file mode 100644 index 000000000000..3be33cc2a843 --- /dev/null +++ b/cprover_bindings/src/irep/goto_binary_serde.rs @@ -0,0 +1,1478 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT +//! GOTO binary serializer. + +use crate::irep::{Irep, IrepId, Symbol, SymbolTable}; +use crate::{InternString, InternedString}; +use std::collections::HashMap; +use std::fs::File; +use std::hash::Hash; +use std::io::{self, BufReader}; +use std::io::{BufWriter, Bytes, Error, ErrorKind, Read, Write}; +use std::path::PathBuf; + +/// Writes a symbol table to a file in goto binary format in version 5. 
+/// +/// In CBMC, the serialization rules are defined in : +/// - src/goto-programs/write_goto_binary.h +/// - src/util/irep_serialization.h +/// - src/util/irep_hash_container.h +/// - src/util/irep_hash.h +pub fn write_goto_binary_file(filename: &PathBuf, source: &crate::goto_program::SymbolTable) { + let out_file = File::create(filename).unwrap(); + let mut writer = BufWriter::new(out_file); + let mut serializer = GotoBinarySerializer::new(&mut writer); + let irep_symbol_table = &source.to_irep(); + serializer.write_file(irep_symbol_table).unwrap(); +} + +/// Reads a symbol table from a file expected to be in goto binary format in version 5. +// +/// In CBMC, the deserialization rules are defined in : +/// - src/goto-programs/read_goto_binary.h +/// - src/util/irep_serialization.h +/// - src/util/irep_hash_container.h +/// - src/util/irep_hash.h +pub fn read_goto_binary_file(filename: &PathBuf) { + let file = File::open(filename).unwrap(); + let reader = BufReader::new(file); + let mut deserializer = GotoBinaryDeserializer::new(reader); + deserializer.read_file().unwrap(); +} + +/// # Design overview +/// +/// When saving a [SymbolTable] to a binary file, the [Irep] describing each +/// symbol's type, value and source location are structurally hashed and +/// uniquely numbered so that structurally identical [Irep] only get written +/// in full to the file the first time they are encountered and that ulterior +/// occurrences are referenced by their unique number instead. +/// The key concept at play is that of a numbering, ie a function that assigns +/// numbers to values of a given type. +/// +/// The [IrepNumbering] struct offers high-level methods to number +/// [InternedString], [IrepId] and [Irep] values: +/// - [InternedString] objects get mapped to [NumberedString] objects based on +/// the characters they contain. 
+/// - [IrepId] objects get mapped to [NumberedString] objects based on the +/// characters of their string representation, in the same number space +/// as [InternedString]. +/// - [Irep] objects get mapped to [NumberedIrep] based on: +/// + the unique numbers assigned to their [Irep::id] attribute, +/// + the unique numbers of [Irep] found in their [Irep::sub] attribute, +/// + the pairs of unique numbers assigned to the ([IrepId],[Irep]) pairs +/// found in their [Ipre::named_sub] attribute. +/// +/// In order to assign the same number to structurally identical [Irep] objects, +/// [IrepNumbering] essentially implements a cache where each [NumberedIrep] is +/// keyed under an [IrepKey] that describes its internal structure. +/// +/// An [IrepKey] is simply the vector of unique numbers describing the +/// `id`, `sub` and `named_sub` attributes of a [Irep]. +/// +/// A [NumberedIrep] is conceptually a pair made of the [IrepKey] itself and the +/// unique number assigned to that key. +/// +/// The cache implemented by [IrepNumbering] is bidirectional. It lets you +/// compute the [NumberedIrep] of an [Irep], and lets you fetch a numbered +/// [NumberedIrep] from its unique number. +/// +/// In practice: +/// - the forward directon from [IrepKey] to unique numbers is +/// implemented using a `HashMap` +/// - the inverse direction from unique numbers to [NumberedIrep] is implemented +/// using a `Vec` called the `index` that stores [NumberedIrep] +/// under their unique number. +/// +/// Earlier we said that an [NumberedIrep] is conceptually a pair formed of +/// an [IrepKey] and its unique number. It is represented using only +/// a pair formed of a `usize` representing the unique number, and a `usize` +/// representing the index at which the key can be found inside a single vector +/// of type `Vec` called `keys` where all [IrepKey] are concatenated when +/// they first get numbered. 
The inverse map of keys is represented this way +/// because the Rust hash map that implements the forward cache owns +/// its keys which would make it difficult to store keys references in inverse +/// cache, which would introduce circular dependencies and require `Rc` and +/// liftetimes annotations everywhere. +/// +/// Numberig an [Irep] consists in recursively traversing it and numbering its +/// contents in a bottom-up fashion, then assembling its [IrepKey] and querying +/// the cache to either return an existing [NumberedIrep] or creating a new one +/// (in case that key has never been seen before). +/// +/// The [GotoBinarySerializer] internally uses a [IrepNumbering] and a cache +/// of [NumberedIrep] and [NumberedString] it has already written to file. +/// +/// When given an [InternedString], [IrepId] or [Irep] to serialize, +/// the [GotoBinarySerializer] first numbers that object using its internal +/// [IrepNumbering] instance. Then it looks up that unique number in its cache +/// of already written objects. If the object was seen before, only the unique +/// number of that object is written to file. If the object was never seen +/// before, then the unique number of that object is written to file, followed +/// by the objects describing its contents (themselves only being written fully +/// if they have never been seen before, or only referenced if they have been +/// seen before, etc.) +/// +/// The [GotoBinaryDeserializer] also uses an [IrepNumbering] and a cache +/// of [NumberedIrep] and [NumberedString] it has already read from file. +/// Dually to the serializer, it will only attempt to decode the contents of an +/// object from the byte stream on the first occurrence. + +/// A numbered [InternedString]. The number is guaranteed to be in [0,N]. +/// Had to introduce this indirection because [InternedString] does not let you +/// access its unique id, so we have to build one ourselves. 
+#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)] +struct NumberedString { + number: usize, + string: InternedString, +} + +/// An [Irep] represented by the vector of unique numbers of its contents. +#[derive(PartialEq, Eq, Hash, Clone, Debug)] +struct IrepKey { + numbers: Vec, +} + +impl IrepKey { + /// Packs an [Irep]'s contents unique numbers into a new key object: + /// - `id` must be the unique number assigned to an [Irep]'s + /// [Irep::id] field. + /// - `sub` must be the vector of unique number assigned to an [Irep]'s + /// [Irep::sub] field. + /// - `named_sub` must be the vector of unique number assigned to an [Irep]'s + /// [Irep::named_sub] field. + /// + /// The `id`, `sub` and `named_sub` passed as arguments are packed as follows + /// in the key's `number` field: + /// ``` + /// id + /// sub.len() + /// sub[0] + /// ... + /// sub[sub.len()-1] + /// named_sub.len() + /// named_sub[0].0 + /// named_sub[0].1 + /// ... + /// named_sub[named_sub.len()-1].0 + /// named_sub[named_sub.len()-1].1 + /// ``` + fn new(id: usize, sub: &[usize], named_sub: &[(usize, usize)]) -> Self { + let size = sub.len() + 2 * named_sub.len() + 3; + let mut vec: Vec = Vec::with_capacity(size); + vec.push(id); + vec.push(sub.len()); + vec.extend_from_slice(sub); + vec.push(named_sub.len()); + for (k, v) in named_sub { + vec.push(*k); + vec.push(*v); + } + IrepKey { numbers: vec } + } +} + +/// Inverse cache of unique [NumberedIrep] objects. +struct IrepNumberingInv { + /// Maps [Irep] numbers to [NumberedIrep]s; + index: Vec, + + /// Stores the concactenation of all [IrepKey] seen by the [IrepNumbering] + /// object owning this inverse numbering. + keys: Vec, +} + +impl IrepNumberingInv { + fn new() -> Self { + IrepNumberingInv { index: Vec::new(), keys: Vec::new() } + } + + /// Adds a key to the mapping and returns the unique number assigned to that key. 
+ fn add_key(&mut self, key: &IrepKey) -> usize { + let number = self.index.len(); + self.index.push(NumberedIrep { number, start_index: self.keys.len() }); + self.keys.extend(&key.numbers); + number + } + + /// Returns a NumberedIrep from its unique number if it exists, None otherwise. + fn numbered_irep_from_number(&self, irep_number: usize) -> Option { + self.index.get(irep_number).copied() + } +} + +/// A numbering of [InternedString], [IrepId] and [Irep] based on their contents. +struct IrepNumbering { + /// Map from [InternedString] to their unique numbers. + string_cache: HashMap, + + /// Inverse string cache. + inv_string_cache: Vec, + + /// Map from [IrepKey] to their unique numbers. + cache: HashMap, + + /// Inverse cache, allows to get a NumberedIrep from its unique number. + inv_cache: IrepNumberingInv, +} + +impl IrepNumbering { + fn new() -> Self { + IrepNumbering { + string_cache: HashMap::new(), + inv_string_cache: Vec::new(), + cache: HashMap::new(), + inv_cache: IrepNumberingInv::new(), + } + } + + /// Returns a [NumberedString] from its number if it exists, None otherwise. + fn numbered_string_from_number(&mut self, string_number: usize) -> Option { + self.inv_string_cache.get(string_number).copied() + } + + /// Returns a [NumberedIrep] from its number if it exists, None otherwise. + fn numbered_irep_from_number(&mut self, irep_number: usize) -> Option { + self.inv_cache.numbered_irep_from_number(irep_number) + } + + /// Turns a [InternedString] into a [NumberedString]. + fn number_string(&mut self, string: &InternedString) -> NumberedString { + let len = self.string_cache.len(); + let entry = self.string_cache.entry(*string); + let number = *entry.or_insert_with(|| { + self.inv_string_cache.push(NumberedString { number: len, string: *string }); + len + }); + self.inv_string_cache[number] + } + + /// Turns a [IrepId] to a [NumberedString]. The [IrepId] gets the number of its + /// string representation. 
+ fn number_irep_id(&mut self, irep_id: &IrepId) -> NumberedString { + self.number_string(&irep_id.to_string().intern()) + } + + /// Turns an [Irep] into a [NumberedIrep]. The [Irep] is recursively traversed + /// and numbered in a bottom-up fashion. Structurally identical [Irep]s + /// result in the same [NumberedIrep]. + fn number_irep(&mut self, irep: &Irep) -> NumberedIrep { + // build the key + let id = self.number_irep_id(&irep.id).number; + let sub: Vec = irep.sub.iter().map(|sub| self.number_irep(sub).number).collect(); + let named_sub: Vec<(usize, usize)> = irep + .named_sub + .iter() + .map(|(key, value)| (self.number_irep_id(key).number, self.number_irep(value).number)) + .collect(); + let key = IrepKey::new(id, &sub, &named_sub); + self.get_or_insert(&key) + } + + /// Gets the existing [NumberedIrep] from the [IrepKey] or inserts a fresh + /// one and returns it. + fn get_or_insert(&mut self, key: &IrepKey) -> NumberedIrep { + if let Some(number) = self.cache.get(key) { + // Return the NumberedIrep from the inverse cache + return self.inv_cache.index[*number]; + } + // This is where the key gets its unique number assigned. + let number = self.inv_cache.add_key(&key); + self.cache.insert(key.clone(), number); + self.inv_cache.index[number] + } + + /// Returns the unique number of the `id` field of the given [NumberedIrep]. + fn id(&self, numbered_irep: &NumberedIrep) -> NumberedString { + self.inv_string_cache[self.inv_cache.keys[numbered_irep.start_index]] + } + + /// Returns `#sub`, the number of `sub` [Irep]s of the given [NumberedIrep]. + /// It is found at `numbered_irep.start_index + 1` in the inverse cache. + fn nof_sub(&self, numbered_irep: &NumberedIrep) -> usize { + self.inv_cache.keys[numbered_irep.start_index + 1] + } + + /// Returns the [NumberedIrep] for the ith `sub` of the given [NumberedIrep]. 
+ fn sub(&self, numbered_irep: &NumberedIrep, sub_idx: usize) -> NumberedIrep { + let sub_number = self.inv_cache.keys[numbered_irep.start_index + sub_idx + 2]; + self.inv_cache.index[sub_number] + } + + /// Returns `#named_sub`, the number of named subs of the given [NumberedIrep]. + /// It is found at `numbered_irep.start_index + #sub + 2` in the inverse cache. + fn nof_named_sub(&self, numbered_irep: &NumberedIrep) -> usize { + self.inv_cache.keys[numbered_irep.start_index + self.nof_sub(numbered_irep) + 2] + } + + /// Returns the pair of [NumberedString] and [NumberedIrep] for the named + /// sub number `i` of this [NumberedIrep]. + fn named_sub( + &self, + numbered_irep: &NumberedIrep, + named_sub_idx: usize, + ) -> (NumberedString, NumberedIrep) { + let start_index = + numbered_irep.start_index + self.nof_sub(numbered_irep) + 2 * named_sub_idx + 3; + ( + self.inv_string_cache[self.inv_cache.keys[start_index]], + self.inv_cache.index[self.inv_cache.keys[start_index + 1]], + ) + } +} + +/// A uniquely numbered [Irep]. +/// A NumberedIrep can be viewed as a generational index into an +/// [IrepNumbering] instance. +#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)] +struct NumberedIrep { + /// The unique number of this NumberedIrep. + number: usize, + /// Start index of the [IrepKey] of this [NumberedIrep] in the inverse cache + /// of the [IrepNumbering] that produced it. + start_index: usize, +} + +/// GOTO binary serializer. +struct GotoBinarySerializer<'a, W> +where + W: Write, +{ + writer: &'a mut W, + + /// In-memory temporary buffer, contents get flushed after each object + buf: Vec, + + /// Numbering used for structural sharing. + numbering: IrepNumbering, + + /// Counts how many times a given irep was written (indexed by the Irep's unique id). + irep_count: Vec, + + /// Counts how many times a given string was written (indexed by the strings's unique id). 
+ string_count: Vec, +} + +impl<'a, W> GotoBinarySerializer<'a, W> +where + W: Write, +{ + /// Constructor. + fn new(writer: &'a mut W) -> Self { + GotoBinarySerializer { + writer, + buf: Vec::new(), + numbering: IrepNumbering::new(), + irep_count: Vec::new(), + string_count: Vec::new(), + } + } + + /// Adds an InternedString uid to the "written" cache, returns true iff was never written before. + fn is_first_write_string(&mut self, u: usize) -> bool { + if u >= self.string_count.len() { + self.string_count.resize(u + 1, 0); + } + let count = self.string_count[u]; + self.string_count[u] = count.checked_add(1).unwrap(); + count == 0 + } + + /// Adds an Irep uid to the "written" cache, returns true iff it was never written before. + fn is_first_write_irep(&mut self, u: usize) -> bool { + if u >= self.irep_count.len() { + self.irep_count.resize(u + 1, 0); + } + let count = self.irep_count[u]; + self.irep_count[u] = count.checked_add(1).unwrap(); + count == 0 + } + + /// Flushes the temporary buffer to the external writer, + /// flushes the writer and clears the temporary buffer. + fn flush(&mut self) -> io::Result<()> { + self.writer.write_all(&self.buf)?; + self.buf.clear(); + Ok(()) + } + + /// Writes a single byte to the temporary buffer. + fn write_u8(&mut self, u: u8) -> io::Result<()> { + self.buf.push(u); + Ok(()) + } + + /// Writes a usize to the temporary buffer using 7-bit variable length + /// encoding. A usize value gets serialized as a list of u8. The usize value + /// gets shifted right in place, 7 bits at a time, the shifted bits are + /// stored in the LSBs of a u8. The MSB of the u8 is used to indicate the + /// continuation or the end of the encoding: + /// - it is set to true if some true bits remain in the usize value, + /// - it is set to zero all remaining bits of the usize value are zero. 
+ fn write_usize_varenc(&mut self, mut u: usize) -> io::Result<()> { + loop { + let mut v: u8 = (u & 0x7f) as u8; + u >>= 7; + if u == 0 { + // all remaining bits of u are zero + self.buf.push(v); + break; + } + // there are more bits in u, set the 8th bit to indicate continuation + v |= 0x80; + self.buf.push(v); + } + Ok(()) + } + + /// Writes a numbered string to the buffer. Writes the unique number of + /// the string, and writes the actual string only if was never written before. + fn write_numbered_string_ref(&mut self, numbered_string: &NumberedString) -> io::Result<()> { + let num = numbered_string.number; + self.write_usize_varenc(num)?; + if self.is_first_write_string(num) { + // first occurrence + numbered_string.string.map(|raw_str| { + for c in raw_str.chars() { + if c.is_ascii() { + // add escape character for backslashes and 0 + if c == '0' || c == '\\' { + self.buf.push(b'\\'); + } + self.buf.push(c as u8); + } else { + let mut buf = [0; 4]; + c.encode_utf8(&mut buf); + for u in buf { + if u == 0 { + break; + } + self.buf.push(u); + } + } + } + // write terminator + self.buf.push(0u8); + }); + } + self.flush()?; + Ok(()) + } + + /// Writes a numbered irep to the buffer. Writes the unique number of the + /// irep, and writes the actual irep contents only if was never written + /// before. 
+ fn write_numbered_irep_ref(&mut self, irep: &NumberedIrep) -> io::Result<()> { + let num = irep.number; + self.write_usize_varenc(num)?; + + if self.is_first_write_irep(num) { + let id = &self.numbering.id(&irep); + self.write_numbered_string_ref(id)?; + + for sub_idx in 0..(self.numbering.nof_sub(&irep)) { + self.write_u8(b'S')?; + self.write_numbered_irep_ref(&self.numbering.sub(&irep, sub_idx))?; + } + + for named_sub_idx in 0..(self.numbering.nof_named_sub(&irep)) { + self.write_u8(b'N')?; + let (k, v) = self.numbering.named_sub(&irep, named_sub_idx); + self.write_numbered_string_ref(&k)?; + self.write_numbered_irep_ref(&v)?; + } + + self.write_u8(0)?; // terminator + } + self.flush()?; + Ok(()) + } + + /// Translates the string to its numbered version and serializes it. + fn write_string_ref(&mut self, str: &InternedString) -> io::Result<()> { + let numbered_string = &self.numbering.number_string(str); + self.write_numbered_string_ref(numbered_string) + } + + /// Translates the irep to its numbered version and serializes it. + fn write_irep_ref(&mut self, irep: &Irep) -> io::Result<()> { + let numbered_irep = self.numbering.number_irep(irep); + self.write_numbered_irep_ref(&numbered_irep) + } + + /// Writes a symbol to the byte stream. 
+ fn write_symbol(&mut self, symbol: &Symbol) -> io::Result<()> { + self.write_irep_ref(&symbol.typ)?; + self.write_irep_ref(&symbol.value)?; + self.write_irep_ref(&symbol.location)?; + self.write_string_ref(&symbol.name)?; + self.write_string_ref(&symbol.module)?; + self.write_string_ref(&symbol.base_name)?; + self.write_string_ref(&symbol.mode)?; + self.write_string_ref(&symbol.pretty_name)?; + self.write_u8(0)?; + + let mut flags: usize = 0; + flags = (flags << 1) | (symbol.is_weak) as usize; + flags = (flags << 1) | (symbol.is_type) as usize; + flags = (flags << 1) | (symbol.is_property) as usize; + flags = (flags << 1) | (symbol.is_macro) as usize; + flags = (flags << 1) | (symbol.is_exported) as usize; + flags = (flags << 1) | (symbol.is_input) as usize; + flags = (flags << 1) | (symbol.is_output) as usize; + flags = (flags << 1) | (symbol.is_state_var) as usize; + flags = (flags << 1) | (symbol.is_parameter) as usize; + flags = (flags << 1) | (symbol.is_auxiliary) as usize; + // deprecated sym.binding but remains present for compatibility + flags = (flags << 1) | (false) as usize; + flags = (flags << 1) | (symbol.is_lvalue) as usize; + flags = (flags << 1) | (symbol.is_static_lifetime) as usize; + flags = (flags << 1) | (symbol.is_thread_local) as usize; + flags = (flags << 1) | (symbol.is_file_local) as usize; + flags = (flags << 1) | (symbol.is_extern) as usize; + flags = (flags << 1) | (symbol.is_volatile) as usize; + + self.write_usize_varenc(flags)?; + self.flush()?; + Ok(()) + } + + /// Writes a symbol table to the byte stream. + fn write_symbol_table(&mut self, symbol_table: &SymbolTable) -> io::Result<()> { + // Write symbol table size + self.write_usize_varenc(symbol_table.symbol_table.len())?; + + // Write symbols + for symbol in symbol_table.symbol_table.values() { + self.write_symbol(symbol)?; + } + + self.flush()?; + Ok(()) + } + + /// Writes an empty function map to the byte stream. 
+ fn write_function_map(&mut self) -> io::Result<()> { + // Write empty GOTO functions map + self.write_usize_varenc(0)?; + self.flush()?; + Ok(()) + } + + /// Writes a GOTO binary file header to the byte stream. + fn write_header(&mut self) -> io::Result<()> { + // Write header + self.write_u8(0x7f)?; + self.write_u8(b'G')?; + self.write_u8(b'B')?; + self.write_u8(b'F')?; + + // Write goto binary version + self.write_usize_varenc(5)?; + self.flush()?; + Ok(()) + } + + /// Writes the symbol table using the GOTO binary file format to the byte stream. + fn write_file(&mut self, symbol_table: &SymbolTable) -> io::Result<()> { + self.write_header()?; + self.write_symbol_table(symbol_table)?; + self.write_function_map()?; + self.flush()?; + Ok(()) + } +} + +/// GOTO binary deserializer. Reads GOTO constructs from the byte stream of a reader. +struct GotoBinaryDeserializer +where + R: Read, +{ + /// Stream of bytes from which GOTO objects are read. + bytes: Bytes, + + /// Numbering for ireps + numbering: IrepNumbering, + + /// Counts how many times a given irep was read. + irep_count: Vec, + + /// Maps the irep number used in the binary stream to the new one generated + /// by our own numbering. + irep_map: Vec>, + + /// Counts how many times a given string was read. + string_count: Vec, + + /// Maps the string number used in the binary stream to the new one generated + /// by our own numbering. + string_map: Vec>, +} + +impl GotoBinaryDeserializer +where + R: Read, +{ + /// Constructor. The reader is moved into this object and cannot be used + /// afterwards. + fn new(reader: R) -> Self { + GotoBinaryDeserializer { + bytes: reader.bytes(), + numbering: IrepNumbering::new(), + string_count: Vec::new(), + string_map: Vec::new(), + irep_count: Vec::new(), + irep_map: Vec::new(), + } + } + + // #[cfg(test)] + // /// Returns memory consumption and sharing statistics about the deserializer. 
+ // fn get_stats(&self) -> GotoBinarySharingStats { + // GotoBinarySharingStats::from_deserializer(self) + // } + + /// Returns Err if the found value is not the expected value. + fn expect(found: T, expected: T) -> io::Result { + if found != expected { + return Err(Error::new( + ErrorKind::Other, + format!("expected {expected} in byte stream, found {found} instead)"), + )); + } + Ok(found) + } + + /// Adds an InternedString unique number to the "read" cache, returns true + /// iff was never read before. + fn is_first_read_string(&mut self, u: usize) -> bool { + if u >= self.string_count.len() { + self.string_count.resize(u.checked_add(1).unwrap(), 0); + } + let count = self.string_count[u]; + self.string_count[u] = count.checked_add(1).unwrap(); + count == 0 + } + + /// Maps a string number used in the byte stream to the number generated by + /// our own numbering for that string. + fn add_string_mapping(&mut self, num_binary: usize, num: usize) { + if num_binary >= self.string_map.len() { + self.string_map.resize(num_binary + 1, None); + } + let old = self.string_map[num_binary]; + if old.is_some() { + panic!("string number already mapped"); + } + self.string_map[num_binary] = Some(num); + } + + /// Adds an Irep unique number to the "read" cache, returns true iff it was + /// never read before. + fn is_first_read_irep(&mut self, u: usize) -> bool { + if u >= self.irep_count.len() { + self.irep_count.resize(u.checked_add(1).unwrap(), 0); + } + let count = self.irep_count[u]; + self.irep_count[u] = count.checked_add(1).unwrap(); + count == 0 + } + + /// Maps an Irep number used in the byte stream to the number generated by + /// our own numbering for that Irep. 
+ fn add_irep_mapping(&mut self, num_binary: usize, num: usize) { + if num_binary >= self.irep_map.len() { + self.irep_map.resize(num_binary.checked_add(1).unwrap(), None); + } + let old = self.irep_map[num_binary]; + if old.is_some() { + panic!("irep number already mapped"); + } + self.irep_map[num_binary] = Some(num); + } + + /// Reads a u8 from the byte stream. + fn read_u8(&mut self) -> io::Result { + match self.bytes.next() { + Some(Ok(u)) => Ok(u), + Some(Err(error)) => Err(error), + None => Err(Error::new(ErrorKind::Other, "unexpected end of input")), + } + } + + /// Reads a usize from the byte stream assuming it is encoded using 7-bit + /// variable length encoding ([GotoBinarySerializer::write_usize_varenc]). + fn read_usize_varenc(&mut self) -> io::Result { + let mut result: usize = 0; + let mut shift: usize = 0; + let max_shift: usize = std::mem::size_of::() * 8; + loop { + match self.bytes.next() { + Some(Ok(u)) => { + if shift >= max_shift { + return Err(Error::new( + ErrorKind::Other, + "serialized value is too large to fit in usize", + )); + }; + result |= ((u & 0x7f) as usize) << shift; + shift = shift.checked_add(7).unwrap(); + if u & (0x80_u8) == 0 { + return Ok(result); + } + } + Some(Err(error)) => { + return Err(error); + } + None => { + return Err(Error::new(ErrorKind::Other, "unexpected end of input")); + } + } + } + } + + /// Reads a reference encoded string from the byte stream. 
+ fn read_numbered_string_ref(&mut self) -> io::Result { + let string_number_result = self.read_usize_varenc(); + let string_number = match string_number_result { + Ok(number) => number, + Err(error) => return Err(error), + }; + if self.is_first_read_string(string_number) { + // read raw string + let mut string_buf: Vec = Vec::new(); + loop { + match self.bytes.next() { + Some(Ok(u)) => { + match u { + 0 => { + // Reached end of string + match String::from_utf8(string_buf) { + Ok(str) => { + let numbered = self.numbering.number_string(&str.intern()); + self.add_string_mapping(string_number, numbered.number); + return Ok(numbered); + } + Err(error) => { + return Err(Error::new( + ErrorKind::Other, + error.to_string(), + )); + } + } + } + b'\\' => { + // Found escape symbol, read the next char + match self.bytes.next() { + Some(Ok(c)) => { + string_buf.push(c); + } + Some(Err(error)) => { + return Err(error); + } + None => { + return Err(Error::new( + ErrorKind::Other, + "unexpected end of input", + )); + } + } + } + c => { + // Found normal char, push to buffer + string_buf.push(c); + } + } + } + Some(Err(error)) => { + // Could not read from byte stream, propagate + return Err(error); + } + None => { + // No more bytes left + return Err(Error::new(ErrorKind::Other, "unexpected end of input")); + } + } + } + } else { + // We already read this irep, fetch it from the numbering + Ok(self + .numbering + .numbered_string_from_number(self.string_map[string_number].unwrap()) + .unwrap()) + } + } + + /// Reads a NumberedIrep from the byte stream. 
+ fn read_numbered_irep_ref(&mut self) -> io::Result { + let irep_number_result = self.read_usize_varenc(); + let irep_number = match irep_number_result { + Ok(number) => number, + Err(error) => return Err(error), + }; + + if self.is_first_read_irep(irep_number) { + let id = self.read_numbered_string_ref()?.number; + let mut sub_done = false; + let mut sub: Vec = Vec::new(); + let mut named_sub: Vec<(usize, usize)> = Vec::new(); + loop { + // read subs and named subs one by one until the 0 terminator is found + let c = self.read_u8()?; + match c { + b'S' => { + if sub_done { + return Err(Error::new(ErrorKind::Other, "incorrect binary structure")); + } + let decoded_sub = self.read_numbered_irep_ref()?; + sub.push(decoded_sub.number); + } + b'N' => { + sub_done = true; + let decoded_name = self.read_numbered_string_ref()?; + let decoded_sub = self.read_numbered_irep_ref()?; + named_sub.push((decoded_name.number, decoded_sub.number)); + } + 0 => { + // Reached the end of this irep + // Build the key + let key = IrepKey::new(id, &sub, &named_sub); + + // Insert key in the numbering + let numbered = self.numbering.get_or_insert(&key); + + // Map number from the binary to new number + self.add_irep_mapping(irep_number, numbered.number); + return Ok(numbered); + } + other => { + return Err(Error::new( + ErrorKind::Other, + format!("unexpected character in input stream {}", other as char), + )); + } + } + } + } else { + Ok(self + .numbering + .numbered_irep_from_number(self.irep_map[irep_number].unwrap()) + .unwrap()) + } + } + + /// Reads a Symbol from the byte stream. 
+ fn read_symbol(&mut self) -> io::Result<()> { + // Read Irep attributes of the symbol + let _typ = self.read_numbered_irep_ref()?; + let _value = self.read_numbered_irep_ref()?; + let _location = self.read_numbered_irep_ref()?; + + // Read string attributes of the symbol + let _name = self.read_numbered_string_ref()?; + let _module = self.read_numbered_string_ref()?; + let _base_name = self.read_numbered_string_ref()?; + let _mode = self.read_numbered_string_ref()?; + let _pretty_name = self.read_numbered_string_ref()?; + + // obsolete: symordering + let symordering = self.read_u8()?; + Self::expect(symordering, 0)?; + + // Decode the bit-packed flags and extract bits one by one + let flags: usize = self.read_usize_varenc()?; + + let _is_weak = (flags & (1 << 16)) != 0; + let _is_type = (flags & (1 << 15)) != 0; + let _is_property = (flags & (1 << 14)) != 0; + let _is_macro = (flags & (1 << 13)) != 0; + let _is_exported = (flags & (1 << 12)) != 0; + let _is_input = (flags & (1 << 11)) != 0; + let _is_output = (flags & (1 << 10)) != 0; + let _is_state_var = (flags & (1 << 9)) != 0; + let _is_parameter = (flags & (1 << 8)) != 0; + let _is_auxiliary = (flags & (1 << 7)) != 0; + // deprecated sym.binding but remains present for compatibility + let _is_binding = (flags & (1 << 6)) != 0; + let _is_lvalue = (flags & (1 << 5)) != 0; + let _is_static_lifetime = (flags & (1 << 4)) != 0; + let _is_thread_local = (flags & (1 << 3)) != 0; + let _is_file_local = (flags & (1 << 2)) != 0; + let _is_extern = (flags & (1 << 1)) != 0; + let _is_volatile = (flags & 1) != 0; + + let shifted_flags = flags >> 16; + + if shifted_flags != 0 { + return Err(Error::new( + ErrorKind::Other, + "incorrect binary format: true bits remain in decoded symbol flags", + )); + } + Ok(()) + } + + /// Reads a whole SymbolTable from the byte stream. 
+ fn read_symbol_table(&mut self) -> io::Result<()> { + // Write symbol table size + let symbol_table_len = self.read_usize_varenc()?; + + // Write symbols + for _ in 0..symbol_table_len { + self.read_symbol()?; + } + + Ok(()) + } + + /// Reads an empty function map from the byte stream. + fn read_function_map(&mut self) -> io::Result<()> { + let goto_function_len = self.read_usize_varenc()?; + Self::expect(goto_function_len, 0)?; + Ok(()) + } + + /// Reads a GOTO binary header from the byte stream. + fn read_header(&mut self) -> io::Result<()> { + // Read header + Self::expect(self.read_u8().unwrap(), 0x7f)?; + Self::expect(self.read_u8().unwrap(), b'G')?; + Self::expect(self.read_u8().unwrap(), b'B')?; + Self::expect(self.read_u8().unwrap(), b'F')?; + + // Read goto binary version + let goto_binary_version = self.read_usize_varenc()?; + if goto_binary_version != 5 { + return Err(Error::new( + ErrorKind::Other, + format!( + "unsupported GOTO binary version: {}. Supported version: {}", + goto_binary_version, 5 + ), + )); + } + Ok(()) + } + + /// Read a GOTO binary file from the byte stream. 
+ fn read_file(&mut self) -> io::Result<()> { + self.read_header()?; + self.read_symbol_table()?; + self.read_function_map()?; + Ok(()) + } +} + +//////////////////////////////////////// +//// Dynamic memory usage computation +//////////////////////////////////////// + +#[cfg(test)] +mod sharing_stats { + use super::GotoBinaryDeserializer; + use super::GotoBinarySerializer; + use super::IrepKey; + use super::IrepNumbering; + use super::IrepNumberingInv; + use super::NumberedIrep; + use super::NumberedString; + use crate::InternedString; + use memuse::DynamicUsage; + use std::io::{Read, Write}; + + impl DynamicUsage for NumberedIrep { + fn dynamic_usage(&self) -> usize { + std::mem::size_of::() + } + + fn dynamic_usage_bounds(&self) -> (usize, Option) { + let s = std::mem::size_of::(); + (s, Some(s)) + } + } + + impl DynamicUsage for IrepKey { + fn dynamic_usage(&self) -> usize { + std::mem::size_of::() + self.numbers.dynamic_usage() + } + + fn dynamic_usage_bounds(&self) -> (usize, Option) { + let (lower, upper) = self.numbers.dynamic_usage_bounds(); + let s = std::mem::size_of::(); + (lower + s, upper.map(|x| x + s)) + } + } + + impl DynamicUsage for IrepNumberingInv { + fn dynamic_usage(&self) -> usize { + std::mem::size_of::() + self.index.dynamic_usage() + self.keys.dynamic_usage() + } + + fn dynamic_usage_bounds(&self) -> (usize, Option) { + let (lindex, uindex) = self.index.dynamic_usage_bounds(); + let (lkeys, ukeys) = self.keys.dynamic_usage_bounds(); + let s = std::mem::size_of::(); + (lindex + lkeys + s, uindex.and_then(|s1| ukeys.map(|s2| s1 + s2 + s))) + } + } + + impl DynamicUsage for InternedString { + fn dynamic_usage(&self) -> usize { + std::mem::size_of::() + } + fn dynamic_usage_bounds(&self) -> (usize, Option) { + let s = std::mem::size_of::(); + (s, Some(s)) + } + } + + impl DynamicUsage for NumberedString { + fn dynamic_usage(&self) -> usize { + std::mem::size_of::() + } + fn dynamic_usage_bounds(&self) -> (usize, Option) { + let s = 
std::mem::size_of::(); + (s, Some(s)) + } + } + + impl DynamicUsage for IrepNumbering { + fn dynamic_usage(&self) -> usize { + std::mem::size_of::() + + self.string_cache.dynamic_usage() + + self.inv_string_cache.dynamic_usage() + + self.cache.dynamic_usage() + + self.inv_cache.dynamic_usage() + } + + fn dynamic_usage_bounds(&self) -> (usize, Option) { + let s = std::mem::size_of::(); + let (l1, u1) = self.string_cache.dynamic_usage_bounds(); + let (l2, u2) = self.inv_string_cache.dynamic_usage_bounds(); + let (l3, u3) = self.cache.dynamic_usage_bounds(); + let (l4, u4) = self.inv_cache.dynamic_usage_bounds(); + let l = l1 + l2 + l3 + l4 + s; + let u = u1.and_then(|u1| { + u2.and_then(|u2| u3.and_then(|u3| u4.map(|u4| u1 + u2 + u3 + u4 + s))) + }); + (l, u) + } + } + + impl<'a, W> DynamicUsage for GotoBinarySerializer<'a, W> + where + W: Write, + { + fn dynamic_usage(&self) -> usize { + std::mem::size_of::() + + self.buf.dynamic_usage() + + self.numbering.dynamic_usage() + + self.irep_count.dynamic_usage() + + self.string_count.dynamic_usage() + } + + fn dynamic_usage_bounds(&self) -> (usize, Option) { + let s = std::mem::size_of::(); + let (l1, u1) = self.buf.dynamic_usage_bounds(); + let (l2, u2) = self.numbering.dynamic_usage_bounds(); + let (l3, u3) = self.irep_count.dynamic_usage_bounds(); + let (l4, u4) = self.string_count.dynamic_usage_bounds(); + let l = l1 + l2 + l3 + l4 + s; + let u = u1.and_then(|u1| { + u2.and_then(|u2| u3.and_then(|u3| u4.map(|u4| u1 + u2 + u3 + u4 + s))) + }); + (l, u) + } + } + + impl DynamicUsage for GotoBinaryDeserializer + where + R: Read, + { + fn dynamic_usage(&self) -> usize { + std::mem::size_of::() + + self.numbering.dynamic_usage() + + self.irep_count.dynamic_usage() + + self.irep_map.dynamic_usage() + + self.string_count.dynamic_usage() + + self.string_map.dynamic_usage() + } + + fn dynamic_usage_bounds(&self) -> (usize, Option) { + let s = std::mem::size_of::(); + let (l1, u1) = self.numbering.dynamic_usage_bounds(); + let 
(l2, u2) = self.irep_count.dynamic_usage_bounds(); + let (l3, u3) = self.irep_map.dynamic_usage_bounds(); + let (l4, u4) = self.string_count.dynamic_usage_bounds(); + let (l5, u5) = self.string_map.dynamic_usage_bounds(); + let l = l1 + l2 + l3 + l4 + l5 + s; + let u = u1.and_then(|u1| { + u2.and_then(|u2| { + u3.and_then(|u3| u4.and_then(|u4| u5.map(|u5| u1 + u2 + u3 + u4 + u5 + s))) + }) + }); + (l, u) + } + } + + #[derive(Debug)] + /// Structural sharing statistics + pub struct SharingStats { + // Number of structurally unique objects + _nof_unique: usize, + + // Minimum count for a unique object + _min_count: usize, + + // Unique identifier of the min count object + _min_id: Option, + + // Maximum count for a unique object + _max_count: usize, + + // Unique identifier of the max count object + _max_id: Option, + + // Average count for objects + _avg_count: f64, + } + + impl SharingStats { + fn new(elems: &[usize]) -> Self { + let mut nof_unique: usize = 0; + let mut min_count: usize = usize::MAX; + let mut min_id: Option = None; + let mut max_count: usize = 0; + let mut max_id: Option = None; + let mut avg_count: f64 = 0.0; + + for (id, count) in elems.iter().enumerate() { + if *count == 0 { + continue; + } + if *count < min_count { + min_count = *count; + min_id = Some(id); + }; + if *count > max_count { + max_count = *count; + max_id = Some(id); + }; + nof_unique = nof_unique + 1; + let incr = (*count as f64 - avg_count) / (nof_unique as f64); + avg_count = avg_count + incr; + } + SharingStats { + _nof_unique: nof_unique, + _min_count: min_count, + _min_id: min_id, + _max_count: max_count, + _max_id: max_id, + _avg_count: avg_count, + } + } + } + + /// Statistics for GotoBinarySerializer. 
+ #[derive(Debug)] + pub struct GotoBinarySharingStats { + /// Number of bytes used by the serializer + _allocated_bytes: usize, + + /// Sharing statistics for NumberedStrings + _string_stats: SharingStats, + + /// Sharing statistics for NumberedIreps + _irep_stats: SharingStats, + } + + impl GotoBinarySharingStats { + fn from_serializer<'a, W: Write>(s: &GotoBinarySerializer<'a, W>) -> Self { + GotoBinarySharingStats { + _allocated_bytes: s.dynamic_usage(), + _string_stats: SharingStats::new(&s.string_count), + _irep_stats: SharingStats::new(&s.irep_count), + } + } + + fn from_deserializer(s: &GotoBinaryDeserializer) -> Self { + GotoBinarySharingStats { + _allocated_bytes: s.dynamic_usage(), + _string_stats: SharingStats::new(&s.string_count), + _irep_stats: SharingStats::new(&s.irep_count), + } + } + } + + impl<'a, W> GotoBinarySerializer<'a, W> + where + W: Write, + { + /// Returns memory consumption and sharing statistics about the serializer. + pub fn get_stats(&self) -> GotoBinarySharingStats { + GotoBinarySharingStats::from_serializer(self) + } + } + + impl GotoBinaryDeserializer + where + R: Read, + { + /// Returns memory consumption and sharing statistics about the deserializer. + pub fn get_stats(&self) -> GotoBinarySharingStats { + GotoBinarySharingStats::from_deserializer(self) + } + } +} + +/// Unit tests for GOTO binary serialization/deserialization. +#[cfg(test)] +mod tests { + use super::GotoBinarySerializer; + use super::IrepNumbering; + use crate::cbmc_string::InternString; + use crate::irep::goto_binary_serde::GotoBinaryDeserializer; + use crate::irep::Irep; + use crate::irep::IrepId; + use crate::linear_map; + use crate::InternedString; + use linear_map::LinearMap; + + /// Utility function : creates a Irep representing a single symbol. 
+ fn make_symbol_expr(identifier: &str) -> Irep { + Irep { + id: IrepId::Symbol, + sub: vec![], + named_sub: linear_map![(IrepId::Identifier, Irep::just_string_id(identifier),)], + } + } + + /// Utility function: creates an expression by folding the symbol expressions with the given operator. + fn fold_with_op(identifiers: &Vec<&str>, id: IrepId) -> Irep { + identifiers.iter().fold(make_symbol_expr("dummy"), |acc, identifier| Irep { + id: id.clone(), + sub: vec![acc, make_symbol_expr(identifier)], + named_sub: LinearMap::new(), + }) + } + + #[test] + /// Create two structurally identical ireps and check that they get the same number. + fn test_irep_numbering_eq() { + let mut numbering = IrepNumbering::new(); + let identifiers = vec![ + "foo", "bar", "baz", "zab", "rab", "oof", "foo", "bar", "baz", "zab", "rab", "oof", + ]; + let num1 = numbering.number_irep(&fold_with_op(&identifiers, IrepId::And)); + let num2 = numbering.number_irep(&fold_with_op(&identifiers, IrepId::And)); + assert_eq!(num1, num2); + } + + #[test] + /// Create two ireps with different named subs and check that they get different numbers. + fn test_irep_numbering_ne_named_sub() { + let mut numbering = IrepNumbering::new(); + + let identifiers1 = vec![ + "foo", "bar", "baz", "zab", "rab", "oof", "foo", "bar", "baz", "zab", "rab", "oof", + ]; + let num1 = numbering.number_irep(&fold_with_op(&identifiers1, IrepId::And)); + + let identifiers2 = vec![ + "foo", "bar", "HERE", "zab", "rab", "oof", "foo", "bar", "baz", "zab", "rab", "oof", + ]; + let num2 = numbering.number_irep(&fold_with_op(&identifiers2, IrepId::And)); + assert_ne!(num1, num2); + } + + #[test] + /// Create two ireps with different ids and check that they get different numbers. 
+ fn test_irep_numbering_ne_id() { + let mut numbering = IrepNumbering::new(); + + let identifiers = vec![ + "foo", "bar", "baz", "zab", "rab", "oof", "foo", "bar", "baz", "zab", "rab", "oof", + ]; + let num1 = numbering.number_irep(&fold_with_op(&identifiers, IrepId::And)); + let num2 = numbering.number_irep(&fold_with_op(&identifiers, IrepId::Or)); + + assert_ne!(num1, num2); + } + + #[test] + /// Write and read back all possible u8 values. + fn test_write_u8() { + let mut vec: Vec = Vec::new(); + let mut serializer = GotoBinarySerializer::new(&mut vec); + + // write all possible u8 values + for u in std::u8::MIN..std::u8::MAX { + serializer.write_u8(u).unwrap(); + } + serializer.flush().unwrap(); + + // read back from byte stream + for u in std::u8::MIN..std::u8::MAX { + assert_eq!(vec[u as usize], u); + } + } + + #[test] + /// Write and read back usize values covering the whole usize bit-width. + fn test_write_usize() { + // Generate all powers of two to cover the whole bitwidth + let mut powers_of_two: Vec = Vec::new(); + powers_of_two.push(0); + for i in 0..usize::BITS { + let num = 1usize << i; + powers_of_two.push(num); + } + powers_of_two.push(usize::MAX); + + // Serialize using variable length encoding + let mut vec: Vec = Vec::new(); + let mut serializer = GotoBinarySerializer::new(&mut vec); + for number in powers_of_two.iter() { + serializer.write_usize_varenc(*number).unwrap(); + } + serializer.flush().unwrap(); + + // Deserialize byte stream and check equality + let mut deserializer = GotoBinaryDeserializer::new(std::io::Cursor::new(vec)); + for number in powers_of_two.iter() { + let decoded = deserializer.read_usize_varenc().unwrap(); + assert_eq!(decoded, *number); + } + } + + #[test] + /// Write and read back unique strings. 
+ fn test_write_read_unique_string_ref() { + let strings: Vec = vec![ + "some_string".intern(), + "some other string".intern(), + "some string containing 0 and some other things".intern(), + "some string containing \\ and some other things".intern(), + "some string containing \\ and # and $ and % and \n \t and 1231231".intern(), + ]; + + // Serialize unique strings + let mut vec: Vec = Vec::new(); + let mut serializer = GotoBinarySerializer::new(&mut vec); + for string in strings.iter() { + serializer.write_string_ref(string).unwrap(); + } + serializer.flush().unwrap(); + + // Deserialize contents one by one and check equality + let mut deserializer = GotoBinaryDeserializer::new(std::io::Cursor::new(vec)); + for string in strings.iter() { + let decoded = deserializer.read_numbered_string_ref().unwrap().string; + assert_eq!(decoded, *string); + } + } + + #[test] + /// Write and read back repeated strings. + fn test_write_read_multiple_string_ref() { + let mut vec: Vec = Vec::new(); + let foo = String::from("foo").intern(); + let bar = String::from("bar").intern(); + let baz = String::from("baz").intern(); + let strings = vec![foo, bar, foo, bar, foo, baz, baz, bar, foo]; + + // Serialize the same strings several times in arbitrary order + let mut serializer = GotoBinarySerializer::new(&mut vec); + for string in strings.iter() { + serializer.write_string_ref(&string).unwrap(); + } + println!("Serializer stats {:?}", serializer.get_stats()); + + // Deserialize the byte stream and check equality + let mut deserializer = GotoBinaryDeserializer::new(std::io::Cursor::new(vec)); + for string in strings.iter() { + let decoded = deserializer.read_numbered_string_ref().unwrap().string; + assert_eq!(decoded.to_string(), string.to_string()); + } + println!("Deserializer stats {:?}", deserializer.get_stats()); + } + + #[test] + /// Write and read back distinct ireps. 
+ fn test_write_irep_ref() { + let identifiers1 = vec!["foo", "bar", "baz", "same", "zab", "rab", "oof"]; + let irep1 = &fold_with_op(&identifiers1, IrepId::And); + + let mut vec: Vec = Vec::new(); + let mut serializer = GotoBinarySerializer::new(&mut vec); + + // Number an irep + let num1 = serializer.numbering.number_irep(&irep1); + + // Number an structurally different irep + let identifiers2 = vec!["foo", "bar", "baz", "different", "zab", "rab", "oof"]; + let irep2 = &fold_with_op(&identifiers2, IrepId::And); + let num2 = serializer.numbering.number_irep(&irep2); + + // Check that they have the different numbers. + assert_ne!(num1, num2); + + // write both numbered ireps + serializer.write_numbered_irep_ref(&num1).unwrap(); + serializer.write_numbered_irep_ref(&num2).unwrap(); + + // check that the serializer knows it wrote the same irep twice + assert!(serializer.irep_count[num1.number] == 1); + assert!(serializer.irep_count[num2.number] == 1); + println!("Serializer stats {:?}", serializer.get_stats()); + + // Deserialize two ireps from the byte stream + let mut deserializer = GotoBinaryDeserializer::new(std::io::Cursor::new(vec)); + let num3 = deserializer.read_numbered_irep_ref().unwrap(); + let num4 = deserializer.read_numbered_irep_ref().unwrap(); + // println!("Deserializer stats {:?}", deserializer.get_stats()); + + // Check that they have different numbers. + assert_ne!(num3, num4); + } + + #[test] + /// Write and read back several identical ireps. 
+ fn test_write_read_irep_ref() { + let identifiers = vec![ + "foo", "bar", "baz", "zab", "rab", "oof", "foo", "bar", "baz", "zab", "rab", "oof", + ]; + + let mut vec: Vec = Vec::new(); + { + // Write two structurally identical ireps + let mut serializer = GotoBinarySerializer::new(&mut vec); + let irep1 = &fold_with_op(&identifiers, IrepId::And); + let irep2 = &fold_with_op(&identifiers, IrepId::And); + serializer.write_irep_ref(irep1).unwrap(); + serializer.write_irep_ref(irep2).unwrap(); + serializer.write_irep_ref(irep1).unwrap(); + serializer.write_irep_ref(irep2).unwrap(); + serializer.write_irep_ref(irep1).unwrap(); + serializer.write_irep_ref(irep1).unwrap(); + println!("Serializer stats {:?}", serializer.get_stats()); + } + + { + // Deserialize the byte stream and check that we get the same numbered ireps + let mut deserializer = GotoBinaryDeserializer::new(std::io::Cursor::new(vec)); + let irep1 = deserializer.read_numbered_irep_ref().unwrap(); + let irep2 = deserializer.read_numbered_irep_ref().unwrap(); + let irep3 = deserializer.read_numbered_irep_ref().unwrap(); + let irep4 = deserializer.read_numbered_irep_ref().unwrap(); + let irep5 = deserializer.read_numbered_irep_ref().unwrap(); + let irep6 = deserializer.read_numbered_irep_ref().unwrap(); + println!("Deserializer stats {:?}", deserializer.get_stats()); + assert_eq!(irep1, irep2); + assert_eq!(irep1, irep3); + assert_eq!(irep1, irep4); + assert_eq!(irep1, irep5); + assert_eq!(irep1, irep6); + } + } +} diff --git a/cprover_bindings/src/irep/mod.rs b/cprover_bindings/src/irep/mod.rs index 8e1513686e37..29c585bdb42b 100644 --- a/cprover_bindings/src/irep/mod.rs +++ b/cprover_bindings/src/irep/mod.rs @@ -14,6 +14,7 @@ //! TODO: Parser for json symbol tables into the internal irep format //! 
TODO: Investigate memory usage, and consider using sharing to reduce memory usage +pub mod goto_binary_serde; #[allow(clippy::module_inception)] mod irep; mod irep_id; diff --git a/docs/src/SUMMARY.md b/docs/src/SUMMARY.md index 60df0b0faa31..545c7ea4a270 100644 --- a/docs/src/SUMMARY.md +++ b/docs/src/SUMMARY.md @@ -14,6 +14,9 @@ - [Nondeterministic variables](./tutorial-nondeterministic-variables.md) - [Debugging verification failures](./debugging-verification-failures.md) +- [Reference](./reference.md) + - [Stubbing](./reference/stubbing.md) + - [Application](./application.md) - [Comparison with other tools](./tool-comparison.md) - [Where to start on real code](./tutorial-real-code.md) @@ -37,3 +40,7 @@ - [Overrides](./overrides.md) - [Crates Documentation](./crates/index.md) + +--- + +- [FAQ](./faq.md) diff --git a/docs/src/application.md b/docs/src/application.md index e41b75050549..fb527679e2c6 100644 --- a/docs/src/application.md +++ b/docs/src/application.md @@ -6,9 +6,8 @@ You may be interested in applying Kani if you're in this situation: 2. You've already invested heavily in testing to ensure correctness. 3. You want to invest further, to gain a much higher degree of assurance. -> If you haven't already, we recommend techniques like property testing (e.g. with [`proptest`](https://github.com/AltSysrq/proptest)) before attempting model checking. -> These yield good results, are very cheap to apply, and are often easier to adopt and debug. -> Kani is a next step: a tool that can be applied once cheaper tactics are no longer yielding results, or once the easier to detect issues have already been dealt with. +> If you haven't already, we also recommend techniques like property testing and fuzzing (e.g. with [`bolero`](https://github.com/camshaft/bolero/)). +> These yield good results, are very cheap to apply, and are often easy to adopt and debug. 
In this section, we explain [how Kani compares with other tools](./tool-comparison.md) and suggest [where to start applying Kani in real code](./tutorial-real-code.md). diff --git a/docs/src/conventions.md b/docs/src/conventions.md index 31ee2e1ed242..cedce17445e9 100644 --- a/docs/src/conventions.md +++ b/docs/src/conventions.md @@ -70,11 +70,15 @@ Make sure to add user-friendly errors for constructs that we can't handle. For example, Kani cannot handle the panic unwind strategy, and it will fail compilation if the crate uses this configuration. +In general, it's preferred that error messages follow [these guidelines](https://rustc-dev-guide.rust-lang.org/diagnostics.html#diagnostic-output-style-guide) used for `rustc` development. +If the errors are being emitted from `kani-compiler`, you should use the compiler error message utilities (e.g., the `Session::span_err` method). However, if the +errors are being emitted from `kani-driver`, you should use the functions provided in the `util` module in `kani-driver`. + ### Internal compiler errors Even though this doesn't provide users the best experience, you are encouraged to add checks in the compiler for any assumptions you make during development. -Those check can be on the form of `assert!()` or `unreachable!()` +Those checks can be on the form of `assert!()` or `unreachable!()` statement. Please provide a meaningful message to help user understand why something failed, and try to explain, at least with a comment, why this is the case. diff --git a/docs/src/faq.md b/docs/src/faq.md new file mode 100644 index 000000000000..2306320b7512 --- /dev/null +++ b/docs/src/faq.md @@ -0,0 +1,59 @@ +# FAQs + +This section collects frequently asked questions about Kani. +Please consider [opening an issue](https://github.com/model-checking/kani/issues/new/choose) if you have a question that would like to see here. + +## Questions + +
+Kani doesn't fail after kani::assume(false). Why? +
+ +`kani::assume(false)` (or `kani::assume(cond)` where `cond` is a condition that results in `false` in the context of the program), won't cause errors in Kani. +Instead, such an assumption has the effect of blocking all the symbolic execution paths from the assumption. +Therefore, all checks after the assumption should appear as [`UNREACHABLE`](./verification-results.md). +That's the expected behavior for `kani::assume(false)` in Kani. + +If you didn't expect certain checks in a harness to be `UNREACHABLE`, we recommend using the [`kani::cover` macro](./verification-results.md#cover-property-results) to determine what conditions are possible in case you've over-constrained the harness. +
+ +
+I implemented the kani::Arbitrary trait for a type that's not from my crate, and got the error +only traits defined in the current crate can be implemented for types defined outside of the crate. +What does this mean? What can I do? +
+ +This error is due to a violation of Rust's orphan rules for trait implementations, which are explained [here](https://doc.rust-lang.org/error_codes/E0117.html). +In that case, you'll need to write a function that builds an object from non-deterministic variables. +Inside this function you would simply return an arbitrary value by generating arbitrary values for its components. + +For example, let's assume the type you're working with is this enum: + +```rust +#[derive(Copy, Clone)] +pub enum Rating { + One, + Two, + Three, +} +``` + +Then, you can match on a non-deterministic integer (supplied by `kani::any`) to return non-deterministic `Rating` variants: + +```rust + pub fn any_rating() -> Rating { + match kani::any() { + 0 => Rating::One, + 1 => Rating::Two, + _ => Rating::Three, + } + } +``` + +More details about this option, which is also useful in other cases, can be found [here](https://model-checking.github.io/kani/tutorial-nondeterministic-variables.html#custom-nondeterministic-types). + +If the type comes from `std` (Rust's standard library), you can [open a request](https://github.com/model-checking/kani/issues/new?assignees=&labels=%5BC%5D+Feature+%2F+Enhancement&template=feature_request.md&title=) for adding `Arbitrary` implementations to the Kani library. +Otherwise, there are more involved options to consider: + 1. Importing a copy of the external crate that defines the type, then implement `Arbitrary` there. + 2. Contributing the `Arbitrary` implementation to the external crate that defines the type. +
diff --git a/docs/src/getting-started.md b/docs/src/getting-started.md index 3f43d31f29d9..4377c0b91106 100644 --- a/docs/src/getting-started.md +++ b/docs/src/getting-started.md @@ -1,9 +1,11 @@ # Getting started -Kani is an open-source verification tool that uses automated reasoning to analyze Rust programs. +Kani is an open-source verification tool that uses [model checking](./tool-comparison.md) to analyze Rust programs. Kani is particularly useful for verifying unsafe code in Rust, where many of the Rust’s usual guarantees are no longer checked by the compiler. Some example properties you can prove with Kani include memory safety properties (e.g., null pointer dereferences, use-after-free, etc.), the absence of certain runtime errors (i.e., index out of bounds, panics), and the absence of some types of unexpected behavior (e.g., arithmetic overflows). Kani can also prove custom properties provided in the form of user-specified assertions. +As Kani uses model checking, Kani will either prove the property, disprove the +property (with a counterexample), or may run out of resources. Kani uses proof harnesses to analyze programs. Proof harnesses are similar to test harnesses, especially property-based test harnesses. @@ -15,7 +17,7 @@ Releases are published [here](https://github.com/model-checking/kani/releases). Major changes to Kani are documented in the [RFC Book](https://model-checking.github.io/kani/rfc). There is support for a fair amount of Rust language features, but not all (e.g., concurrency). -Please see [Limitations - Rust feature support](./rust-feature-support.md) for a detailed list of supported features. +Please see [Limitations](./limitations.md) for a detailed list of supported features. Kani releases every two weeks. As part of every release, Kani will synchronize with a recent nightly release of Rust, and so is generally up-to-date with the latest Rust language features. 
diff --git a/docs/src/limitations.md b/docs/src/limitations.md index 993d7cecd48c..46be19c73fb6 100644 --- a/docs/src/limitations.md +++ b/docs/src/limitations.md @@ -1,7 +1,8 @@ # Limitations Like other tools, Kani comes with some limitations. In some cases, these -limitations are inherent because of the techniques it's based on. In other +limitations are inherent because of the techniques it's based on, or the +undecidability of the properties that Kani seeks to prove. In other cases, it's just a matter of time and effort to remove these limitations (e.g., specific unsupported Rust language features). diff --git a/docs/src/reference.md b/docs/src/reference.md new file mode 100644 index 000000000000..b5c637d4c9e2 --- /dev/null +++ b/docs/src/reference.md @@ -0,0 +1,4 @@ +# Reference + +This section is the main reference for Kani. +It contains sections that informally describe its main features. diff --git a/docs/src/reference/stubbing.md b/docs/src/reference/stubbing.md new file mode 100644 index 000000000000..c919b3c2f168 --- /dev/null +++ b/docs/src/reference/stubbing.md @@ -0,0 +1,212 @@ +# Stubbing + +Stubbing (or mocking) is an unstable feature which allows users to specify that certain items should be replaced with stubs (mocks) of those items during verification. +At present, the only items where stubbing can be applied are functions and methods (see [limitations](#limitations) for more details). + +## When to consider stubbing + +In general, we have identified three reasons where users may consider stubbing: + - **Unsupported features:** The code under verification contains features that Kani does not support, such as inline assembly. + - **Bad performance:** The code under verification contains features that Kani supports, but it leads to bad verification performance (for example, deserialization code). + - **Compositional reasoning:** The code under verification contains code that has been verified separately. 
+ Stubbing the code that has already been verified with a less complex version that mimics its behavior can result in reduced verification workloads. + +In most cases, stubbing enables users to verify code that otherwise would be impractical to verify. +Although definitions for *mocking* (normally used in testing) and *stubbing* may slightly differ depending on who you ask, we often use both terms interchangeably. + +## Components + +The stubbing feature can be enabled by using the `--enable-stubbing` option when calling Kani. +Since it's an unstable feature, it requires passing the `--enable-unstable` option in addition to `--enable-stubbing`. + +At present, the only component of the stubbing feature is [the `#[kani::stub(, )]` attribute](#the-kanistub-attribute), +which allows you to specify the pair of functions/methods that must be stubbed in a harness. + + + +## The `#[kani::stub(...)]` attribute + +The stub attribute `#[kani::stub(, )]` is the main tool of the stubbing feature. + +It indicates to Kani that the function/method with name `` should be replaced with the function/method with name `` during the compilation step. +The names of these functions/methods are **resolved using Rust's standard name resolution rules**. +This includes support for imports like `use foo::bar as baz`, as well as imports of multiple versions of the same crate. + +**This attribute must be specified on a per-harness basis**. This provides a high degree of flexibility for users, since they are given the option to stub the same item with different replacements (or not use stubbing at all) depending on the proof harness. In addition, **the attribute can be specified multiple times per harness**, so that multiple (non-conflicting) stub pairings are supported. + +### An example: stubbing `random` + +Let's see a simple example where we use the [`rand::random`](https://docs.rs/rand/latest/rand/fn.random.html) function +to generate an encryption key. 
+ +```rust +#[cfg(kani)] +#[kani::proof] +fn encrypt_then_decrypt_is_identity() { + let data: u32 = kani::any(); + let encryption_key: u32 = rand::random(); + let encrypted_data = data ^ encryption_key; + let decrypted_data = encrypted_data ^ encryption_key; + assert_eq!(data, decrypted_data); +} + +``` + +At present, Kani fails to verify this example due to [issue #1781](https://github.com/model-checking/kani/issues/1781). + +However, we can work around this limitation thanks to the stubbing feature: + +```rust +#[cfg(kani)] +fn mock_random() -> T { + kani::any() +} + +#[cfg(kani)] +#[kani::proof] +#[kani::stub(rand::random, mock_random)] +fn encrypt_then_decrypt_is_identity() { + let data: u32 = kani::any(); + let encryption_key: u32 = rand::random(); + let encrypted_data = data ^ encryption_key; + let decrypted_data = encrypted_data ^ encryption_key; + assert_eq!(data, decrypted_data); +} +``` + +Here, the `#[kani::stub(rand::random, mock_random)]` attribute indicates to Kani that it should replace `rand::random` with the stub `mock_random`. +Note that this is a fair assumption to do: `rand::random` is expected to return any `u32` value, just like `kani::any`. + +Now, let's run it through Kani: + +```bash +cargo kani --enable-unstable --enable-stubbing --harness random_cannot_be_zero +``` + +The verification result is composed of a single check: the assertion corresponding to `assert_eq!(data, decrypted_data)`. + +``` +RESULTS: +Check 1: encrypt_then_decrypt_is_identity.assertion.1 + - Status: SUCCESS + - Description: "assertion failed: data == decrypted_data" + - Location: src/main.rs:18:5 in function encrypt_then_decrypt_is_identity + + +SUMMARY: + ** 0 of 1 failed + +VERIFICATION:- SUCCESSFUL +``` + +Kani shows that the assertion is successful, avoiding any issues that appear if we attempt to verify the code without stubbing. + +## Limitations + +In the following, we describe all the limitations of the stubbing feature. 
+ +### Usage restrictions + +The usage of stubbing is limited to the verification of a single harness. +Therefore, users are **required to pass the `--harness` option** when using the stubbing feature. + +In addition, this feature **isn't compatible with [concrete playback](../debugging-verification-failures.md#concrete-playback)**. + +### Support + +Support for stubbing is currently **limited to functions and methods**. All other items aren't supported. + +The following are examples of items that could be good candidates for stubbing, but aren't supported: +- Types +- Macros +- Traits +- Intrinsics + +We acknowledge that support for method stubbing isn't as ergonomic as it could be. +A common problem when attempting to define method stubs is that we don't have access to the private fields of an object (i.e., the fields in `self`). +One workaround is to use the unsafe function `std::mem::transmute`, as in this example: + + ```rust + struct Foo { + x: u32, + } + + impl Foo { + pub fn m(&self) -> u32 { + 0 + } + } + + struct MockFoo { + pub x: u32, + } + + fn mock_m(foo: &Foo) -> u32 { + let mock: &MockFoo = unsafe { std::mem::transmute(foo) }; + return mock.x; + } + + #[cfg(kani)] + #[kani::proof] + #[kani::stub(Foo::m, mock_m)] + fn my_harness() { ... } + ``` + +However, this isn't recommended since it's unsafe and error-prone. +In general, we don't recommend stubbing for private functions/methods. +Doing so can lead to brittle proofs: private functions/methods are subject to change or removal even in version minor upgrades (they aren't part of the APIs). +Therefore, proofs that rely on stubbing for private functions/methods might incur a high maintenance burden. + +## Error conditions + +Given a set of `original`-`replacement` pairs, Kani will exit with an error if: + 1. a specified `original` function does not exist; + 2. a specified `replacement` stub does not exist; + 3. 
the user specifies conflicting stubs for the same harness (e.g., if the same `original` function is mapped to multiple `replacement` functions); or + 4. the signature of the `replacement` stub is not compatible with the signature of the `original` function/method (see next section). + +### Stub compatibility and validation + +We consider a stub and a function/method to be compatible if all the following conditions are met: + +- They have the same number of parameters. +- They have the same return type. +- Each parameter in the stub has the same type as the corresponding parameter in the original function/method. +- The stub must have the same number of generic parameters as the original function/method. +However, a generic parameter in the stub is allowed to have a different name than the corresponding parameter in the original function/method. +For example, the stub `bar(x: A, y: B) -> B` is considered to have a type compatible with the function `foo(x: S, y: T) -> T`. +- The bounds for each type parameter don't need to match; however, all calls to the original function must also satisfy the bounds of the stub. + +The final point is the most subtle. +We don't require that a type parameter in the signature of the stub implements the same traits as the corresponding type parameter in the signature of the original function/method. +However, Kani will reject a stub if a trait mismatch leads to a situation where a statically dispatched call to a trait method cannot be resolved during monomorphization. +For example, this restriction rules out the following harness: + +```rust +fn foo(_x: T) -> bool { + false +} + +trait DoIt { + fn do_it(&self) -> bool; +} + +fn bar(x: T) -> bool { + x.do_it() +} + +#[kani::proof] +#[kani::stub(foo, bar)] +fn harness() { + assert!(foo("hello")); +} +``` + +The call to the trait method `DoIt::do_it` is unresolvable in the stub `bar` when the type parameter `T` is instantiated with the type `&str`. 
+On the other hand, this approach provides some flexibility, such as allowing our earlier example of mocking `rand::random`: +both `rand::random` and `my_random` have type `() -> T`, but in the first case `T` is restricted such that the type `Standard` implements `Distribution`, +whereas in the latter case `T` has to implement `kani::Arbitrary`. +This trait mismatch is allowed because at this call site `T` is instantiated with `u32`, which implements `kani::Arbitrary`. diff --git a/docs/src/usage.md b/docs/src/usage.md index b9131f3832fb..77def63d3651 100644 --- a/docs/src/usage.md +++ b/docs/src/usage.md @@ -28,7 +28,7 @@ Common to both `kani` and `cargo kani` are many command-line flags: If used with `print`, Kani will only print the unit test to stdout. If used with `inplace`, Kani will automatically add the unit test to the user's source code, next to the proof harness. For more detailed instructions, see the [debugging verification failures](./debugging-verification-failures.md) section. - * `--visualize`: Generates an HTML report showing coverage information and providing traces (i.e., counterexamples) for each failure found by Kani. + * `--visualize`: _Experimental_, `--enable-unstable` feature that generates an HTML report providing traces (i.e., counterexamples) for each failure found by Kani. * `--tests`: Build in "[test mode](https://doc.rust-lang.org/rustc/tests/index.html)", i.e. with `cfg(test)` set and `dev-dependencies` available (when using `cargo kani`). 
diff --git a/kani-compiler/Cargo.toml b/kani-compiler/Cargo.toml index 66f3459a4790..fd12c16c79db 100644 --- a/kani-compiler/Cargo.toml +++ b/kani-compiler/Cargo.toml @@ -3,7 +3,7 @@ [package] name = "kani-compiler" -version = "0.19.0" +version = "0.23.0" edition = "2021" license = "MIT OR Apache-2.0" publish = false @@ -11,12 +11,12 @@ publish = false [dependencies] ar = { version = "0.9.0", optional = true } atty = "0.2.14" -bitflags = { version = "1.0", optional = true } cbmc = { path = "../cprover_bindings", package = "cprover_bindings", optional = true } -clap = { version = "4.0.25", features = ["cargo"] } +clap = { version = "4.1.3", features = ["cargo"] } home = "0.5" +itertools = "0.10" kani_queries = {path = "kani_queries"} -kani_metadata = { path = "../kani_metadata", optional = true } +kani_metadata = {path = "../kani_metadata"} lazy_static = "1.4.0" libc = { version = "0.2", optional = true } num = { version = "0.4.0", optional = true } @@ -35,9 +35,10 @@ tracing-tree = "0.2.2" # Future proofing: enable backend dependencies using feature. [features] default = ['cprover'] -cprover = ['ar', 'bitflags', 'cbmc', 'kani_metadata', 'libc', 'num', 'object', 'rustc-demangle', 'serde', +cprover = ['ar', 'cbmc', 'libc', 'num', 'object', 'rustc-demangle', 'serde', 'serde_json', "strum", "strum_macros"] unsound_experiments = ["kani_queries/unsound_experiments"] +write_json_symtab = [] [package.metadata.rust-analyzer] # This package uses rustc crates. 
diff --git a/kani-compiler/kani_queries/Cargo.toml b/kani-compiler/kani_queries/Cargo.toml index e7fc6a0cc92f..60b2c9e2bf01 100644 --- a/kani-compiler/kani_queries/Cargo.toml +++ b/kani-compiler/kani_queries/Cargo.toml @@ -3,7 +3,7 @@ [package] name = "kani_queries" -version = "0.19.0" +version = "0.23.0" edition = "2021" license = "MIT OR Apache-2.0" publish = false diff --git a/kani-compiler/kani_queries/src/lib.rs b/kani-compiler/kani_queries/src/lib.rs index b3993595eb00..11df541e3b03 100644 --- a/kani-compiler/kani_queries/src/lib.rs +++ b/kani-compiler/kani_queries/src/lib.rs @@ -1,19 +1,14 @@ // Copyright Kani Contributors // SPDX-License-Identifier: Apache-2.0 OR MIT -use std::sync::atomic::{AtomicBool, Ordering}; -#[cfg(not(feature = "unsound_experiments"))] -use std::sync::Mutex; +use std::sync::{Arc, Mutex}; use strum_macros::{AsRefStr, EnumString, EnumVariantNames}; #[cfg(feature = "unsound_experiments")] mod unsound_experiments; #[cfg(feature = "unsound_experiments")] -use { - crate::unsound_experiments::UnsoundExperiments, - std::sync::{Arc, Mutex}, -}; +use crate::unsound_experiments::UnsoundExperiments; #[derive(Debug, Default, Clone, Copy, AsRefStr, EnumString, EnumVariantNames, PartialEq, Eq)] #[strum(serialize_all = "snake_case")] @@ -44,6 +39,9 @@ pub trait UserInput { fn set_ignore_global_asm(&mut self, global_asm: bool); fn get_ignore_global_asm(&self) -> bool; + fn set_write_json_symtab(&mut self, write_json_symtab: bool); + fn get_write_json_symtab(&self) -> bool; + fn set_reachability_analysis(&mut self, reachability: ReachabilityType); fn get_reachability_analysis(&self) -> ReachabilityType; @@ -51,60 +49,81 @@ pub trait UserInput { fn get_stubbing_enabled(&self) -> bool; #[cfg(feature = "unsound_experiments")] - fn get_unsound_experiments(&self) -> Arc>; + fn get_unsound_experiments(&self) -> UnsoundExperiments; + #[cfg(feature = "unsound_experiments")] + fn set_unsound_experiments(&mut self, experiments: UnsoundExperiments); } 
-#[derive(Debug, Default)] +/// This structure should only be used behind a synchronized reference or a snapshot. +#[derive(Debug, Clone)] pub struct QueryDb { - check_assertion_reachability: AtomicBool, - emit_vtable_restrictions: AtomicBool, - json_pretty_print: AtomicBool, - ignore_global_asm: AtomicBool, - reachability_analysis: Mutex, + check_assertion_reachability: bool, + emit_vtable_restrictions: bool, + json_pretty_print: bool, + ignore_global_asm: bool, + /// When set, instructs the compiler to produce the symbol table for CBMC in JSON format and use symtab2gb. + write_json_symtab: bool, + reachability_analysis: ReachabilityType, stubbing_enabled: bool, #[cfg(feature = "unsound_experiments")] - unsound_experiments: Arc>, + unsound_experiments: UnsoundExperiments, +} + +impl QueryDb { + pub fn new() -> Arc> { + Arc::new(Mutex::new(QueryDb { + check_assertion_reachability: false, + emit_vtable_restrictions: false, + json_pretty_print: false, + ignore_global_asm: false, + write_json_symtab: false, + reachability_analysis: ReachabilityType::None, + stubbing_enabled: false, + #[cfg(feature = "unsound_experiments")] + unsound_experiments: unsound_experiments::UnsoundExperiments { zero_init_vars: false }, + })) + } } impl UserInput for QueryDb { fn set_emit_vtable_restrictions(&mut self, restrictions: bool) { - self.emit_vtable_restrictions.store(restrictions, Ordering::Relaxed); + self.emit_vtable_restrictions = restrictions; } fn get_emit_vtable_restrictions(&self) -> bool { - self.emit_vtable_restrictions.load(Ordering::Relaxed) + self.emit_vtable_restrictions } fn set_check_assertion_reachability(&mut self, reachability: bool) { - self.check_assertion_reachability.store(reachability, Ordering::Relaxed); + self.check_assertion_reachability = reachability; } fn get_check_assertion_reachability(&self) -> bool { - self.check_assertion_reachability.load(Ordering::Relaxed) + self.check_assertion_reachability } fn set_output_pretty_json(&mut self, pretty_json: 
bool) { - self.json_pretty_print.store(pretty_json, Ordering::Relaxed); + self.json_pretty_print = pretty_json; } fn get_output_pretty_json(&self) -> bool { - self.json_pretty_print.load(Ordering::Relaxed) + self.json_pretty_print } fn set_ignore_global_asm(&mut self, global_asm: bool) { - self.ignore_global_asm.store(global_asm, Ordering::Relaxed); + self.ignore_global_asm = global_asm; } fn get_ignore_global_asm(&self) -> bool { - self.ignore_global_asm.load(Ordering::Relaxed) + self.ignore_global_asm } fn set_reachability_analysis(&mut self, reachability: ReachabilityType) { - *self.reachability_analysis.get_mut().unwrap() = reachability; + self.reachability_analysis = reachability; } fn get_reachability_analysis(&self) -> ReachabilityType { - *self.reachability_analysis.lock().unwrap() + self.reachability_analysis } fn set_stubbing_enabled(&mut self, stubbing_enabled: bool) { @@ -115,8 +134,21 @@ impl UserInput for QueryDb { self.stubbing_enabled } + fn set_write_json_symtab(&mut self, write_json_symtab: bool) { + self.write_json_symtab = write_json_symtab; + } + + fn get_write_json_symtab(&self) -> bool { + self.write_json_symtab + } + + #[cfg(feature = "unsound_experiments")] + fn get_unsound_experiments(&self) -> UnsoundExperiments { + self.unsound_experiments + } + #[cfg(feature = "unsound_experiments")] - fn get_unsound_experiments(&self) -> Arc> { - self.unsound_experiments.clone() + fn set_unsound_experiments(&mut self, experiments: UnsoundExperiments) { + self.unsound_experiments = experiments } } diff --git a/kani-compiler/kani_queries/src/unsound_experiments.rs b/kani-compiler/kani_queries/src/unsound_experiments.rs index d2de9b4473fa..6d8095bf85cf 100644 --- a/kani-compiler/kani_queries/src/unsound_experiments.rs +++ b/kani-compiler/kani_queries/src/unsound_experiments.rs @@ -3,7 +3,7 @@ #![cfg(feature = "unsound_experiments")] -#[derive(Debug, Default)] +#[derive(Debug, Clone, Copy, Default)] pub struct UnsoundExperiments { /// Zero initilize 
variables. /// This is useful for experiments to see whether assigning constant values produces better diff --git a/kani-compiler/src/codegen_cprover_gotoc/codegen/block.rs b/kani-compiler/src/codegen_cprover_gotoc/codegen/block.rs index 9298c8851eea..b015be5b401c 100644 --- a/kani-compiler/src/codegen_cprover_gotoc/codegen/block.rs +++ b/kani-compiler/src/codegen_cprover_gotoc/codegen/block.rs @@ -3,6 +3,7 @@ use crate::codegen_cprover_gotoc::GotocCtx; use rustc_middle::mir::{BasicBlock, BasicBlockData}; +use tracing::debug; impl<'tcx> GotocCtx<'tcx> { /// Generates Goto-C for a basic block. @@ -12,6 +13,7 @@ impl<'tcx> GotocCtx<'tcx> { /// This function does not return a value, but mutates state with /// `self.current_fn_mut().push_onto_block(...)` pub fn codegen_block(&mut self, bb: BasicBlock, bbd: &BasicBlockData<'tcx>) { + debug!(?bb, "Codegen basicblock"); self.current_fn_mut().set_current_bb(bb); let label: String = self.current_fn().find_label(&bb); // the first statement should be labelled. if there is no statements, then the diff --git a/kani-compiler/src/codegen_cprover_gotoc/codegen/function.rs b/kani-compiler/src/codegen_cprover_gotoc/codegen/function.rs index ced7e00479c8..b7c98a4a2314 100644 --- a/kani-compiler/src/codegen_cprover_gotoc/codegen/function.rs +++ b/kani-compiler/src/codegen_cprover_gotoc/codegen/function.rs @@ -4,19 +4,14 @@ //! 
This file contains functions related to codegenning MIR functions into gotoc use crate::codegen_cprover_gotoc::GotocCtx; -use crate::kani_middle::attributes::{extract_integer_argument, partition_kanitool_attributes}; +use crate::kani_middle::attributes::{extract_harness_attributes, is_test_harness_closure}; use cbmc::goto_program::{Expr, Stmt, Symbol}; use cbmc::InternString; -use kani_metadata::HarnessMetadata; -use kani_queries::UserInput; -use rustc_ast::Attribute; -use rustc_hir::def::DefKind; -use rustc_hir::def_id::DefId; +use kani_metadata::{HarnessAttributes, HarnessMetadata}; +use rustc_middle::mir::traversal::reverse_postorder; use rustc_middle::mir::{Body, HasLocalDecls, Local}; -use rustc_middle::ty::layout::FnAbiOf; use rustc_middle::ty::{self, Instance}; use std::collections::BTreeMap; -use std::convert::TryInto; use std::iter::FromIterator; use tracing::{debug, debug_span}; @@ -88,14 +83,14 @@ impl<'tcx> GotocCtx<'tcx> { self.codegen_function_prelude(); self.codegen_declare_variables(); - mir.basic_blocks.iter_enumerated().for_each(|(bb, bbd)| self.codegen_block(bb, bbd)); + reverse_postorder(mir).for_each(|(bb, bbd)| self.codegen_block(bb, bbd)); let loc = self.codegen_span(&mir.span); let stmts = self.current_fn_mut().extract_block(); let body = Stmt::block(stmts, loc); self.symbol_table.update_fn_declaration_with_definition(&name, body); - self.handle_kanitool_attributes(); + self.record_kani_attributes(); self.record_test_harness_metadata(); } self.reset_current_fn(); @@ -250,90 +245,6 @@ impl<'tcx> GotocCtx<'tcx> { self.reset_current_fn(); } - /// Check that if an item is tagged with a proof_attribute, it is a valid harness. 
- fn check_proof_attribute(&self, def_id: DefId, proof_attributes: Vec<&Attribute>) { - assert!(!proof_attributes.is_empty()); - let span = proof_attributes.first().unwrap().span; - if proof_attributes.len() > 1 { - self.tcx.sess.span_warn(proof_attributes[0].span, "Duplicate attribute"); - } - - if self.tcx.def_kind(def_id) != DefKind::Fn { - self.tcx - .sess - .span_err(span, "The kani::proof attribute can only be applied to functions."); - } else if self.tcx.generics_of(def_id).requires_monomorphization(self.tcx) { - self.tcx - .sess - .span_err(span, "The proof attribute cannot be applied to generic functions."); - } else { - let instance = Instance::mono(self.tcx, def_id); - if !self.fn_abi_of_instance(instance, ty::List::empty()).args.is_empty() { - self.tcx - .sess - .span_err(span, "Functions used as harnesses can not have any arguments."); - } - } - } - - pub fn is_proof_harness(&self, def_id: DefId) -> bool { - let all_attributes = self.tcx.get_attrs_unchecked(def_id); - let (proof_attributes, _) = partition_kanitool_attributes(all_attributes); - !proof_attributes.is_empty() - } - - /// Check that all attributes assigned to an item is valid. - /// Errors will be added to the session. Invoke self.tcx.sess.abort_if_errors() to terminate - /// the session in case of an error. - pub fn check_attributes(&self, def_id: DefId) { - let all_attributes = self.tcx.get_attrs_unchecked(def_id); - let (proof_attributes, other_attributes) = partition_kanitool_attributes(all_attributes); - if !proof_attributes.is_empty() { - self.check_proof_attribute(def_id, proof_attributes); - } else if !other_attributes.is_empty() { - self.tcx.sess.span_err( - other_attributes[0].1.span, - format!( - "The {} attribute also requires the '#[kani::proof]' attribute", - other_attributes[0].0 - ) - .as_str(), - ); - } - } - - /// Does this `def_id` have `#[rustc_test_marker]`? 
- pub fn is_test_harness_description(&self, def_id: DefId) -> bool { - let attrs = self.tcx.get_attrs_unchecked(def_id); - - self.tcx.sess.contains_name(attrs, rustc_span::symbol::sym::rustc_test_marker) - } - /// Is this the closure inside of a test description const (i.e. macro expanded from a `#[test]`)? - /// - /// We're trying to detect the closure (`||`) inside code like: - /// - /// ```ignore - /// #[rustc_test_marker] - /// pub const check_2: test::TestDescAndFn = test::TestDescAndFn { - /// desc: ..., - /// testfn: test::StaticTestFn(|| test::assert_test_result(check_2())), - /// }; - /// ``` - pub fn is_test_harness_closure(&self, def_id: DefId) -> bool { - if !def_id.is_local() { - return false; - } - - let local_def_id = def_id.expect_local(); - let hir_id = self.tcx.hir().local_def_id_to_hir_id(local_def_id); - - // The parent item of the closure appears to reliably be the `const` declaration item. - let parent_id = self.tcx.hir().get_parent_item(hir_id); - let parent_def_id = parent_id.to_def_id(); - - self.is_test_harness_description(parent_def_id) - } - /// We record test harness information in kani-metadata, just like we record /// proof harness information. This is used to support e.g. cargo-kani assess. /// @@ -343,19 +254,8 @@ impl<'tcx> GotocCtx<'tcx> { /// as it add asserts for tests that return `Result` types. 
fn record_test_harness_metadata(&mut self) { let def_id = self.current_fn().instance().def_id(); - if self.is_test_harness_closure(def_id) { - let loc = self.codegen_span(&self.current_fn().mir().span); - self.test_harnesses.push(HarnessMetadata { - pretty_name: self.current_fn().readable_name().to_owned(), - mangled_name: self.current_fn().name(), - crate_name: self.current_fn().krate(), - original_file: loc.filename().unwrap(), - original_start_line: loc.start_line().unwrap() as usize, - original_end_line: loc.end_line().unwrap() as usize, - unwind_value: None, - // We record the actual path after codegen before we dump the metadata into a file. - goto_file: None, - }) + if is_test_harness_closure(self.tcx, def_id) { + self.test_harnesses.push(self.generate_metadata(None)) } } @@ -363,42 +263,16 @@ impl<'tcx> GotocCtx<'tcx> { /// attributes. /// /// Handle all attributes i.e. `#[kani::x]` (which kani_macros translates to `#[kanitool::x]` for us to handle here) - fn handle_kanitool_attributes(&mut self) { + fn record_kani_attributes(&mut self) { let def_id = self.current_fn().instance().def_id(); - let all_attributes = self.tcx.get_attrs_unchecked(def_id); - let (proof_attributes, other_attributes) = partition_kanitool_attributes(all_attributes); - if !proof_attributes.is_empty() { - self.create_proof_harness(other_attributes); - } - } - - /// Create the proof harness struct using the handler methods for various attributes - fn create_proof_harness(&mut self, other_attributes: Vec<(String, &Attribute)>) { - let mut harness = self.default_kanitool_proof(); - for attr in other_attributes.iter() { - match attr.0.as_str() { - "stub" => { - if !self.queries.get_stubbing_enabled() { - self.tcx.sess.span_warn( - attr.1.span, - "Stubbing is not enabled; attribute `kani::stub` will be ignored", - ) - } - } - "unwind" => self.handle_kanitool_unwind(attr.1, &mut harness), - _ => { - self.tcx.sess.span_err( - attr.1.span, - format!("Unsupported Annotation -> {}", 
attr.0.as_str()).as_str(), - ); - } - } + let attributes = extract_harness_attributes(self.tcx, def_id); + if attributes.is_some() { + self.proof_harnesses.push(self.generate_metadata(attributes)); } - self.proof_harnesses.push(harness); } /// Create the default proof harness for the current function - fn default_kanitool_proof(&mut self) -> HarnessMetadata { + fn generate_metadata(&self, attributes: Option) -> HarnessMetadata { let current_fn = self.current_fn(); let pretty_name = current_fn.readable_name().to_owned(); let mangled_name = current_fn.name(); @@ -411,37 +285,9 @@ impl<'tcx> GotocCtx<'tcx> { original_file: loc.filename().unwrap(), original_start_line: loc.start_line().unwrap() as usize, original_end_line: loc.end_line().unwrap() as usize, - unwind_value: None, + attributes: attributes.unwrap_or_default(), // We record the actual path after codegen before we dump the metadata into a file. goto_file: None, } } - - /// Updates the proof harness with new unwind value - fn handle_kanitool_unwind(&mut self, attr: &Attribute, harness: &mut HarnessMetadata) { - // If some unwind value already exists, then the current unwind being handled is a duplicate - if harness.unwind_value.is_some() { - self.tcx.sess.span_err(attr.span, "Only one '#[kani::unwind]' allowed"); - return; - } - // Get Attribute value and if it's not none, assign it to the metadata - match extract_integer_argument(attr) { - None => { - // There are no integers or too many arguments given to the attribute - self.tcx - .sess - .span_err(attr.span, "Exactly one Unwind Argument as Integer accepted"); - } - Some(unwind_integer_value) => { - let val: Result = unwind_integer_value.try_into(); - if val.is_err() { - self.tcx - .sess - .span_err(attr.span, "Value above maximum permitted value - u32::MAX"); - return; - } - harness.unwind_value = Some(val.unwrap()); - } - } - } } diff --git a/kani-compiler/src/codegen_cprover_gotoc/compiler_interface.rs 
b/kani-compiler/src/codegen_cprover_gotoc/compiler_interface.rs index 406a92e587ea..100fba0ef281 100644 --- a/kani-compiler/src/codegen_cprover_gotoc/compiler_interface.rs +++ b/kani-compiler/src/codegen_cprover_gotoc/compiler_interface.rs @@ -5,13 +5,17 @@ use crate::codegen_cprover_gotoc::archive::ArchiveBuilder; use crate::codegen_cprover_gotoc::GotocCtx; +use crate::kani_middle::attributes::is_proof_harness; +use crate::kani_middle::attributes::is_test_harness_description; +use crate::kani_middle::check_crate_items; use crate::kani_middle::provide; use crate::kani_middle::reachability::{ collect_reachable_items, filter_closures_in_const_crate_items, filter_crate_items, }; -use bitflags::_core::any::Any; use cbmc::goto_program::Location; +use cbmc::irep::goto_binary_serde::write_goto_binary_file; use cbmc::{InternedString, MachineModel}; +use kani_metadata::CompilerArtifactStub; use kani_metadata::{ArtifactType, HarnessMetadata, KaniMetadata}; use kani_queries::{QueryDb, ReachabilityType, UserInput}; use rustc_codegen_ssa::back::metadata::create_wrapper_file; @@ -20,7 +24,6 @@ use rustc_codegen_ssa::{CodegenResults, CrateInfo}; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::temp_dir::MaybeTempDir; use rustc_errors::ErrorGuaranteed; -use rustc_hir::def::DefKind; use rustc_hir::def_id::LOCAL_CRATE; use rustc_metadata::fs::{emit_wrapper_file, METADATA_FILENAME}; use rustc_metadata::EncodedMetadata; @@ -36,6 +39,7 @@ use rustc_session::Session; use rustc_span::def_id::DefId; use rustc_target::abi::Endian; use rustc_target::spec::PanicStrategy; +use std::any::Any; use std::collections::BTreeMap; use std::ffi::OsString; use std::fmt::Write; @@ -45,19 +49,23 @@ use std::io::Write as IoWrite; use std::iter::FromIterator; use std::path::{Path, PathBuf}; use std::process::Command; -use std::rc::Rc; +use std::sync::{Arc, Mutex}; use std::time::Instant; use tempfile::Builder as TempFileBuilder; use tracing::{debug, error, info}; #[derive(Clone)] pub 
struct GotocCodegenBackend { - queries: Rc, + /// The query is shared with `KaniCompiler` and it is initialized as part of `rustc` + /// initialization, which may happen after this object is created. + /// Since we don't have any guarantees on when the compiler creates the Backend object, neither + /// in which thread it will be used, we prefer to explicitly synchronize any query access. + queries: Arc>, } impl GotocCodegenBackend { - pub fn new(queries: &Rc) -> Self { - GotocCodegenBackend { queries: Rc::clone(queries) } + pub fn new(queries: Arc>) -> Self { + GotocCodegenBackend { queries } } } @@ -67,13 +75,17 @@ impl CodegenBackend for GotocCodegenBackend { } fn provide(&self, providers: &mut Providers) { - provide::provide(providers, &self.queries); + provide::provide(providers, &self.queries.lock().unwrap()); } fn provide_extern(&self, providers: &mut ty::query::ExternProviders) { provide::provide_extern(providers); } + fn print_version(&self) { + println!("Kani-goto version: {}", env!("CARGO_PKG_VERSION")); + } + fn codegen_crate( &self, tcx: TyCtxt, @@ -84,10 +96,10 @@ impl CodegenBackend for GotocCodegenBackend { // Follow rustc naming convention (cx is abbrev for context). // https://rustc-dev-guide.rust-lang.org/conventions.html#naming-conventions - let mut gcx = GotocCtx::new(tcx, self.queries.clone()); + let mut gcx = GotocCtx::new(tcx, (*self.queries.lock().unwrap()).clone()); check_target(tcx.sess); check_options(tcx.sess); - check_crate_items(&gcx); + check_crate_items(gcx.tcx, gcx.queries.get_ignore_global_asm()); let items = with_timer(|| collect_codegen_items(&gcx), "codegen reachability analysis"); if items.is_empty() { @@ -153,6 +165,10 @@ impl CodegenBackend for GotocCodegenBackend { // Print compilation report. 
print_report(&gcx, tcx); + // Map from name to prettyName for all symbols + let pretty_name_map: BTreeMap> = + BTreeMap::from_iter(gcx.symbol_table.iter().map(|(k, s)| (*k, s.pretty_name))); + // Map MIR types to GotoC types let type_map: BTreeMap = BTreeMap::from_iter(gcx.type_map.iter().map(|(k, v)| (*k, v.to_string().into()))); @@ -170,15 +186,23 @@ impl CodegenBackend for GotocCodegenBackend { if !tcx.sess.opts.unstable_opts.no_codegen && tcx.sess.opts.output_types.should_codegen() { let outputs = tcx.output_filenames(()); let base_filename = outputs.output_path(OutputType::Object); - let pretty = self.queries.get_output_pretty_json(); - write_file(&base_filename, ArtifactType::SymTab, &gcx.symbol_table, pretty); + let pretty = self.queries.lock().unwrap().get_output_pretty_json(); + write_file(&base_filename, ArtifactType::PrettyNameMap, &pretty_name_map, pretty); + if gcx.queries.get_write_json_symtab() { + write_file(&base_filename, ArtifactType::SymTab, &gcx.symbol_table, pretty); + symbol_table_to_gotoc(&tcx, &base_filename); + } else { + write_goto_binary_file( + &base_filename.with_extension(ArtifactType::SymTabGoto), + &gcx.symbol_table, + ); + } write_file(&base_filename, ArtifactType::TypeMap, &type_map, pretty); write_file(&base_filename, ArtifactType::Metadata, &metadata, pretty); // If they exist, write out vtable virtual call function pointer restrictions if let Some(restrictions) = vtable_restrictions { write_file(&base_filename, ArtifactType::VTableRestriction, &restrictions, pretty); } - symbol_table_to_gotoc(&tcx, &base_filename); } codegen_results(tcx, rustc_metadata, gcx.symbol_table.machine_model()) } @@ -194,7 +218,7 @@ impl CodegenBackend for GotocCodegenBackend { .unwrap()) } - /// Emit `rlib` files during the link stage if it was requested. + /// Emit output files during the link stage if it was requested. /// /// We need to emit `rlib` files normally if requested. 
Cargo expects these in some /// circumstances and sends them to subsequent builds with `-L`. @@ -204,6 +228,10 @@ impl CodegenBackend for GotocCodegenBackend { /// Types such as `bin`, `cdylib`, `dylib` will trigger the native linker. /// /// Thus, we manually build the rlib file including only the `rmeta` file. + /// + /// For cases where no metadata file was requested, we stub the file requested by writing the + /// path of the `kani-metadata.json` file so `kani-driver` can safely find the latest metadata. + /// See for more details. fn link( &self, sess: &Session, @@ -211,27 +239,37 @@ impl CodegenBackend for GotocCodegenBackend { outputs: &OutputFilenames, ) -> Result<(), ErrorGuaranteed> { let requested_crate_types = sess.crate_types(); - if !requested_crate_types.contains(&CrateType::Rlib) { - // Quit successfully if we don't need an `rlib`: - return Ok(()); + for crate_type in requested_crate_types { + let out_path = out_filename( + sess, + *crate_type, + outputs, + codegen_results.crate_info.local_crate_name, + ); + debug!(?crate_type, ?out_path, "link"); + if *crate_type == CrateType::Rlib { + // Emit the `rlib` that contains just one file: `.rmeta` + let mut builder = ArchiveBuilder::new(sess); + let tmp_dir = TempFileBuilder::new().prefix("kani").tempdir().unwrap(); + let path = MaybeTempDir::new(tmp_dir, sess.opts.cg.save_temps); + let (metadata, _metadata_position) = create_wrapper_file( + sess, + b".rmeta".to_vec(), + codegen_results.metadata.raw_data(), + ); + let metadata = emit_wrapper_file(sess, &metadata, &path, METADATA_FILENAME); + builder.add_file(&metadata); + builder.build(&out_path); + } else { + // Write the location of the kani metadata file in the requested compiler output file. 
+ let base_filename = outputs.output_path(OutputType::Object); + let content_stub = CompilerArtifactStub { + metadata_path: base_filename.with_extension(ArtifactType::Metadata), + }; + let out_file = File::create(out_path).unwrap(); + serde_json::to_writer(out_file, &content_stub).unwrap(); + } } - - // Emit the `rlib` that contains just one file: `.rmeta` - let mut builder = ArchiveBuilder::new(sess); - let tmp_dir = TempFileBuilder::new().prefix("kani").tempdir().unwrap(); - let path = MaybeTempDir::new(tmp_dir, sess.opts.cg.save_temps); - let (metadata, _metadata_position) = - create_wrapper_file(sess, b".rmeta".to_vec(), codegen_results.metadata.raw_data()); - let metadata = emit_wrapper_file(sess, &metadata, &path, METADATA_FILENAME); - builder.add_file(&metadata); - - let rlib = out_filename( - sess, - CrateType::Rlib, - outputs, - codegen_results.crate_info.local_crate_name, - ); - builder.build(&rlib); Ok(()) } } @@ -292,34 +330,6 @@ fn check_options(session: &Session) { session.abort_if_errors(); } -/// Check that all crate items are supported and there's no misconfiguration. -/// This method will exhaustively print any error / warning and it will abort at the end if any -/// error was found. -fn check_crate_items(gcx: &GotocCtx) { - let tcx = gcx.tcx; - for item in tcx.hir_crate_items(()).items() { - let def_id = item.owner_id.def_id.to_def_id(); - gcx.check_attributes(def_id); - if tcx.def_kind(def_id) == DefKind::GlobalAsm { - if !gcx.queries.get_ignore_global_asm() { - let error_msg = format!( - "Crate {} contains global ASM, which is not supported by Kani. Rerun with \ - `--enable-unstable --ignore-global-asm` to suppress this error \ - (**Verification results may be impacted**).", - gcx.short_crate_name() - ); - tcx.sess.err(&error_msg); - } else { - tcx.sess.warn(format!( - "Ignoring global ASM in crate {}. 
Verification results may be impacted.", - gcx.short_crate_name() - )); - } - } - } - tcx.sess.abort_if_errors(); -} - /// Prints a report at the end of the compilation. fn print_report(ctx: &GotocCtx, tcx: TyCtxt) { // Print all unsupported constructs. @@ -395,14 +405,14 @@ fn collect_codegen_items<'tcx>(gcx: &GotocCtx<'tcx>) -> Vec> { } ReachabilityType::Harnesses => { // Cross-crate collecting of all items that are reachable from the crate harnesses. - let harnesses = filter_crate_items(tcx, |_, def_id| gcx.is_proof_harness(def_id)); + let harnesses = filter_crate_items(tcx, |_, def_id| is_proof_harness(gcx.tcx, def_id)); collect_reachable_items(tcx, &harnesses).into_iter().collect() } ReachabilityType::Tests => { // We're iterating over crate items here, so what we have to codegen is the "test description" containing the // test closure that we want to execute let harnesses = filter_closures_in_const_crate_items(tcx, |_, def_id| { - gcx.is_test_harness_description(def_id) + is_test_harness_description(gcx.tcx, def_id) }); collect_reachable_items(tcx, &harnesses).into_iter().collect() } diff --git a/kani-compiler/src/codegen_cprover_gotoc/context/goto_ctx.rs b/kani-compiler/src/codegen_cprover_gotoc/context/goto_ctx.rs index cdf3c5923a6c..1d9d2fbaa403 100644 --- a/kani-compiler/src/codegen_cprover_gotoc/context/goto_ctx.rs +++ b/kani-compiler/src/codegen_cprover_gotoc/context/goto_ctx.rs @@ -42,13 +42,13 @@ use rustc_target::abi::Endian; use rustc_target::abi::{HasDataLayout, TargetDataLayout}; use rustc_target::spec::Target; use std::path::Path; -use std::rc::Rc; pub struct GotocCtx<'tcx> { /// the typing context pub tcx: TyCtxt<'tcx>, - /// the query system for kani - pub queries: Rc, + /// a snapshot of the query values. The queries shouldn't change at this point, + /// so we just keep a copy. 
+ pub queries: QueryDb, /// the generated symbol table for gotoc pub symbol_table: SymbolTable, pub hooks: GotocHooks<'tcx>, @@ -79,7 +79,7 @@ pub struct GotocCtx<'tcx> { /// Constructor impl<'tcx> GotocCtx<'tcx> { - pub fn new(tcx: TyCtxt<'tcx>, queries: Rc) -> GotocCtx<'tcx> { + pub fn new(tcx: TyCtxt<'tcx>, queries: QueryDb) -> GotocCtx<'tcx> { let fhks = fn_hooks(); let mm = machine_model_from_session(tcx.sess); let symbol_table = SymbolTable::new(mm); @@ -131,10 +131,10 @@ impl<'tcx> GotocCtx<'tcx> { // We likely (and should) have no instances of // calling `codegen_unimplemented` without file/line. // So while we map out of `Option` here, we expect them to always be `Some` - ( - l.filename().unwrap_or_default(), - l.start_line().map(|x| x.to_string()).unwrap_or_default(), - ) + kani_metadata::Location { + filename: l.filename().unwrap_or_default(), + start_line: l.start_line().unwrap_or_default(), + } }) .collect(), }) diff --git a/kani-compiler/src/codegen_cprover_gotoc/utils/names.rs b/kani-compiler/src/codegen_cprover_gotoc/utils/names.rs index e9d809931f9e..b2c4345109f1 100644 --- a/kani-compiler/src/codegen_cprover_gotoc/utils/names.rs +++ b/kani-compiler/src/codegen_cprover_gotoc/utils/names.rs @@ -14,11 +14,6 @@ use rustc_middle::ty::{Instance, TyCtxt}; use tracing::debug; impl<'tcx> GotocCtx<'tcx> { - /// The short crate name without versioning information. - pub fn short_crate_name(&self) -> String { - self.tcx.crate_name(LOCAL_CRATE).to_string() - } - /// The full crate name including versioning info pub fn full_crate_name(&self) -> &str { &self.full_crate_name diff --git a/kani-compiler/src/kani_compiler.rs b/kani-compiler/src/kani_compiler.rs new file mode 100644 index 000000000000..8af75fb8eb1e --- /dev/null +++ b/kani-compiler/src/kani_compiler.rs @@ -0,0 +1,196 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT + +//! This module defines all compiler extensions that form the Kani compiler. +//! +//! 
The [KaniCompiler] can be used across multiple rustc driver runs ([RunCompiler::run()]), +//! which is used to implement stubs. +//! +//! In the first run, [KaniCompiler::config] will implement the compiler configuration and it will +//! also collect any stubs that may need to be applied. This method will be a no-op for any +//! subsequent runs. The [KaniCompiler] will parse options that are passed via `-C llvm-args`. +//! +//! If no stubs need to be applied, the compiler will proceed to generate goto code, and it won't +//! need any extra runs. However, if stubs are required, we will have to restart the rustc driver +//! in order to apply the stubs. For the subsequent runs, we add the stub configuration to +//! `-C llvm-args`. + +#[cfg(feature = "cprover")] +use crate::codegen_cprover_gotoc::GotocCodegenBackend; +use crate::kani_middle::stubbing; +use crate::parser::{self, KaniCompilerParser}; +use crate::session::init_session; +use clap::ArgMatches; +use itertools::Itertools; +use kani_queries::{QueryDb, ReachabilityType, UserInput}; +use rustc_codegen_ssa::traits::CodegenBackend; +use rustc_data_structures::fx::{FxHashMap, FxHashSet}; +use rustc_driver::{Callbacks, Compilation, RunCompiler}; +use rustc_hir::definitions::DefPathHash; +use rustc_interface::Config; +use rustc_middle::ty::TyCtxt; +use rustc_session::config::ErrorOutputType; +use std::process::ExitCode; +use std::sync::{Arc, Mutex}; +use tracing::debug; + +/// Run the Kani flavour of the compiler. +/// This may require multiple runs of the rustc driver ([RunCompiler::run]). 
+pub fn run(mut args: Vec) -> ExitCode { + let mut kani_compiler = KaniCompiler::new(); + while !args.is_empty() { + let queries = kani_compiler.queries.clone(); + let mut compiler = RunCompiler::new(&args, &mut kani_compiler); + compiler.set_make_codegen_backend(Some(Box::new(move |_cfg| backend(queries)))); + if compiler.run().is_err() { + return ExitCode::FAILURE; + } + + args = kani_compiler.post_process(args).unwrap_or_default(); + debug!("Finish driver run. {}", if args.is_empty() { "Done" } else { "Run again" }); + } + ExitCode::SUCCESS +} + +/// Configure the cprover backend that generate goto-programs. +#[cfg(feature = "cprover")] +fn backend(queries: Arc>) -> Box { + Box::new(GotocCodegenBackend::new(queries)) +} + +/// Fallback backend. It will trigger an error if no backend has been enabled. +#[cfg(not(feature = "cprover"))] +fn backend(queries: Arc>) -> Box { + compile_error!("No backend is available. Only supported value today is `cprover`"); +} + +/// This object controls the compiler behavior. +/// +/// It is responsible for initializing the query database, as well as controlling the compiler +/// state machine. For stubbing, we may require multiple iterations of the rustc driver, which is +/// controlled and configured via KaniCompiler. +struct KaniCompiler { + /// Store the queries database. The queries should be initialized as part of `config`. + pub queries: Arc>, + /// Store the stubs that shall be applied if any. + stubs: Option>, + /// Store the arguments for kani compiler. + args: Option, +} + +impl KaniCompiler { + /// Create a new [KaniCompiler] instance. + pub fn new() -> KaniCompiler { + KaniCompiler { queries: QueryDb::new(), stubs: None, args: None } + } + + /// Method to be invoked after a rustc driver run. + /// It will return a list of arguments that should be used in a subsequent call to rustc + /// driver. It will return None if it has finished compiling everything. 
+ pub fn post_process(&mut self, old_args: Vec) -> Option> { + let stubs = self.stubs.replace(FxHashMap::default()).unwrap_or_default(); + if stubs.is_empty() { + None + } else { + let mut new_args = old_args; + new_args.push(stubbing::mk_rustc_arg(&stubs)); + Some(new_args) + } + } + + /// Collect the stubs that shall be applied in the next run. + fn collect_stubs(&self, tcx: TyCtxt) -> FxHashMap { + let all_stubs = stubbing::collect_stub_mappings(tcx); + if all_stubs.is_empty() { + FxHashMap::default() + } else if let Some(harnesses) = + self.args.as_ref().unwrap().get_many::(parser::HARNESS) + { + let mappings = filter_stub_mapping(harnesses.collect(), all_stubs); + if mappings.len() > 1 { + tcx.sess.err(format!( + "Failed to apply stubs. Harnesses with stubs must be verified separately. Found: `{}`", + mappings.into_keys().join("`, `"))); + FxHashMap::default() + } else { + mappings.into_values().next().unwrap_or_default() + } + } else { + // No harness was provided. Nothing to do. + FxHashMap::default() + } + } +} + +/// Use default function implementations. +impl Callbacks for KaniCompiler { + fn config(&mut self, config: &mut Config) { + if self.args.is_none() { + let mut args = vec!["kani-compiler".to_string()]; + args.extend(config.opts.cg.llvm_args.iter().cloned()); + let matches = parser::parser().get_matches_from(&args); + init_session( + &matches, + matches!(config.opts.error_format, ErrorOutputType::Json { .. }), + ); + + // Configure queries. 
+ let queries = &mut (*self.queries.lock().unwrap()); + queries.set_emit_vtable_restrictions(matches.get_flag(parser::RESTRICT_FN_PTRS)); + queries + .set_check_assertion_reachability(matches.get_flag(parser::ASSERTION_REACH_CHECKS)); + queries.set_output_pretty_json(matches.get_flag(parser::PRETTY_OUTPUT_FILES)); + queries.set_ignore_global_asm(matches.get_flag(parser::IGNORE_GLOBAL_ASM)); + queries.set_write_json_symtab( + cfg!(feature = "write_json_symtab") || matches.get_flag(parser::WRITE_JSON_SYMTAB), + ); + queries.set_reachability_analysis(matches.reachability_type()); + + #[cfg(feature = "unsound_experiments")] + crate::unsound_experiments::arg_parser::add_unsound_experiment_args_to_queries( + queries, &matches, + ); + + // If appropriate, collect and set the stub mapping. + if matches.get_flag(parser::ENABLE_STUBBING) + && queries.get_reachability_analysis() == ReachabilityType::Harnesses + { + queries.set_stubbing_enabled(true); + } + self.args = Some(matches); + debug!(?queries, "config end"); + } + } + + /// Collect stubs and return whether we should restart rustc's driver or not. + fn after_analysis<'tcx>( + &mut self, + _compiler: &rustc_interface::interface::Compiler, + rustc_queries: &'tcx rustc_interface::Queries<'tcx>, + ) -> Compilation { + if self.stubs.is_none() && self.queries.lock().unwrap().get_stubbing_enabled() { + rustc_queries.global_ctxt().unwrap().enter(|tcx| { + let stubs = self.stubs.insert(self.collect_stubs(tcx)); + debug!(?stubs, "after_analysis"); + if stubs.is_empty() { Compilation::Continue } else { Compilation::Stop } + }) + } else { + // There is no need to initialize stubs, keep compiling. + Compilation::Continue + } + } +} + +/// Find the stub mapping for the given harnesses. +/// +/// This function is necessary because Kani currently allows a harness to be +/// specified as a filter, whereas stub mappings use fully qualified names. 
+fn filter_stub_mapping( + harnesses: FxHashSet<&String>, + mut stub_mappings: FxHashMap>, +) -> FxHashMap> { + stub_mappings.retain(|name, _| { + harnesses.contains(name) || harnesses.iter().any(|harness| name.contains(*harness)) + }); + stub_mappings +} diff --git a/kani-compiler/src/kani_middle/attributes.rs b/kani-compiler/src/kani_middle/attributes.rs index 45748b873753..cf94b518d06d 100644 --- a/kani-compiler/src/kani_middle/attributes.rs +++ b/kani-compiler/src/kani_middle/attributes.rs @@ -2,33 +2,292 @@ // SPDX-License-Identifier: Apache-2.0 OR MIT //! This module contains code for processing Rust attributes (like `kani::proof`). -use rustc_ast::{AttrKind, Attribute, LitKind, MetaItem}; +use std::collections::BTreeMap; -/// Partition all the attributes into two buckets, proof_attributes and other_attributes -pub fn partition_kanitool_attributes( - all_attributes: &[Attribute], -) -> (Vec<&Attribute>, Vec<(String, &Attribute)>) { - let mut proof_attributes = vec![]; - let mut other_attributes = vec![]; +use kani_metadata::{CbmcSolver, HarnessAttributes, Stub}; +use rustc_ast::{AttrKind, Attribute, LitKind, MetaItem, MetaItemKind, NestedMetaItem}; +use rustc_hir::{def::DefKind, def_id::DefId}; +use rustc_middle::ty::{self, Instance, TyCtxt}; +use rustc_span::Span; +use std::str::FromStr; +use strum_macros::{AsRefStr, EnumString}; - for attr in all_attributes { +use rustc_middle::ty::layout::FnAbiOf; +use tracing::{debug, trace}; + +use crate::kani_middle::CompilerHelpers; + +use super::resolve; + +#[derive(Debug, Clone, Copy, AsRefStr, EnumString, PartialEq, Eq, PartialOrd, Ord)] +#[strum(serialize_all = "snake_case")] +enum KaniAttributeKind { + Proof, + Solver, + Stub, + Unwind, +} + +/// Check that all attributes assigned to an item is valid. +/// Errors will be added to the session. Invoke self.tcx.sess.abort_if_errors() to terminate +/// the session and emit all errors found. 
+pub(super) fn check_attributes(tcx: TyCtxt, def_id: DefId) { + let attributes = extract_kani_attributes(tcx, def_id); + if let Some(proof_attributes) = attributes.get(&KaniAttributeKind::Proof) { + check_proof_attribute(tcx, def_id, proof_attributes); + } else if let Some((kind, attrs)) = attributes.first_key_value() { + tcx.sess.span_err( + attrs[0].span, + format!( + "the `{}` attribute also requires the '#[kani::proof]' attribute", + kind.as_ref() + ) + .as_str(), + ); + } +} + +pub fn is_proof_harness(tcx: TyCtxt, def_id: DefId) -> bool { + let attributes = extract_kani_attributes(tcx, def_id); + attributes.contains_key(&KaniAttributeKind::Proof) +} + +/// Does this `def_id` have `#[rustc_test_marker]`? +pub fn is_test_harness_description(tcx: TyCtxt, def_id: DefId) -> bool { + let attrs = tcx.get_attrs_unchecked(def_id); + tcx.sess.contains_name(attrs, rustc_span::symbol::sym::rustc_test_marker) +} + +/// Is this the closure inside of a test description const (i.e. macro expanded from a `#[test]`)? +/// +/// We're trying to detect the closure (`||`) inside code like: +/// +/// ```ignore +/// #[rustc_test_marker] +/// pub const check_2: test::TestDescAndFn = test::TestDescAndFn { +/// desc: ..., +/// testfn: test::StaticTestFn(|| test::assert_test_result(check_2())), +/// }; +/// ``` +pub fn is_test_harness_closure(tcx: TyCtxt, def_id: DefId) -> bool { + if !def_id.is_local() { + return false; + } + + let local_def_id = def_id.expect_local(); + let hir_id = tcx.hir().local_def_id_to_hir_id(local_def_id); + + // The parent item of the closure appears to reliably be the `const` declaration item. + let parent_id = tcx.hir().get_parent_item(hir_id); + let parent_def_id = parent_id.to_def_id(); + + is_test_harness_description(tcx, parent_def_id) +} + +/// Extract all Kani attributes for a given `def_id` if any exists. +/// We only extract attributes for harnesses that are local to the current crate. 
+pub fn extract_harness_attributes(tcx: TyCtxt, def_id: DefId) -> Option { + // Abort if not local. + def_id.as_local()?; + let attributes = extract_kani_attributes(tcx, def_id); + trace!(?def_id, ?attributes, "extract_harness_attributes"); + if attributes.contains_key(&KaniAttributeKind::Proof) { + Some(attributes.into_iter().fold( + HarnessAttributes::default(), + |mut harness, (kind, attributes)| { + match kind { + KaniAttributeKind::Solver => { + // Make sure the solver is not already set + harness.solver = parse_solver(tcx, expect_single(tcx, kind, &attributes)); + } + KaniAttributeKind::Stub => { + harness.stubs = parse_stubs(tcx, def_id, attributes); + } + KaniAttributeKind::Unwind => { + harness.unwind_value = + parse_unwind(tcx, expect_single(tcx, kind, &attributes)) + } + KaniAttributeKind::Proof => harness.proof = true, + }; + harness + }, + )) + } else { + None + } +} + +fn expect_single<'a>( + tcx: TyCtxt, + kind: KaniAttributeKind, + attributes: &'a Vec<&'a Attribute>, +) -> &'a Attribute { + let attr = attributes + .first() + .expect(&format!("expected at least one attribute {} in {attributes:?}", kind.as_ref())); + if attributes.len() > 1 { + tcx.sess.span_err( + attr.span, + &format!("only one '#[kani::{}]' attribute is allowed per harness", kind.as_ref()), + ); + } + attr +} + +/// Check that if an item is tagged with a proof_attribute, it is a valid harness. 
+fn check_proof_attribute(tcx: TyCtxt, def_id: DefId, proof_attributes: &Vec<&Attribute>) { + assert!(!proof_attributes.is_empty()); + let span = proof_attributes.first().unwrap().span; + if proof_attributes.len() > 1 { + tcx.sess.span_warn(proof_attributes[0].span, "duplicate attribute"); + } + + if tcx.def_kind(def_id) != DefKind::Fn { + tcx.sess.span_err(span, "the `proof` attribute can only be applied to functions"); + } else if tcx.generics_of(def_id).requires_monomorphization(tcx) { + tcx.sess.span_err(span, "the `proof` attribute cannot be applied to generic functions"); + } else { + let instance = Instance::mono(tcx, def_id); + let helper = CompilerHelpers { tcx }; + if !helper.fn_abi_of_instance(instance, ty::List::empty()).args.is_empty() { + tcx.sess.span_err(span, "functions used as harnesses cannot have any arguments"); + } + } +} + +/// Partition all the attributes according to their kind. +fn extract_kani_attributes( + tcx: TyCtxt, + def_id: DefId, +) -> BTreeMap> { + let all_attributes = tcx.get_attrs_unchecked(def_id); + all_attributes.iter().fold(BTreeMap::default(), |mut result, attribute| { // Get the string the appears after "kanitool::" in each attribute string. // Ex - "proof" | "unwind" etc. - if let Some(attribute_string) = kanitool_attr_name(attr).as_deref() { - if attribute_string == "proof" { - proof_attributes.push(attr); + if let Some(kind) = attr_kind(tcx, attribute) { + result.entry(kind).or_default().push(attribute) + } + result + }) +} + +/// Return the unwind value from the given attribute. 
+fn parse_unwind(tcx: TyCtxt, attr: &Attribute) -> Option { + // Get Attribute value and if it's not none, assign it to the metadata + match parse_integer(attr) { + None => { + // There are no integers or too many arguments given to the attribute + tcx.sess.span_err( + attr.span, + "invalid argument for `unwind` attribute, expected an integer", + ); + None + } + Some(unwind_integer_value) => { + if let Ok(val) = unwind_integer_value.try_into() { + Some(val) } else { - other_attributes.push((attribute_string.to_string(), attr)); + tcx.sess.span_err(attr.span, "value above maximum permitted value - u32::MAX"); + None } } } +} + +fn parse_stubs(tcx: TyCtxt, harness: DefId, attributes: Vec<&Attribute>) -> Vec { + let current_module = tcx.parent_module_from_def_id(harness.expect_local()); + let check_resolve = |attr: &Attribute, name: &str| { + let result = resolve::resolve_fn(tcx, current_module, name); + if let Err(err) = result { + tcx.sess.span_err(attr.span, format!("failed to resolve `{name}`: {err}")); + } + }; + attributes + .iter() + .filter_map(|attr| match parse_paths(attr) { + Ok(paths) => match paths.as_slice() { + [orig, replace] => { + check_resolve(attr, orig); + check_resolve(attr, replace); + Some(Stub { original: orig.clone(), replacement: replace.clone() }) + } + _ => { + tcx.sess.span_err( + attr.span, + format!( + "attribute `kani::stub` takes two path arguments; found {}", + paths.len() + ), + ); + None + } + }, + Err(error_span) => { + tcx.sess.span_err( + error_span, + "attribute `kani::stub` takes two path arguments; found argument that is not a path", + ); + None + } + }) + .collect() +} + +fn parse_solver(tcx: TyCtxt, attr: &Attribute) -> Option { + // TODO: Argument validation should be done as part of the `kani_macros` crate + // + const ATTRIBUTE: &str = "#[kani::solver]"; + let invalid_arg_err = |attr: &Attribute| { + tcx.sess.span_err( + attr.span, + format!("invalid argument for `{ATTRIBUTE}` attribute, expected one of the supported 
solvers (e.g. `kissat`) or a SAT solver binary (e.g. `bin=\"\"`)") + ) + }; - (proof_attributes, other_attributes) + let attr_args = attr.meta_item_list().unwrap(); + if attr_args.len() != 1 { + tcx.sess.span_err( + attr.span, + format!( + "the `{ATTRIBUTE}` attribute expects a single argument. Got {} arguments.", + attr_args.len() + ), + ); + return None; + } + let attr_arg = &attr_args[0]; + let meta_item = attr_arg.meta_item(); + if meta_item.is_none() { + invalid_arg_err(attr); + return None; + } + let meta_item = meta_item.unwrap(); + let ident = meta_item.ident().unwrap(); + let ident_str = ident.as_str(); + match &meta_item.kind { + MetaItemKind::Word => { + let solver = CbmcSolver::from_str(ident_str); + match solver { + Ok(solver) => Some(solver), + Err(_) => { + tcx.sess.span_err(attr.span, format!("unknown solver `{ident_str}`")); + None + } + } + } + MetaItemKind::NameValue(lit) if ident_str == "bin" && lit.kind.is_str() => { + Some(CbmcSolver::Binary(lit.symbol.to_string())) + } + _ => { + invalid_arg_err(attr); + None + } + } } /// Extracts the integer value argument from the attribute provided /// For example, `unwind(8)` return `Some(8)` -pub fn extract_integer_argument(attr: &Attribute) -> Option { +fn parse_integer(attr: &Attribute) -> Option { // Vector of meta items , that contain the arguments given the attribute let attr_args = attr.meta_item_list()?; // Only extracts one integer value as argument @@ -46,28 +305,22 @@ pub fn extract_integer_argument(attr: &Attribute) -> Option { } /// Extracts a vector with the path arguments of an attribute. -/// The length of the returned vector is equal to the number of arguments in the -/// attribute; an entry is `None` if the argument is not syntactically a path, -/// and `Some()` otherwise. Paths are returned as strings. -/// -/// For example, on `stub(foo::bar, 42, baz)`, this returns -/// `vec![Some("foo::bar"), None, Some("baz")]`. 
-pub fn extract_path_arguments(attr: &Attribute) -> Vec> { +/// Emits an error if it couldn't convert any of the arguments. +fn parse_paths(attr: &Attribute) -> Result, Span> { let attr_args = attr.meta_item_list(); - if attr_args.is_none() { - return vec![]; - } - let mut paths = Vec::new(); - for arg in attr_args.unwrap() { - let entry = arg.meta_item().and_then(extract_path); - paths.push(entry) - } - paths + attr_args + .unwrap_or_default() + .iter() + .map(|arg| match arg { + NestedMetaItem::Lit(item) => Err(item.span), + NestedMetaItem::MetaItem(item) => parse_path(item).ok_or(item.span), + }) + .collect() } /// Extracts a path from an attribute item, returning `None` if the item is not /// syntactically a path. -fn extract_path(meta_item: &MetaItem) -> Option { +fn parse_path(meta_item: &MetaItem) -> Option { if meta_item.is_word() { Some( meta_item @@ -84,12 +337,20 @@ fn extract_path(meta_item: &MetaItem) -> Option { } /// If the attribute is named `kanitool::name`, this extracts `name` -fn kanitool_attr_name(attr: &Attribute) -> Option { +fn attr_kind(tcx: TyCtxt, attr: &Attribute) -> Option { match &attr.kind { AttrKind::Normal(normal) => { let segments = &normal.item.path.segments; if (!segments.is_empty()) && segments[0].ident.as_str() == "kanitool" { - Some(segments[1].ident.as_str().to_string()) + assert_eq!(segments.len(), 2, "Unexpected kani attribute {segments:?}"); + let ident_str = segments[1].ident.as_str(); + KaniAttributeKind::try_from(ident_str) + .map_err(|err| { + debug!(?err, "attr_kind_failed"); + tcx.sess.span_err(attr.span, format!("unknown solver `{ident_str}`")); + err + }) + .ok() } else { None } diff --git a/kani-compiler/src/kani_middle/mod.rs b/kani-compiler/src/kani_middle/mod.rs index 0dceab01d1f4..ccef03a5bced 100644 --- a/kani-compiler/src/kani_middle/mod.rs +++ b/kani-compiler/src/kani_middle/mod.rs @@ -2,9 +2,113 @@ // SPDX-License-Identifier: Apache-2.0 OR MIT //! This module contains code that are backend agnostic. 
For example, MIR analysis //! and transformations. + +use rustc_hir::{def::DefKind, def_id::LOCAL_CRATE}; +use rustc_middle::span_bug; +use rustc_middle::ty::layout::{ + FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, HasTyCtxt, LayoutError, LayoutOfHelpers, + TyAndLayout, +}; +use rustc_middle::ty::{self, Ty, TyCtxt}; +use rustc_span::source_map::respan; +use rustc_span::Span; +use rustc_target::abi::call::FnAbi; +use rustc_target::abi::{HasDataLayout, TargetDataLayout}; + +use self::attributes::check_attributes; + pub mod attributes; pub mod coercion; pub mod provide; pub mod reachability; pub mod resolve; pub mod stubbing; + +/// Check that all crate items are supported and there's no misconfiguration. +/// This method will exhaustively print any error / warning and it will abort at the end if any +/// error was found. +pub fn check_crate_items(tcx: TyCtxt, ignore_asm: bool) { + let krate = tcx.crate_name(LOCAL_CRATE); + for item in tcx.hir_crate_items(()).items() { + let def_id = item.owner_id.def_id.to_def_id(); + check_attributes(tcx, def_id); + if tcx.def_kind(def_id) == DefKind::GlobalAsm { + if !ignore_asm { + let error_msg = format!( + "Crate {krate} contains global ASM, which is not supported by Kani. Rerun with \ + `--enable-unstable --ignore-global-asm` to suppress this error \ + (**Verification results may be impacted**).", + ); + tcx.sess.err(&error_msg); + } else { + tcx.sess.warn(format!( + "Ignoring global ASM in crate {krate}. 
Verification results may be impacted.", + )); + } + } + } + tcx.sess.abort_if_errors(); +} + +struct CompilerHelpers<'tcx> { + tcx: TyCtxt<'tcx>, +} + +impl<'tcx> HasParamEnv<'tcx> for CompilerHelpers<'tcx> { + fn param_env(&self) -> ty::ParamEnv<'tcx> { + ty::ParamEnv::reveal_all() + } +} + +impl<'tcx> HasTyCtxt<'tcx> for CompilerHelpers<'tcx> { + fn tcx(&self) -> TyCtxt<'tcx> { + self.tcx + } +} + +impl<'tcx> HasDataLayout for CompilerHelpers<'tcx> { + fn data_layout(&self) -> &TargetDataLayout { + self.tcx.data_layout() + } +} + +impl<'tcx> LayoutOfHelpers<'tcx> for CompilerHelpers<'tcx> { + type LayoutOfResult = TyAndLayout<'tcx>; + + #[inline] + fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! { + span_bug!(span, "failed to get layout for `{}`: {}", ty, err) + } +} + +/// Implement error handling for extracting function ABI information. +impl<'tcx> FnAbiOfHelpers<'tcx> for CompilerHelpers<'tcx> { + type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>; + + #[inline] + fn handle_fn_abi_err( + &self, + err: FnAbiError<'tcx>, + span: Span, + fn_abi_request: FnAbiRequest<'tcx>, + ) -> ! { + if let FnAbiError::Layout(LayoutError::SizeOverflow(_)) = err { + self.tcx.sess.emit_fatal(respan(span, err)) + } else { + match fn_abi_request { + FnAbiRequest::OfFnPtr { sig, extra_args } => { + span_bug!( + span, + "Error: {err}\n while running `fn_abi_of_fn_ptr. ({sig}, {extra_args:?})`", + ); + } + FnAbiRequest::OfInstance { instance, extra_args } => { + span_bug!( + span, + "Error: {err}\n while running `fn_abi_of_instance. ({instance}, {extra_args:?})`", + ); + } + } + } + } +} diff --git a/kani-compiler/src/kani_middle/resolve.rs b/kani-compiler/src/kani_middle/resolve.rs index 514a73510031..cb36ec1ef32d 100644 --- a/kani-compiler/src/kani_middle/resolve.rs +++ b/kani-compiler/src/kani_middle/resolve.rs @@ -6,426 +6,404 @@ //! //! TODO: Extend this logic to support resolving qualified paths. //! +//! +//! 
Note that glob use statements can form loops. The paths can also walk through the loop. -use std::collections::VecDeque; +use std::collections::HashSet; +use std::fmt; +use std::iter::Peekable; use rustc_hir::def::{DefKind, Res}; use rustc_hir::def_id::{DefId, LocalDefId, CRATE_DEF_INDEX, LOCAL_CRATE}; use rustc_hir::{ItemKind, UseKind}; use rustc_middle::ty::TyCtxt; +use tracing::debug; -/// Attempts to resolve a simple path (in the form of a string) to a `DefId`. -/// The current module is provided as an argument in order to resolve relative -/// paths. +/// Attempts to resolve a simple path (in the form of a string) to a function / method `DefId`. /// /// TODO: Extend this implementation to handle qualified paths and simple paths /// corresponding to trait methods. /// -pub fn resolve_path(tcx: TyCtxt, current_module: LocalDefId, path_str: &str) -> Option { - let span = tracing::span!(tracing::Level::DEBUG, "path_resolution"); - let _enter = span.enter(); - - let path = to_path(tcx, current_module, path_str)?; - match &path.base { - Base::ExternPrelude => resolve_external(tcx, path.segments), - Base::LocalModule { id, may_be_external_path } => { - // Try to resolve it as a relative path first; if this fails and the - // path might be external (it wasn't qualified with `self`, etc.) - // and the current module does not have a submodule with the same - // first segment, try resolving it as an external path. - resolve_relative(tcx, *id, path.segments.clone()).or_else(|| { - if *may_be_external_path - && !has_submodule_with_name(tcx, current_module, path.segments.front()?) 
- { - resolve_external(tcx, path.segments) - } else { - None - } - }) +pub fn resolve_fn<'tcx>( + tcx: TyCtxt<'tcx>, + current_module: LocalDefId, + path_str: &str, +) -> Result> { + let result = resolve_path(tcx, current_module, path_str); + match result { + Ok(def_id) => { + let def_kind = tcx.def_kind(def_id); + if matches!(def_kind, DefKind::AssocFn | DefKind::Fn) { + Ok(def_id) + } else { + Err(ResolveError::UnexpectedType { + tcx, + item: def_id, + expected: "function / method", + }) + } } + err => err, } } -/// The segments of a path. -type Segments = VecDeque; +/// Attempts to resolve a simple path (in the form of a string) to a `DefId`. +/// The current module is provided as an argument in order to resolve relative +/// paths. +/// +/// Note: This function was written to be generic, however, it has only been tested for functions. +fn resolve_path<'tcx>( + tcx: TyCtxt<'tcx>, + current_module: LocalDefId, + path_str: &str, +) -> Result> { + let _span = tracing::span!(tracing::Level::DEBUG, "path_resolution").entered(); -/// Generates the string representation of the path composed of these segments -/// (it is inefficient, but we use it only in debugging output). -fn segments_to_string(segments: &Segments) -> String { - segments.iter().cloned().collect::>().join("::") + let path = resolve_prefix(tcx, current_module, path_str)?; + path.segments.into_iter().try_fold(path.base, |base, name| { + debug!(?base, ?name, "resolve_path"); + let def_kind = tcx.def_kind(base); + let next_item = match def_kind { + DefKind::ForeignMod | DefKind::Mod => resolve_in_module(tcx, base, &name), + DefKind::Struct | DefKind::Enum | DefKind::Union => resolve_in_type(tcx, base, &name), + kind => { + debug!(?base, ?kind, "resolve_path: unexpected item"); + Err(ResolveError::UnexpectedType { tcx, item: base, expected: "module" }) + } + }; + next_item + }) } -/// The "starting point" for a path. -#[derive(Debug)] -enum Base { - /// Indicates an external path. 
- ExternPrelude, - /// Indicates a path that may be local (and must be local if - /// `may_be_external_path` is false) and should be resolved relative to the - /// module identified by `id`. - LocalModule { id: LocalDefId, may_be_external_path: bool }, +/// Provide information about where the resolution failed. +/// Todo: Add error message. +pub enum ResolveError<'tcx> { + /// Ambiguous glob resolution. + AmbiguousGlob { tcx: TyCtxt<'tcx>, name: String, base: DefId, candidates: Vec }, + /// Use super past the root of a crate. + ExtraSuper, + /// Invalid path. + InvalidPath { msg: String }, + /// Unable to find an item. + MissingItem { tcx: TyCtxt<'tcx>, base: DefId, unresolved: String }, + /// Error triggered when the identifier points to an item with unexpected type. + UnexpectedType { tcx: TyCtxt<'tcx>, item: DefId, expected: &'static str }, } +impl<'tcx> fmt::Display for ResolveError<'tcx> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + ResolveError::ExtraSuper => { + write!(f, "there are too many leading `super` keywords") + } + ResolveError::AmbiguousGlob { tcx, base, name, candidates } => { + let location = description(*tcx, *base); + write!( + f, + "`{name}` is ambiguous because of multiple glob imports in {location}. Found:\n{}", + candidates + .iter() + .map(|def_id| tcx.def_path_str(*def_id)) + .intersperse("\n".to_string()) + .collect::() + ) + } + ResolveError::InvalidPath { msg } => write!(f, "{msg}"), + ResolveError::UnexpectedType { tcx, item: def_id, expected } => write!( + f, + "expected {expected}, found {} `{}`", + tcx.def_kind(def_id).descr(*def_id), + tcx.def_path_str(*def_id) + ), + ResolveError::MissingItem { tcx, base, unresolved } => { + let def_desc = description(*tcx, *base); + write!(f, "unable to find `{unresolved}` inside {def_desc}") + } + } + } +} + +/// The segments of a path. +type Segments = Vec; + /// A path consisting of a starting point and a bunch of segments. 
If `base` /// matches `Base::LocalModule { id: _, may_be_external_path : true }`, then /// `segments` cannot be empty. -#[derive(Debug)] +#[derive(Debug, Hash)] struct Path { - base: Base, - segments: Segments, + pub base: DefId, + pub segments: Segments, } -impl Path { - fn new(base: Base, segments: Segments) -> Self { - Path { base, segments } - } -} +/// Identifier for the top module of the crate. +const CRATE: &str = "crate"; +/// rustc represents initial `::` as `{{root}}`. +const ROOT: &str = "{{root}}"; +/// Identifier for the current module. +const SELF: &str = "self"; +/// Identifier for the parent of the current module. +const SUPER: &str = "super"; /// Takes a string representation of a path and turns it into a `Path` data -/// structure, resolving qualifiers (like `crate`, etc.) along the way. -fn to_path(tcx: TyCtxt, current_module: LocalDefId, name: &str) -> Option { - tracing::debug!("Normalizing path `{name}`"); - - const CRATE: &str = "crate"; - // rustc represents initial `::` as `{{root}}`. - const ROOT: &str = "{{root}}"; - const SELF: &str = "self"; - const SUPER: &str = "super"; +/// structure, resolving prefix qualifiers (like `crate`, `self`, etc.) along the way. +fn resolve_prefix<'tcx>( + tcx: TyCtxt<'tcx>, + current_module: LocalDefId, + name: &str, +) -> Result> { + debug!(?name, ?current_module, "resolve_prefix"); // Split the string into segments separated by `::`. - let mut segments: Segments = name.split("::").map(str::to_string).collect(); - if segments.is_empty() { - return Some(Path::new( - Base::LocalModule { id: current_module, may_be_external_path: false }, - segments, - )); - } + let mut segments = name.split("::").map(str::to_string).peekable(); + assert!(segments.peek().is_some(), "expected identifier, found `{name}`"); // Resolve qualifiers `crate`, initial `::`, and `self`. The qualifier // `self` may be followed be `super` (handled below). 
- let first = segments[0].as_str(); - let may_be_external_path = !matches!(first, CRATE | SELF | SUPER); + let first = segments.peek().unwrap().as_str(); match first { ROOT => { - segments.pop_front(); - return Some(Path::new(Base::ExternPrelude, segments)); + // Skip root and get the external crate from the name that follows `::`. + let next = segments.nth(1); + if let Some(next_name) = next { + let result = resolve_external(tcx, &next_name); + if let Some(def_id) = result { + Ok(Path { base: def_id, segments: segments.collect() }) + } else { + Err(ResolveError::MissingItem { + tcx, + base: current_module.to_def_id(), + unresolved: next_name, + }) + } + } else { + Err(ResolveError::InvalidPath { msg: "expected identifier after `::`".to_string() }) + } } CRATE => { - segments.pop_front(); + segments.next(); // Find the module at the root of the crate. let current_module_hir_id = tcx.hir().local_def_id_to_hir_id(current_module); let crate_root = match tcx.hir().parent_iter(current_module_hir_id).last() { None => current_module, Some((hir_id, _)) => tcx.hir().local_def_id(hir_id), }; - return Some(Path::new( - Base::LocalModule { id: crate_root, may_be_external_path }, - segments, - )); + Ok(Path { base: crate_root.to_def_id(), segments: segments.collect() }) } SELF => { - segments.pop_front(); + segments.next(); + resolve_super(tcx, current_module, segments) + } + SUPER => resolve_super(tcx, current_module, segments), + _ => { + // No special key word was used. Try local first otherwise try external name. + let next_name = segments.next().unwrap(); + let def_id = + resolve_in_module(tcx, current_module.to_def_id(), &next_name).or_else(|err| { + if matches!(err, ResolveError::MissingItem { .. }) { + // Only try external if we couldn't find anything. 
+ resolve_external(tcx, &next_name).ok_or(err) + } else { + Err(err) + } + })?; + Ok(Path { base: def_id, segments: segments.collect() }) } - _ => (), } +} - // Pop up the module stack until we account for all the `super` prefixes. +/// Pop up the module stack until we account for all the `super` prefixes. +/// This method will error out if it tries to backtrace from the root crate. +fn resolve_super<'tcx, I>( + tcx: TyCtxt, + current_module: LocalDefId, + mut segments: Peekable, +) -> Result> +where + I: Iterator, +{ let current_module_hir_id = tcx.hir().local_def_id_to_hir_id(current_module); let mut parents = tcx.hir().parent_iter(current_module_hir_id); let mut base_module = current_module; - while segments.front().map(String::as_str) == Some(SUPER) { - segments.pop_front(); - let parent = parents.next().map(|p| p.0).or_else(|| { - tracing::debug!("Unable to normalize path `{name}`: too many `super` qualifiers"); - None - })?; - base_module = tcx.hir().local_def_id(parent); + while segments.next_if(|segment| segment == SUPER).is_some() { + if let Some((parent, _)) = parents.next() { + debug!("parent: {parent:?}"); + base_module = tcx.hir().local_def_id(parent); + } else { + return Err(ResolveError::ExtraSuper); + } } - - Some(Path::new(Base::LocalModule { id: base_module, may_be_external_path }, segments)) + debug!("base: {base_module:?}"); + Ok(Path { base: base_module.to_def_id(), segments: segments.collect() }) } -/// Resolves an external path. -fn resolve_external(tcx: TyCtxt, mut segments: Segments) -> Option { - tracing::debug!("Resolving `{}` in the external prelude", segments_to_string(&segments)); - let first = segments.pop_front().or_else(|| { - tracing::debug!("Unable to resolve the empty path"); - None - })?; - for crate_num in tcx.crates(()) { +/// Resolves an external crate name. 
+fn resolve_external(tcx: TyCtxt, name: &str) -> Option { + debug!(?name, "resolve_external"); + tcx.crates(()).iter().find_map(|crate_num| { let crate_name = tcx.crate_name(*crate_num); - if crate_name.as_str() == first { - let crate_def_id = DefId { index: CRATE_DEF_INDEX, krate: *crate_num }; - return resolve_in_foreign_module(tcx, crate_def_id, segments); + if crate_name.as_str() == name { + Some(DefId { index: CRATE_DEF_INDEX, krate: *crate_num }) + } else { + None } - } - tracing::debug!("Unable to resolve `{first}` as an external crate"); - None + }) } /// Resolves a path relative to a foreign module. -fn resolve_in_foreign_module( - tcx: TyCtxt, - foreign_mod: DefId, - mut segments: Segments, -) -> Option { - tracing::debug!( - "Resolving `{}` in foreign module `{}`", - segments_to_string(&segments), - tcx.def_path_str(foreign_mod) - ); - - let first = segments.front().or_else(|| { - tracing::debug!("Unable to resolve the empty path"); - None - })?; - for child in tcx.module_children(foreign_mod) { - match child.res { - Res::Def(DefKind::Fn, def_id) => { - if first == child.ident.as_str() && segments.len() == 1 { - tracing::debug!( - "Resolved `{first}` as a function in foreign module `{}`", - tcx.def_path_str(foreign_mod) - ); - return Some(def_id); - } - } - Res::Def(DefKind::Mod, inner_mod_id) => { - if first == child.ident.as_str() { - segments.pop_front(); - return resolve_in_foreign_module(tcx, inner_mod_id, segments); - } - } - Res::Def(DefKind::Struct | DefKind::Enum | DefKind::Union, type_id) => { - if first == child.ident.as_str() && segments.len() == 2 { - return resolve_in_type(tcx, type_id, &segments[1]); - } - } - _ => {} - } - } - - tracing::debug!( - "Unable to resolve `{first}` as an item in foreign module `{}`", - tcx.def_path_str(foreign_mod) - ); - None +fn resolve_in_foreign_module(tcx: TyCtxt, foreign_mod: DefId, name: &str) -> Option { + debug!(?name, ?foreign_mod, "resolve_in_foreign_module"); + tcx.module_children(foreign_mod) + 
.iter() + .find_map(|item| if item.ident.as_str() == name { item.res.opt_def_id() } else { None }) } -/// Generates a more friendly string representation of a local module's name +/// Generates a more friendly string representation of a def_id including kind and name. /// (the default representation for the crate root is the empty string). -fn module_to_string(tcx: TyCtxt, current_module: LocalDefId) -> String { - let def_id = current_module.to_def_id(); +fn description(tcx: TyCtxt, def_id: DefId) -> String { + let def_kind = tcx.def_kind(def_id); + let kind_name = def_kind.descr(def_id); if def_id.is_crate_root() { - format!("module `{}`", tcx.crate_name(LOCAL_CRATE)) + format!("{kind_name} `{}`", tcx.crate_name(LOCAL_CRATE)) } else { - format!("module `{}`", tcx.def_path_str(def_id)) + format!("{kind_name} `{}`", tcx.def_path_str(def_id)) } } +/// The possible result of trying to resolve the name relative to a local module. +enum RelativeResolution { + /// Return the item that user requested. + Found(DefId), + /// Return all globs that may define the item requested. + Globs(Vec), +} + /// Resolves a path relative to a local module. -fn resolve_relative( - tcx: TyCtxt, - current_module: LocalDefId, - mut segments: Segments, -) -> Option { - tracing::debug!( - "Resolving `{}` in local {}", - segments_to_string(&segments), - module_to_string(tcx, current_module) - ); +fn resolve_relative(tcx: TyCtxt, current_module: LocalDefId, name: &str) -> RelativeResolution { + debug!(?name, ?current_module, "resolve_relative"); - let first = segments.front().or_else(|| { - tracing::debug!("Unable to resolve the empty path"); - None - })?; - let mut glob_imports = Vec::new(); - for item_id in tcx.hir().module_items(current_module) { + let mut glob_imports = vec![]; + let result = tcx.hir().module_items(current_module).find_map(|item_id| { let item = tcx.hir().item(item_id); - let def_id = item.owner_id.def_id.to_def_id(); - match item.kind { - ItemKind::Fn(..) 
=> { - if first == item.ident.as_str() && segments.len() == 1 { - tracing::debug!( - "Resolved `{first}` as a function in local {}", - module_to_string(tcx, current_module) - ); - return Some(def_id); - } - } - ItemKind::Mod(..) => { - if first == item.ident.as_str() { - segments.pop_front(); - return resolve_relative(tcx, def_id.expect_local(), segments); - } - } - ItemKind::Enum(..) | ItemKind::Struct(..) | ItemKind::Union(..) => { - if first == item.ident.as_str() && segments.len() == 2 { - return resolve_in_type(tcx, def_id, &segments[1]); - } - } - ItemKind::Use(use_path, UseKind::Single) => { - if first == item.ident.as_str() { - segments.pop_front(); - return resolve_in_use(tcx, use_path, segments); - } + if item.ident.as_str() == name { + match item.kind { + ItemKind::Use(use_path, UseKind::Single) => use_path.res[0].opt_def_id(), + ItemKind::ExternCrate(orig_name) => resolve_external( + tcx, + orig_name.as_ref().map(|sym| sym.as_str()).unwrap_or(name), + ), + _ => Some(item.owner_id.def_id.to_def_id()), } - ItemKind::Use(use_path, UseKind::Glob) => { + } else { + if let ItemKind::Use(use_path, UseKind::Glob) = item.kind { // Do not immediately try to resolve the path using this glob, // since paths resolved via non-globs take precedence. - glob_imports.push(use_path); + glob_imports.push(use_path.res[0]); } - ItemKind::ExternCrate(orig_name_opt) => { - if first == item.ident.as_str() { - if let Some(orig_name) = orig_name_opt { - segments[0] = orig_name.to_string(); - } - return resolve_external(tcx, segments); - } - } - _ => (), + None } - } - - resolve_in_glob_uses(tcx, current_module, glob_imports, &segments).or_else(|| { - tracing::debug!( - "Unable to resolve `{first}` as an item in local {}", - module_to_string(tcx, current_module) - ); - None - }) + }); + result.map_or(RelativeResolution::Globs(glob_imports), RelativeResolution::Found) } /// Resolves a path relative to a local or foreign module. 
-fn resolve_in_module(tcx: TyCtxt, current_module: DefId, segments: Segments) -> Option { +/// For local modules, if no module item matches the name we also have to traverse the list of glob +/// imports. For foreign modules, that list should've been flatten already. +fn resolve_in_module<'tcx>( + tcx: TyCtxt<'tcx>, + current_module: DefId, + name: &str, +) -> Result> { match current_module.as_local() { - None => resolve_in_foreign_module(tcx, current_module, segments), - Some(local_id) => resolve_relative(tcx, local_id, segments), - } -} - -/// Resolves a path by exploring a non-glob use statement. -fn resolve_in_use(tcx: TyCtxt, use_path: &rustc_hir::UsePath, segments: Segments) -> Option { - if let Res::Def(def_kind, def_id) = use_path.res[0] { - tracing::debug!( - "Resolving `{}` via `use` import of `{}`", - segments_to_string(&segments), - tcx.def_path_str(def_id) - ); - match def_kind { - DefKind::Fn => { - if segments.is_empty() { - tracing::debug!( - "Resolved `{}` to a function via `use` import of `{}`", - segments_to_string(&segments), - tcx.def_path_str(def_id) - ); - return Some(def_id); + None => resolve_in_foreign_module(tcx, current_module, name).ok_or_else(|| { + ResolveError::MissingItem { tcx, base: current_module, unresolved: name.to_string() } + }), + Some(local_id) => { + let result = resolve_relative(tcx, local_id, name); + match result { + RelativeResolution::Found(def_id) => Ok(def_id), + RelativeResolution::Globs(globs) => { + resolve_in_glob_uses(tcx, local_id, globs, name) } } - DefKind::Mod => return resolve_in_module(tcx, def_id, segments), - DefKind::Struct | DefKind::Enum | DefKind::Union => { - if segments.len() == 1 { - return resolve_in_type(tcx, def_id, &segments[0]); - } - } - _ => (), } } - tracing::debug!("Unable to resolve `{}` via `use` import", segments_to_string(&segments)); - None } /// Resolves a path by exploring glob use statements. 
-fn resolve_in_glob_uses( - tcx: TyCtxt, +/// Note that there could be loops in glob use statements, so we need to track modules that have +/// been visited. +fn resolve_in_glob_uses<'tcx>( + tcx: TyCtxt<'tcx>, current_module: LocalDefId, - glob_imports: Vec<&rustc_hir::UsePath>, - segments: &Segments, -) -> Option { - let glob_resolves = glob_imports - .iter() - .filter_map(|use_path| { - let span = tracing::span!(tracing::Level::DEBUG, "glob_resolution"); - let _enter = span.enter(); - resolve_in_glob_use(tcx, use_path, segments.clone()) - }) - .collect::>(); - if glob_resolves.len() == 1 { - return glob_resolves.first().copied(); - } - if glob_resolves.len() > 1 { - // Raise an error if it's ambiguous which glob import a function comes - // from. rustc will also raise an error in this case if the ambiguous - // function is present in code (and not just as an attribute argument). - // TODO: We should make this consistent with error handling for other - // cases (see ). - let location = module_to_string(tcx, current_module); - let mut msg = format!( - "glob imports in local {location} make it impossible to \ - unambiguously resolve path; the possibilities are:" - ); - for def_id in glob_resolves { - msg.push_str("\n\t"); - msg.push_str(&tcx.def_path_str(def_id)); + mut glob_resolutions: Vec, + name: &str, +) -> Result> { + let mut visited = HashSet::::default(); + let mut matches = vec![]; + while let Some(res) = glob_resolutions.pop() { + if !visited.contains(&res) { + visited.insert(res); + let result = resolve_in_glob_use(tcx, &res, name); + match result { + RelativeResolution::Found(def_id) => matches.push(def_id), + RelativeResolution::Globs(mut other_globs) => { + glob_resolutions.append(&mut other_globs) + } + } } - tcx.sess.err(msg); } - None + match matches.len() { + 0 => Err(ResolveError::MissingItem { + tcx, + base: current_module.to_def_id(), + unresolved: name.to_string(), + }), + 1 => Ok(matches.pop().unwrap()), + _ => Err(ResolveError::AmbiguousGlob 
{ + tcx, + base: current_module.to_def_id(), + name: name.to_string(), + candidates: matches, + }), + } } /// Resolves a path by exploring a glob use statement. -fn resolve_in_glob_use( - tcx: TyCtxt, - use_path: &rustc_hir::UsePath, - segments: Segments, -) -> Option { - if let Res::Def(DefKind::Mod, def_id) = use_path.res[0] { - resolve_in_module(tcx, def_id, segments) +fn resolve_in_glob_use(tcx: TyCtxt, res: &Res, name: &str) -> RelativeResolution { + if let Res::Def(DefKind::Mod, def_id) = res { + if let Some(local_id) = def_id.as_local() { + resolve_relative(tcx, local_id, name) + } else { + resolve_in_foreign_module(tcx, *def_id, name) + .map_or(RelativeResolution::Globs(vec![]), RelativeResolution::Found) + } } else { - None + // This shouldn't happen. Only module imports can use globs. + RelativeResolution::Globs(vec![]) } } /// Resolves a method in a type. It currently does not resolve trait methods /// (see ). -fn resolve_in_type(tcx: TyCtxt, type_id: DefId, name: &str) -> Option { - tracing::debug!("Resolving `{name}` in type `{}`", tcx.def_path_str(type_id)); +fn resolve_in_type<'tcx>( + tcx: TyCtxt<'tcx>, + type_id: DefId, + name: &str, +) -> Result> { + debug!(?name, ?type_id, "resolve_in_type"); // Try the inherent `impl` blocks (i.e., non-trait `impl`s). - for impl_ in tcx.inherent_impls(type_id) { - let maybe_resolved = resolve_in_impl(tcx, *impl_, name); - if maybe_resolved.is_some() { - return maybe_resolved; - } - } - tracing::debug!("Unable to resolve `{name}` in type `{}`", tcx.def_path_str(type_id)); - None -} - -/// Resolves a name in an `impl` block. 
-fn resolve_in_impl(tcx: TyCtxt, impl_id: DefId, name: &str) -> Option { - tracing::debug!("Resolving `{name}` in impl block `{}`", tcx.def_path_str(impl_id)); - for assoc_item in tcx.associated_item_def_ids(impl_id) { - let item_path = tcx.def_path_str(*assoc_item); - let last = item_path.split("::").last().unwrap(); - if last == name { - tracing::debug!("Resolved `{name}` in impl block `{}`", tcx.def_path_str(impl_id)); - return Some(*assoc_item); - } - } - tracing::debug!("Unable to resolve `{name}` in impl block `{}`", tcx.def_path_str(impl_id)); - None -} - -/// Does the current module have a (direct) submodule with the given name? -fn has_submodule_with_name(tcx: TyCtxt, current_module: LocalDefId, name: &str) -> bool { - for item_id in tcx.hir().module_items(current_module) { - let item = tcx.hir().item(item_id); - if let ItemKind::Mod(..) = item.kind { - if name == item.ident.as_str() { - return true; - } - } - } - false + tcx.inherent_impls(type_id) + .iter() + .flat_map(|impl_id| tcx.associated_item_def_ids(impl_id)) + .cloned() + .find(|item| { + let item_path = tcx.def_path_str(*item); + let last = item_path.split("::").last().unwrap(); + last == name + }) + .ok_or_else(|| ResolveError::MissingItem { + tcx, + base: type_id, + unresolved: name.to_string(), + }) } diff --git a/kani-compiler/src/kani_middle/stubbing/annotations.rs b/kani-compiler/src/kani_middle/stubbing/annotations.rs index cc4befdcae5d..d03853b1866e 100644 --- a/kani-compiler/src/kani_middle/stubbing/annotations.rs +++ b/kani-compiler/src/kani_middle/stubbing/annotations.rs @@ -2,108 +2,63 @@ // SPDX-License-Identifier: Apache-2.0 OR MIT //! This file contains code for extracting stubbing-related attributes. 
-use rustc_ast::Attribute; +use kani_metadata::Stub; use rustc_data_structures::fx::FxHashMap; -use rustc_driver::RunCompiler; -use rustc_driver::{Callbacks, Compilation}; -use rustc_errors::ErrorGuaranteed; use rustc_hir::def_id::{DefId, LocalDefId}; use rustc_hir::definitions::DefPathHash; -use rustc_interface::interface::Compiler; -use rustc_interface::Queries; use rustc_middle::ty::TyCtxt; -use crate::kani_middle::attributes::{extract_path_arguments, partition_kanitool_attributes}; -use crate::kani_middle::resolve::resolve_path; +use crate::kani_middle::attributes::extract_harness_attributes; +use crate::kani_middle::resolve::resolve_fn; /// Collects the stubs from the harnesses in a crate, running rustc (to /// expansion) with the supplied arguments `rustc_args`. pub fn collect_stub_mappings( - rustc_args: &[String], -) -> Result>, ErrorGuaranteed> { - let mut callbacks = CollectorCallbacks::default(); - let compiler = RunCompiler::new(rustc_args, &mut callbacks); - compiler.run().map(|_| callbacks.stub_mapping) -} - -/// A rustc callback that is used to collect the stub mappings specified for -/// each harness. -#[derive(Default)] -struct CollectorCallbacks { - stub_mapping: FxHashMap>, -} - -impl Callbacks for CollectorCallbacks { - /// The main callback, invoked after the HIR has been created. 
- fn after_expansion<'tcx>( - &mut self, - _compiler: &Compiler, - queries: &'tcx Queries<'tcx>, - ) -> Compilation { - queries.global_ctxt().unwrap().enter(|tcx| { - for item in tcx.hir_crate_items(()).items() { - let local_def_id = item.owner_id.def_id; - let def_id = local_def_id.to_def_id(); - let (proof, other) = partition_kanitool_attributes(tcx.get_attrs_unchecked(def_id)); - // Ignore anything that is not a harness - if proof.is_empty() { - continue; - } + tcx: TyCtxt, +) -> FxHashMap> { + tcx.hir_crate_items(()) + .items() + .filter_map(|item| { + let local_def_id = item.owner_id.def_id; + let def_id = local_def_id.to_def_id(); + let attributes = extract_harness_attributes(tcx, def_id); + // This currently runs before we validate all items. Abort if any error was found. + tcx.sess.abort_if_errors(); + attributes.map(|attrs| { + // TODO: Use collect instead. let mut stub_pairs = FxHashMap::default(); - for (name, attr) in other { - if name == "stub" { - update_stub_mapping(tcx, local_def_id, attr, &mut stub_pairs); - } + for stubs in attrs.stubs { + update_stub_mapping(tcx, local_def_id, stubs, &mut stub_pairs); } let harness_name = tcx.def_path_str(def_id); - self.stub_mapping.insert(harness_name, stub_pairs); - } - tcx.sess.abort_if_errors(); - // We do not need to continue compilation after we've collected the stub mappings - Compilation::Stop + (harness_name, stub_pairs) + }) }) - } + .collect() } /// Given a `kani::stub` attribute, tries to extract a pair of paths (the /// original function/method, and its stub). Returns `None` and errors if the /// attribute's arguments are not two paths. 
-fn extract_stubbing_pair( - tcx: TyCtxt, - harness: LocalDefId, - attr: &Attribute, -) -> Option<(DefId, DefId)> { - // Extract the attribute arguments - let args = extract_path_arguments(attr); - if args.len() != 2 { - tcx.sess.span_err( - attr.span, - format!("Attribute `kani::stub` takes two path arguments; found {}", args.len()), - ); - return None; - } - if args.iter().any(|arg| arg.is_none()) { - tcx.sess.span_err( - attr.span, - "Attribute `kani::stub` takes two path arguments; \ - found argument that is not a path", - ); - return None; - } - +fn stub_def_ids(tcx: TyCtxt, harness: LocalDefId, stub: Stub) -> Option<(DefId, DefId)> { // Resolve the attribute arguments to `DefId`s let current_module = tcx.parent_module_from_def_id(harness); let resolve = |name: &str| -> Option { - let maybe_resolved = resolve_path(tcx, current_module, name); - if let Some(def_id) = maybe_resolved { - tracing::debug!(?def_id, "Resolved {name} to {}", tcx.def_path_str(def_id)); - } else { - tcx.sess.span_err(attr.span, format!("unable to resolve function/method: {name}")); + let maybe_resolved = resolve_fn(tcx, current_module, name); + match maybe_resolved { + Ok(def_id) => { + tracing::debug!(?def_id, "Resolved {name} to {}", tcx.def_path_str(def_id)); + Some(def_id) + } + Err(err) => { + tcx.sess + .span_err(tcx.def_span(harness), format!("failed to resolve `{name}`: {err}")); + None + } } - maybe_resolved }; - let orig = resolve(args[0].as_deref().unwrap()); - let stub = resolve(args[1].as_deref().unwrap()); + let orig = resolve(&stub.original); + let stub = resolve(&stub.replacement); Some((orig?, stub?)) } @@ -112,17 +67,17 @@ fn extract_stubbing_pair( fn update_stub_mapping( tcx: TyCtxt, harness: LocalDefId, - attr: &Attribute, + stub: Stub, stub_pairs: &mut FxHashMap, ) { - if let Some((orig_id, stub_id)) = extract_stubbing_pair(tcx, harness, attr) { + if let Some((orig_id, stub_id)) = stub_def_ids(tcx, harness, stub) { let orig_hash = tcx.def_path_hash(orig_id); let 
stub_hash = tcx.def_path_hash(stub_id); let other_opt = stub_pairs.insert(orig_hash, stub_hash); if let Some(other) = other_opt { if other != stub_hash { tcx.sess.span_err( - attr.span, + tcx.def_span(harness), format!( "duplicate stub mapping: {} mapped to {} and {}", tcx.def_path_str(orig_id), diff --git a/kani-compiler/src/kani_middle/stubbing/transform.rs b/kani-compiler/src/kani_middle/stubbing/transform.rs index b8d543aaa8b9..a609e5e9b0b6 100644 --- a/kani-compiler/src/kani_middle/stubbing/transform.rs +++ b/kani-compiler/src/kani_middle/stubbing/transform.rs @@ -113,7 +113,7 @@ fn check_compatibility<'a, 'tcx>( const RUSTC_ARG_PREFIX: &str = "kani_stubs="; /// Serializes the stub mapping into a rustc argument. -pub fn mk_rustc_arg(stub_mapping: FxHashMap) -> String { +pub fn mk_rustc_arg(stub_mapping: &FxHashMap) -> String { // Serialize each `DefPathHash` as a pair of `u64`s, and the whole mapping // as an association list. let mut pairs = Vec::new(); diff --git a/kani-compiler/src/main.rs b/kani-compiler/src/main.rs index 4207df95ad4a..8336fe8173ca 100644 --- a/kani-compiler/src/main.rs +++ b/kani-compiler/src/main.rs @@ -13,6 +13,7 @@ #![feature(once_cell)] #![feature(rustc_private)] #![feature(more_qualified_paths)] +#![feature(iter_intersperse)] extern crate rustc_ast; extern crate rustc_codegen_ssa; extern crate rustc_data_structures; @@ -31,282 +32,27 @@ extern crate tempfile; #[cfg(feature = "cprover")] mod codegen_cprover_gotoc; +mod kani_compiler; mod kani_middle; mod parser; mod session; mod unsound_experiments; -use crate::kani_middle::stubbing; -use crate::parser::KaniCompilerParser; -use crate::session::init_session; -use clap::ArgMatches; -use kani_queries::{QueryDb, ReachabilityType, UserInput}; -use rustc_data_structures::fx::FxHashMap; -use rustc_driver::{Callbacks, RunCompiler}; -use rustc_hir::definitions::DefPathHash; -use std::ffi::OsStr; -use std::path::PathBuf; -use std::rc::Rc; -use std::{env, fs}; - -/// This function generates all 
rustc configurations required by our goto-c codegen. -fn rustc_gotoc_flags(lib_path: &str) -> Vec { - // The option below provides a mechanism by which definitions in the - // standard library can be overriden. See - // https://rust-lang.zulipchat.com/#narrow/stream/182449-t-compiler.2Fhelp/topic/.E2.9C.94.20Globally.20override.20an.20std.20macro/near/268873354 - // for more details. - let kani_std_rlib = PathBuf::from(lib_path).join("libstd.rlib"); - let kani_std_wrapper = format!("noprelude:std={}", kani_std_rlib.to_str().unwrap()); - let args = vec![ - "-C", - "overflow-checks=on", - "-C", - "panic=abort", - "-Z", - "unstable-options", - "-Z", - "panic_abort_tests=yes", - "-Z", - "trim-diagnostic-paths=no", - "-Z", - "human_readable_cgu_names", - "-Z", - "always-encode-mir", - "--cfg=kani", - "-Z", - "crate-attr=feature(register_tool)", - "-Z", - "crate-attr=register_tool(kanitool)", - "-L", - lib_path, - "--extern", - "kani", - "--extern", - kani_std_wrapper.as_str(), - ]; - args.iter().map(|s| s.to_string()).collect() -} +use rustc_driver::{RunCompiler, TimePassesCallbacks}; +use std::env; +use std::process::ExitCode; /// Main function. Configure arguments and run the compiler. -fn main() -> Result<(), &'static str> { - let args = parser::command_arguments(&env::args().collect()); - let matches = parser::parser().get_matches_from(args); - init_session(&matches); - - // Configure queries. 
- let mut queries = QueryDb::default(); - queries.set_emit_vtable_restrictions(matches.get_flag(parser::RESTRICT_FN_PTRS)); - queries.set_check_assertion_reachability(matches.get_flag(parser::ASSERTION_REACH_CHECKS)); - queries.set_output_pretty_json(matches.get_flag(parser::PRETTY_OUTPUT_FILES)); - queries.set_ignore_global_asm(matches.get_flag(parser::IGNORE_GLOBAL_ASM)); - queries.set_reachability_analysis(matches.reachability_type()); - #[cfg(feature = "unsound_experiments")] - crate::unsound_experiments::arg_parser::add_unsound_experiment_args_to_queries( - &mut queries, - &matches, - ); - - // Generate rustc args. - let mut rustc_args = generate_rustc_args(&matches); - - // If appropriate, collect and set the stub mapping. - if matches.get_flag(parser::ENABLE_STUBBING) - && queries.get_reachability_analysis() == ReachabilityType::Harnesses - { - queries.set_stubbing_enabled(true); - let all_stub_mappings = - stubbing::collect_stub_mappings(&rustc_args).or(Err("Failed to compile crate"))?; - let harness = matches.get_one::(parser::HARNESS).unwrap(); - let mapping = find_harness_stub_mapping(harness, all_stub_mappings).unwrap_or_default(); - rustc_args.push(stubbing::mk_rustc_arg(mapping)); - } +fn main() -> ExitCode { + session::init_panic_hook(); + let (kani_compiler, rustc_args) = parser::is_kani_compiler(env::args().collect()); // Configure and run compiler. - let mut callbacks = KaniCallbacks {}; - let mut compiler = RunCompiler::new(&rustc_args, &mut callbacks); - if matches.get_flag("goto-c") { - if cfg!(feature = "cprover") { - compiler.set_make_codegen_backend(Some(Box::new(move |_cfg| { - Box::new(codegen_cprover_gotoc::GotocCodegenBackend::new(&Rc::new(queries))) - }))); - } else { - return Err("Kani was configured without 'cprover' feature. You must enable this \ - feature in order to use --goto-c argument."); - } - } - compiler.run().or(Err("Failed to compile crate.")) -} - -/// Empty struct since we don't support any callbacks yet. 
-struct KaniCallbacks {} - -/// Use default function implementations. -impl Callbacks for KaniCallbacks {} - -/// The Kani root folder has all binaries inside bin/ and libraries inside lib/. -/// This folder can also be used as a rustc sysroot. -fn kani_root() -> PathBuf { - match env::current_exe() { - Ok(exe_path) => { - let mut path = fs::canonicalize(&exe_path).unwrap_or(exe_path); - // Current folder (bin/) - path.pop(); - // Top folder - path.pop(); - path - } - Err(e) => panic!("Failed to get current exe path: {e}"), - } -} - -/// Generate the arguments to pass to rustc_driver. -fn generate_rustc_args(args: &ArgMatches) -> Vec { - let mut rustc_args = vec![String::from("rustc")]; - if args.get_flag(parser::GOTO_C) { - let mut default_path = kani_root(); - if args.reachability_type() == ReachabilityType::Legacy { - default_path.push("legacy-lib") - } else { - default_path.push("lib"); - } - let gotoc_args = rustc_gotoc_flags( - args.get_one::(parser::KANI_LIB) - .unwrap_or(&default_path.to_str().unwrap().to_string()), - ); - rustc_args.extend_from_slice(&gotoc_args); - } - - if args.get_flag(parser::RUSTC_VERSION) { - rustc_args.push(String::from("--version")) - } - - if args.get_flag(parser::JSON_OUTPUT) { - rustc_args.push(String::from("--error-format=json")); - } - - if let Some(extra_flags) = args.get_raw(parser::RUSTC_OPTIONS) { - extra_flags.for_each(|arg| rustc_args.push(convert_arg(arg))); - } - let sysroot = sysroot_path(args); - rustc_args.push(String::from("--sysroot")); - rustc_args.push(convert_arg(sysroot.as_os_str())); - tracing::debug!(?rustc_args, "Compile"); - rustc_args -} - -/// Convert an argument from OsStr to String. -/// If conversion fails, panic with a custom message. -fn convert_arg(arg: &OsStr) -> String { - arg.to_str().expect(format!("[Error] Cannot parse argument \"{arg:?}\".").as_str()).to_string() -} - -/// Get the sysroot, for our specific version of Rust nightly. 
-/// -/// Rust normally finds its sysroot by looking at where itself (the `rustc` -/// executable) is located. This will fail for us because we're `kani-compiler` -/// and not located under the rust sysroot. -/// -/// We do know the actual name of the toolchain we need, however. -/// We look for our toolchain in the usual place for rustup. -/// -/// We previously used to pass `--sysroot` in `KANIFLAGS` from `kani-driver`, -/// but this failed to have effect when building a `build.rs` file. -/// This wasn't used anywhere but passing down here, so we've just migrated -/// the code to find the sysroot path directly into this function. -/// -/// This function will soon be removed. -#[deprecated] -fn toolchain_sysroot_path() -> PathBuf { - // If we're installed normally, we'll find `$KANI/toolchain` as a symlink to our desired toolchain - { - let kani_root = kani_root(); - let toolchain_path = kani_root.join("toolchain"); - if toolchain_path.exists() { - return toolchain_path; - } - } - - // rustup sets some environment variables during build, but this is not clearly documented. - // https://github.com/rust-lang/rustup/blob/master/src/toolchain.rs (search for RUSTUP_HOME) - // We're using RUSTUP_TOOLCHAIN here, which is going to be set by our `rust-toolchain.toml` file. - // This is a *compile-time* constant, not a dynamic lookup at runtime, so this is reliable. - let toolchain = env!("RUSTUP_TOOLCHAIN"); - - // We use the home crate to do a *runtime* determination of where rustup toolchains live - let rustup = home::rustup_home().expect("Couldn't find RUSTUP_HOME"); - let path = rustup.join("toolchains").join(toolchain); - - if !path.exists() { - panic!("Couldn't find Kani Rust toolchain {toolchain}. Tried: {}", path.display()); - } - path -} - -/// Get the sysroot relative to the binary location. -/// -/// Kani uses a custom sysroot. The `std` library and dependencies are compiled in debug mode and -/// include the entire MIR definitions needed by Kani. 
-/// -/// We do provide a `--sysroot` option that users may want to use instead. -#[allow(deprecated)] -fn sysroot_path(args: &ArgMatches) -> PathBuf { - let sysroot_arg = args.get_one::(parser::SYSROOT); - let path = if let Some(s) = sysroot_arg { - PathBuf::from(s) - } else if args.reachability_type() == ReachabilityType::Legacy || !args.get_flag(parser::GOTO_C) - { - toolchain_sysroot_path() + if kani_compiler { + kani_compiler::run(rustc_args) } else { - kani_root() - }; - - if !path.exists() { - panic!("Couldn't find Kani Rust toolchain {:?}.", path.display()); - } - tracing::debug!(?path, ?sysroot_arg, "Sysroot path."); - path -} - -/// Find the stub mapping for the given harness. -/// -/// This function is necessary because Kani currently allows a harness to be -/// specified by a partially qualified name, whereas stub mappings use fully -/// qualified names. -fn find_harness_stub_mapping( - harness: &str, - stub_mappings: FxHashMap>, -) -> Option> { - let suffix = String::from("::") + harness; - for (name, mapping) in stub_mappings { - if name == harness || name.ends_with(&suffix) { - return Some(mapping); - } - } - None -} - -#[cfg(test)] -mod args_test { - use super::*; - use crate::parser; - #[cfg(unix)] - #[test] - #[should_panic] - fn test_invalid_arg_fails() { - use std::ffi::OsString; - use std::os::unix::ffi::OsStrExt; - - // The value 0x80 is an invalid character. 
- let source = [0x68, 0x65, 0x6C, 0x6C, 0x80]; - let os_str = OsStr::from_bytes(&source[..]); - assert_eq!(os_str.to_str(), None); - - let matches = parser::parser().get_matches_from(vec![ - OsString::from("kani-compiler").as_os_str(), - OsString::from("--sysroot").as_os_str(), - OsString::from("any").as_os_str(), - os_str, - ]); - generate_rustc_args(&matches); + let mut callbacks = TimePassesCallbacks::default(); + let compiler = RunCompiler::new(&rustc_args, &mut callbacks); + if compiler.run().is_err() { ExitCode::FAILURE } else { ExitCode::SUCCESS } } } diff --git a/kani-compiler/src/parser.rs b/kani-compiler/src/parser.rs index 4efc8bca2844..55cee3e478ac 100644 --- a/kani-compiler/src/parser.rs +++ b/kani-compiler/src/parser.rs @@ -1,11 +1,9 @@ // Copyright Kani Contributors // SPDX-License-Identifier: Apache-2.0 OR MIT -use clap::value_parser; use clap::{builder::PossibleValuesParser, command, Arg, ArgAction, ArgMatches, Command}; use kani_queries::ReachabilityType; use std::env; -use std::ffi::OsString; use std::str::FromStr; use strum::VariantNames as _; @@ -36,12 +34,11 @@ pub const PRETTY_OUTPUT_FILES: &str = "pretty-json-files"; /// Option used for suppressing global ASM error. pub const IGNORE_GLOBAL_ASM: &str = "ignore-global-asm"; -/// Option name used to override the sysroot. -pub const SYSROOT: &str = "sysroot"; +/// Option used to write JSON symbol tables instead of GOTO binaries. +pub const WRITE_JSON_SYMTAB: &str = "write-json-symtab"; /// Option name used to select which reachability analysis to perform. pub const REACHABILITY: &str = "reachability"; -pub const REACHABILITY_FLAG: &str = "--reachability"; /// Option name used to specify which harness is the target. pub const HARNESS: &str = "harness"; @@ -49,27 +46,9 @@ pub const HARNESS: &str = "harness"; /// Option name used to enable stubbing. pub const ENABLE_STUBBING: &str = "enable-stubbing"; -/// Option name used to pass extra rustc-options. 
-pub const RUSTC_OPTIONS: &str = "rustc-options"; - -pub const RUSTC_VERSION: &str = "rustc-version"; - -/// Environmental variable used to retrieve extra Kani command arguments. -const KANIFLAGS_ENV_VAR: &str = "KANIFLAGS"; - -/// Flag used to indicated that we should retrieve more arguments from `KANIFLAGS' env variable. -const KANI_ARGS_FLAG: &str = "--kani-flags"; - /// Configure command options for the Kani compiler. pub fn parser() -> Command { let app = command!() - .disable_version_flag(true) - .arg( - Arg::new("kani-compiler-version") - .short('?') - .action(ArgAction::Version) - .help("Gets `kani-compiler` version."), - ) .arg( Arg::new(KANI_LIB) .long(KANI_LIB) @@ -113,32 +92,6 @@ pub fn parser() -> Command { .help("Restrict the targets of virtual table function pointer calls.") .action(ArgAction::SetTrue), ) - .arg( - Arg::new(SYSROOT) - .long(SYSROOT) - .help("Override the system root.") - .long_help( - "The \"sysroot\" is the location where Kani will look for the Rust \ - distribution.", - ) - .action(ArgAction::Set), - ) - .arg( - // TODO: Move this to a cargo wrapper. This should return kani version. - Arg::new(RUSTC_VERSION) - .short('V') - .long("version") - .help("Gets underlying rustc version.") - .action(ArgAction::SetTrue), - ) - .arg( - Arg::new(RUSTC_OPTIONS) - .help("Arguments to be passed down to rustc.") - .trailing_var_arg(true) // This allow us to fwd commands to rustc. 
- .allow_hyphen_values(true) - .value_parser(value_parser!(OsString)) // Allow invalid UTF-8 - .action(ArgAction::Append), - ) .arg( Arg::new(ASSERTION_REACH_CHECKS) .long(ASSERTION_REACH_CHECKS) @@ -166,6 +119,12 @@ pub fn parser() -> Command { .help("Suppress error due to the existence of global_asm in a crate") .action(ArgAction::SetTrue), ) + .arg( + Arg::new(WRITE_JSON_SYMTAB) + .long(WRITE_JSON_SYMTAB) + .help("Instruct the compiler to produce GotoC symbol tables in JSON format instead of GOTO binary format.") + .action(ArgAction::SetTrue), + ) .arg( // TODO: Remove this argument once stubbing works for multiple harnesses at a time. // @@ -173,7 +132,7 @@ pub fn parser() -> Command { .long(HARNESS) .help("Selects the harness to target.") .value_name("HARNESS") - .action(ArgAction::Set), + .action(ArgAction::Append), ) .arg( Arg::new(ENABLE_STUBBING) @@ -181,6 +140,12 @@ pub fn parser() -> Command { .help("Instruct the compiler to perform stubbing.") .requires(HARNESS) .action(ArgAction::SetTrue), + ) + .arg( + Arg::new("check-version") + .long("check-version") + .action(ArgAction::Set) + .help("Pass the kani version to the compiler to ensure cache coherence."), ); #[cfg(feature = "unsound_experiments")] let app = crate::unsound_experiments::arg_parser::add_unsound_experiments_to_parser(app); @@ -199,41 +164,28 @@ impl KaniCompilerParser for ArgMatches { } } -/// Retrieves the arguments from the command line and process hack to incorporate CARGO arguments. +/// Return whether we should run our flavour of the compiler, and which arguments to pass to rustc. /// -/// The kani-compiler requires the flags related to the kani libraries to be -/// in front of the ones that control rustc. +/// We add a `--kani-compiler` argument to run the Kani version of the compiler, which needs to be +/// filtered out before passing the arguments to rustc. 
/// -/// For cargo kani, cargo sometimes adds flags before the custom RUSTFLAGS, hence, -/// we use a special environment variable to set Kani specific flags. These flags -/// should only be enabled if --kani-flags is present. -/// FIXME: Remove this hack once we use cargo build-plan instead. -pub fn command_arguments(args: &Vec) -> Vec { +/// All other Kani arguments are today located inside `--llvm-args`. +pub fn is_kani_compiler(args: Vec) -> (bool, Vec) { assert!(!args.is_empty(), "Arguments should always include executable name"); - let has_kani_flags = args.iter().any(|arg| arg.eq(KANI_ARGS_FLAG)); - if has_kani_flags { - let mut filter_args = vec![KANI_ARGS_FLAG]; - let mut new_args: Vec = Vec::new(); - new_args.push(args[0].clone()); - // For cargo kani, --reachability is included as a rustc argument. - let reachability = args.iter().find(|arg| arg.starts_with(REACHABILITY_FLAG)); - if let Some(value) = reachability { - new_args.push(value.clone()); - filter_args.push(value) - } - // Add all the other kani specific arguments are inside $KANIFLAGS. - let env_flags = env::var(KANIFLAGS_ENV_VAR).unwrap_or_default(); - new_args.extend( - shell_words::split(&env_flags) - .expect(&format!("Cannot parse {KANIFLAGS_ENV_VAR} value '{env_flags}'")), - ); - // Add the leftover arguments for rustc at the end. 
- new_args - .extend(args[1..].iter().filter(|&arg| !filter_args.contains(&arg.as_str())).cloned()); - new_args - } else { - args.clone() - } + const KANI_COMPILER: &str = "--kani-compiler"; + let mut has_kani_compiler = false; + let new_args = args + .into_iter() + .filter(|arg| { + if arg == KANI_COMPILER { + has_kani_compiler = true; + false + } else { + true + } + }) + .collect(); + (has_kani_compiler, new_args) } #[cfg(test)] @@ -274,18 +226,19 @@ mod parser_test { fn test_cargo_kani_hack_noop() { let args = ["kani-compiler", "some/path"]; let args = args.map(String::from); - let new_args = command_arguments(&Vec::from(args.clone())); + let (is_kani, new_args) = is_kani_compiler(Vec::from(args.clone())); assert_eq!(args.as_slice(), new_args.as_slice()); + assert!(!is_kani); } #[test] fn test_cargo_kani_hack_no_args() { - env::remove_var(KANIFLAGS_ENV_VAR); - let args = ["kani-compiler", "some/path", "--kani-flags"]; + let args = ["kani_compiler", "some/path", "--kani-compiler"]; let args = args.map(String::from); - let new_args = command_arguments(&Vec::from(args.clone())); - assert_eq!(new_args.len(), 2, "New args should not include --kani-flags"); + let (is_kani, new_args) = is_kani_compiler(Vec::from(args.clone())); + assert_eq!(new_args.len(), 2, "New args should not include --kani-compiler"); assert_eq!(new_args[0], args[0]); assert_eq!(new_args[1], args[1]); + assert!(is_kani); } } diff --git a/kani-compiler/src/session.rs b/kani-compiler/src/session.rs index 460cffe364f4..9b83d55a8c34 100644 --- a/kani-compiler/src/session.rs +++ b/kani-compiler/src/session.rs @@ -5,6 +5,10 @@ use crate::parser; use clap::ArgMatches; +use rustc_errors::{ + emitter::Emitter, emitter::HumanReadableErrorType, fallback_fluent_bundle, json::JsonEmitter, + ColorConfig, Diagnostic, +}; use std::panic; use std::str::FromStr; use std::sync::LazyLock; @@ -18,7 +22,7 @@ const LOG_ENV_VAR: &str = "KANI_LOG"; const BUG_REPORT_URL: &str = 
"https://github.com/model-checking/kani/issues/new?labels=bug&template=bug_report.md"; -// Custom panic hook. +// Custom panic hook when running under user friendly message format. #[allow(clippy::type_complexity)] static PANIC_HOOK: LazyLock) + Sync + Send + 'static>> = LazyLock::new(|| { @@ -30,22 +34,51 @@ static PANIC_HOOK: LazyLock) + Sync + Send + 's // Print the Kani message eprintln!("Kani unexpectedly panicked during compilation."); - eprintln!( - "If you are seeing this message, please file an issue here: {BUG_REPORT_URL}" + eprintln!("Please file an issue here: {BUG_REPORT_URL}"); + })); + hook + }); + +// Custom panic hook when executing under json error format `--error-format=json`. +#[allow(clippy::type_complexity)] +static JSON_PANIC_HOOK: LazyLock) + Sync + Send + 'static>> = + LazyLock::new(|| { + let hook = panic::take_hook(); + panic::set_hook(Box::new(|info| { + // Print stack trace. + let msg = format!("Kani unexpectedly panicked at {info}.",); + let fallback_bundle = + fallback_fluent_bundle(rustc_errors::DEFAULT_LOCALE_RESOURCES, false); + let mut emitter = JsonEmitter::basic( + false, + HumanReadableErrorType::Default(ColorConfig::Never), + None, + fallback_bundle, + None, + false, + false, ); + let diagnostic = Diagnostic::new(rustc_errors::Level::Bug, msg); + emitter.emit_diagnostic(&diagnostic); + (*JSON_PANIC_HOOK)(info); })); hook }); /// Initialize compiler session. -pub fn init_session(args: &ArgMatches) { +pub fn init_session(args: &ArgMatches, json_hook: bool) { // Initialize the rustc logger using value from RUSTC_LOG. We keep the log control separate // because we cannot control the RUSTC log format unless if we match the exact tracing // version used by RUSTC. - rustc_driver::init_rustc_env_logger(); + // TODO: Enable rustc log when we upgrade the toolchain. + // + // + // rustc_driver::init_rustc_env_logger(); - // Kani panic hook. - init_panic_hook(); + // Install Kani panic hook. 
+ if json_hook { + json_panic_hook() + } // Kani logger initialization. init_logger(args); @@ -90,7 +123,12 @@ fn hier_logs(args: &ArgMatches, filter: EnvFilter) { tracing::subscriber::set_global_default(subscriber).unwrap(); } -fn init_panic_hook() { +pub fn init_panic_hook() { // Install panic hook LazyLock::force(&PANIC_HOOK); // Install ice hook } + +pub fn json_panic_hook() { + // Install panic hook + LazyLock::force(&JSON_PANIC_HOOK); // Install ice hook +} diff --git a/kani-compiler/src/unsound_experiments/arg_parser.rs b/kani-compiler/src/unsound_experiments/arg_parser.rs index 075c2e29edb3..0fc179f4f69a 100644 --- a/kani-compiler/src/unsound_experiments/arg_parser.rs +++ b/kani-compiler/src/unsound_experiments/arg_parser.rs @@ -17,6 +17,7 @@ pub fn add_unsound_experiments_to_parser(app: Command) -> Command { } pub fn add_unsound_experiment_args_to_queries(queries: &mut QueryDb, matches: &ArgMatches) { - queries.get_unsound_experiments().lock().unwrap().zero_init_vars = - matches.get_flag(ZERO_INIT_VARS); + let mut experiments = queries.get_unsound_experiments(); + experiments.zero_init_vars = matches.get_flag(ZERO_INIT_VARS); + queries.set_unsound_experiments(experiments); } diff --git a/kani-compiler/src/unsound_experiments/zero_init.rs b/kani-compiler/src/unsound_experiments/zero_init.rs index 2b3cbba4d84e..60f1fb6e34f0 100644 --- a/kani-compiler/src/unsound_experiments/zero_init.rs +++ b/kani-compiler/src/unsound_experiments/zero_init.rs @@ -15,7 +15,7 @@ impl<'tcx> GotocCtx<'tcx> { /// Otherwise, returns `None` which leaves the variable uninitilized. /// In CBMC, this translates to a NONDET value. 
pub fn codegen_default_initializer(&mut self, e: &Expr) -> Option { - if self.queries.get_unsound_experiments().lock().unwrap().zero_init_vars { + if self.queries.get_unsound_experiments().zero_init_vars { Some(e.typ().zero_initializer(&self.symbol_table)) } else { None @@ -35,7 +35,7 @@ impl<'tcx> GotocCtx<'tcx> { if layout.is_zst() || dst_type.sizeof_in_bits(&self.symbol_table) == 0 { // We ignore assignment for all zero size types Stmt::skip(loc) - } else if self.queries.get_unsound_experiments().lock().unwrap().zero_init_vars { + } else if self.queries.get_unsound_experiments().zero_init_vars { let init = goto_place.typ().zero_initializer(&self.symbol_table); goto_place.assign(init, loc) } else { diff --git a/kani-dependencies b/kani-dependencies index 44cd15ce74f5..e78a9da0bfbc 100644 --- a/kani-dependencies +++ b/kani-dependencies @@ -1,4 +1,4 @@ -CBMC_VERSION="5.75.0" +CBMC_VERSION="5.78.0" # If you update this version number, remember to bump it in `src/setup.rs` too CBMC_VIEWER_VERSION="3.8" KISSAT_VERSION="3.0.0" diff --git a/kani-driver/Cargo.toml b/kani-driver/Cargo.toml index efab38d19d28..f70c11ce6509 100644 --- a/kani-driver/Cargo.toml +++ b/kani-driver/Cargo.toml @@ -3,7 +3,7 @@ [package] name = "kani-driver" -version = "0.19.0" +version = "0.23.0" edition = "2021" description = "Build a project with Kani and run all proof harnesses" license = "MIT OR Apache-2.0" @@ -24,17 +24,21 @@ console = "0.15.1" once_cell = "1.13.0" serde = { version = "1", features = ["derive"] } serde_json = "1" -clap = { version = "4.0.26", features = ["derive"] } +clap = { version = "4.1.3", features = ["derive"] } glob = "0.3" -toml = "0.5" +toml = "0.7" regex = "1.6" rustc-demangle = "0.1.21" pathdiff = "0.2.1" rayon = "1.5.3" comfy-table = "6.0.0" +strum = {version = "0.24.0"} +strum_macros = {version = "0.24.0"} tracing = {version = "0.1", features = ["max_level_trace", "release_max_level_debug"]} tracing-subscriber = {version = "0.3.8", features = ["env-filter", 
"json", "fmt"]} tracing-tree = "0.2.2" +rand = "0.8" +which = "4.4.0" # A good set of suggested dependencies can be found in rustup: # https://github.com/rust-lang/rustup/blob/master/Cargo.toml diff --git a/kani-driver/src/args.rs b/kani-driver/src/args.rs index bdaf871506a6..5707571fd565 100644 --- a/kani-driver/src/args.rs +++ b/kani-driver/src/args.rs @@ -4,10 +4,17 @@ #[cfg(feature = "unsound_experiments")] use crate::unsound_experiments::UnsoundExperimentArgs; use crate::util::warning; +use kani_metadata::CbmcSolver; -use clap::{error::Error, error::ErrorKind, CommandFactory, ValueEnum}; +use clap::builder::{PossibleValue, TypedValueParser}; +use clap::{ + error::ContextKind, error::ContextValue, error::Error, error::ErrorKind, CommandFactory, + ValueEnum, +}; use std::ffi::OsString; use std::path::PathBuf; +use std::str::FromStr; +use strum::VariantNames; // By default we configure CBMC to use 16 bits to represent the object bits in pointers. const DEFAULT_OBJECT_BITS: u32 = 16; @@ -106,6 +113,10 @@ pub struct KaniArgs { #[arg(long)] pub target_dir: Option, + /// Force Kani to rebuild all packages before the verification. + #[arg(long)] + pub force_build: bool, + /// Toggle between different styles of output #[arg(long, default_value = "regular", ignore_case = true, value_enum)] pub output_format: OutputFormat, @@ -121,9 +132,15 @@ pub struct KaniArgs { /// This is an unstable feature. Consider using --harness instead #[arg(long, hide = true, requires("enable_unstable"))] pub function: Option, - /// Entry point for verification (proof harness) - #[arg(long, conflicts_with = "function")] - pub harness: Option, + /// If specified, only run harnesses that match this filter. This option can be provided + /// multiple times, which will run all tests matching any of the filters. + #[arg( + long = "harness", + conflicts_with = "function", + num_args(1), + value_name = "HARNESS_FILTER" + )] + pub harnesses: Vec, /// Link external C files referenced by Rust code. 
/// This is an experimental feature and requires `--enable-unstable` to be used @@ -148,8 +165,11 @@ pub struct KaniArgs { #[arg(long)] pub default_unwind: Option, /// Specify the value used for loop unwinding for the specified harness in CBMC - #[arg(long, requires("harness"))] + #[arg(long, requires("harnesses"))] pub unwind: Option, + /// Specify the CBMC solver to use. Overrides the harness `solver` attribute. + #[arg(long, value_parser = CbmcSolverValueParser::new(CbmcSolver::VARIANTS))] + pub solver: Option, /// Pass through directly to CBMC; must be the last flag. /// This feature is unstable and it requires `--enable_unstable` to be used #[arg( @@ -200,6 +220,10 @@ pub struct KaniArgs { #[arg(long, hide_short_help = true, requires("enable_unstable"))] pub ignore_global_asm: bool, + /// Write the GotoC symbol table to a file in JSON format instead of goto binary format. + #[arg(long, hide_short_help = true)] + pub write_json_symtab: bool, + /// Execute CBMC's sanity checks to ensure the goto-program we generate is correct. #[arg(long, hide_short_help = true, requires("enable_unstable"))] pub run_sanity_checks: bool, @@ -208,6 +232,16 @@ pub struct KaniArgs { #[arg(long, hide_short_help = true, requires("enable_unstable"))] pub no_slice_formula: bool, + /// Synthesize loop contracts for all loops. + #[arg( + long, + hide_short_help = true, + requires("enable_unstable"), + conflicts_with("unwind"), + conflicts_with("default_unwind") + )] + pub synthesize_loop_contracts: bool, + /// Randomize the layout of structures. This option can help catching code that relies on /// a specific layout chosen by the compiler that is not guaranteed to be stable in the future. 
/// If a value is given, it will be used as the seed for randomization @@ -222,7 +256,7 @@ pub struct KaniArgs { long, hide_short_help = true, requires("enable_unstable"), - requires("harness"), + requires("harnesses"), conflicts_with("concrete_playback") )] pub enable_stubbing: bool, @@ -468,6 +502,14 @@ impl KaniArgs { ); } + if self.visualize && !self.enable_unstable { + return Err(Error::raw( + ErrorKind::MissingRequiredArgument, + "Missing argument: --visualize now requires --enable-unstable + due to open issues involving incorrect results.", + )); + } + if self.mir_linker { self.print_deprecated("--mir-linker"); } @@ -549,6 +591,61 @@ impl KaniArgs { } } +/// clap parser for `CbmcSolver` +#[derive(Clone, Debug)] +pub struct CbmcSolverValueParser(Vec); + +impl CbmcSolverValueParser { + pub fn new(values: impl Into) -> Self { + values.into() + } +} + +impl TypedValueParser for CbmcSolverValueParser { + type Value = CbmcSolver; + + fn parse_ref( + &self, + cmd: &clap::builder::Command, + arg: Option<&clap::builder::Arg>, + value: &std::ffi::OsStr, + ) -> Result { + let value = value.to_str().unwrap(); + // `value` is one of the possible `CbmcSolver` values or `bin=` + let segments: Vec<&str> = value.split('=').collect(); + + let mut err = clap::Error::new(ErrorKind::InvalidValue).with_cmd(cmd); + err.insert(ContextKind::InvalidArg, ContextValue::String(arg.unwrap().to_string())); + err.insert(ContextKind::InvalidValue, ContextValue::String(value.to_string())); + + if segments.len() == 2 { + if segments[0] != "bin" { + return Err(err); + } + return Ok(CbmcSolver::Binary(segments[1].into())); + } else if segments.len() == 1 { + let solver = CbmcSolver::from_str(value); + return solver.map_err(|_| err); + } + Err(err) + } + + /// Used for the help message + fn possible_values(&self) -> Option + '_>> { + Some(Box::new(self.0.iter().cloned())) + } +} + +impl From for CbmcSolverValueParser +where + I: IntoIterator, + T: Into, +{ + fn from(values: I) -> Self { + 
Self(values.into_iter().map(|t| t.into()).collect()) + } +} + #[cfg(test)] mod tests { use clap::Parser; @@ -577,6 +674,25 @@ mod tests { .unwrap(); // no assertion: the above might fail if it fails to allow 0 args to cbmc-args } + + /// Ensure users can pass multiple harnesses options and that the value is accumulated. + #[test] + fn check_multiple_harnesses() { + let args = + StandaloneArgs::try_parse_from("kani input.rs --harness a --harness b".split(" ")) + .unwrap(); + assert_eq!(args.common_opts.harnesses, vec!["a".to_owned(), "b".to_owned()]); + } + + #[test] + fn check_multiple_harnesses_without_flag_fail() { + let result = StandaloneArgs::try_parse_from( + "kani input.rs --harness harness_1 harness_2".split(" "), + ); + assert!(result.is_err()); + assert_eq!(result.unwrap_err().kind(), ErrorKind::UnknownArgument); + } + #[test] fn check_multiple_packages() { // accepts repeated: @@ -590,8 +706,8 @@ mod tests { ]); // BUG: should not accept sequential: // Related: https://github.com/model-checking/kani/issues/2025 - // Currently asserting this backwards from how it should be! 
- assert!(!b.is_err()); + // This assert should ideally return an error, and the assertion should instead be assert!(b.is_err()) + assert!(b.is_ok()); } fn check(args: &str, require_unstable: bool, pred: fn(StandaloneArgs) -> bool) { diff --git a/kani-driver/src/args_toml.rs b/kani-driver/src/args_toml.rs index 14b5d34d3d18..c6ab7dc65c28 100644 --- a/kani-driver/src/args_toml.rs +++ b/kani-driver/src/args_toml.rs @@ -106,7 +106,8 @@ fn toml_to_args(tomldata: &str) -> Result<(Vec, Vec)> { for (flag, value) in map { if flag == "cbmc-args" { // --cbmc-args has to come last because it eats all remaining arguments - insert_arg_from_toml(&flag, &value, &mut cbmc_args)?; + cbmc_args.push("--cbmc-args".into()); + cbmc_args.append(&mut cbmc_arg_from_toml(&value)?); } else { insert_arg_from_toml(&flag, &value, &mut args)?; } @@ -129,9 +130,9 @@ fn insert_arg_from_toml(flag: &str, value: &Value, args: &mut Vec) -> } } Value::Array(a) => { - args.push(format!("--{flag}").into()); for arg in a { if let Some(arg) = arg.as_str() { + args.push(format!("--{flag}").into()); args.push(arg.into()); } else { bail!("flag {} contains non-string values", flag); @@ -149,6 +150,33 @@ fn insert_arg_from_toml(flag: &str, value: &Value, args: &mut Vec) -> Ok(()) } +/// Translates one toml entry (flag, value) into arguments and inserts it into `args` +fn cbmc_arg_from_toml(value: &Value) -> Result> { + let mut args = vec![]; + const CBMC_FLAG: &str = "--cbmc-args"; + match value { + Value::Boolean(_) => { + bail!("cannot pass boolean value to `{CBMC_FLAG}`") + } + Value::Array(a) => { + for arg in a { + if let Some(arg) = arg.as_str() { + args.push(arg.into()); + } else { + bail!("flag {CBMC_FLAG} contains non-string values"); + } + } + } + Value::String(s) => { + args.push(s.into()); + } + _ => { + bail!("Unknown key type {CBMC_FLAG}"); + } + } + Ok(args) +} + /// Take 'a.b.c' and turn it into 'start['a']['b']['c']' reliably, and interpret the result as a table fn get_table<'a>(start: &'a 
Value, table: &str) -> Option<&'a Table> { let mut current = start; diff --git a/kani-driver/src/assess/args.rs b/kani-driver/src/assess/args.rs index 38143c992a08..ccb69910229b 100644 --- a/kani-driver/src/assess/args.rs +++ b/kani-driver/src/assess/args.rs @@ -28,6 +28,9 @@ pub enum AssessSubcommand { /// `cargo kani assess scan` subcommand arguments #[derive(Debug, Parser)] pub struct ScanArgs { + // TODO: When assess scan is invoked using `--existing-only`, it should check if the Kani version + // from the existing metadata files matches the current version. Otherwise, the results may no + // longer hold. /// Don't run assess on found packages, just re-analyze the results from a previous run #[arg(long, hide = true)] pub existing_only: bool, diff --git a/kani-driver/src/assess/metadata.rs b/kani-driver/src/assess/metadata.rs index ab43e6d1fd47..122261ce66bb 100644 --- a/kani-driver/src/assess/metadata.rs +++ b/kani-driver/src/assess/metadata.rs @@ -27,6 +27,11 @@ use super::AssessArgs; /// This is not a stable interface. #[derive(Serialize, Deserialize)] pub struct AssessMetadata { + /// Kani version that was used to generate the metadata. + #[serde(rename = "kani_version")] + pub version: String, + /// Contains an error message in cases where it failed the execution. 
+ pub error: Option, /// Report on the presence of `codegen_unimplemented` in the analyzed packages pub unsupported_features: TableBuilder, /// Report of the reasons why tests could not be analyzed by Kani @@ -35,32 +40,62 @@ pub struct AssessMetadata { pub promising_tests: TableBuilder, } +impl AssessMetadata { + pub fn new( + unsupported_features: TableBuilder, + failure_reasons: TableBuilder, + promising_tests: TableBuilder, + ) -> AssessMetadata { + AssessMetadata { + version: env!("CARGO_PKG_VERSION").to_string(), + error: None, + unsupported_features, + failure_reasons, + promising_tests, + } + } + + pub fn from_error(err: &dyn std::error::Error) -> AssessMetadata { + let error = Some(SessionError { + root_cause: err.source().map(|e| format!("{e:#}")), + msg: err.to_string(), + }); + AssessMetadata { + version: env!("CARGO_PKG_VERSION").to_string(), + error, + unsupported_features: TableBuilder::new(), + failure_reasons: TableBuilder::new(), + promising_tests: TableBuilder::new(), + } + } + pub fn empty() -> AssessMetadata { + AssessMetadata { + version: env!("CARGO_PKG_VERSION").to_string(), + error: None, + unsupported_features: TableBuilder::new(), + failure_reasons: TableBuilder::new(), + promising_tests: TableBuilder::new(), + } + } +} + +#[derive(Serialize, Deserialize, Debug)] +pub struct SessionError { + pub root_cause: Option, + pub msg: String, +} + /// If given the argument to so do, write the assess metadata to the target file. 
-pub(crate) fn write_metadata(args: &AssessArgs, build: AssessMetadata) -> Result<()> { +pub(crate) fn write_metadata(args: &AssessArgs, metadata: AssessMetadata) -> Result<()> { if let Some(path) = &args.emit_metadata { let out_file = File::create(&path)?; let writer = BufWriter::new(out_file); // use pretty for now to keep things readable and debuggable, but this should change eventually - serde_json::to_writer_pretty(writer, &build)?; + serde_json::to_writer_pretty(writer, &metadata)?; } Ok(()) } -/// Write metadata with unsupported features only, supporting the `--only-codegen` option. -pub(crate) fn write_partial_metadata( - args: &AssessArgs, - unsupported_features: TableBuilder, -) -> Result<()> { - write_metadata( - args, - AssessMetadata { - unsupported_features, - failure_reasons: TableBuilder::new(), - promising_tests: TableBuilder::new(), - }, - ) -} - /// Read assess metadata from a file. pub(crate) fn read_metadata(path: &Path) -> Result { // this function already exists, but a proxy here helps find it :) @@ -72,11 +107,7 @@ pub(crate) fn read_metadata(path: &Path) -> Result { /// This is not a complicated operation, because the assess metadata structure is meant /// to accomodate multiple packages already, so we're just "putting it together". 
pub(crate) fn aggregate_metadata(metas: Vec) -> AssessMetadata { - let mut result = AssessMetadata { - unsupported_features: TableBuilder::new(), - failure_reasons: TableBuilder::new(), - promising_tests: TableBuilder::new(), - }; + let mut result = AssessMetadata::empty(); for meta in metas { for item in meta.unsupported_features.build() { result.unsupported_features.add(item.clone()); diff --git a/kani-driver/src/assess/mod.rs b/kani-driver/src/assess/mod.rs index 424e052eef06..f1105e4ca15c 100644 --- a/kani-driver/src/assess/mod.rs +++ b/kani-driver/src/assess/mod.rs @@ -1,9 +1,11 @@ // Copyright Kani Contributors // SPDX-License-Identifier: Apache-2.0 OR MIT -use anyhow::Result; +use self::metadata::{write_metadata, AssessMetadata}; +use anyhow::{bail, Result}; use kani_metadata::KaniMetadata; +use crate::assess::table_builder::TableBuilder; use crate::metadata::merge_kani_metadata; use crate::project; use crate::session::KaniSession; @@ -21,16 +23,28 @@ mod table_unsupported_features; /// `cargo kani assess` main entry point. /// /// See -pub(crate) fn cargokani_assess_main(mut session: KaniSession, args: AssessArgs) -> Result<()> { +pub(crate) fn run_assess(session: KaniSession, args: AssessArgs) -> Result<()> { if let Some(args::AssessSubcommand::Scan(args)) = &args.command { return scan::assess_scan_main(session, args); } + let result = assess_project(session); + match result { + Ok(metadata) => write_metadata(&args, metadata), + Err(err) => { + let metadata = AssessMetadata::from_error(err.as_ref()); + write_metadata(&args, metadata)?; + Err(err.context("Failed to assess project")) + } + } +} + +fn assess_project(mut session: KaniSession) -> Result { // Fix (as in "make unchanging/unchangable") some settings. // This is a temporary hack to make things work, until we get around to refactoring how arguments // work generally in kani-driver. These arguments, for instance, are all prepended to the subcommand, // which is not a nice way of taking arguments. 
- session.args.unwind = Some(1); + session.args.unwind = Some(session.args.default_unwind.unwrap_or(1)); session.args.tests = true; session.args.output_format = crate::args::OutputFormat::Terse; session.codegen_tests = true; @@ -40,7 +54,7 @@ pub(crate) fn cargokani_assess_main(mut session: KaniSession, args: AssessArgs) session.args.jobs = Some(None); // -j, num_cpu } - let project = project::cargo_project(&session)?; + let project = project::cargo_project(&session, true)?; let cargo_metadata = project.cargo_metadata.as_ref().expect("built with cargo"); let packages_metadata = if project.merged_artifacts { @@ -58,7 +72,15 @@ pub(crate) fn cargokani_assess_main(mut session: KaniSession, args: AssessArgs) // It would also be interesting to classify them by whether they build without warnings or not. // Tracking for the latter: https://github.com/model-checking/kani/issues/1758 - println!("Found {} packages", packages_metadata.len()); + let build_fail = project.failed_targets.as_ref().unwrap(); + match (build_fail.len(), packages_metadata.len()) { + (0, 0) => println!("No relevant data was found."), + (0, succeeded) => println!("Analyzed {succeeded} packages"), + (_failed, 0) => bail!("Failed to build all targets"), + (failed, succeeded) => { + println!("Analyzed {succeeded} packages. 
Failed to build {failed} targets",) + } + } let metadata = merge_kani_metadata(packages_metadata.clone()); let unsupported_features = table_unsupported_features::build(&packages_metadata); @@ -69,13 +91,16 @@ pub(crate) fn cargokani_assess_main(mut session: KaniSession, args: AssessArgs) } if session.args.only_codegen { - metadata::write_partial_metadata(&args, unsupported_features)?; - return Ok(()); + return Ok(AssessMetadata::new( + unsupported_features, + TableBuilder::new(), + TableBuilder::new(), + )); } // Done with the 'cargo-kani' part, now we're going to run *test* harnesses instead of proof: - let harnesses = metadata.test_harnesses; - let runner = crate::harness_runner::HarnessRunner { sess: &session, project }; + let harnesses = Vec::from_iter(metadata.test_harnesses.iter()); + let runner = crate::harness_runner::HarnessRunner { sess: &session, project: &project }; let results = runner.check_all_harnesses(&harnesses)?; @@ -95,12 +120,7 @@ pub(crate) fn cargokani_assess_main(mut session: KaniSession, args: AssessArgs) let promising_tests = table_promising_tests::build(&results); println!("{}", promising_tests.render()); - metadata::write_metadata( - &args, - metadata::AssessMetadata { unsupported_features, failure_reasons, promising_tests }, - )?; - - Ok(()) + Ok(AssessMetadata::new(unsupported_features, failure_reasons, promising_tests)) } /// Merges a collection of Kani metadata by figuring out which package each belongs to, from cargo metadata. 
@@ -149,9 +169,11 @@ fn reconstruct_metadata_structure( ) } } - let mut merged = crate::metadata::merge_kani_metadata(package_artifacts); - merged.crate_name = package.name.clone(); - package_metas.push(merged); + if !package_artifacts.is_empty() { + let mut merged = crate::metadata::merge_kani_metadata(package_artifacts); + merged.crate_name = package.name.clone(); + package_metas.push(merged); + } } if !remaining_metas.is_empty() { let remaining_names: Vec<_> = remaining_metas.into_iter().map(|x| x.crate_name).collect(); diff --git a/kani-driver/src/assess/scan.rs b/kani-driver/src/assess/scan.rs index dbe43f211f02..ddf18076a68e 100644 --- a/kani-driver/src/assess/scan.rs +++ b/kani-driver/src/assess/scan.rs @@ -8,10 +8,12 @@ use std::process::Command; use std::time::Instant; use anyhow::Result; +use cargo_metadata::Package; use crate::session::KaniSession; use super::args::ScanArgs; +use super::metadata::AssessMetadata; use super::metadata::{aggregate_metadata, read_metadata}; /// `cargo kani assess scan` is not a normal invocation of `cargo kani`: we don't directly build anything. @@ -106,17 +108,21 @@ pub(crate) fn assess_scan_main(session: KaniSession, args: &ScanArgs) -> Result< invoke_assess(&session, name, manifest, &outfile, &logfile) }; - if result.is_err() { - println!("Failed: {name}"); - failed_packages.push(package); - } else { - let meta = read_metadata(&outfile); - if let Ok(meta) = meta { - success_metas.push(meta); - } else { + let meta = read_metadata(&outfile); + if let Ok(meta) = meta { + if meta.error.is_some() { println!("Failed: {name}"); - failed_packages.push(package); + // Some execution error that we have collected. + failed_packages.push((package, Some(meta))) + } else { + success_metas.push(meta); } + } else { + println!("Failed: {name}"); + failed_packages.push(( + package, + result.err().map(|err| AssessMetadata::from_error(err.as_ref())), + )); } //TODO: cargo clean? 
println!( @@ -134,7 +140,9 @@ pub(crate) fn assess_scan_main(session: KaniSession, args: &ScanArgs) -> Result< failed_packages.len() ); let results = aggregate_metadata(success_metas); + print_failures(failed_packages); println!("{}", results.unsupported_features.render()); + if !session.args.only_codegen { println!("{}", results.failure_reasons.render()); println!("{}", results.promising_tests.render()); @@ -203,3 +211,22 @@ fn scan_cargo_projects(path: PathBuf, accumulator: &mut Vec<PathBuf>) { } } } + +/// Print failures if any happened. +fn print_failures(mut failures: Vec<(&Package, Option<AssessMetadata>)>) { + if !failures.is_empty() { + println!("Failed to assess packages:"); + let unknown = "Unknown".to_string(); + failures.sort_by_key(|(pkg, _)| &pkg.name); + for (pkg, meta) in failures { + println!( + " - `{}`: {}", + pkg.name, + meta.as_ref().map_or(&unknown, |md| { + md.error.as_ref().map_or(&unknown, |error| &error.msg) + }), + ); + } + println!(); + } +} diff --git a/kani-driver/src/assess/table_failure_reasons.rs b/kani-driver/src/assess/table_failure_reasons.rs index d4bedca118d9..9836aaa871cd 100644 --- a/kani-driver/src/assess/table_failure_reasons.rs +++ b/kani-driver/src/assess/table_failure_reasons.rs @@ -35,14 +35,19 @@ pub(crate) fn build(results: &[HarnessResult]) -> TableBuilder = failures.into_iter().map(|p| p.property_class()).collect(); - classes.sort(); - classes.dedup(); - classes.join(" + ") + let failures = r.result.failed_properties(); + if failures.is_empty() { + "none (success)".to_string() + } else { + let mut classes: Vec<_> = + failures.into_iter().map(|p| p.property_class()).collect(); + classes.sort(); + classes.dedup(); + classes.join(" + ") + } }; let name = r.harness.pretty_name.trim_end_matches("::{closure#0}").to_string(); diff --git a/kani-driver/src/assess/table_promising_tests.rs b/kani-driver/src/assess/table_promising_tests.rs index 7fd46fc9e7c8..8a84959a3b9b 100644 --- a/kani-driver/src/assess/table_promising_tests.rs +++ 
b/kani-driver/src/assess/table_promising_tests.rs @@ -29,7 +29,7 @@ use super::table_builder::{ColumnType, RenderableTableRow, TableBuilder, TableRo pub(crate) fn build(results: &[HarnessResult]) -> TableBuilder { let mut builder = TableBuilder::new(); - for r in results { + for r in results.iter().filter(|res| res.result.results.is_ok()) { // For now we're just reporting "successful" harnesses as candidates. // In the future this heuristic should be expanded. More data is required to do this, however. if r.result.failed_properties().is_empty() { diff --git a/kani-driver/src/call_cargo.rs b/kani-driver/src/call_cargo.rs index d8f46951cf9c..31e32f7c8004 100644 --- a/kani-driver/src/call_cargo.rs +++ b/kani-driver/src/call_cargo.rs @@ -2,12 +2,19 @@ // SPDX-License-Identifier: Apache-2.0 OR MIT use crate::args::KaniArgs; -use crate::session::{KaniSession, ReachabilityMode}; +use crate::call_single_file::to_rustc_arg; +use crate::project::Artifact; +use crate::session::KaniSession; +use crate::util; use anyhow::{bail, Context, Result}; -use cargo_metadata::{Metadata, MetadataCommand, Package}; -use std::ffi::OsString; -use std::fs; -use std::path::{Path, PathBuf}; +use cargo_metadata::diagnostic::{Diagnostic, DiagnosticLevel}; +use cargo_metadata::{Message, Metadata, MetadataCommand, Package, Target}; +use kani_metadata::{ArtifactType, CompilerArtifactStub}; +use std::ffi::{OsStr, OsString}; +use std::fmt::{self, Display}; +use std::fs::{self, File}; +use std::io::BufReader; +use std::path::PathBuf; use std::process::Command; use tracing::{debug, trace}; @@ -26,19 +33,17 @@ pub struct CargoOutputs { /// The directory where compiler outputs should be directed. /// Usually 'target/BUILD_TRIPLE/debug/deps/' pub outdir: PathBuf, - /// The collection of *.symtab.json files written. - pub symtabs: Vec, - /// The location of vtable restrictions files (a directory of *.restrictions.json) - pub restrictions: Option, /// The kani-metadata.json files written by kani-compiler. 
- pub metadata: Vec, + pub metadata: Vec, /// Recording the cargo metadata from the build pub cargo_metadata: Metadata, + /// For build `keep_going` mode, we collect the targets that we failed to compile. + pub failed_targets: Option>, } impl KaniSession { /// Calls `cargo_build` to generate `*.symtab.json` files in `target_dir` - pub fn cargo_build(&self) -> Result { + pub fn cargo_build(&self, keep_going: bool) -> Result { let build_target = env!("TARGET"); // see build.rs let metadata = self.cargo_metadata(build_target)?; let target_dir = self @@ -50,14 +55,12 @@ impl KaniSession { .join("kani"); let outdir = target_dir.join(build_target).join("debug/deps"); - // Clean directory before building since we are unable to handle cache today. - // TODO: https://github.com/model-checking/kani/issues/1736 - if target_dir.exists() { + if self.args.force_build && target_dir.exists() { fs::remove_dir_all(&target_dir)?; } - let mut kani_args = self.kani_specific_flags(); - let rustc_args = self.kani_rustc_flags(); + let mut rustc_args = self.kani_rustc_flags(); + rustc_args.push(to_rustc_arg(self.kani_compiler_flags()).into()); let mut cargo_args: Vec = vec!["rustc".into()]; if let Some(path) = &self.args.cargo.manifest_path { @@ -81,6 +84,10 @@ impl KaniSession { cargo_args.push("--target-dir".into()); cargo_args.push(target_dir.into()); + // Configuration needed to parse cargo compilation status. + cargo_args.push("--message-format".into()); + cargo_args.push("json-diagnostic-rendered-ansi".into()); + if self.args.tests { // Use test profile in order to pull dev-dependencies and compile using `--test`. // Initially the plan was to use `--tests` but that brings in multiple targets. @@ -93,36 +100,39 @@ impl KaniSession { } // Arguments that will only be passed to the target package. 
- let mut pkg_args: Vec = vec![]; - match self.reachability_mode() { - ReachabilityMode::ProofHarnesses => { - pkg_args.extend(["--".into(), "--reachability=harnesses".into()]); - } - ReachabilityMode::AllPubFns => { - pkg_args.extend(["--".into(), "--reachability=pub_fns".into()]); - } - ReachabilityMode::Tests => { - pkg_args.extend(["--".into(), "--reachability=tests".into()]); - } - } - - // Only joing them at the end. All kani flags must come first. - kani_args.extend_from_slice(&rustc_args); + let mut pkg_args: Vec = vec![]; + pkg_args.extend(["--".to_string(), self.reachability_arg()]); let mut found_target = false; let packages = packages_to_verify(&self.args, &metadata); + let mut artifacts = vec![]; + let mut failed_targets = vec![]; for package in packages { - for target in package_targets(&self.args, package) { + for verification_target in package_targets(&self.args, package) { let mut cmd = Command::new("cargo"); cmd.args(&cargo_args) .args(vec!["-p", &package.name]) - .args(&target.to_args()) + .args(&verification_target.to_args()) .args(&pkg_args) .env("RUSTC", &self.kani_compiler) - .env("RUSTFLAGS", "--kani-flags") - .env("KANIFLAGS", &crate::util::join_osstring(&kani_args, " ")); + // Use CARGO_ENCODED_RUSTFLAGS instead of RUSTFLAGS is preferred. 
See + // https://doc.rust-lang.org/cargo/reference/environment-variables.html + .env("CARGO_ENCODED_RUSTFLAGS", rustc_args.join(OsStr::new("\x1f"))) + .env("CARGO_TERM_PROGRESS_WHEN", "never"); - self.run_terminal(cmd)?; + match self.run_cargo(cmd, verification_target.target()) { + Err(err) => { + if keep_going { + let target_str = format!("{verification_target}"); + util::error(&format!("Failed to compile {target_str}")); + failed_targets.push(target_str); + } else { + return Err(err); + } + } + Ok(Some(artifact)) => artifacts.push(artifact), + Ok(None) => {} + } found_target = true; } } @@ -132,11 +142,10 @@ impl KaniSession { } Ok(CargoOutputs { - outdir: outdir.clone(), - symtabs: glob(&outdir.join("*.symtab.json"))?, - metadata: glob(&outdir.join("*.kani-metadata.json"))?, - restrictions: self.args.restrict_vtable().then_some(outdir), + outdir, + metadata: artifacts, cargo_metadata: metadata, + failed_targets: keep_going.then_some(failed_targets), }) } @@ -167,15 +176,89 @@ impl KaniSession { cmd.exec().context("Failed to get cargo metadata.") } + + /// Run cargo and collect any error found. + /// We also collect the metadata file generated during compilation if any. + fn run_cargo(&self, cargo_cmd: Command, target: &Target) -> Result> { + let support_color = atty::is(atty::Stream::Stdout); + let mut artifact = None; + if let Some(mut cargo_process) = self.run_piped(cargo_cmd)? 
{ + let reader = BufReader::new(cargo_process.stdout.take().unwrap()); + let mut error_count = 0; + for message in Message::parse_stream(reader) { + let message = message.unwrap(); + match message { + Message::CompilerMessage(msg) => match msg.message.level { + DiagnosticLevel::FailureNote => { + print_msg(&msg.message, support_color)?; + } + DiagnosticLevel::Error => { + error_count += 1; + print_msg(&msg.message, support_color)?; + } + DiagnosticLevel::Ice => { + print_msg(&msg.message, support_color)?; + let _ = cargo_process.wait(); + return Err(anyhow::Error::msg(msg.message).context(format!( + "Failed to compile `{}` due to an internal compiler error.", + msg.target.name + ))); + } + _ => { + if !self.args.quiet { + print_msg(&msg.message, support_color)?; + } + } + }, + Message::CompilerArtifact(rustc_artifact) => { + if rustc_artifact.target == *target { + debug_assert!( + artifact.is_none(), + "expected only one artifact for `{target:?}`", + ); + artifact = Some(rustc_artifact); + } + } + Message::BuildScriptExecuted(_) | Message::BuildFinished(_) => { + // do nothing + } + Message::TextLine(msg) => { + if !self.args.quiet { + println!("{msg}"); + } + } + + // Non-exhaustive enum. + _ => { + if !self.args.quiet { + println!("{message:?}"); + } + } + } + } + let status = cargo_process.wait()?; + if !status.success() { + bail!( + "Failed to execute cargo ({status}). Found {error_count} compilation errors." + ); + } + } + // We generate kani specific artifacts only for the build target. The build target is + // always the last artifact generated in a build, and all the other artifacts are related + // to dependencies or build scripts. Hence, we need to invoke `map_kani_artifact` only + // for the last compiler artifact. + Ok(artifact.and_then(map_kani_artifact)) + } } -/// Given a `path` with glob characters in it (e.g. 
`*.json`), return a vector of matching files -fn glob(path: &Path) -> Result> { - let results = glob::glob(path.to_str().context("Non-UTF-8 path enountered")?)?; - // the logic to turn "Iter>" into "Result, E>" doesn't play well - // with anyhow, so a type annotation is required - let v: core::result::Result, glob::GlobError> = results.collect(); - Ok(v?) +/// Print the compiler message following the coloring schema. +fn print_msg(diagnostic: &Diagnostic, use_rendered: bool) -> Result<()> { + if use_rendered { + print!("{diagnostic}"); + } else { + print!("{}", console::strip_ansi_codes(diagnostic.rendered.as_ref().unwrap())); + } + Ok(()) } /// Extract the packages that should be verified. @@ -209,20 +292,80 @@ fn packages_to_verify<'b>(args: &KaniArgs, metadata: &'b Metadata) -> Vec<&'b Pa packages } +/// Extract Kani artifact that might've been generated from a given rustc artifact. +/// Not every rustc artifact will map to a kani artifact, hence the `Option<>`. +/// +/// Unfortunately, we cannot always rely on the messages to get the path for the original artifact +/// that `rustc` produces. So we hack the content of the output path to point to the original +/// metadata file. See for more details. +fn map_kani_artifact(rustc_artifact: cargo_metadata::Artifact) -> Option { + debug!(?rustc_artifact, "map_kani_artifact"); + if rustc_artifact.target.is_custom_build() { + // We don't verify custom builds. + return None; + } + let result = rustc_artifact.filenames.iter().find_map(|path| { + if path.extension() == Some("rmeta") { + let file_stem = path.file_stem()?.strip_prefix("lib")?; + let parent = + path.parent().map(|p| p.as_std_path().to_path_buf()).unwrap_or(PathBuf::new()); + let mut meta_path = parent.join(file_stem); + meta_path.set_extension(ArtifactType::Metadata); + trace!(rmeta=?path, kani_meta=?meta_path.display(), "map_kani_artifact"); + + // This will check if the file exists and we just skip if it doesn't. 
+ Artifact::try_new(&meta_path, ArtifactType::Metadata).ok() + } else if path.extension() == Some("rlib") { + // We skip `rlib` files since we should also generate a `rmeta`. + trace!(rlib=?path, "map_kani_artifact"); + None + } else { + // For all the other cases we write the path of the metadata into the output file. + // The compiler should always write a valid stub into the artifact file, however the + // kani-metadata file only exists if there were valid targets. + trace!(artifact=?path, "map_kani_artifact"); + let input_file = File::open(path).ok()?; + let stub: CompilerArtifactStub = serde_json::from_reader(input_file).unwrap(); + Artifact::try_new(&stub.metadata_path, ArtifactType::Metadata).ok() + } + }); + debug!(?result, "map_kani_artifact"); + result +} + /// Possible verification targets. +#[derive(Debug)] enum VerificationTarget { - Bin(String), - Lib, - Test(String), + Bin(Target), + Lib(Target), + Test(Target), } impl VerificationTarget { /// Convert to cargo argument that select the specific target. 
fn to_args(&self) -> Vec { match self { - VerificationTarget::Test(name) => vec![String::from("--test"), name.clone()], - VerificationTarget::Bin(name) => vec![String::from("--bin"), name.clone()], - VerificationTarget::Lib => vec![String::from("--lib")], + VerificationTarget::Test(target) => vec![String::from("--test"), target.name.clone()], + VerificationTarget::Bin(target) => vec![String::from("--bin"), target.name.clone()], + VerificationTarget::Lib(_) => vec![String::from("--lib")], + } + } + + fn target(&self) -> &Target { + match self { + VerificationTarget::Test(target) + | VerificationTarget::Bin(target) + | VerificationTarget::Lib(target) => target, + } + } +} + +impl Display for VerificationTarget { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + VerificationTarget::Test(target) => write!(f, "test `{}`", target.name), + VerificationTarget::Bin(target) => write!(f, "binary `{}`", target.name), + VerificationTarget::Lib(target) => write!(f, "lib `{}`", target.name), } } } @@ -251,7 +394,7 @@ fn package_targets(args: &KaniArgs, package: &Package) -> Vec { // Binary targets. - verification_targets.push(VerificationTarget::Bin(target.name.clone())); + verification_targets.push(VerificationTarget::Bin(target.clone())); } CRATE_TYPE_LIB | CRATE_TYPE_RLIB | CRATE_TYPE_CDYLIB | CRATE_TYPE_DYLIB | CRATE_TYPE_STATICLIB => { @@ -264,7 +407,7 @@ fn package_targets(args: &KaniArgs, package: &Package) -> Vec { // Test target. 
if args.tests { - verification_targets.push(VerificationTarget::Test(target.name.clone())); + verification_targets.push(VerificationTarget::Test(target.clone())); } else { ignored_tests.push(target.name.as_str()); } @@ -280,7 +423,7 @@ fn package_targets(args: &KaniArgs, package: &Package) -> Vec verification_targets.push(VerificationTarget::Lib), + (true, false) => verification_targets.push(VerificationTarget::Lib(target.clone())), (_, _) => {} } } diff --git a/kani-driver/src/call_cbmc.rs b/kani-driver/src/call_cbmc.rs index d94329fa10d1..2acaf5a7c02f 100644 --- a/kani-driver/src/call_cbmc.rs +++ b/kani-driver/src/call_cbmc.rs @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 OR MIT use anyhow::{bail, Result}; -use kani_metadata::HarnessMetadata; +use kani_metadata::{CbmcSolver, HarnessMetadata}; use std::ffi::OsString; use std::fmt::Write; use std::path::Path; @@ -30,12 +30,11 @@ pub struct VerificationResult { /// The parsed output, message by message, of CBMC. However, the `Result` message has been /// removed and is available in `results` instead. pub messages: Option>, - /// The `Result` properties in detail. - pub results: Option>, - /// CBMC process exit status. NOTE: Only potentially useful if `status` is `Failure`. + /// The `Result` properties in detail or the exit_status of CBMC. + /// Note: CBMC process exit status is only potentially useful if `status` is `Failure`. /// Kani will see CBMC report "failure" that's actually success (interpreting "failed" /// checks like coverage as expected and desirable.) - pub exit_status: i32, + pub results: Result, i32>, /// The runtime duration of this CBMC invocation. 
pub runtime: Duration, /// Whether concrete playback generated a test @@ -67,20 +66,17 @@ impl KaniSession { // Spawn the CBMC process and process its output below let cbmc_process_opt = self.run_piped(cmd)?; - if let Some(cbmc_process) = cbmc_process_opt { - let output = process_cbmc_output(cbmc_process, |i| { - kani_cbmc_output_filter( - i, - self.args.extra_pointer_checks, - &self.args.output_format, - ) - })?; - - VerificationResult::from(output, start_time) - } else { - // None is only ever returned when it's a dry run - VerificationResult::mock_success() - } + let cbmc_process = cbmc_process_opt.ok_or(anyhow::Error::msg("Failed to run cbmc"))?; + let output = process_cbmc_output(cbmc_process, |i| { + kani_cbmc_output_filter( + i, + self.args.extra_pointer_checks, + self.args.quiet, + &self.args.output_format, + ) + })?; + + VerificationResult::from(output, start_time) }; self.gen_and_add_concrete_playback(harness, &mut verification_results)?; @@ -122,6 +118,8 @@ impl KaniSession { args.push(unwind_value.to_string().into()); } + self.handle_solver_args(&harness_metadata.attributes.solver, &mut args)?; + if self.args.run_sanity_checks { args.push("--validate-goto-model".into()); args.push("--validate-ssa-equation".into()); @@ -185,6 +183,46 @@ impl KaniSession { args } + + fn handle_solver_args( + &self, + harness_solver: &Option, + args: &mut Vec, + ) -> Result<()> { + let solver = if let Some(solver) = &self.args.solver { + // `--solver` option takes precedence over attributes + solver + } else if let Some(solver) = harness_solver { + solver + } else { + // Nothing to do + return Ok(()); + }; + + match solver { + CbmcSolver::Cadical => { + args.push("--sat-solver".into()); + args.push("cadical".into()); + } + CbmcSolver::Kissat => { + args.push("--external-sat-solver".into()); + args.push("kissat".into()); + } + CbmcSolver::Minisat => { + // Minisat is currently CBMC's default solver, so no need to + // pass any arguments + } + 
CbmcSolver::Binary(solver_binary) => { + // Check if the specified binary exists in path + if which::which(solver_binary).is_err() { + bail!("the specified solver \"{solver_binary}\" was not found in path") + } + args.push("--external-sat-solver".into()); + args.push(solver_binary.into()); + } + } + Ok(()) + } } impl VerificationResult { @@ -204,8 +242,7 @@ impl VerificationResult { VerificationResult { status: determine_status_from_properties(&results), messages: Some(items), - results: Some(results), - exit_status: output.process_status, + results: Ok(results), runtime, generated_concrete_test: false, } @@ -214,8 +251,7 @@ impl VerificationResult { VerificationResult { status: VerificationStatus::Failure, messages: Some(items), - results: None, - exit_status: output.process_status, + results: Err(output.process_status), runtime, generated_concrete_test: false, } @@ -226,8 +262,7 @@ impl VerificationResult { VerificationResult { status: VerificationStatus::Success, messages: None, - results: None, - exit_status: 42, // on success, exit code is ignored, so put something weird here + results: Ok(vec![]), runtime: Duration::from_secs(0), generated_concrete_test: false, } @@ -237,36 +272,38 @@ impl VerificationResult { VerificationResult { status: VerificationStatus::Failure, messages: None, - results: None, // on failure, exit codes in theory might be used, // but `mock_failure` should never be used in a context where they will, // so again use something weird: - exit_status: 42, + results: Err(42), runtime: Duration::from_secs(0), generated_concrete_test: false, } } pub fn render(&self, output_format: &OutputFormat) -> String { - if let Some(results) = &self.results { - let show_checks = matches!(output_format, OutputFormat::Regular); - let mut result = format_result(results, show_checks); - writeln!(result, "Verification Time: {}s", self.runtime.as_secs_f32()).unwrap(); - result - } else { - let verification_result = console::style("FAILED").red(); - format!( - 
"\nCBMC failed with status {}\nVERIFICATION:- {verification_result}\n", - self.exit_status - ) + match &self.results { + Ok(results) => { + let show_checks = matches!(output_format, OutputFormat::Regular); + let mut result = format_result(results, show_checks); + writeln!(result, "Verification Time: {}s", self.runtime.as_secs_f32()).unwrap(); + result + } + Err(exit_status) => { + let verification_result = console::style("FAILED").red(); + format!( + "\nCBMC failed with status {exit_status}\nVERIFICATION:- {verification_result}\n", + ) + } } } /// Find the failed properties from this verification run pub fn failed_properties(&self) -> Vec<&Property> { - if let Some(properties) = &self.results { + if let Ok(properties) = &self.results { properties.iter().filter(|prop| prop.status == CheckStatus::Failure).collect() } else { + debug_assert!(false, "expected error to be handled before invoking this function"); vec![] } } @@ -287,7 +324,7 @@ fn determine_status_from_properties(properties: &[Property]) -> VerificationStat pub fn resolve_unwind_value(args: &KaniArgs, harness_metadata: &HarnessMetadata) -> Option { // Check for which flag is being passed and prioritize extracting unwind from the // respective flag/annotation. - args.unwind.or(harness_metadata.unwind_value).or(args.default_unwind) + args.unwind.or(harness_metadata.attributes.unwind_value).or(args.default_unwind) } #[cfg(test)] diff --git a/kani-driver/src/call_cbmc_viewer.rs b/kani-driver/src/call_cbmc_viewer.rs index 1a4b3061778b..3643b9eda93e 100644 --- a/kani-driver/src/call_cbmc_viewer.rs +++ b/kani-driver/src/call_cbmc_viewer.rs @@ -8,7 +8,7 @@ use std::path::Path; use std::process::Command; use crate::session::KaniSession; -use crate::util::alter_extension; +use crate::util::{alter_extension, warning}; impl KaniSession { /// Run CBMC appropriately to produce 3 output XML files, then run cbmc-viewer on them to produce a report. 
@@ -20,18 +20,11 @@ impl KaniSession { harness_metadata: &HarnessMetadata, ) -> Result<()> { let results_filename = alter_extension(file, "results.xml"); - let coverage_filename = alter_extension(file, "coverage.xml"); let property_filename = alter_extension(file, "property.xml"); - self.record_temporary_files(&[&results_filename, &coverage_filename, &property_filename]); + self.record_temporary_files(&[&results_filename, &property_filename]); self.cbmc_variant(file, &["--xml-ui", "--trace"], &results_filename, harness_metadata)?; - self.cbmc_variant( - file, - &["--xml-ui", "--cover", "location"], - &coverage_filename, - harness_metadata, - )?; self.cbmc_variant( file, &["--xml-ui", "--show-properties"], @@ -42,8 +35,6 @@ impl KaniSession { let args: Vec<OsString> = vec![ "--result".into(), results_filename.into(), - "--coverage".into(), - coverage_filename.into(), "--property".into(), property_filename.into(), "--srcdir".into(), @@ -65,6 +56,7 @@ impl KaniSession { // Let the user know if !self.args.quiet { println!("Report written to: {}/html/index.html", report_dir.to_string_lossy()); + warning("coverage information has been disabled for `--visualize` reports"); // If using VS Code with Remote-SSH, suggest an option for remote viewing: if std::env::var("VSCODE_IPC_HOOK_CLI").is_ok() && std::env::var("SSH_CONNECTION").is_ok() diff --git a/kani-driver/src/call_goto_instrument.rs b/kani-driver/src/call_goto_instrument.rs index f1076ee39ef2..58096f1953e4 100644 --- a/kani-driver/src/call_goto_instrument.rs +++ b/kani-driver/src/call_goto_instrument.rs @@ -57,8 +57,9 @@ impl KaniSession { } let c_demangled = alter_extension(output, "demangled.c"); - let symtab = project.get_harness_artifact(&harness, ArtifactType::SymTab).unwrap(); - self.demangle_c(symtab, &c_outfile, &c_demangled)?; + let pretty_name_map = + project.get_harness_artifact(&harness, ArtifactType::PrettyNameMap).unwrap(); + self.demangle_c(pretty_name_map, &c_outfile, &c_demangled)?; if !self.args.quiet { 
println!("Demangled GotoC code written to {}", c_demangled.to_string_lossy()) } @@ -165,22 +166,21 @@ impl KaniSession { /// For local variables, it would be more complicated than a simple search and replace to obtain the demangled name. pub fn demangle_c( &self, - symtab_file: &impl AsRef, + pretty_name_map_file: &impl AsRef, c_file: &Path, demangled_file: &Path, ) -> Result<()> { let mut c_code = std::fs::read_to_string(c_file)?; - let reader = BufReader::new(File::open(symtab_file)?); - let symtab: serde_json::Value = serde_json::from_reader(reader)?; - for (_, symbol) in symtab["symbolTable"].as_object().unwrap() { - if let Some(serde_json::Value::String(name)) = symbol.get("name") { - if let Some(serde_json::Value::String(pretty)) = symbol.get("prettyName") { - // Struct names start with "tag-", but this prefix is not used in the GotoC files, so we strip it. - // If there is no such prefix, we leave the name unchanged. - let name = name.strip_prefix("tag-").unwrap_or(name); - if !pretty.is_empty() && pretty != name { - c_code = c_code.replace(name, pretty); - } + let reader = BufReader::new(File::open(pretty_name_map_file)?); + let value: serde_json::Value = serde_json::from_reader(reader)?; + let pretty_name_map = value.as_object().unwrap(); + for (name, pretty_name) in pretty_name_map { + if let Some(pretty_name) = pretty_name.as_str() { + // Struct names start with "tag-", but this prefix is not used in the GotoC files, so we strip it. + // If there is no such prefix, we leave the name unchanged. 
+ let name = name.strip_prefix("tag-").unwrap_or(name); + if !pretty_name.is_empty() && pretty_name != name { + c_code = c_code.replace(name, pretty_name); } } } diff --git a/kani-driver/src/call_goto_synthesizer.rs b/kani-driver/src/call_goto_synthesizer.rs new file mode 100644 index 000000000000..3d6007a6b97e --- /dev/null +++ b/kani-driver/src/call_goto_synthesizer.rs @@ -0,0 +1,39 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT + +use crate::util::warning; +use anyhow::Result; +use std::ffi::OsString; +use std::path::Path; +use std::process::Command; + +use crate::session::KaniSession; + +impl KaniSession { + /// Synthesize loop contracts for a goto binary `input` and produce a new goto binary `output` + /// The synthesizer we use is `goto-synthesizer` built in CBMC codebase, which is an enumerative + /// loop-contracts synthesizer. `goto-synthesizer` enumerates and checks if a candidate can be + /// used to prove some assertions, and applies found invariants when all checks pass. + pub fn synthesize_loop_contracts(&self, input: &Path, output: &Path) -> Result<()> { + if !self.args.quiet { + println!("Running loop contract synthesizer."); + warning("This process may not terminate."); + warning( + "Loop-contracts synthesizer is not compatible with unwinding bounds. 
Unwind bounds will be ignored.", + ); + } + + let args: Vec = vec![ + "--loop-contracts-no-unwind".into(), + input.to_owned().into_os_string(), // input + output.to_owned().into_os_string(), // output + ]; + + let mut cmd = Command::new("goto-synthesizer"); + cmd.args(args); + + self.run_suppress(cmd)?; + + Ok(()) + } +} diff --git a/kani-driver/src/call_single_file.rs b/kani-driver/src/call_single_file.rs index b068b0878433..7cd206b28495 100644 --- a/kani-driver/src/call_single_file.rs +++ b/kani-driver/src/call_single_file.rs @@ -6,7 +6,7 @@ use std::ffi::OsString; use std::path::Path; use std::process::Command; -use crate::session::{KaniSession, ReachabilityMode}; +use crate::session::{base_folder, lib_folder, KaniSession}; impl KaniSession { /// Used by `kani` and not `cargo-kani` to process a single Rust file into a `.symtab.json` @@ -17,15 +17,8 @@ impl KaniSession { crate_name: &String, outdir: &Path, ) -> Result<()> { - let mut kani_args = self.kani_specific_flags(); - kani_args.push( - match self.reachability_mode() { - ReachabilityMode::ProofHarnesses => "--reachability=harnesses", - ReachabilityMode::AllPubFns => "--reachability=pub_fns", - ReachabilityMode::Tests => "--reachability=tests", - } - .into(), - ); + let mut kani_args = self.kani_compiler_flags(); + kani_args.push(format!("--reachability={}", self.reachability_mode())); let mut rustc_args = self.kani_rustc_flags(); rustc_args.push(file.into()); @@ -55,7 +48,8 @@ impl KaniSession { // Note that the order of arguments is important. Kani specific flags should precede // rustc ones. let mut cmd = Command::new(&self.kani_compiler); - cmd.args(kani_args).args(rustc_args); + let kani_compiler_args = to_rustc_arg(kani_args); + cmd.arg(kani_compiler_args).args(rustc_args); if self.args.quiet { self.run_suppress(cmd)?; @@ -65,10 +59,14 @@ impl KaniSession { Ok(()) } - /// These arguments are arguments passed to kani-compiler that are `kani` specific. 
- /// These are also used by call_cargo to pass as the env var KANIFLAGS. - pub fn kani_specific_flags(&self) -> Vec { - let mut flags = vec![OsString::from("--goto-c")]; + /// Create a compiler option that represents the reachability mod. + pub fn reachability_arg(&self) -> String { + to_rustc_arg(vec![format!("--reachability={}", self.reachability_mode())]) + } + + /// These arguments are arguments passed to kani-compiler that are `kani` compiler specific. + pub fn kani_compiler_flags(&self) -> Vec { + let mut flags = vec![check_version()]; if self.args.debug { flags.push("--log-level=debug".into()); @@ -89,23 +87,66 @@ impl KaniSession { flags.push("--ignore-global-asm".into()); } + // Users activate it via the command line switch + if self.args.write_json_symtab { + flags.push("--write-json-symtab".into()); + } + if self.args.enable_stubbing { flags.push("--enable-stubbing".into()); } - if let Some(harness) = &self.args.harness { - flags.push(format!("--harness={harness}").into()); + for harness in &self.args.harnesses { + flags.push(format!("--harness={harness}")); } + // This argument will select the Kani flavour of the compiler. It will be removed before + // rustc driver is invoked. + flags.push("--goto-c".into()); + #[cfg(feature = "unsound_experiments")] flags.extend(self.args.unsound_experiments.process_args()); flags } - /// These arguments are arguments passed to kani-compiler that are `rustc` specific. - /// These are also used by call_cargo to pass as the env var KANIFLAGS. + /// This function generates all rustc configurations required by our goto-c codegen. 
pub fn kani_rustc_flags(&self) -> Vec { - let mut flags = Vec::::new(); + let lib_path = lib_folder().unwrap(); + let kani_std_rlib = lib_path.join("libstd.rlib"); + let kani_std_wrapper = format!("noprelude:std={}", kani_std_rlib.to_str().unwrap()); + let sysroot = base_folder().unwrap(); + let args = vec![ + "-C", + "overflow-checks=on", + "-C", + "panic=abort", + "-C", + "symbol-mangling-version=v0", + "-Z", + "unstable-options", + "-Z", + "panic_abort_tests=yes", + "-Z", + "trim-diagnostic-paths=no", + "-Z", + "human_readable_cgu_names", + "-Z", + "always-encode-mir", + "--cfg=kani", + "-Z", + "crate-attr=feature(register_tool)", + "-Z", + "crate-attr=register_tool(kanitool)", + "--sysroot", + sysroot.to_str().unwrap(), + "-L", + lib_path.to_str().unwrap(), + "--extern", + "kani", + "--extern", + kani_std_wrapper.as_str(), + ]; + let mut flags: Vec<_> = args.iter().map(OsString::from).collect(); if self.args.use_abs { flags.push("-Z".into()); flags.push("force-unstable-if-unmarked=yes".into()); // ?? @@ -124,15 +165,38 @@ impl KaniSession { } } - flags.push("-C".into()); - flags.push("symbol-mangling-version=v0".into()); - // e.g. compiletest will set 'compile-flags' here and we should pass those down to rustc // and we fail in `tests/kani/Match/match_bool.rs` if let Ok(str) = std::env::var("RUSTFLAGS") { flags.extend(str.split(' ').map(OsString::from)); } + // This argument will select the Kani flavour of the compiler. It will be removed before + // rustc driver is invoked. + flags.push("--kani-compiler".into()); + flags } } + +/// This function can be used to convert Kani compiler specific arguments into a rustc one. +/// We currently pass Kani specific arguments using the `--llvm-args` structure which is the +/// hacky mechanism used by other rustc backend to receive arguments unknown to rustc. +/// +/// Note that Cargo caching mechanism takes the building context into consideration, which +/// includes the value of the rust flags. 
By using `--llvm-args`, we ensure that Cargo takes into +/// consideration all arguments that are used to configure Kani compiler. For example, enabling the +/// reachability checks will force recompilation if they were disabled in previous build. +/// For more details on this caching mechanism, see the +/// [fingerprint documentation](https://github.com/rust-lang/cargo/blob/82c3bb79e3a19a5164e33819ef81bfc2c984bc56/src/cargo/core/compiler/fingerprint/mod.rs) +pub fn to_rustc_arg(kani_args: Vec) -> String { + format!(r#"-Cllvm-args={}"#, kani_args.join(" ")) +} + +/// Function that returns a `--check-version` argument to be added to the compiler flags. +/// This is really just used to force the compiler to recompile everything from scratch when a user +/// upgrades Kani. Cargo currently ignores the codegen backend version. +/// See for more context. +fn check_version() -> String { + format!("--check-version={}", env!("CARGO_PKG_VERSION")) +} diff --git a/kani-driver/src/cbmc_output_parser.rs b/kani-driver/src/cbmc_output_parser.rs index ada0e66c34f1..70e13b7dda06 100644 --- a/kani-driver/src/cbmc_output_parser.rs +++ b/kani-driver/src/cbmc_output_parser.rs @@ -35,6 +35,8 @@ use std::os::unix::process::ExitStatusExt; use std::path::PathBuf; use std::process::{Child, ChildStdout}; +const RESULT_ITEM_PREFIX: &str = " {\n \"result\":"; + /// A parser item is a top-level unit of output from the CBMC json format. /// See the parser for more information on how they are processed. #[derive(Debug, Deserialize)] @@ -57,6 +59,17 @@ pub enum ParserItem { }, } +/// Struct that is equivalent to `ParserItem::Result`. +/// +/// Note: this struct is only used to provide better error messages when there +/// are issues deserializing a `ParserItem::Result`. See `Parser::parse_item` +/// for more details. +#[allow(unused)] +#[derive(Debug, Deserialize)] +struct ResultStruct { + result: Vec, +} + /// Struct that represents a single property in the set of CBMC results. 
/// /// Note: `reach` is not part of the parsed data, but it's useful to annotate @@ -285,7 +298,7 @@ pub struct TraceValue { pub name: String, pub binary: Option, pub data: Option, - pub width: Option, + pub width: Option, } /// Enum that represents a trace data item. @@ -420,6 +433,25 @@ impl<'a, 'b> Parser<'a, 'b> { if let Ok(item) = result_item { return item; } + // If we failed to parse a `ParserItem::Result` earlier, we will get + // this error message when we attempt to parse it using the complete + // string: + // ``` + // thread '' panicked at 'called `Result::unwrap()` on an `Err` value: + // Error("data did not match any variant of untagged enum ParserItem", line: 0, column: 0)' + // ``` + // This error message doesn't provide information about what went wrong + // while parsing due to `ParserItem` being an untagged enum. A more + // informative error message will be produced if we attempt to + // deserialize it into a struct. The attempt will still fail, but it + // shouldn't be hard to debug with that information. The same strategy + // can be used for other `ParserItem` variants, but they're normally + // easier to debug. 
+ if string_without_delimiter.starts_with(RESULT_ITEM_PREFIX) { + let result_item: Result = + serde_json::from_str(string_without_delimiter); + result_item.unwrap(); + } let complete_string = &self.input_so_far[0..self.input_so_far.len()]; let result_item: Result = serde_json::from_str(complete_string); result_item.unwrap() @@ -681,4 +713,45 @@ mod tests { serde_json::from_str(prop_id_string); let _prop_id = prop_id_result.unwrap(); } + + #[test] + fn check_trace_value_deserialization_works() { + let data = format!( + r#"{{ + "binary": "{:0>1000}", + "data": "0", + "name": "integer", + "type": "unsigned __CPROVER_bitvector[960]", + "width": 960 + }}"#, + 0 + ); + let trace_value: Result = serde_json::from_str(&data); + assert!(trace_value.is_ok()); + } + + /// Checks that a valid CBMC "result" item can be deserialized into a + /// `ParserItem` or `ResultStruct`. + #[test] + fn check_result_deserialization_works() { + let data = r#"{ + "result": [ + { + "description": "assertion failed: 1 > 2", + "property": "long_function_name.assertion.1", + "sourceLocation": { + "column": "16", + "file": "/home/ubuntu/file.rs", + "function": "long_function_name", + "line": "815" + }, + "status": "SUCCESS" + } + ] + }"#; + let parser_item: Result = serde_json::from_str(&data); + let result_struct: Result = serde_json::from_str(&data); + assert!(parser_item.is_ok()); + assert!(result_struct.is_ok()); + } } diff --git a/kani-driver/src/cbmc_property_renderer.rs b/kani-driver/src/cbmc_property_renderer.rs index 7de0350992b9..9b2d58a59160 100644 --- a/kani-driver/src/cbmc_property_renderer.rs +++ b/kani-driver/src/cbmc_property_renderer.rs @@ -168,6 +168,7 @@ impl ParserItem { pub fn kani_cbmc_output_filter( item: ParserItem, extra_ptr_checks: bool, + quiet: bool, output_format: &OutputFormat, ) -> Option { // Some items (e.g., messages) are skipped. 
@@ -178,9 +179,11 @@ pub fn kani_cbmc_output_filter( let processed_item = process_item(item, extra_ptr_checks); // Both formatting and printing could be handled by objects which // implement a trait `Printer`. - let formatted_item = format_item(&processed_item, output_format); - if let Some(fmt_item) = formatted_item { - println!("{fmt_item}"); + if !quiet { + let formatted_item = format_item(&processed_item, output_format); + if let Some(fmt_item) = formatted_item { + println!("{fmt_item}"); + } } // TODO: Record processed items and dump them into a JSON file // diff --git a/kani-driver/src/concrete_playback.rs b/kani-driver/src/concrete_playback.rs index fe770986e126..66971603c7b7 100644 --- a/kani-driver/src/concrete_playback.rs +++ b/kani-driver/src/concrete_playback.rs @@ -7,14 +7,15 @@ use crate::args::ConcretePlaybackMode; use crate::call_cbmc::VerificationResult; use crate::session::KaniSession; +use crate::util::tempfile::TempFile; use anyhow::{Context, Result}; use concrete_vals_extractor::{extract_harness_values, ConcreteVal}; use kani_metadata::HarnessMetadata; use std::collections::hash_map::DefaultHasher; use std::ffi::OsString; -use std::fs::{self, File}; +use std::fs::File; use std::hash::{Hash, Hasher}; -use std::io::{Read, Write}; +use std::io::{BufRead, BufReader, Write}; use std::path::Path; use std::process::Command; @@ -30,7 +31,7 @@ impl KaniSession { None => return Ok(()), }; - if let Some(result_items) = &verification_result.results { + if let Ok(result_items) = &verification_result.results { match extract_harness_values(result_items) { None => println!( "WARNING: Kani could not produce a concrete playback for `{}` because there \ @@ -38,33 +39,37 @@ impl KaniSession { harness.pretty_name ), Some(concrete_vals) => { - let concrete_playback = - format_unit_test(&harness, &concrete_vals, self.args.randomize_layout); + let pretty_name = harness.get_harness_name_unqualified(); + let generated_unit_test = format_unit_test(&pretty_name, 
&concrete_vals); match playback_mode { ConcretePlaybackMode::Print => { println!( "Concrete playback unit test for `{}`:\n```\n{}\n```", - &harness.pretty_name, &concrete_playback.unit_test_str + &harness.pretty_name, + &generated_unit_test.code.join("\n") ); println!( "INFO: To automatically add the concrete playback unit test `{}` to the \ src code, run Kani with `--concrete-playback=inplace`.", - &concrete_playback.unit_test_name + &generated_unit_test.name ); } ConcretePlaybackMode::InPlace => { if !self.args.quiet { println!( "INFO: Now modifying the source code to include the concrete playback unit test `{}`.", - &concrete_playback.unit_test_name + &generated_unit_test.name ); } self.modify_src_code( &harness.original_file, harness.original_end_line, - &concrete_playback, + &generated_unit_test, ) - .expect("Failed to modify source code"); + .expect(&format!( + "Failed to modify source code for the file `{}`", + &harness.original_file + )); } } verification_result.generated_concrete_test = true; @@ -79,88 +84,73 @@ impl KaniSession { &self, src_path: &str, proof_harness_end_line: usize, - concrete_playback: &UnitTest, + unit_test: &UnitTest, ) -> Result<()> { - let mut src_file = File::open(src_path) - .with_context(|| format!("Couldn't open user's source code file `{src_path}`"))?; - let mut src_as_str = String::new(); - src_file.read_to_string(&mut src_as_str).with_context(|| { - format!("Couldn't read user's source code file `{src_path}` as a string") - })?; - - // Short circuit if unit test already in source code. 
- if src_as_str.contains(&concrete_playback.unit_test_name) { - if !self.args.quiet { - println!( - "Concrete playback unit test `{}/{}` already found in source code, so skipping modification.", - src_path, concrete_playback.unit_test_name, - ); - } + let unit_test_already_in_src = + self.add_test_inplace(src_path, proof_harness_end_line, unit_test)?; + + if unit_test_already_in_src { return Ok(()); } - // Split the code into two different parts around the insertion point. - let src_newline_matches: Vec<_> = src_as_str.match_indices('\n').collect(); - // If the proof harness ends on the last line of source code, there won't be a newline. - let insertion_pt = if proof_harness_end_line == src_newline_matches.len() + 1 { - src_as_str.len() - } else { - // Existing newline goes with 2nd src half. We also manually add newline before unit test. - src_newline_matches[proof_harness_end_line - 1].0 - }; - let src_before_concrete_playback = &src_as_str[..insertion_pt]; - let src_after_concrete_playback = &src_as_str[insertion_pt..]; - - // Write new source lines to a tmp file, and then rename it to the actual user's source file. - // Renames are usually automic, so we won't corrupt the user's source file during a crash. - let tmp_src_path = src_path.to_string() + ".concrete_playback_overwrite"; - let mut tmp_src_file = File::create(&tmp_src_path) - .with_context(|| format!("Couldn't create tmp source code file `{tmp_src_path}`"))?; - write!( - tmp_src_file, - "{}\n{}{}", - src_before_concrete_playback, - concrete_playback.unit_test_str, - src_after_concrete_playback - ) - .with_context(|| { - format!("Couldn't write new src str into tmp src file `{tmp_src_path}`") - })?; - fs::rename(&tmp_src_path, src_path).with_context(|| { - format!("Couldn't rename tmp src file `{tmp_src_path}` to actual src file `{src_path}`") - })?; - // Run rustfmt on just the inserted lines. 
- let source_path = Path::new(src_path); - let parent_dir_as_path = source_path.parent().with_context(|| { - format!("Expected source file `{}` to be in a directory", source_path.display()) - })?; - let parent_dir_as_str = parent_dir_as_path.to_str().with_context(|| { - format!( - "Couldn't convert source file parent directory `{}` from str", - parent_dir_as_path.display() - ) - })?; - let src_file_name_as_osstr = source_path.file_name().with_context(|| { - format!("Couldn't get the file name from the source file `{}`", source_path.display()) - })?; - let src_file_name_as_str = src_file_name_as_osstr.to_str().with_context(|| { - format!( - "Couldn't convert source code file name `{src_file_name_as_osstr:?}` from OsStr to str" - ) - })?; - - let concrete_playback_num_lines = concrete_playback.unit_test_str.matches('\n').count() + 1; + let concrete_playback_num_lines = unit_test.code.len(); let unit_test_start_line = proof_harness_end_line + 1; let unit_test_end_line = unit_test_start_line + concrete_playback_num_lines - 1; + let src_path = Path::new(src_path); + let (path, file_name) = extract_parent_dir_and_src_file(src_path)?; let file_line_ranges = vec![FileLineRange { - file: src_file_name_as_str.to_string(), + file: file_name, line_range: Some((unit_test_start_line, unit_test_end_line)), }]; - self.run_rustfmt(&file_line_ranges, Some(parent_dir_as_str))?; + self.run_rustfmt(&file_line_ranges, Some(&path))?; Ok(()) } + /// Writes the new source code to a user's source file using a tempfile as the means. + /// Returns whether the unit test was already in the old source code. 
+ fn add_test_inplace( + &self, + source_path: &str, + proof_harness_end_line: usize, + unit_test: &UnitTest, + ) -> Result { + // Read from source + let source_file = File::open(source_path).unwrap(); + let source_reader = BufReader::new(source_file); + + // Create temp file + let mut temp_file = TempFile::try_new("concrete_playback.tmp")?; + let mut curr_line_num = 0; + + // Use a buffered reader/writer to generate the unit test line by line + for line in source_reader.lines().flatten() { + if line.contains(&unit_test.name) { + if !self.args.quiet { + println!( + "Concrete playback unit test `{}/{}` already found in source code, so skipping modification.", + source_path, unit_test.name, + ); + } + // the drop impl will take care of flushing and resetting + return Ok(true); + } + curr_line_num += 1; + if let Some(temp_writer) = temp_file.writer.as_mut() { + writeln!(temp_writer, "{line}")?; + if curr_line_num == proof_harness_end_line { + for unit_test_line in unit_test.code.iter() { + curr_line_num += 1; + writeln!(temp_writer, "{unit_test_line}")?; + } + } + } + } + + temp_file.rename(source_path).expect("Could not rename file"); + Ok(false) + } + /// Run rustfmt on the given src file, and optionally on only the specific lines. fn run_rustfmt( &self, @@ -207,15 +197,39 @@ impl KaniSession { } } -/// Generate a unit test from a list of concrete values. -/// `randomize_layout_seed` is `None` when layout is not randomized, -/// `Some(None)` when layout is randomized without seed, and -/// `Some(Some(seed))` when layout is randomized with the seed `seed`. -fn format_unit_test( - harness_metadata: &HarnessMetadata, - concrete_vals: &[ConcreteVal], - randomize_layout_seed: Option>, -) -> UnitTest { +/// Generate a formatted unit test from a list of concrete values. +fn format_unit_test(harness_name: &str, concrete_vals: &[ConcreteVal]) -> UnitTest { + // Hash the concrete values along with the proof harness name. 
+ let mut hasher = DefaultHasher::new(); + harness_name.hash(&mut hasher); + concrete_vals.hash(&mut hasher); + let hash = hasher.finish(); + let func_name = format!("kani_concrete_playback_{harness_name}_{hash}"); + + let func_before_concrete_vals = [ + "#[test]".to_string(), + format!("fn {func_name}() {{"), + format!("{:<4}let concrete_vals: Vec> = vec![", " "), + ] + .into_iter(); + let formatted_concrete_vals = format_concrete_vals(concrete_vals); + let func_after_concrete_vals = [ + format!("{:<4}];", " "), + format!("{:<4}kani::concrete_playback_run(concrete_vals, {harness_name});", " "), + "}".to_string(), + ] + .into_iter(); + + let full_func: Vec<_> = func_before_concrete_vals + .chain(formatted_concrete_vals) + .chain(func_after_concrete_vals) + .collect(); + + UnitTest { code: full_func, name: func_name } +} + +/// Format an initializer expression for a number of concrete values. +fn format_concrete_vals(concrete_vals: &[ConcreteVal]) -> impl Iterator + '_ { /* Given a number of byte vectors, format them as: // interp_concrete_val_1 @@ -223,51 +237,21 @@ fn format_unit_test( // interp_concrete_val_2 vec![concrete_val_2], ... */ - let vec_whitespace = " ".repeat(8); - let vecs_as_str = concrete_vals - .iter() - .map(|concrete_val| { - format!( - "{vec_whitespace}// {}\n{vec_whitespace}vec!{:?}", - concrete_val.interp_val, concrete_val.byte_arr - ) - }) - .collect::>() - .join(",\n"); - let harness_name = &harness_metadata.mangled_name; - let pretty_name = &harness_metadata.get_harness_name_unqualified(); - - // Hash the generated det val string along with the proof harness name. 
- let mut hasher = DefaultHasher::new(); - harness_name.hash(&mut hasher); - vecs_as_str.hash(&mut hasher); - let hash = hasher.finish(); - - let concrete_playback_func_name = format!("kani_concrete_playback_{pretty_name}_{hash}"); - - let randomize_layout_message = match randomize_layout_seed { - None => String::new(), - Some(None) => { - "// This test has to be run with rustc option: -Z randomize-layout\n ".to_string() - } - Some(Some(seed)) => format!( - "// This test has to be run with rust options: -Z randomize-layout -Z layout-seed={seed}\n ", - ), - }; + concrete_vals.iter().flat_map(|concrete_val| { + [ + format!("{:<8}// {}", " ", concrete_val.interp_val), + format!("{:<8}vec!{:?},", " ", concrete_val.byte_arr), + ] + }) +} - #[rustfmt::skip] - let concrete_playback = format!( -"#[test] -fn {concrete_playback_func_name}() {{ - {randomize_layout_message}\ - let concrete_vals: Vec> = vec![ -{vecs_as_str} - ]; - kani::concrete_playback_run(concrete_vals, {pretty_name}); -}}" - ); - - UnitTest { unit_test_str: concrete_playback, unit_test_name: concrete_playback_func_name } +/// Suppose `src_path` was `/path/to/file.txt`. This function extracts this into `/path/to` and `file.txt`. +fn extract_parent_dir_and_src_file(src_path: &Path) -> Result<(String, String)> { + let parent_dir_as_path = src_path.parent().unwrap(); + let parent_dir = parent_dir_as_path.to_string_lossy().to_string(); + let src_file_name_as_osstr = src_path.file_name(); + let src_file = src_file_name_as_osstr.unwrap().to_string_lossy().to_string(); + Ok((parent_dir, src_file)) } struct FileLineRange { @@ -276,8 +260,8 @@ struct FileLineRange { } struct UnitTest { - unit_test_str: String, - unit_test_name: String, + code: Vec, + name: String, } /// Extract concrete values from the CBMC output processed items. 
@@ -297,6 +281,7 @@ struct UnitTest { mod concrete_vals_extractor { use crate::cbmc_output_parser::{CheckStatus, Property, TraceItem}; + #[derive(Hash)] pub struct ConcreteVal { pub byte_arr: Vec, pub interp_val: String, @@ -383,3 +368,211 @@ mod concrete_vals_extractor { None } } + +#[cfg(test)] +mod tests { + use super::concrete_vals_extractor::*; + use super::*; + use crate::cbmc_output_parser::{ + CheckStatus, Property, PropertyId, SourceLocation, TraceData, TraceItem, TraceValue, + }; + + /// util function for unit tests taht generates the rustfmt args used for formatting specific lines inside specific files. + /// note - adding this within the test mod because it gives a lint warning without it. + fn rustfmt_args(file_line_ranges: &[FileLineRange]) -> Vec { + let mut args: Vec = Vec::new(); + let mut line_range_dicts: Vec = Vec::new(); + for file_line_range in file_line_ranges { + if let Some((start_line, end_line)) = file_line_range.line_range { + let src_file = &file_line_range.file; + let line_range_dict = + format!("{{\"file\":\"{src_file}\",\"range\":[{start_line},{end_line}]}}"); + line_range_dicts.push(line_range_dict); + } + } + if !line_range_dicts.is_empty() { + // `--file-lines` arg is currently unstable. 
+ args.push("--unstable-features".into()); + args.push("--file-lines".into()); + let line_range_dicts_combined = format!("[{}]", line_range_dicts.join(",")); + args.push(line_range_dicts_combined.into()); + } + for file_line_range in file_line_ranges { + args.push((&file_line_range.file).into()); + } + args + } + + #[test] + fn format_zero_concrete_vals() { + let concrete_vals: [ConcreteVal; 0] = []; + let actual: Vec<_> = format_concrete_vals(&concrete_vals).collect(); + let expected: Vec = Vec::new(); + assert_eq!(actual, expected); + } + + /// Check that the generated unit tests have the right formatting and indentation + #[test] + fn format_two_concrete_vals() { + let concrete_vals = [ + ConcreteVal { byte_arr: vec![0, 0], interp_val: "0".to_string() }, + ConcreteVal { byte_arr: vec![0, 0, 0, 0, 0, 0, 0, 0], interp_val: "0l".to_string() }, + ]; + let actual: Vec<_> = format_concrete_vals(&concrete_vals).collect(); + let expected = vec![ + format!("{:<8}// 0", " "), + format!("{:<8}vec![0, 0],", " "), + format!("{:<8}// 0l", " "), + format!("{:<8}vec![0, 0, 0, 0, 0, 0, 0, 0],", " "), + ]; + assert_eq!(actual, expected); + } + + struct SplitUnitTestName { + before_hash: String, + hash: String, + } + + /// Unit test names are formatted as "kani_concrete_playback_{harness_name}_{hash}". + /// This function splits the name into "kani_concrete_playback_{harness_name}" and "{hash}". + fn split_unit_test_name(unit_test_name: &str) -> SplitUnitTestName { + let underscore_locs: Vec<_> = unit_test_name.match_indices('_').collect(); + let last_underscore_idx = underscore_locs.last().unwrap().0; + SplitUnitTestName { + before_hash: unit_test_name[..last_underscore_idx].to_string(), + hash: unit_test_name[last_underscore_idx + 1..].to_string(), + } + } + + /// Since hashes can not be relied on in tests, this compares all parts of a unit test except the hash. 
+ #[test] + fn format_unit_test_full_func() { + let harness_name = "test_proof_harness"; + let concrete_vals = [ConcreteVal { byte_arr: vec![0, 0], interp_val: "0".to_string() }]; + let unit_test = format_unit_test(harness_name, &concrete_vals); + let full_func = unit_test.code; + let split_unit_test_name = split_unit_test_name(&unit_test.name); + let expected_after_func_name = vec![ + format!("{:<4}let concrete_vals: Vec> = vec![", " "), + format!("{:<8}// 0", " "), + format!("{:<8}vec![0, 0],", " "), + format!("{:<4}];", " "), + format!("{:<4}kani::concrete_playback_run(concrete_vals, {harness_name});", " "), + "}".to_string(), + ]; + + assert_eq!(full_func[0], "#[test]"); + assert_eq!( + split_unit_test_name.before_hash, + format!("kani_concrete_playback_{harness_name}") + ); + assert_eq!(full_func[1], format!("fn {}() {{", unit_test.name)); + assert_eq!(full_func[2..], expected_after_func_name); + } + + /// Generates a unit test and returns its hash. + fn extract_hash_from_unit_test(harness_name: &str, concrete_vals: &[ConcreteVal]) -> String { + let unit_test = format_unit_test(harness_name, concrete_vals); + split_unit_test_name(&unit_test.name).hash + } + + /// Two hashes should not be the same if either the harness_name or the concrete_vals changes. 
+ #[test] + fn check_hashes_are_unique() { + let harness_name_1 = "test_proof_harness1"; + let harness_name_2 = "test_proof_harness2"; + let concrete_vals_1 = [ConcreteVal { byte_arr: vec![0, 0], interp_val: "0".to_string() }]; + let concrete_vals_2 = [ConcreteVal { byte_arr: vec![1, 0], interp_val: "0".to_string() }]; + let concrete_vals_3 = [ConcreteVal { byte_arr: vec![0, 0], interp_val: "1".to_string() }]; + + let hash_base = extract_hash_from_unit_test(harness_name_1, &concrete_vals_1); + let hash_diff_harness_name = extract_hash_from_unit_test(harness_name_2, &concrete_vals_1); + let hash_diff_concrete_byte = extract_hash_from_unit_test(harness_name_1, &concrete_vals_2); + let hash_diff_interp_val = extract_hash_from_unit_test(harness_name_1, &concrete_vals_3); + + assert_ne!(hash_base, hash_diff_harness_name); + assert_ne!(hash_base, hash_diff_concrete_byte); + assert_ne!(hash_base, hash_diff_interp_val); + } + + #[test] + fn check_rustfmt_args_no_line_ranges() { + let file_line_ranges = [FileLineRange { file: "file1".to_string(), line_range: None }]; + let args = rustfmt_args(&file_line_ranges); + let expected: Vec = vec!["file1".into()]; + assert_eq!(args, expected); + } + + #[test] + fn check_rustfmt_args_some_line_ranges() { + let file_line_ranges = [ + FileLineRange { file: "file1".to_string(), line_range: None }, + FileLineRange { file: "path/to/file2".to_string(), line_range: Some((1, 3)) }, + ]; + let args = rustfmt_args(&file_line_ranges); + let expected: Vec = [ + "--unstable-features", + "--file-lines", + "[{\"file\":\"path/to/file2\",\"range\":[1,3]}]", + "file1", + "path/to/file2", + ] + .into_iter() + .map(|arg| arg.into()) + .collect(); + assert_eq!(args, expected); + } + + #[test] + fn check_extract_parent_dir_and_src_file() { + let src_path = "/path/to/file.txt"; + let src_path = Path::new(src_path); + let (path, file_name) = extract_parent_dir_and_src_file(src_path).unwrap(); + assert_eq!(path, "/path/to"); + assert_eq!(file_name, 
"file.txt"); + } + + /// Test util functions which extract the counter example values from a property. + #[test] + fn check_concrete_vals_extractor() { + let processed_items = [Property { + description: "".to_string(), + property_id: PropertyId { + fn_name: Some("".to_string()), + class: "assertion".to_string(), + id: 1, + }, + status: CheckStatus::Failure, + reach: None, + source_location: SourceLocation { + column: None, + file: None, + function: None, + line: None, + }, + trace: Some(vec![TraceItem { + thread: 0, + step_type: "assignment".to_string(), + hidden: false, + lhs: Some("goto_symex$$return_value".to_string()), + source_location: Some(SourceLocation { + column: None, + file: None, + function: Some("kani::any_raw_internal::".to_string()), + line: None, + }), + value: Some(TraceValue { + name: "".to_string(), + binary: Some("0000001100000001".to_string()), + data: Some(TraceData::NonBool("385".to_string())), + width: Some(16), + }), + }]), + }]; + let concrete_vals = extract_harness_values(&processed_items).unwrap(); + let concrete_val = &concrete_vals[0]; + + assert_eq!(concrete_val.byte_arr, vec![1, 3]); + assert_eq!(concrete_val.interp_val, "385"); + } +} diff --git a/kani-driver/src/harness_runner.rs b/kani-driver/src/harness_runner.rs index 10fcbb10f99f..601b2cadbb27 100644 --- a/kani-driver/src/harness_runner.rs +++ b/kani-driver/src/harness_runner.rs @@ -1,42 +1,43 @@ // Copyright Kani Contributors // SPDX-License-Identifier: Apache-2.0 OR MIT -use anyhow::Result; +use anyhow::{bail, Result}; use kani_metadata::{ArtifactType, HarnessMetadata}; use rayon::prelude::*; +use std::cmp::Ordering; use std::path::Path; use crate::args::OutputFormat; use crate::call_cbmc::{VerificationResult, VerificationStatus}; use crate::project::Project; use crate::session::KaniSession; -use crate::util::specialized_harness_name; +use crate::util::{error, specialized_harness_name, warning}; /// A HarnessRunner is responsible for checking all proof harnesses. 
The data in this structure represents /// "background information" that the controlling driver (e.g. cargo-kani or kani) computed. /// /// This struct is basically just a nicer way of passing many arguments to [`Self::check_all_harnesses`] -pub(crate) struct HarnessRunner<'sess> { +pub(crate) struct HarnessRunner<'sess, 'pr> { /// The underlying kani session pub sess: &'sess KaniSession, /// The project under verification. - pub project: Project, + pub project: &'pr Project, } /// The result of checking a single harness. This both hangs on to the harness metadata /// (as a means to identify which harness), and provides that harness's verification result. -pub(crate) struct HarnessResult<'sess> { - pub harness: &'sess HarnessMetadata, +pub(crate) struct HarnessResult<'pr> { + pub harness: &'pr HarnessMetadata, pub result: VerificationResult, } -impl<'sess> HarnessRunner<'sess> { +impl<'sess, 'pr> HarnessRunner<'sess, 'pr> { /// Given a [`HarnessRunner`] (to abstract over how these harnesses were generated), this runs /// the proof-checking process for each harness in `harnesses`. - pub(crate) fn check_all_harnesses<'a>( + pub(crate) fn check_all_harnesses( &self, - harnesses: &'a [HarnessMetadata], - ) -> Result>> { + harnesses: &'pr [&HarnessMetadata], + ) -> Result>> { let sorted_harnesses = crate::metadata::sort_harnesses_by_loc(harnesses); let pool = { @@ -47,10 +48,10 @@ impl<'sess> HarnessRunner<'sess> { builder.build()? 
}; - let results = pool.install(|| -> Result>> { + let results = pool.install(|| -> Result>> { sorted_harnesses .par_iter() - .map(|harness| -> Result> { + .map(|harness| -> Result> { let harness_filename = harness.pretty_name.replace("::", "-"); let report_dir = self.project.outdir.join(format!("report-{harness_filename}")); let goto_file = @@ -64,6 +65,10 @@ impl<'sess> HarnessRunner<'sess> { &harness, )?; + if self.sess.args.synthesize_loop_contracts { + self.sess.synthesize_loop_contracts(&specialized_obj, &specialized_obj)?; + } + let result = self.sess.check_harness(&specialized_obj, &report_dir, harness)?; Ok(HarnessResult { harness, result }) }) @@ -91,7 +96,7 @@ impl KaniSession { // Strictly speaking, we're faking success here. This is more "no error" Ok(VerificationResult::mock_success()) } else { - let result = self.with_timer(|| self.run_cbmc(binary, harness), "run_cmbc")?; + let result = self.with_timer(|| self.run_cbmc(binary, harness), "run_cbmc")?; // When quiet, we don't want to print anything at all. // When output is old, we also don't have real results to print. @@ -103,6 +108,33 @@ impl KaniSession { } } + /// Prints a warning at the end of the verification if harness contained a stub but stubs were + /// not enabled. 
+ fn stubbing_statuses(&self, results: &[HarnessResult]) { + if !self.args.enable_stubbing { + let ignored_stubs: Vec<_> = results + .iter() + .filter_map(|result| { + (!result.harness.attributes.stubs.is_empty()) + .then_some(result.harness.pretty_name.as_str()) + }) + .collect(); + match ignored_stubs.len().cmp(&1) { + Ordering::Equal => warning(&format!( + "harness `{}` contained stubs which were ignored.\n\ + To enable stubbing, pass options `--enable-unstable --enable-stubbing`", + ignored_stubs[0] + )), + Ordering::Greater => warning(&format!( + "harnesses `{}` contained stubs which were ignored.\n\ + To enable stubbing, pass options `--enable-unstable --enable-stubbing`", + ignored_stubs.join("`, `") + )), + Ordering::Less => {} + } + } + } + /// Concludes a session by printing a summary report and exiting the process with an /// error code (if applicable). /// @@ -139,14 +171,32 @@ impl KaniSession { "Complete - {succeeding} successfully verified harnesses, {failing} failures, {total} total." ); } else { - // TODO: This could use a better error message, possibly with links to Kani documentation. - // New users may encounter this and could use a pointer to how to write proof harnesses. - println!( - "No proof harnesses (functions with #[kani::proof]) were found to verify." - ); + match (self.args.harnesses.as_slice(), &self.args.function) { + ([], None) => + // TODO: This could use a better message, possibly with links to Kani documentation. + // New users may encounter this and could use a pointer to how to write proof harnesses. + { + println!( + "No proof harnesses (functions with #[kani::proof]) were found to verify." + ) + } + ([harness], None) => { + bail!("no harnesses matched the harness filter: `{harness}`") + } + (harnesses, None) => bail!( + "no harnesses matched the harness filters: `{}`", + harnesses.join("`, `") + ), + ([], Some(func)) => error(&format!("No function named {func} was found")), + _ => unreachable!( + "invalid configuration. 
Cannot specify harness and function at the same time" + ), + }; } } + self.stubbing_statuses(results); + #[cfg(feature = "unsound_experiments")] self.args.unsound_experiments.print_warnings(); diff --git a/kani-driver/src/main.rs b/kani-driver/src/main.rs index acd7c795196b..42f211e666a8 100644 --- a/kani-driver/src/main.rs +++ b/kani-driver/src/main.rs @@ -2,8 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 OR MIT #![feature(let_chains)] #![feature(array_methods)] - use std::ffi::OsString; +use std::process::ExitCode; use anyhow::Result; @@ -23,6 +23,7 @@ mod call_cbmc; mod call_cbmc_viewer; mod call_goto_cc; mod call_goto_instrument; +mod call_goto_synthesizer; mod call_single_file; mod cbmc_output_parser; mod cbmc_property_renderer; @@ -39,10 +40,20 @@ mod unsound_experiments; /// The main function for the `kani-driver`. /// The driver can be invoked via `cargo kani` and `kani` commands, which determines what kind of /// project should be verified. -fn main() -> Result<()> { - match determine_invocation_type(Vec::from_iter(std::env::args_os())) { +fn main() -> ExitCode { + let result = match determine_invocation_type(Vec::from_iter(std::env::args_os())) { InvocationType::CargoKani(args) => cargokani_main(args), InvocationType::Standalone => standalone_main(), + }; + + if let Err(error) = result { + // We are using the debug format for now to print the all the context. + // We should consider creating a standard for error reporting. 
+ debug!(?error, "main_failure"); + util::error(&format!("{error:#}")); + ExitCode::FAILURE + } else { + ExitCode::SUCCESS } } @@ -54,12 +65,12 @@ fn cargokani_main(input_args: Vec) -> Result<()> { let session = session::KaniSession::new(args.common_opts)?; if let Some(CargoKaniSubcommand::Assess(args)) = args.command { - return assess::cargokani_assess_main(session, args); + return assess::run_assess(session, args); } else if session.args.assess { - return assess::cargokani_assess_main(session, assess::AssessArgs::default()); + return assess::run_assess(session, assess::AssessArgs::default()); } - let project = project::cargo_project(&session)?; + let project = project::cargo_project(&session, false)?; if session.args.only_codegen { Ok(()) } else { verify_project(project, session) } } @@ -80,7 +91,7 @@ fn verify_project(project: Project, session: KaniSession) -> Result<()> { debug!(n = harnesses.len(), ?harnesses, "verify_project"); // Verification - let runner = harness_runner::HarnessRunner { sess: &session, project }; + let runner = harness_runner::HarnessRunner { sess: &session, project: &project }; let results = runner.check_all_harnesses(&harnesses)?; session.print_final_summary(&results) diff --git a/kani-driver/src/metadata.rs b/kani-driver/src/metadata.rs index b24827c8258b..0e10a3077880 100644 --- a/kani-driver/src/metadata.rs +++ b/kani-driver/src/metadata.rs @@ -1,13 +1,15 @@ // Copyright Kani Contributors // SPDX-License-Identifier: Apache-2.0 OR MIT -use anyhow::{bail, Result}; +use anyhow::Result; use std::path::Path; +use tracing::{debug, trace}; use kani_metadata::{ - HarnessMetadata, InternedString, KaniMetadata, TraitDefinedMethod, VtableCtxResults, + HarnessAttributes, HarnessMetadata, InternedString, KaniMetadata, TraitDefinedMethod, + VtableCtxResults, }; -use std::collections::HashMap; +use std::collections::{BTreeSet, HashMap}; use std::fs::File; use std::io::{BufReader, BufWriter}; @@ -109,16 +111,22 @@ pub fn merge_kani_metadata(files: 
Vec) -> KaniMetadata { impl KaniSession { /// Determine which function to use as entry point, based on command-line arguments and kani-metadata. - pub fn determine_targets( + pub fn determine_targets<'a>( &self, - all_harnesses: &[&HarnessMetadata], - ) -> Result> { - if let Some(name) = self.args.harness.clone().or(self.args.function.clone()) { - // Linear search, since this is only ever called once - let harness = find_proof_harness(&name, all_harnesses)?; - return Ok(vec![harness.clone()]); + all_harnesses: &[&'a HarnessMetadata], + ) -> Result> { + let harnesses = if self.args.harnesses.is_empty() { + BTreeSet::from_iter(self.args.function.iter()) + } else { + BTreeSet::from_iter(self.args.harnesses.iter()) + }; + + if harnesses.is_empty() { + Ok(Vec::from(all_harnesses)) + } else { + let harnesses = find_proof_harnesses(harnesses, all_harnesses); + Ok(harnesses) } - Ok(all_harnesses.iter().map(|md| (*md).clone()).collect()) } } @@ -126,8 +134,8 @@ impl KaniSession { /// appearing harnesses get processed earlier. /// This is necessary for the concrete playback feature (with in-place unit test modification) /// because it guarantees that injected unit tests will not change the location of to-be-processed harnesses. -pub fn sort_harnesses_by_loc(harnesses: &[HarnessMetadata]) -> Vec<&HarnessMetadata> { - let mut harnesses_clone: Vec<_> = harnesses.iter().by_ref().collect(); +pub fn sort_harnesses_by_loc<'a>(harnesses: &[&'a HarnessMetadata]) -> Vec<&'a HarnessMetadata> { + let mut harnesses_clone = harnesses.to_vec(); harnesses_clone.sort_unstable_by(|harness1, harness2| { harness1 .original_file @@ -149,7 +157,7 @@ pub fn mock_proof_harness( original_file: "".into(), original_start_line: 0, original_end_line: 0, - unwind_value, + attributes: HarnessAttributes { unwind_value, proof: true, ..Default::default() }, goto_file: None, } } @@ -157,40 +165,24 @@ pub fn mock_proof_harness( /// Search for a proof harness with a particular name. 
/// At the present time, we use `no_mangle` so collisions shouldn't happen, /// but this function is written to be robust against that changing in the future. -fn find_proof_harness<'a>( - name: &str, - harnesses: &'a [&HarnessMetadata], -) -> Result<&'a HarnessMetadata> { - let mut result: Option<&'a HarnessMetadata> = None; - for h in harnesses.iter() { - // Either an exact match, or... - let matches = h.pretty_name == *name || { - // pretty_name will be things like `module::submodule::name_of_function` - // and we want people to be able to specify `--harness name_of_function` - if let Some(prefix) = h.pretty_name.strip_suffix(name) { - prefix.ends_with("::") - } else { - false - } - }; - if matches { - if let Some(other) = result { - bail!( - "Conflicting proof harnesses named {}:\n {}\n {}", - name, - other.pretty_name, - h.pretty_name - ); - } else { - result = Some(h); - } +fn find_proof_harnesses<'a>( + targets: BTreeSet<&String>, + all_harnesses: &[&'a HarnessMetadata], +) -> Vec<&'a HarnessMetadata> { + debug!(?targets, "find_proof_harness"); + let mut result = vec![]; + for md in all_harnesses.iter() { + // Either an exact match, or a substring match. We check the exact first since it's cheaper. 
+ if targets.contains(&md.pretty_name) + || targets.contains(&md.get_harness_name_unqualified().to_string()) + || targets.iter().any(|target| md.pretty_name.contains(*target)) + { + result.push(*md); + } else { + trace!(skip = md.pretty_name, "find_proof_harnesses"); } } - if let Some(x) = result { - Ok(x) - } else { - bail!("A proof harness named {} was not found", name); - } + result } #[cfg(test)] @@ -205,13 +197,24 @@ mod tests { mock_proof_harness("module::not_check_three", None, None), ]; let ref_harnesses = harnesses.iter().collect::>(); - assert!(find_proof_harness("check_three", &ref_harnesses).is_err()); + assert_eq!( + find_proof_harnesses(BTreeSet::from([&"check_three".to_string()]), &ref_harnesses) + .len(), + 1 + ); assert!( - find_proof_harness("check_two", &ref_harnesses).unwrap().mangled_name + find_proof_harnesses(BTreeSet::from([&"check_two".to_string()]), &ref_harnesses) + .first() + .unwrap() + .mangled_name == "module::check_two" ); assert!( - find_proof_harness("check_one", &ref_harnesses).unwrap().mangled_name == "check_one" + find_proof_harnesses(BTreeSet::from([&"check_one".to_string()]), &ref_harnesses) + .first() + .unwrap() + .mangled_name + == "check_one" ); } } diff --git a/kani-driver/src/project.rs b/kani-driver/src/project.rs index 25648ae4a460..4d5d353c24c7 100644 --- a/kani-driver/src/project.rs +++ b/kani-driver/src/project.rs @@ -59,6 +59,8 @@ pub struct Project { pub merged_artifacts: bool, /// Records the cargo metadata from the build, if there was any pub cargo_metadata: Option, + /// For build `keep_going` mode, we collect the targets that we failed to compile. + pub failed_targets: Option>, } impl Project { @@ -98,7 +100,7 @@ impl Project { } /// Information about a build artifact. -#[derive(Debug, Eq, PartialEq, Clone)] +#[derive(Debug, Eq, PartialEq, Clone, Hash)] pub struct Artifact { /// The path for this artifact in the canonical form. 
path: PathBuf, @@ -121,12 +123,12 @@ impl Deref for Artifact { impl Artifact { /// Create a new artifact if the given path exists. - fn try_new(path: &Path, typ: ArtifactType) -> Result { + pub fn try_new(path: &Path, typ: ArtifactType) -> Result { Ok(Artifact { path: path.canonicalize()?, typ }) } /// Check if this artifact has the given type. - fn has_type(&self, typ: ArtifactType) -> bool { + pub fn has_type(&self, typ: ArtifactType) -> bool { self.typ == typ } } @@ -145,8 +147,10 @@ fn dump_metadata(metadata: &KaniMetadata, path: &Path) { } /// Generate a project using `cargo`. -pub fn cargo_project(session: &KaniSession) -> Result { - let outputs = session.cargo_build()?; +/// Accept a boolean to build as many targets as possible. The number of failures in that case can +/// be collected from the project. +pub fn cargo_project(session: &KaniSession, keep_going: bool) -> Result { + let outputs = session.cargo_build(keep_going)?; let mut artifacts = vec![]; let outdir = outputs.outdir.canonicalize()?; if session.args.function.is_some() { @@ -155,10 +159,15 @@ pub fn cargo_project(session: &KaniSession) -> Result { // Merge goto files. let joined_name = "cbmc-linked"; let base_name = outdir.join(joined_name); - let symtab_gotos: Vec<_> = - outputs.symtabs.iter().map(|p| convert_type(p, SymTab, SymTabGoto)).collect(); let goto = base_name.with_extension(Goto); - session.link_goto_binary(&symtab_gotos, &goto)?; + let all_gotos = outputs + .metadata + .iter() + .map(|artifact| { + convert_type(&artifact, ArtifactType::Metadata, ArtifactType::SymTabGoto) + }) + .collect::>(); + session.link_goto_binary(&all_gotos, &goto)?; artifacts.push(Artifact::try_new(&goto, Goto)?); // Merge metadata files. 
@@ -176,6 +185,7 @@ pub fn cargo_project(session: &KaniSession) -> Result { metadata: vec![metadata], merged_artifacts: true, cargo_metadata: Some(outputs.cargo_metadata), + failed_targets: outputs.failed_targets, }) } else { // For the MIR Linker we know there is only one artifact per verification target. Use @@ -203,6 +213,7 @@ pub fn cargo_project(session: &KaniSession) -> Result { metadata, merged_artifacts: false, cargo_metadata: Some(outputs.cargo_metadata), + failed_targets: outputs.failed_targets, }) } } @@ -227,8 +238,8 @@ struct StandaloneProjectBuilder<'a> { } /// All the type of artifacts that may be generated as part of the build. -const BUILD_ARTIFACTS: [ArtifactType; 6] = - [Metadata, Goto, SymTab, SymTabGoto, TypeMap, VTableRestriction]; +const BUILD_ARTIFACTS: [ArtifactType; 7] = + [Metadata, Goto, SymTab, SymTabGoto, TypeMap, VTableRestriction, PrettyNameMap]; impl<'a> StandaloneProjectBuilder<'a> { /// Create a `StandaloneProjectBuilder` from the given input and session. 
@@ -295,6 +306,7 @@ impl<'a> StandaloneProjectBuilder<'a> { .collect(), merged_artifacts: false, cargo_metadata: None, + failed_targets: None, }) } diff --git a/kani-driver/src/session.rs b/kani-driver/src/session.rs index c833705c5d0e..a5ed9d4c36a7 100644 --- a/kani-driver/src/session.rs +++ b/kani-driver/src/session.rs @@ -9,6 +9,7 @@ use std::path::{Path, PathBuf}; use std::process::{Child, Command, ExitStatus, Stdio}; use std::sync::Mutex; use std::time::Instant; +use strum_macros::Display; use tracing::level_filters::LevelFilter; use tracing_subscriber::{layer::SubscriberExt, EnvFilter, Registry}; use tracing_tree::HierarchicalLayer; @@ -84,8 +85,12 @@ impl KaniSession { } } +#[derive(Debug, Copy, Clone, Display)] +#[strum(serialize_all = "snake_case")] pub enum ReachabilityMode { + #[strum(to_string = "harnesses")] ProofHarnesses, + #[strum(to_string = "pub_fns")] AllPubFns, Tests, } @@ -224,6 +229,19 @@ fn bin_folder() -> Result { Ok(dir) } +/// Return the path for the folder where the pre-compiled rust libraries are located. +pub fn lib_folder() -> Result { + Ok(base_folder()?.join("lib")) +} + +/// Return the base folder for the entire kani installation. +pub fn base_folder() -> Result { + Ok(bin_folder()? + .parent() + .context("Failed to find Kani's base installation folder.")? + .to_path_buf()) +} + impl InstallType { pub fn new() -> Result { // Case 1: We've checked out the development repo and we're built under `target/kani` diff --git a/kani-driver/src/unsound_experiments.rs b/kani-driver/src/unsound_experiments.rs index 6287d664ca90..bdd9f42d21ba 100644 --- a/kani-driver/src/unsound_experiments.rs +++ b/kani-driver/src/unsound_experiments.rs @@ -3,7 +3,6 @@ #![cfg(feature = "unsound_experiments")] use clap::Parser; -use std::ffi::OsString; #[derive(Debug, Parser)] pub struct UnsoundExperimentArgs { /// Zero initilize variables. 
@@ -16,7 +15,7 @@ pub struct UnsoundExperimentArgs { } impl UnsoundExperimentArgs { - pub fn process_args(&self) -> Vec { + pub fn process_args(&self) -> Vec { self.print_warnings(); let mut flags = vec![]; if self.unsound_experiment_zero_init_vars { diff --git a/kani-driver/src/util.rs b/kani-driver/src/util.rs index 33cb471048ef..2a4f7f1d926e 100644 --- a/kani-driver/src/util.rs +++ b/kani-driver/src/util.rs @@ -1,10 +1,97 @@ // Copyright Kani Contributors // SPDX-License-Identifier: Apache-2.0 OR MIT +//! Module that provides functions which are convenient for different purposes. +//! +//! In particular, the `warning` and `error` functions must be used for +//! diagnostic output across the `kani-driver` components. Please follow the +//! recommendations in +//! when reporting any kind of diagnostic for users. Note that it's recommended +//! to use the Rust compiler's error message utilities if you're working on the +//! `kani-compiler`. + use std::ffi::OsString; use std::path::{Path, PathBuf}; use std::process::Command; +pub mod tempfile { + use std::{ + env, + fs::{self, rename, File}, + io::{BufWriter, Error, Write}, + path::PathBuf, + }; + + use crate::util; + use ::rand; + use anyhow::Context; + use rand::Rng; + + /// Handle a writable temporary file which will be deleted when the object is dropped. + /// To save the contents of the file, users can invoke `rename` which will move the file to + /// its new location and no further deletion will be performed. 
+ pub struct TempFile { + pub file: File, + pub temp_path: PathBuf, + pub writer: Option>, + renamed: bool, + } + + impl TempFile { + /// Create a temp file + pub fn try_new(suffix_name: &str) -> Result { + let mut temp_path = env::temp_dir(); + + // Generate a unique name for the temporary directory + let hash: u32 = rand::thread_rng().gen(); + let file_name: &str = &format!("kani_tmp_{hash}_{suffix_name}"); + + temp_path.push(file_name); + let temp_file = File::create(&temp_path)?; + let writer = BufWriter::new(temp_file.try_clone()?); + + Ok(Self { file: temp_file, temp_path, writer: Some(writer), renamed: false }) + } + + /// Rename the temporary file to the new path, replacing the original file if the path points to a file that already exists. + pub fn rename(mut self, source_path: &str) -> Result<(), String> { + // flush here + self.writer.as_mut().unwrap().flush().unwrap(); + self.writer = None; + // Renames are usually automic, so we won't corrupt the user's source file during a crash. + rename(&self.temp_path, source_path) + .with_context(|| format!("Error renaming file {}", self.temp_path.display())) + .unwrap(); + self.renamed = true; + Ok(()) + } + } + + /// Ensure that the bufwriter is flushed and temp variables are dropped + /// everytime the tempfile is out of scope + /// note: the fields for the struct are dropped automatically by destructor + impl Drop for TempFile { + fn drop(&mut self) { + // if writer is not flushed, flush it + if self.writer.as_ref().is_some() { + // couldn't use ? as drop does not handle returns + if let Err(e) = self.writer.as_mut().unwrap().flush() { + util::warning( + format!("failed to flush {}: {e}", self.temp_path.display()).as_str(), + ); + } + self.writer = None; + } + + if !self.renamed { + if let Err(_e) = fs::remove_file(&self.temp_path.clone()) { + util::warning(&format!("Error removing file {}", self.temp_path.display())); + } + } + } + } +} + /// Replace an extension with another one, in a new PathBuf. 
(See tests for examples) pub fn alter_extension(path: &Path, ext: &str) -> PathBuf { path.with_extension(ext) @@ -41,19 +128,6 @@ pub fn executable_basename(argv0: &Option<&OsString>) -> Option { None } -/// Joining an OsString with a delimeter is missing from Rust libraries, so -/// let's write out own, and with convenient types... -pub fn join_osstring(elems: &[OsString], joiner: &str) -> OsString { - let mut str = OsString::new(); - for (i, arg) in elems.iter().enumerate() { - if i != 0 { - str.push(OsString::from(joiner)); - } - str.push(arg); - } - str -} - /// Render a Command as a string, to log it (e.g. in dry runs) pub fn render_command(cmd: &Command) -> OsString { let mut str = OsString::new(); @@ -88,13 +162,20 @@ pub fn specialized_harness_name(linked_obj: &Path, harness_filename: &str) -> Pa alter_extension(linked_obj, &format!("for-{harness_filename}.out")) } -/// Print a warning message. This will add a "warning:" tag before the message and style accordinly. +/// Print a warning message. This will add a "warning:" tag before the message and style accordingly. pub fn warning(msg: &str) { let warning = console::style("warning:").bold().yellow(); let msg_fmt = console::style(msg).bold(); println!("{warning} {msg_fmt}") } +/// Print an error message. This will add an "error:" tag before the message and style accordingly. 
+pub fn error(msg: &str) { + let error = console::style("error:").bold().red(); + let msg_fmt = console::style(msg).bold(); + println!("{error} {msg_fmt}") +} + #[cfg(test)] mod tests { use super::*; @@ -131,20 +212,6 @@ mod tests { assert_eq!(executable_basename(&Some(&OsString::from("foo"))), Some("foo".into())); } - #[test] - fn check_join_osstring() { - assert_eq!( - join_osstring(&["a".into(), "b".into(), "cd".into()], " "), - OsString::from("a b cd") - ); - assert_eq!(join_osstring(&[], " "), OsString::from("")); - assert_eq!(join_osstring(&["a".into()], " "), OsString::from("a")); - assert_eq!( - join_osstring(&["a".into(), "b".into(), "cd".into()], ", "), - OsString::from("a, b, cd") - ); - } - #[test] fn check_render_command() { let mut c1 = Command::new("a"); diff --git a/kani_metadata/Cargo.toml b/kani_metadata/Cargo.toml index 1f0db1e95be1..128dc69896ac 100644 --- a/kani_metadata/Cargo.toml +++ b/kani_metadata/Cargo.toml @@ -3,7 +3,7 @@ [package] name = "kani_metadata" -version = "0.19.0" +version = "0.23.0" edition = "2021" license = "MIT OR Apache-2.0" publish = false @@ -13,3 +13,5 @@ publish = false [dependencies] serde = {version = "1", features = ["derive"]} cbmc = { path = "../cprover_bindings", package = "cprover_bindings" } +strum = "0.24.1" +strum_macros = "0.24.3" diff --git a/kani_metadata/src/artifact.rs b/kani_metadata/src/artifact.rs index 64e99c09e5b8..54ff7a025e19 100644 --- a/kani_metadata/src/artifact.rs +++ b/kani_metadata/src/artifact.rs @@ -11,7 +11,7 @@ use std::path::{Path, PathBuf}; pub enum ArtifactType { /// A complete goto model generated after linking. Goto, - /// The metadata generated by the compiler. + /// The metadata generated by the kani compiler. Metadata, /// The `json` file that represents the symbol table generated by the compiler. SymTab, @@ -22,6 +22,9 @@ pub enum ArtifactType { /// A `json` file that has information about the function pointer restrictions derived from /// vtable generation. 
VTableRestriction, + /// A `json` file that stores the name to prettyName mapping for symbols + /// (used to demangle names from the C dump). + PrettyNameMap, } impl ArtifactType { @@ -33,6 +36,7 @@ impl ArtifactType { ArtifactType::SymTabGoto => "symtab.out", ArtifactType::TypeMap => "type_map.json", ArtifactType::VTableRestriction => "restrictions.json", + ArtifactType::PrettyNameMap => "pretty_name_map.json", } } } @@ -59,7 +63,8 @@ pub fn convert_type(path: &Path, from: ArtifactType, to: ArtifactType) -> PathBu | ArtifactType::SymTab | ArtifactType::SymTabGoto | ArtifactType::TypeMap - | ArtifactType::VTableRestriction => { + | ArtifactType::VTableRestriction + | ArtifactType::PrettyNameMap => { result.set_extension(""); result.set_extension(&to); } diff --git a/kani_metadata/src/cbmc_solver.rs b/kani_metadata/src/cbmc_solver.rs new file mode 100644 index 000000000000..f6c5c0a54dc4 --- /dev/null +++ b/kani_metadata/src/cbmc_solver.rs @@ -0,0 +1,36 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT + +use serde::{Deserialize, Serialize}; +use strum_macros::{AsRefStr, EnumString, EnumVariantNames}; + +/// An enum for CBMC solver options. All variants are handled by Kani, except for +/// the `Binary` one, which it passes as is to CBMC's `--external-sat-solver` +/// option. +#[derive( + Debug, + Clone, + AsRefStr, + EnumString, + EnumVariantNames, + PartialEq, + Eq, + Serialize, + Deserialize +)] +#[strum(serialize_all = "snake_case")] +pub enum CbmcSolver { + /// CaDiCaL which is available in CBMC as of version 5.77.0 + Cadical, + + /// The kissat solver that is included in the Kani bundle + Kissat, + + /// MiniSAT (CBMC's default solver) + Minisat, + + /// A solver binary variant whose argument gets passed to + /// `--external-sat-solver`. The specified binary must exist in path. 
+ #[strum(disabled, serialize = "bin=")] + Binary(String), +} diff --git a/kani_metadata/src/harness.rs b/kani_metadata/src/harness.rs index 547fca9459dc..0a7e66185c35 100644 --- a/kani_metadata/src/harness.rs +++ b/kani_metadata/src/harness.rs @@ -1,13 +1,14 @@ // Copyright Kani Contributors // SPDX-License-Identifier: Apache-2.0 OR MIT +use crate::CbmcSolver; use serde::{Deserialize, Serialize}; use std::path::PathBuf; /// We emit this structure for each annotated proof harness (`#[kani::proof]`) we find. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct HarnessMetadata { - /// The name the user gave to the function. + /// The fully qualified name the user gave to the function (i.e. includes the module path). pub pretty_name: String, /// The name of the function in the CBMC symbol table. pub mangled_name: String, @@ -19,10 +20,30 @@ pub struct HarnessMetadata { pub original_start_line: usize, /// The line in that file where the proof harness ends. pub original_end_line: usize, - /// Optional data to store unwind value. - pub unwind_value: Option, /// Optional modeling file that was generated by the compiler that includes this harness. pub goto_file: Option, + /// The `#[kani::<>]` attributes added to a harness. + pub attributes: HarnessAttributes, +} + +/// The attributes added by the user to control how a harness is executed. +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct HarnessAttributes { + /// Whether the harness has been annotated with proof. + pub proof: bool, + /// Optional data to store solver. + pub solver: Option, + /// Optional data to store unwind value. + pub unwind_value: Option, + /// The stubs used in this harness. + pub stubs: Vec, +} + +/// The stubbing type. 
+#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Stub { + pub original: String, + pub replacement: String, } impl HarnessMetadata { diff --git a/kani_metadata/src/lib.rs b/kani_metadata/src/lib.rs index 8a8594807552..7da3cbb94121 100644 --- a/kani_metadata/src/lib.rs +++ b/kani_metadata/src/lib.rs @@ -1,13 +1,17 @@ // Copyright Kani Contributors // SPDX-License-Identifier: Apache-2.0 OR MIT +use std::{collections::HashSet, path::PathBuf}; + use serde::{Deserialize, Serialize}; pub use artifact::ArtifactType; +pub use cbmc_solver::CbmcSolver; pub use harness::*; pub use vtable::*; pub mod artifact; +mod cbmc_solver; mod harness; mod vtable; @@ -31,5 +35,18 @@ pub struct UnsupportedFeature { /// A string identifying the feature. pub feature: String, /// A list of locations (file, line) where this unsupported feature can be found. - pub locations: Vec<(String, String)>, + pub locations: HashSet, +} + +/// The location in a file +#[derive(Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] +pub struct Location { + pub filename: String, + pub start_line: u64, +} + +/// We stub artifacts with the path to a KaniMetadata file. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct CompilerArtifactStub { + pub metadata_path: PathBuf, } diff --git a/library/kani/Cargo.toml b/library/kani/Cargo.toml index 444df9d1ea40..4585b2d45232 100644 --- a/library/kani/Cargo.toml +++ b/library/kani/Cargo.toml @@ -3,7 +3,7 @@ [package] name = "kani" -version = "0.19.0" +version = "0.23.0" edition = "2021" license = "MIT OR Apache-2.0" publish = false diff --git a/library/kani/src/arbitrary.rs b/library/kani/src/arbitrary.rs index 02044e3d7ac5..e5c6b2f66e91 100644 --- a/library/kani/src/arbitrary.rs +++ b/library/kani/src/arbitrary.rs @@ -3,7 +3,10 @@ //! This module introduces the Arbitrary trait as well as implementation for primitive types and //! other std containers. 
-use std::num::*; +use std::{ + marker::{PhantomData, PhantomPinned}, + num::*, +}; /// This trait should be used to generate symbolic variables that represent any valid value of /// its type. @@ -120,3 +123,15 @@ where if bool::any() { Ok(T::any()) } else { Err(E::any()) } } } + +impl Arbitrary for std::marker::PhantomData { + fn any() -> Self { + PhantomData + } +} + +impl Arbitrary for std::marker::PhantomPinned { + fn any() -> Self { + PhantomPinned + } +} diff --git a/library/kani/src/lib.rs b/library/kani/src/lib.rs index 9c4bd6ca30c9..10fa12034b3f 100644 --- a/library/kani/src/lib.rs +++ b/library/kani/src/lib.rs @@ -112,7 +112,6 @@ pub fn any() -> T { /// This creates a symbolic *valid* value of type `T`. /// The value is constrained to be a value accepted by the predicate passed to the filter. /// You can assign the return value of this function to a variable that you want to make symbolic. -/// The explanation field gives a mechanism to explain why the assumption is required for the proof. /// /// # Example: /// @@ -120,7 +119,7 @@ pub fn any() -> T { /// under all possible `NonZeroU8` input values between 0 and 12. /// /// ```rust -/// let inputA = kani::any_where::(|x| *x < 12, "explanation"); +/// let inputA = kani::any_where::(|x| *x < 12); /// fn_under_verification(inputA); /// ``` /// @@ -128,7 +127,7 @@ pub fn any() -> T { /// trait. The Arbitrary trait is used to build a symbolic value that represents all possible /// valid values for type `T`. 
#[inline(always)] -pub fn any_where bool>(f: F, _msg: &'static str) -> T { +pub fn any_where bool>(f: F) -> T { let result = T::any(); assume(f(&result)); result diff --git a/library/kani_macros/Cargo.toml b/library/kani_macros/Cargo.toml index 2ff7b4eb9421..6d000aa6610b 100644 --- a/library/kani_macros/Cargo.toml +++ b/library/kani_macros/Cargo.toml @@ -3,7 +3,7 @@ [package] name = "kani_macros" -version = "0.19.0" +version = "0.23.0" edition = "2021" license = "MIT OR Apache-2.0" publish = false diff --git a/library/kani_macros/src/lib.rs b/library/kani_macros/src/lib.rs index dcd043d7f6a9..8166f20ef493 100644 --- a/library/kani_macros/src/lib.rs +++ b/library/kani_macros/src/lib.rs @@ -148,6 +148,29 @@ pub fn stub(attr: TokenStream, item: TokenStream) -> TokenStream { result } +#[cfg(not(kani))] +#[proc_macro_attribute] +pub fn solver(_attr: TokenStream, item: TokenStream) -> TokenStream { + // No-op in non-kani mode + item +} + +/// Select the SAT solver to use with CBMC for this harness +/// The attribute `#[kani::solver(arg)]` can only be used alongside `#[kani::proof]`` +/// +/// arg - name of solver, e.g. kissat +#[cfg(kani)] +#[proc_macro_attribute] +pub fn solver(attr: TokenStream, item: TokenStream) -> TokenStream { + let mut result = TokenStream::new(); + // Translate `#[kani::solver(arg)]` to `#[kanitool::solver(arg)]` + let insert_string = "#[kanitool::solver(".to_owned() + &attr.to_string() + ")]"; + result.extend(insert_string.parse::().unwrap()); + + result.extend(item); + result +} + /// Allow users to auto generate Arbitrary implementations by using `#[derive(Arbitrary)]` macro. 
#[proc_macro_error] #[proc_macro_derive(Arbitrary)] diff --git a/library/std/Cargo.toml b/library/std/Cargo.toml index 9b16c80b3013..d97891f2e15b 100644 --- a/library/std/Cargo.toml +++ b/library/std/Cargo.toml @@ -5,7 +5,7 @@ # Note: this package is intentionally named std to make sure the names of # standard library symbols are preserved name = "std" -version = "0.19.0" +version = "0.23.0" edition = "2021" license = "MIT OR Apache-2.0" publish = false diff --git a/scripts/assess-scan-regression.sh b/scripts/assess-scan-regression.sh index 14bf57d34c56..f349219d5389 100755 --- a/scripts/assess-scan-regression.sh +++ b/scripts/assess-scan-regression.sh @@ -15,20 +15,30 @@ cargo kani --enable-unstable assess scan # Clean up (cd foo && cargo clean) (cd bar && cargo clean) +(cd compile_error && cargo clean) +(cd manifest_error && cargo clean) # Check for expected files (and clean up) EXPECTED_FILES=( - bar/bar.kani-assess-metadata.json - foo/foo.kani-assess-metadata.json bar/bar.kani-assess.log + bar/bar.kani-assess-metadata.json + compile_error/compile_error.kani-assess.log + compile_error/compile_error.kani-assess-metadata.json + manifest_error/manifest_error.kani-assess.log + manifest_error/manifest_error.kani-assess-metadata.json foo/foo.kani-assess.log + foo/foo.kani-assess-metadata.json ) + +errors=0 for file in ${EXPECTED_FILES[@]}; do if [ -f $KANI_DIR/tests/assess-scan-test-scaffold/$file ]; then rm $KANI_DIR/tests/assess-scan-test-scaffold/$file else - echo "Failed to find $file" && exit 1 + errors=1 + echo "Failed to find $file" fi done echo "Done with assess scan test" +exit $errors diff --git a/scripts/check-cbmc-version.py b/scripts/check-cbmc-version.py index fb831a75a16c..a23438f1ab4d 100755 --- a/scripts/check-cbmc-version.py +++ b/scripts/check-cbmc-version.py @@ -49,7 +49,7 @@ def main(): if desired_version > current_version: version_string = '.'.join([str(num) for num in current_version]) desired_version_string = '.'.join([str(num) for num in 
desired_version]) - print(f'WARNING: CBMC version is {version_string}, expected at least {desired_version_string}') + print(f'ERROR: CBMC version is {version_string}, expected at least {desired_version_string}') sys.exit(EXIT_CODE_MISMATCH) diff --git a/scripts/ci/copyright-exclude b/scripts/ci/copyright-exclude index 5c823691b4bc..5cc538031006 100644 --- a/scripts/ci/copyright-exclude +++ b/scripts/ci/copyright-exclude @@ -6,6 +6,7 @@ .props .public.key Cargo.lock +CHANGELOG LICENSE-APACHE LICENSE-MIT editorconfig diff --git a/scripts/kani-regression.sh b/scripts/kani-regression.sh index a57d651ff3be..fa09f0628aae 100755 --- a/scripts/kani-regression.sh +++ b/scripts/kani-regression.sh @@ -22,8 +22,8 @@ KANI_DIR=$SCRIPT_DIR/.. export KANI_FAIL_ON_UNEXPECTED_DESCRIPTION="true" # Required dependencies -check-cbmc-version.py --major 5 --minor 75 -check-cbmc-viewer-version.py --major 3 --minor 5 +check-cbmc-version.py --major 5 --minor 78 +check-cbmc-viewer-version.py --major 3 --minor 8 check_kissat_version.sh # Formatting check @@ -32,6 +32,8 @@ ${SCRIPT_DIR}/kani-fmt.sh --check # Build all packages in the workspace if [[ "" != "${KANI_ENABLE_UNSOUND_EXPERIMENTS-}" ]]; then cargo build-dev -- --features unsound_experiments +elif [[ "" != "${KANI_ENABLE_WRITE_JSON_SYMTAB-}" ]]; then + cargo build-dev -- --features write_json_symtab else cargo build-dev fi @@ -74,7 +76,8 @@ for testp in "${TESTS[@]}"; do suite=${testl[0]} mode=${testl[1]} echo "Check compiletest suite=$suite mode=$mode" - cargo run -p compiletest --quiet -- --suite $suite --mode $mode --quiet + cargo run -p compiletest --quiet -- --suite $suite --mode $mode \ + --quiet --no-fail-fast done # Check codegen for the standard library diff --git a/scripts/std-lib-regression.sh b/scripts/std-lib-regression.sh index f16cf0753912..c91fcad2b866 100755 --- a/scripts/std-lib-regression.sh +++ b/scripts/std-lib-regression.sh @@ -50,8 +50,7 @@ cd std_lib_test # Add some content to the rust file including an std 
function that is non-generic. echo ' -#[kani::proof] -fn check_format() { +pub fn main() { assert!("2021".parse::().unwrap() == 2021); } ' > src/lib.rs @@ -67,8 +66,15 @@ cp ${KANI_DIR}/rust-toolchain.toml . echo "Starting cargo build with Kani" export RUST_BACKTRACE=1 export RUSTC_LOG=error -export KANIFLAGS="--goto-c --ignore-global-asm --reachability=legacy" -export RUSTFLAGS="--kani-flags" + +RUST_FLAGS=( + "--kani-compiler" + "-Cpanic=abort" + "-Cllvm-args=--goto-c" + "-Cllvm-args=--ignore-global-asm" + "-Cllvm-args=--reachability=legacy" +) +export RUSTFLAGS="${RUST_FLAGS[@]}" export RUSTC="$KANI_DIR/target/kani/bin/kani-compiler" # Compile rust to iRep $WRAPPER cargo build --verbose -Z build-std --lib --target $TARGET diff --git a/src/os_hacks.rs b/src/os_hacks.rs index 7662645ffc33..c8102e272595 100644 --- a/src/os_hacks.rs +++ b/src/os_hacks.rs @@ -148,7 +148,8 @@ fn setup_nixos_patchelf(kani_dir: &Path) -> Result<()> { for filename in &["kani-compiler", "kani-driver"] { patch_interp(&bin.join(filename))?; } - for filename in &["cbmc", "goto-analyzer", "goto-cc", "goto-instrument", "symtab2gb"] { + for filename in &["cbmc", "goto-analyzer", "goto-cc", "goto-instrument", "kissat", "symtab2gb"] + { let file = bin.join(filename); patch_interp(&file)?; patch_rpath(&file)?; diff --git a/tests/assess-scan-test-scaffold/compile_error/Cargo.toml b/tests/assess-scan-test-scaffold/compile_error/Cargo.toml new file mode 100644 index 000000000000..1189e08d0956 --- /dev/null +++ b/tests/assess-scan-test-scaffold/compile_error/Cargo.toml @@ -0,0 +1,11 @@ +# Copyright Kani Contributors +# SPDX-License-Identifier: Apache-2.0 OR MIT + +[package] +name = "compile_error" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] diff --git a/tests/assess-scan-test-scaffold/compile_error/src/lib.rs b/tests/assess-scan-test-scaffold/compile_error/src/lib.rs new file mode 100644 
index 000000000000..2bad1be422f9 --- /dev/null +++ b/tests/assess-scan-test-scaffold/compile_error/src/lib.rs @@ -0,0 +1,7 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT + +/// Function with a compilation error +pub fn with_error(left: usize, right: u32) -> usize { + left + right +} diff --git a/tests/assess-scan-test-scaffold/manifest_error/Cargo.toml b/tests/assess-scan-test-scaffold/manifest_error/Cargo.toml new file mode 100644 index 000000000000..09b611dc645c --- /dev/null +++ b/tests/assess-scan-test-scaffold/manifest_error/Cargo.toml @@ -0,0 +1,12 @@ +# Copyright Kani Contributors +# SPDX-License-Identifier: Apache-2.0 OR MIT + +[package] +name = "manifest_error" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +unknown = { path="./does/not/exist" } diff --git a/tests/assess-scan-test-scaffold/manifest_error/src/lib.rs b/tests/assess-scan-test-scaffold/manifest_error/src/lib.rs new file mode 100644 index 000000000000..0058f8ca1b58 --- /dev/null +++ b/tests/assess-scan-test-scaffold/manifest_error/src/lib.rs @@ -0,0 +1,5 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT + +/// Nothing here +pub fn void() {} diff --git a/tests/cargo-kani/asm/global_error/doesnt_call_crate_with_global_asm.expected b/tests/cargo-kani/asm/global_error/doesnt_call_crate_with_global_asm.expected index 89d23a65d258..59aa1503d9a4 100644 --- a/tests/cargo-kani/asm/global_error/doesnt_call_crate_with_global_asm.expected +++ b/tests/cargo-kani/asm/global_error/doesnt_call_crate_with_global_asm.expected @@ -1,3 +1,2 @@ error: Crate crate_with_global_asm contains global ASM, which is not supported by Kani. Rerun with `--enable-unstable --ignore-global-asm` to suppress this error (**Verification results may be impacted**). 
-error: could not compile `crate_with_global_asm` due to previous error -Error: cargo exited with status exit status: 101 +error: could not compile `crate_with_global_asm` due to 2 previous errors diff --git a/tests/cargo-kani/assess-artifacts/expected b/tests/cargo-kani/assess-artifacts/expected index 54e0b3d4cbdc..6b61a4f56822 100644 --- a/tests/cargo-kani/assess-artifacts/expected +++ b/tests/cargo-kani/assess-artifacts/expected @@ -1,4 +1,4 @@ -Found 1 packages +Analyzed 1 packages ============================================ Unsupported feature | Crates | Instances | impacted | of use diff --git a/tests/cargo-kani/assess-workspace-artifacts/expected b/tests/cargo-kani/assess-workspace-artifacts/expected index 4b46613a5023..21d964340630 100644 --- a/tests/cargo-kani/assess-workspace-artifacts/expected +++ b/tests/cargo-kani/assess-workspace-artifacts/expected @@ -1,4 +1,4 @@ -Found 2 packages +Analyzed 2 packages ============================================ Unsupported feature | Crates | Instances | impacted | of use diff --git a/tests/cargo-kani/chrono_dep/Cargo.toml b/tests/cargo-kani/chrono_dep/Cargo.toml new file mode 100644 index 000000000000..3ca46a688159 --- /dev/null +++ b/tests/cargo-kani/chrono_dep/Cargo.toml @@ -0,0 +1,11 @@ +# Copyright Kani Contributors +# SPDX-License-Identifier: Apache-2.0 OR MIT +[package] +name = "chrono_dep" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +chrono = "=0.4.19" diff --git a/tests/ui/stubbing-flag/expected b/tests/cargo-kani/chrono_dep/main.expected similarity index 100% rename from tests/ui/stubbing-flag/expected rename to tests/cargo-kani/chrono_dep/main.expected diff --git a/tests/cargo-kani/chrono_dep/src/main.rs b/tests/cargo-kani/chrono_dep/src/main.rs new file mode 100644 index 000000000000..6b40e78850a2 --- /dev/null +++ b/tests/cargo-kani/chrono_dep/src/main.rs @@ -0,0 +1,10 @@ +// Copyright Kani 
Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT + +//! This test checks that the Kani compiler handles chrono crate which was +//! previously failing due to https://github.com/model-checking/kani/issues/1949 + +#[kani::proof] +fn main() { + assert!(1 + 1 == 2); +} diff --git a/tests/cargo-kani/no_std/Cargo.toml b/tests/cargo-kani/no_std/Cargo.toml new file mode 100644 index 000000000000..10c0b0b681b7 --- /dev/null +++ b/tests/cargo-kani/no_std/Cargo.toml @@ -0,0 +1,16 @@ +# Copyright Kani Contributors +# SPDX-License-Identifier: Apache-2.0 OR MIT +[package] +name = "no_std" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] + +[features] +std = [] + +[package.metadata.kani.flags] +features = ["std"] diff --git a/tests/cargo-kani/no_std/foo.expected b/tests/cargo-kani/no_std/foo.expected new file mode 100644 index 000000000000..7f5175082667 --- /dev/null +++ b/tests/cargo-kani/no_std/foo.expected @@ -0,0 +1 @@ +error: cannot find macro `__kani__workaround_core_assert` in this scope diff --git a/tests/cargo-kani/no_std/src/main.rs b/tests/cargo-kani/no_std/src/main.rs new file mode 100644 index 000000000000..0a2bff90fbd8 --- /dev/null +++ b/tests/cargo-kani/no_std/src/main.rs @@ -0,0 +1,20 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT + +//! This test checks that Kani handles assert in no_std environment which +//! currently doesn't work: +//! 
https://github.com/model-checking/kani/issues/2187) + +#![no_std] + +#[cfg(feature = "std")] +extern crate std; + +#[kani::proof] +fn foo() { + let x: i32 = kani::any(); + let y = 0; + std::debug_assert!(x + y == x, "Message"); +} + +fn main() {} diff --git a/tests/cargo-kani/simple-proof-annotation/main.expected b/tests/cargo-kani/simple-proof-annotation/main.expected index 6cf62a785b3e..1a0db0160d5c 100644 --- a/tests/cargo-kani/simple-proof-annotation/main.expected +++ b/tests/cargo-kani/simple-proof-annotation/main.expected @@ -1 +1 @@ -Error: A proof harness named main was not found +error: no harnesses matched the harness filter: `main` diff --git a/tests/cargo-kani/simple-visualize/Cargo.toml b/tests/cargo-kani/simple-visualize/Cargo.toml index 826eb3afc2d6..24f2576ca69f 100644 --- a/tests/cargo-kani/simple-visualize/Cargo.toml +++ b/tests/cargo-kani/simple-visualize/Cargo.toml @@ -10,4 +10,4 @@ edition = "2018" [workspace] [package.metadata.kani] -flags = {visualize = true} +flags = {enable-unstable = true, visualize = true} diff --git a/tests/cargo-kani/small-vec/src/lib.rs b/tests/cargo-kani/small-vec/src/lib.rs index 90f7f41023d2..45b07a345285 100644 --- a/tests/cargo-kani/small-vec/src/lib.rs +++ b/tests/cargo-kani/small-vec/src/lib.rs @@ -4,6 +4,7 @@ use smallvec::{smallvec, SmallVec}; #[kani::proof] +#[kani::unwind(4)] pub fn check_vec() { // Create small vec with three elements. 
let chars: SmallVec<[char; 3]> = smallvec![kani::any(), kani::any(), kani::any()]; diff --git a/tests/cargo-kani/stubbing-do-not-resolve/harness.expected b/tests/cargo-kani/stubbing-do-not-resolve/harness.expected index edb36ddee86a..e45663fe2800 100644 --- a/tests/cargo-kani/stubbing-do-not-resolve/harness.expected +++ b/tests/cargo-kani/stubbing-do-not-resolve/harness.expected @@ -1,5 +1,4 @@ -error: unable to resolve function/method: crate::other_crate2::mock -error: unable to resolve function/method: super::other_crate2::mock -error: unable to resolve function/method: self::other_crate2::mock -error: unable to resolve function/method: other_crate1::mock -error: could not compile `stubbing-do-not-resolve` due to 4 previous errors \ No newline at end of file +error: failed to resolve `crate::other_crate2::mock`: unable to find `other_crate2` inside module `stubbing_do_not_resolve` +error: failed to resolve `super::other_crate2::mock`: unable to find `other_crate2` inside module `stubbing_do_not_resolve` +error: failed to resolve `self::other_crate2::mock`: unable to find `other_crate2` inside module `my_mod` +error: failed to resolve `other_crate1::mock`: unable to find `mock` inside module `my_mod::other_crate1` diff --git a/tests/cargo-kani/stubbing-double-extern-path/crate_a/Cargo.toml b/tests/cargo-kani/stubbing-double-extern-path/crate_a/Cargo.toml new file mode 100644 index 000000000000..f040aa2dd400 --- /dev/null +++ b/tests/cargo-kani/stubbing-double-extern-path/crate_a/Cargo.toml @@ -0,0 +1,10 @@ +# Copyright Kani Contributors +# SPDX-License-Identifier: Apache-2.0 OR MIT +[package] +name = "crate_a" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] diff --git a/tests/cargo-kani/stubbing-double-extern-path/crate_a/src/lib.rs b/tests/cargo-kani/stubbing-double-extern-path/crate_a/src/lib.rs new file mode 100644 index 000000000000..29506767e847 --- 
/dev/null +++ b/tests/cargo-kani/stubbing-double-extern-path/crate_a/src/lib.rs @@ -0,0 +1,7 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT +//! Define `assert_true` function. + +pub fn assert_true(b: bool) { + assert!(b); +} diff --git a/tests/cargo-kani/stubbing-double-extern-path/crate_b/Cargo.toml b/tests/cargo-kani/stubbing-double-extern-path/crate_b/Cargo.toml new file mode 100644 index 000000000000..7298c3a4ca98 --- /dev/null +++ b/tests/cargo-kani/stubbing-double-extern-path/crate_b/Cargo.toml @@ -0,0 +1,11 @@ +# Copyright Kani Contributors +# SPDX-License-Identifier: Apache-2.0 OR MIT +[package] +name = "crate_b" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +crate_a = { path = "../crate_a" } diff --git a/tests/cargo-kani/stubbing-double-extern-path/crate_b/src/lib.rs b/tests/cargo-kani/stubbing-double-extern-path/crate_b/src/lib.rs new file mode 100644 index 000000000000..918b96f8df31 --- /dev/null +++ b/tests/cargo-kani/stubbing-double-extern-path/crate_b/src/lib.rs @@ -0,0 +1,8 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT +//! Define `assert_false` function and export `assert_true` as well. 
+pub use crate_a::*; + +pub fn assert_false(b: bool) { + assert!(!b); +} diff --git a/tests/cargo-kani/stubbing-double-extern-path/harness/Cargo.toml b/tests/cargo-kani/stubbing-double-extern-path/harness/Cargo.toml new file mode 100644 index 000000000000..4d26da916be8 --- /dev/null +++ b/tests/cargo-kani/stubbing-double-extern-path/harness/Cargo.toml @@ -0,0 +1,16 @@ +# Copyright Kani Contributors +# SPDX-License-Identifier: Apache-2.0 OR MIT +[package] +name = "harness" +version = "0.1.0" +edition = "2021" +description = "Should test invoking double extern but found cycle issue" +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +crate_b = { path = "../crate_b" } + +[package.metadata.kani.flags] +enable-unstable = true +enable-stubbing = true +harness = ["check_inverted"] diff --git a/tests/cargo-kani/stubbing-double-extern-path/harness/expected b/tests/cargo-kani/stubbing-double-extern-path/harness/expected new file mode 100644 index 000000000000..178d9ab00302 --- /dev/null +++ b/tests/cargo-kani/stubbing-double-extern-path/harness/expected @@ -0,0 +1 @@ +error[E0391]: cycle detected when optimizing MIR for `crate_a::assert_true` diff --git a/tests/cargo-kani/stubbing-double-extern-path/harness/src/lib.rs b/tests/cargo-kani/stubbing-double-extern-path/harness/src/lib.rs new file mode 100644 index 000000000000..ff33cd77fede --- /dev/null +++ b/tests/cargo-kani/stubbing-double-extern-path/harness/src/lib.rs @@ -0,0 +1,17 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT +use crate_b::*; + +#[kani::proof] +fn check() { + assert_true(true); + assert_false(false); +} + +#[kani::proof] +#[kani::stub(::crate_b::assert_true, ::crate_b::assert_false)] +#[kani::stub(assert_false, assert_true)] +fn check_inverted() { + assert_true(false); + assert_false(true); +} diff --git a/tests/cargo-ui/assess-error/Cargo.toml b/tests/cargo-ui/assess-error/Cargo.toml new file mode 100644 
index 000000000000..e82e43e9ccca --- /dev/null +++ b/tests/cargo-ui/assess-error/Cargo.toml @@ -0,0 +1,12 @@ +# Copyright Kani Contributors +# SPDX-License-Identifier: Apache-2.0 OR MIT + +[package] +name = "compilation-error" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[package.metadata.kani] +flags = { assess=true, enable-unstable=true } diff --git a/tests/cargo-ui/assess-error/expected b/tests/cargo-ui/assess-error/expected new file mode 100644 index 000000000000..70754ddea192 --- /dev/null +++ b/tests/cargo-ui/assess-error/expected @@ -0,0 +1,2 @@ +error: Failed to compile lib `compilation-error` +error: Failed to assess project: Failed to build all targets diff --git a/tests/cargo-ui/assess-error/src/lib.rs b/tests/cargo-ui/assess-error/src/lib.rs new file mode 100644 index 000000000000..e37051854821 --- /dev/null +++ b/tests/cargo-ui/assess-error/src/lib.rs @@ -0,0 +1,19 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT +//! Check that the compilation error detection works as expected +use std::option; + +pub fn add(left: usize, right: u32) -> usize { + left + right +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn it_works() { + let result = add(2, 2); + assert_eq!(result, 4); + } +} diff --git a/tests/cargo-ui/function-stubbing-trait-mismatch/main.expected b/tests/cargo-ui/function-stubbing-trait-mismatch/main.expected index c0470a56de35..8093e4a76a10 100644 --- a/tests/cargo-ui/function-stubbing-trait-mismatch/main.expected +++ b/tests/cargo-ui/function-stubbing-trait-mismatch/main.expected @@ -1,3 +1,2 @@ error: `&str` doesn't implement `DoIt`. The function `foo` cannot be stubbed by `bar` due to generic bounds not being met. error: `&str` doesn't implement `std::cmp::PartialEq`. The function `foo` cannot be stubbed by `bar` due to generic bounds not being met. 
-error: could not compile `function-stubbing-trait-mismatch` due to 2 previous errors \ No newline at end of file diff --git a/tests/cargo-ui/multiple-harnesses/Cargo.toml b/tests/cargo-ui/multiple-harnesses/Cargo.toml new file mode 100644 index 000000000000..1e3c9d87f8dc --- /dev/null +++ b/tests/cargo-ui/multiple-harnesses/Cargo.toml @@ -0,0 +1,11 @@ +# Copyright Kani Contributors +# SPDX-License-Identifier: Apache-2.0 OR MIT +[package] +name = "harnesses" +version = "0.1.0" +edition = "2021" + +[dependencies] + +[package.metadata.kani.flags] +harness = ["foo", "bar"] diff --git a/tests/cargo-ui/multiple-harnesses/expected b/tests/cargo-ui/multiple-harnesses/expected new file mode 100644 index 000000000000..d31fc64ada83 --- /dev/null +++ b/tests/cargo-ui/multiple-harnesses/expected @@ -0,0 +1,3 @@ +Checking harness bar... +Checking harness foo... +Complete - 2 successfully verified harnesses, 0 failures, 2 total. diff --git a/tests/cargo-ui/multiple-harnesses/src/lib.rs b/tests/cargo-ui/multiple-harnesses/src/lib.rs new file mode 100644 index 000000000000..cab063cd1b25 --- /dev/null +++ b/tests/cargo-ui/multiple-harnesses/src/lib.rs @@ -0,0 +1,14 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT +// +//! This test checks if we can specify multiple harnesses in the Cargo.toml file. 
+ +#[kani::proof] +pub fn foo() { + assert_eq!(1 + 2, 3); +} + +#[kani::proof] +pub fn bar() { + assert_ne!(2, 3); +} diff --git a/tests/cargo-ui/supported-lib-types/lib-rlib/Cargo.toml b/tests/cargo-ui/supported-lib-types/lib-rlib/Cargo.toml index 7434f1c27f5f..1b4066c265ef 100644 --- a/tests/cargo-ui/supported-lib-types/lib-rlib/Cargo.toml +++ b/tests/cargo-ui/supported-lib-types/lib-rlib/Cargo.toml @@ -10,3 +10,16 @@ description = "Test that Kani correctly handle supported crate types" name = "lib" crate-type = ["lib", "rlib"] path = "../src/lib.rs" + +[package.metadata.kani.flags] +# This test doesn't work with the cache due to naming conflict caused by +# declaring ["lib", "rlib"] which is in fact redundant. +# See https://github.com/rust-lang/cargo/issues/6313 for more details. +# +# This still works for a fresh build and it only prints a warning. Thus, we +# force rebuild for now. +# +# Note that support for this case is deprecated. AFAIK, there is no plan to fix +# cargo build cache for cases like this. Until then, we might as well check that +# our support level matches cargo's. +force-build = true diff --git a/tests/cargo-ui/unsupported-lib-types/proc-macro/expected b/tests/cargo-ui/unsupported-lib-types/proc-macro/expected index 7b04d6407dc1..2a7badb42720 100644 --- a/tests/cargo-ui/unsupported-lib-types/proc-macro/expected +++ b/tests/cargo-ui/unsupported-lib-types/proc-macro/expected @@ -1,2 +1,2 @@ Skipped the following unsupported targets: 'lib'. -Error: No supported targets were found. +error: No supported targets were found. 
diff --git a/tests/cargo-ui/unsupported-lib-types/rlib-pmacro/expected b/tests/cargo-ui/unsupported-lib-types/rlib-pmacro/expected index 70755074c1bf..091b6c24e140 100644 --- a/tests/cargo-ui/unsupported-lib-types/rlib-pmacro/expected +++ b/tests/cargo-ui/unsupported-lib-types/rlib-pmacro/expected @@ -1,2 +1 @@ -error: Unrecognized option: 'reachability' error: could not compile `unsupported-lib` diff --git a/tests/cargo-ui/verbose-cmds/expected b/tests/cargo-ui/verbose-cmds/expected index b77548218d38..a15f3ba25b6e 100644 --- a/tests/cargo-ui/verbose-cmds/expected +++ b/tests/cargo-ui/verbose-cmds/expected @@ -1,5 +1,4 @@ -KANIFLAGS= -RUSTC= +CARGO_ENCODED_RUSTFLAGS= cargo rustc Running: `goto-cc Running: `goto-instrument diff --git a/tests/expected/function-stubbing-no-harness/expected b/tests/expected/function-stubbing-no-harness/expected index 8e46cf772653..47dea3fe757d 100644 --- a/tests/expected/function-stubbing-no-harness/expected +++ b/tests/expected/function-stubbing-no-harness/expected @@ -1 +1 @@ -Error: A proof harness named foo was not found \ No newline at end of file +error: no harnesses matched the harness filter: `foo` diff --git a/tests/expected/function-stubbing-warning/expected b/tests/expected/function-stubbing-warning/expected index e0d1ef736228..5a5f9f608542 100644 --- a/tests/expected/function-stubbing-warning/expected +++ b/tests/expected/function-stubbing-warning/expected @@ -1,2 +1,5 @@ -Stubbing is not enabled; attribute `kani::stub` will be ignored -Failed Checks: assertion failed: foo() == 42 \ No newline at end of file +Checking harness main... 
+Failed Checks: assertion failed: foo() == 42 + +warning: harness `main` contained stubs which were ignored.\ +To enable stubbing, pass options `--enable-unstable --enable-stubbing` diff --git a/tests/expected/stubbing-ambiguous-path/expected b/tests/expected/stubbing-ambiguous-path/expected index 170d0ce6095a..387fee15b3f0 100644 --- a/tests/expected/stubbing-ambiguous-path/expected +++ b/tests/expected/stubbing-ambiguous-path/expected @@ -1,5 +1,3 @@ -error: glob imports in local module `main` make it impossible to unambiguously resolve path; the possibilities are:\ - mod1::foo\ - mod2::foo - -error: unable to resolve function/method: foo \ No newline at end of file +error: failed to resolve `foo`: `foo` is ambiguous because of multiple glob imports in module `main`. Found:\ +mod2::foo\ +mod1::foo\ diff --git a/tests/kani/Assume/main.rs b/tests/kani/Assume/main.rs index d4ca44a920e3..4b798e087ef7 100644 --- a/tests/kani/Assume/main.rs +++ b/tests/kani/Assume/main.rs @@ -10,6 +10,7 @@ fn main() { #[kani::proof] fn verify_any_where() { - let i: i32 = kani::any_where(|x| *x < 10, "Only single digit values are legal"); + // Only single digit values are legal + let i: i32 = kani::any_where(|x| *x < 10); assert!(i < 20); } diff --git a/tests/kani/Drop/drop_after_moving_across_channel.rs b/tests/kani/Drop/drop_after_moving_across_channel.rs new file mode 100644 index 000000000000..f06f7b3fa1e7 --- /dev/null +++ b/tests/kani/Drop/drop_after_moving_across_channel.rs @@ -0,0 +1,32 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT + +//! This test checks whether dropping objects passed through +//! std::sync::mpsc::channel is handled. +//! This test only passes on MacOS today, so we duplicate the test for now. 
+#![cfg(target_os = "macos")] + +use std::sync::mpsc::*; + +static mut CELL: i32 = 0; + +struct DropSetCELLToOne {} + +impl Drop for DropSetCELLToOne { + fn drop(&mut self) { + unsafe { + CELL = 1; + } + } +} + +#[kani::unwind(1)] +#[kani::proof] +fn main() { + { + let (send, recv) = channel::(); + send.send(DropSetCELLToOne {}).unwrap(); + let _to_drop: DropSetCELLToOne = recv.recv().unwrap(); + } + assert_eq!(unsafe { CELL }, 1, "Drop should be called"); +} diff --git a/tests/kani/Drop/fixme_drop_after_moving_across_channel.rs b/tests/kani/Drop/fixme_drop_after_moving_across_channel.rs index 8ab694ef98e3..a66d7df7cf36 100644 --- a/tests/kani/Drop/fixme_drop_after_moving_across_channel.rs +++ b/tests/kani/Drop/fixme_drop_after_moving_across_channel.rs @@ -9,27 +9,38 @@ // kani::unwind(2) takes longer than 10m on a M1 Mac. For details, // please see: https://github.com/model-checking/kani/issues/1286 -use std::sync::mpsc::*; +#[cfg(target_os = "linux")] +mod fixme_harness { + use std::sync::mpsc::*; -static mut CELL: i32 = 0; + static mut CELL: i32 = 0; -struct DropSetCELLToOne {} + struct DropSetCELLToOne {} -impl Drop for DropSetCELLToOne { - fn drop(&mut self) { - unsafe { - CELL = 1; + impl Drop for DropSetCELLToOne { + fn drop(&mut self) { + unsafe { + CELL = 1; + } } } + + #[kani::unwind(1)] + #[kani::proof] + fn main() { + { + let (send, recv) = channel::(); + send.send(DropSetCELLToOne {}).unwrap(); + let _to_drop: DropSetCELLToOne = recv.recv().unwrap(); + } + assert_eq!(unsafe { CELL }, 1, "Drop should be called"); + } } -#[kani::unwind(1)] -#[kani::proof] -fn main() { - { - let (send, recv) = channel::(); - send.send(DropSetCELLToOne {}).unwrap(); - let _to_drop: DropSetCELLToOne = recv.recv().unwrap(); +#[cfg(target_os = "macos")] +mod forced_failure { + #[kani::proof] + fn just_panic() { + panic!("This test only fails on linux"); } - assert_eq!(unsafe { CELL }, 1, "Drop should be called"); } diff --git a/tests/kani/Loops/loop_free.rs 
b/tests/kani/Loops/loop_free.rs new file mode 100644 index 000000000000..75664af8f25c --- /dev/null +++ b/tests/kani/Loops/loop_free.rs @@ -0,0 +1,20 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT +// +//! Ensure that Kani identifies that there is not loop in this code. +//! This was related to https://github.com/model-checking/kani/issues/2164 +fn loop_free(b: bool, other: T) -> T { + match b { + true => T::default(), + false => other, + } +} + +/// Set the unwind to 1 so this test will fail instead of running forever. +#[kani::proof] +#[kani::unwind(1)] +fn check_no_loop() { + let b: bool = kani::any(); + let result = loop_free(b, 5); + assert!(result == 5 || (b && result == 0)) +} diff --git a/tests/kani/Loops/loop_with_drop.rs b/tests/kani/Loops/loop_with_drop.rs new file mode 100644 index 000000000000..6d2669f8af7a --- /dev/null +++ b/tests/kani/Loops/loop_with_drop.rs @@ -0,0 +1,24 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT +// +//! Ensure that Kani correctly unwinds the loop with drop instructions. +//! This was related to https://github.com/model-checking/kani/issues/2164 + +/// Dummy function with a for loop that only runs 2 iterations. +fn bounded_loop(b: bool, other: T) -> T { + let mut ret = other; + for i in 0..2 { + ret = match b { + true => T::default(), + false => ret, + }; + } + return ret; +} + +/// Harness that should succeed. We add a conservative loop bound. +#[kani::proof] +#[kani::unwind(3)] +fn harness() { + let _ = bounded_loop(kani::any(), ()); +} diff --git a/tests/kani/Stubbing/glob_cycle.rs b/tests/kani/Stubbing/glob_cycle.rs new file mode 100644 index 000000000000..42600809c7bf --- /dev/null +++ b/tests/kani/Stubbing/glob_cycle.rs @@ -0,0 +1,30 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT +// +// kani-flags: --harness check_stub --enable-unstable --enable-stubbing +//! Test that stub can solve glob cycles. 
+ +pub mod mod_a { + pub use crate::mod_b::*; + pub use crate::*; + + /// This method always fail. + pub fn method_a() { + noop(); + panic!(); + } +} + +pub mod mod_b { + pub use crate::mod_a::*; + + /// This harness replace `method_a` which always fail by `method_b` that should always succeed. + #[kani::proof] + #[kani::stub(mod_a::method_a, mod_b::noop)] + pub fn check_stub() { + method_a(); + } + + /// This method always succeed. + pub fn noop() {} +} diff --git a/tests/kani/Stubbing/glob_path.rs b/tests/kani/Stubbing/glob_path.rs new file mode 100644 index 000000000000..17a9e7ed4264 --- /dev/null +++ b/tests/kani/Stubbing/glob_path.rs @@ -0,0 +1,31 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT +// +// kani-flags: --harness check_stub --enable-unstable --enable-stubbing +//! Test that stub can solve glob cycles even when the path expands the cycle. + +pub mod mod_a { + pub use crate::mod_b::*; + pub use crate::*; + + /// This method always fails. + pub fn method_a() { + mod_a::mod_b::mod_a::mod_b::noop(); + panic!(); + } +} + +pub mod mod_b { + pub use crate::mod_a::*; + pub use crate::*; + + /// This harness replaces `method_a` (always fails), by `method_b` (always succeeds). + #[kani::proof] + #[kani::stub(mod_a::mod_b::mod_a::method_a, mod_b::noop)] + pub fn check_stub() { + method_a(); + } + + /// This method always succeeds. 
+ pub fn noop() {} +} diff --git a/tests/no_unsound_experiments/ZeroInit/expected b/tests/no_unsound_experiments/ZeroInit/expected index 4f837db2f7a8..b157b6a259f3 100644 --- a/tests/no_unsound_experiments/ZeroInit/expected +++ b/tests/no_unsound_experiments/ZeroInit/expected @@ -1 +1,3 @@ -error: Found argument '--unsound-experiment-zero-init-vars' which wasn't expected, or isn't valid in this context +error: unexpected argument '--unsound-experiment-zero-init-vars' found + +note: to pass '--unsound-experiment-zero-init-vars' as a value, use '-- --unsound-experiment-zero-init-vars' diff --git a/tests/perf/btreeset/insert_same/Cargo.toml b/tests/perf/btreeset/insert_same/Cargo.toml new file mode 100644 index 000000000000..0a4e0f7ee037 --- /dev/null +++ b/tests/perf/btreeset/insert_same/Cargo.toml @@ -0,0 +1,11 @@ +# Copyright Kani Contributors +# SPDX-License-Identifier: Apache-2.0 OR MIT + +[package] +name = "insert_same" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] diff --git a/tests/perf/btreeset/insert_same/expected b/tests/perf/btreeset/insert_same/expected new file mode 100644 index 000000000000..34c886c358cb --- /dev/null +++ b/tests/perf/btreeset/insert_same/expected @@ -0,0 +1 @@ +VERIFICATION:- SUCCESSFUL diff --git a/tests/perf/btreeset/insert_same/src/main.rs b/tests/perf/btreeset/insert_same/src/main.rs new file mode 100644 index 000000000000..d77dd0644cde --- /dev/null +++ b/tests/perf/btreeset/insert_same/src/main.rs @@ -0,0 +1,20 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT + +//! This test checks the performance of pushing the same element onto a `BTreeSet` +//! The test is from +//! With CBMC's default solver (minisat), it takes ~517 seconds +//! 
With Kissat 3.0.0, it takes ~22 seconds + +use std::collections::BTreeSet; + +#[kani::proof] +#[kani::unwind(3)] +#[kani::solver(minisat)] +fn main() { + let mut set: BTreeSet = BTreeSet::new(); + let x = kani::any(); + set.insert(x); + set.insert(x); + assert!(set.len() == 1); +} diff --git a/tests/perf/s2n-quic b/tests/perf/s2n-quic index a15e731b84c7..5c9f5a661877 160000 --- a/tests/perf/s2n-quic +++ b/tests/perf/s2n-quic @@ -1 +1 @@ -Subproject commit a15e731b84c70d93fbf7e459dd6bb246796091d2 +Subproject commit 5c9f5a661877d27eddbcfed1c64c6186c97adba0 diff --git a/tests/perf/vec/vec/src/main.rs b/tests/perf/vec/vec/src/main.rs index c3feaa48b6d6..3103ed4a8f91 100644 --- a/tests/perf/vec/vec/src/main.rs +++ b/tests/perf/vec/vec/src/main.rs @@ -8,6 +8,7 @@ #[kani::proof] #[kani::unwind(5)] +#[kani::solver(minisat)] fn main() { let v1: Vec> = vec![vec![1], vec![]]; diff --git a/tests/script-based-pre/build-cache-bin/bin/Cargo.toml b/tests/script-based-pre/build-cache-bin/bin/Cargo.toml new file mode 100644 index 000000000000..53ef8b4ced03 --- /dev/null +++ b/tests/script-based-pre/build-cache-bin/bin/Cargo.toml @@ -0,0 +1,6 @@ +# Copyright Kani Contributors +# SPDX-License-Identifier: Apache-2.0 OR MIT +[package] +name = "bin" +version = "0.1.0" +edition = "2021" diff --git a/tests/script-based-pre/build-cache-bin/bin/src/lib.rs b/tests/script-based-pre/build-cache-bin/bin/src/lib.rs new file mode 100644 index 000000000000..8f4468728cac --- /dev/null +++ b/tests/script-based-pre/build-cache-bin/bin/src/lib.rs @@ -0,0 +1,18 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT +#[kani::proof] +fn cover_bool() { + match kani::any() { + true => kani::cover!(true, "true"), + false => kani::cover!(true, "false"), + } +} + +#[kani::proof] +fn cover_option() { + match kani::any() { + Some(true) => kani::cover!(true, "true"), + Some(false) => kani::cover!(true, "false"), + None => kani::cover!(true, "none"), + } +} diff --git 
a/tests/script-based-pre/build-cache-bin/cache_works.expected b/tests/script-based-pre/build-cache-bin/cache_works.expected new file mode 100644 index 000000000000..e67a9b7f7fc6 --- /dev/null +++ b/tests/script-based-pre/build-cache-bin/cache_works.expected @@ -0,0 +1,21 @@ +Initial compilation +target/initial.log:Compiled 1 crates +target/initial.log:No harness verified +Re-execute the same command +target/same.log:Compiled 0 crates +target/same.log:No harness verified +Run with new arg that affects kani-driver workflow only +target/driver_opt.log:Compiled 0 crates +target/driver_opt.log:Checking harness cover_option... +target/driver_opt.log:Checking harness cover_bool... +target/driver_opt.log:Complete - 2 successfully verified harnesses, 0 failures, 2 total. +Run with a new argument that affects compilation +target/disable_checks.log:Compiled 1 crates +target/disable_checks.log:Checking harness cover_option... +target/disable_checks.log:Checking harness cover_bool... +target/disable_checks.log:Complete - 2 successfully verified harnesses, 0 failures, 2 total. +Run with new dependency +target/new_dep.log:Compiled 2 crates +target/new_dep.log:Checking harness cover_option... +target/new_dep.log:Checking harness cover_bool... +target/new_dep.log:Complete - 2 successfully verified harnesses, 0 failures, 2 total. diff --git a/tests/script-based-pre/build-cache-bin/cache_works.sh b/tests/script-based-pre/build-cache-bin/cache_works.sh new file mode 100755 index 000000000000..22f102392a6a --- /dev/null +++ b/tests/script-based-pre/build-cache-bin/cache_works.sh @@ -0,0 +1,71 @@ +#!/usr/bin/env bash +# Copyright Kani Contributors +# SPDX-License-Identifier: Apache-2.0 OR MIT + +# Checks situations where running kani multiple times will work as expected when +# the target crate is binary. +# +# The following checks should not trigger recompilation. +# - Exact same input being invoked a second time. 
+# - Different options that do not affect the compilation, only the Kani workflow. +# While the following should recompile the target. +# - Pass a new argument that affects compilation +# - Add a dependency +set -e +set -u + +ORIG=bin +OUT_DIR=target +MANIFEST=${OUT_DIR}/${ORIG}/Cargo.toml + +# Expects two arguments: "kani arguments" "output_file" +function check_kani { + local args=$1 + local log_file="${OUT_DIR}/$2" + # Run kani with the given arguments + if [ -z "${args}" ] + then + cargo kani --manifest-path "${MANIFEST}" --target-dir "${OUT_DIR}" \ + 2>&1 | tee "${log_file}" + else + cargo kani --manifest-path "${MANIFEST}" --target-dir "${OUT_DIR}" \ + "${args}" 2>&1 | tee "${log_file}" + fi + + # Print information about the generated log file. + # Check for occurrences of "Compiling" messages in the log files + local compiled=$(grep -c "Compiling" ${log_file}) + echo "${log_file}:Compiled ${compiled} crates" + + # Check which harnesses were verified + grep "Checking harness" -H ${log_file} || echo "${log_file}:No harness verified" + + # Check the verification summary + grep "successfully verified harnesses" -H ${log_file} || true +} + +# Ensure output folder is clean +rm -rf ${OUT_DIR} +mkdir -p ${OUT_DIR} +# Move the original source to the output folder since it will be modified +cp -r ${ORIG} ${OUT_DIR} + +echo "Initial compilation" +check_kani --only-codegen initial.log + +echo "Re-execute the same command" +check_kani --only-codegen same.log + +echo "Run with new arg that affects kani-driver workflow only" +check_kani "" driver_opt.log + +echo "Run with a new argument that affects compilation" +check_kani --no-assertion-reach-checks disable_checks.log + +echo "Run with new dependency" +cargo new --lib ${OUT_DIR}/new_dep +cargo add new_dep --manifest-path ${MANIFEST} --path ${OUT_DIR}/new_dep +check_kani --no-assertion-reach-checks new_dep.log + +# Try to leave a clean output folder at the end +rm -rf ${OUT_DIR} diff --git 
a/tests/script-based-pre/build-cache-bin/config.yml b/tests/script-based-pre/build-cache-bin/config.yml new file mode 100644 index 000000000000..a5e5035b11f4 --- /dev/null +++ b/tests/script-based-pre/build-cache-bin/config.yml @@ -0,0 +1,4 @@ +# Copyright Kani Contributors +# SPDX-License-Identifier: Apache-2.0 OR MIT +script: cache_works.sh +expected: cache_works.expected diff --git a/tests/script-based-pre/build-cache-dirty/config.yml b/tests/script-based-pre/build-cache-dirty/config.yml new file mode 100644 index 000000000000..dc1f964977b4 --- /dev/null +++ b/tests/script-based-pre/build-cache-dirty/config.yml @@ -0,0 +1,4 @@ +# Copyright Kani Contributors +# SPDX-License-Identifier: Apache-2.0 OR MIT +script: rebuild.sh +expected: rebuild.expected diff --git a/tests/script-based-pre/build-cache-dirty/rebuild.expected b/tests/script-based-pre/build-cache-dirty/rebuild.expected new file mode 100644 index 000000000000..31b45fd8b3ee --- /dev/null +++ b/tests/script-based-pre/build-cache-dirty/rebuild.expected @@ -0,0 +1,23 @@ +Initial compilation +omplete - 2 successfully verified harnesses, 0 failures, 2 total. +target/initial.log:Compiled 2 crates +target/initial.log:Checking harness check_u8_i16... +target/initial.log:Checking harness check_u8_u32... +target/initial.log:Complete - 2 successfully verified harnesses, 0 failures, 2 total. +Run with a new argument that affects compilation +target/enable_checks.log:Compiled 2 crates +target/enable_checks.log:Checking harness check_u8_i16... +target/enable_checks.log:Checking harness check_u8_u32... +target/enable_checks.log:Complete - 2 successfully verified harnesses, 0 failures, 2 total. +Run after change to the source code +target/changed_src.log:Compiled 1 crates +target/changed_src.log:Checking harness noop_check... +target/changed_src.log:Checking harness check_u8_i16... +target/changed_src.log:Checking harness check_u8_u32... 
+target/changed_src.log:Complete - 3 successfully verified harnesses, 0 failures, 3 total. +Run with new dependency +target/new_dep.log:Compiled 2 crates +target/new_dep.log:Checking harness noop_check... +target/new_dep.log:Checking harness check_u8_i16... +target/new_dep.log:Checking harness check_u8_u32... +target/new_dep.log:Complete - 3 successfully verified harnesses, 0 failures, 3 total. diff --git a/tests/script-based-pre/build-cache-dirty/rebuild.sh b/tests/script-based-pre/build-cache-dirty/rebuild.sh new file mode 100755 index 000000000000..e9e800b96287 --- /dev/null +++ b/tests/script-based-pre/build-cache-dirty/rebuild.sh @@ -0,0 +1,68 @@ +#!/usr/bin/env bash +# Copyright Kani Contributors +# SPDX-License-Identifier: Apache-2.0 OR MIT + +# Checks situations where running kani multiple times should trigger a new build +# The cases we cover here are: +# - Pass a new argument that affects compilation +# - Change the source code +# - Add a dependency +# Note: This should run in the folder where the script is. + +OUT_DIR=target +MANIFEST=${OUT_DIR}/target_lib/Cargo.toml +LIB_SRC=${OUT_DIR}/target_lib/src/lib.rs + +# Expects two arguments: "kani arguments" "output_file" +function check_kani { + local args=$1 + local log_file="${OUT_DIR}/$2" + # Run kani with the given arguments + if [ -z "${args}" ] + then + cargo kani --manifest-path "${MANIFEST}" --target-dir "${OUT_DIR}" \ + 2>&1 | tee "${log_file}" + else + cargo kani --manifest-path "${MANIFEST}" --target-dir "${OUT_DIR}" \ + "${args}" 2>&1 | tee "${log_file}" + fi + + # Print information about the generated log file. 
+ # Check for occurrences of "Compiling" messages in the log files + local compiled=$(grep -c "Compiling" ${log_file}) + echo "${log_file}:Compiled ${compiled} crates" + + # Check which harnesses were verified + grep "Checking harness" -H ${log_file} || echo "${log_file}:No harness verified" + + # Check the verification summary + grep "successfully verified harnesses" -H ${log_file} || true +} + +# Ensure output folder is clean +rm -rf ${OUT_DIR} +mkdir -p ${OUT_DIR} + +# Copy the project so we don't make changes to the source code +cp -r target_lib ${OUT_DIR} + +echo "Initial compilation" +check_kani --no-assertion-reach-checks initial.log + +echo "Run with a new argument that affects compilation" +check_kani "" enable_checks.log + +echo "Run after change to the source code" +echo ' +#[kani::proof] +fn noop_check() {} +' >> ${LIB_SRC} +check_kani "" changed_src.log + +echo "Run with new dependency" +cargo new --lib ${OUT_DIR}/new_dep +cargo add new_dep --manifest-path ${MANIFEST} --path ${OUT_DIR}/new_dep +check_kani "" new_dep.log + +# Try to leave a clean output folder at the end +rm -rf ${OUT_DIR} diff --git a/tests/script-based-pre/build-cache-dirty/target_lib/Cargo.toml b/tests/script-based-pre/build-cache-dirty/target_lib/Cargo.toml new file mode 100644 index 000000000000..e2a176e022ef --- /dev/null +++ b/tests/script-based-pre/build-cache-dirty/target_lib/Cargo.toml @@ -0,0 +1,11 @@ +# Copyright Kani Contributors +# SPDX-License-Identifier: Apache-2.0 OR MIT +[package] +name = "target_lib" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +either = "1.8" diff --git a/tests/script-based-pre/build-cache-dirty/target_lib/src/lib.rs b/tests/script-based-pre/build-cache-dirty/target_lib/src/lib.rs new file mode 100644 index 000000000000..fa495defdaaa --- /dev/null +++ b/tests/script-based-pre/build-cache-dirty/target_lib/src/lib.rs @@ -0,0 +1,19 @@ +// 
Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT +//! We don't use any of our dependencies to keep the test fast + +#[kani::proof] +fn check_u8_u32() { + let before: u8 = kani::any(); + let temp = before as u32; + let after: u8 = temp.try_into().unwrap(); + assert_eq!(after, before); +} + +#[kani::proof] +fn check_u8_i16() { + let before: u8 = kani::any(); + let temp = before as i16; + let after: u8 = temp.try_into().unwrap(); + assert_eq!(after, before); +} diff --git a/tests/script-based-pre/build-cache-fresh/cache_works.expected b/tests/script-based-pre/build-cache-fresh/cache_works.expected new file mode 100644 index 000000000000..f283bf6fdff0 --- /dev/null +++ b/tests/script-based-pre/build-cache-fresh/cache_works.expected @@ -0,0 +1,16 @@ +Initial compilation +target/initial.log:Compiled 1 crates +target/initial.log:No harness verified +Re-execute the same command +target/same.log:Compiled 0 crates +target/same.log:No harness verified +Run with new arg that affects kani-driver workflow only +target/driver_opt.log:Compiled 0 crates +target/driver_opt.log:Checking harness cover_option... +target/driver_opt.log:Checking harness cover_bool... +target/driver_opt.log:Complete - 2 successfully verified harnesses, 0 failures, 2 total. +Run with a new cbmc option +target/cbmc_opt.log:Compiled 0 crates +target/cbmc_opt.log:Checking harness cover_option... +target/cbmc_opt.log:Checking harness cover_bool... +target/cbmc_opt.log:Complete - 2 successfully verified harnesses, 0 failures, 2 total. 
diff --git a/tests/script-based-pre/build-cache-fresh/cache_works.sh b/tests/script-based-pre/build-cache-fresh/cache_works.sh new file mode 100755 index 000000000000..558437f0973e --- /dev/null +++ b/tests/script-based-pre/build-cache-fresh/cache_works.sh @@ -0,0 +1,57 @@ +#!/usr/bin/env bash +# Copyright Kani Contributors +# SPDX-License-Identifier: Apache-2.0 OR MIT + +# Checks situations where running kani multiple times will not trigger a recompilation +# The cases we cover here are: +# - Exact same input being invoked 2x. +# - Different options that do not affect the compilation, only the Kani workflow. +# - Different options that do not affect the compilation, only the CBMC execution. + +MANIFEST=lib/Cargo.toml +OUT_DIR=target + +# Expects two arguments: "kani arguments" "output_file" +function check_kani { + local args=$1 + local log_file="${OUT_DIR}/$2" + # Run kani with the given arguments + if [ -z "${args}" ] + then + cargo kani --manifest-path "${MANIFEST}" --target-dir "${OUT_DIR}" \ + 2>&1 | tee "${log_file}" + else + cargo kani --manifest-path "${MANIFEST}" --target-dir "${OUT_DIR}" \ + "${args}" 2>&1 | tee "${log_file}" + fi + + # Print information about the generated log file. 
+ # Check for occurrences of "Compiling" messages in the log files + local compiled=$(grep -c "Compiling" ${log_file}) + echo "${log_file}:Compiled ${compiled} crates" + + # Check which harnesses were verified + grep "Checking harness" -H ${log_file} || echo "${log_file}:No harness verified" + + # Check the verification summary + grep "successfully verified harnesses" -H ${log_file} || true +} + +# Ensure output folder is clean +rm -rf ${OUT_DIR} +mkdir -p ${OUT_DIR} + +echo "Initial compilation" +check_kani --only-codegen initial.log + +echo "Re-execute the same command" +check_kani --only-codegen same.log + +echo "Run with new arg that affects kani-driver workflow only" +check_kani "" driver_opt.log + +echo "Run with a new cbmc option" +check_kani --no-default-checks cbmc_opt.log + +# Try to leave a clean output folder at the end +rm -rf ${OUT_DIR} diff --git a/tests/script-based-pre/build-cache-fresh/config.yml b/tests/script-based-pre/build-cache-fresh/config.yml new file mode 100644 index 000000000000..a5e5035b11f4 --- /dev/null +++ b/tests/script-based-pre/build-cache-fresh/config.yml @@ -0,0 +1,4 @@ +# Copyright Kani Contributors +# SPDX-License-Identifier: Apache-2.0 OR MIT +script: cache_works.sh +expected: cache_works.expected diff --git a/tests/script-based-pre/build-cache-fresh/lib/Cargo.toml b/tests/script-based-pre/build-cache-fresh/lib/Cargo.toml new file mode 100644 index 000000000000..b75742d280b6 --- /dev/null +++ b/tests/script-based-pre/build-cache-fresh/lib/Cargo.toml @@ -0,0 +1,10 @@ +# Copyright Kani Contributors +# SPDX-License-Identifier: Apache-2.0 OR MIT +[package] +name = "lib" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] diff --git a/tests/script-based-pre/build-cache-fresh/lib/src/lib.rs b/tests/script-based-pre/build-cache-fresh/lib/src/lib.rs new file mode 100644 index 000000000000..8f4468728cac --- /dev/null +++ 
b/tests/script-based-pre/build-cache-fresh/lib/src/lib.rs @@ -0,0 +1,18 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT +#[kani::proof] +fn cover_bool() { + match kani::any() { + true => kani::cover!(true, "true"), + false => kani::cover!(true, "false"), + } +} + +#[kani::proof] +fn cover_option() { + match kani::any() { + Some(true) => kani::cover!(true, "true"), + Some(false) => kani::cover!(true, "false"), + None => kani::cover!(true, "none"), + } +} diff --git a/tests/script-based-pre/check-quiet/assume.rs b/tests/script-based-pre/check-quiet/assume.rs new file mode 100644 index 000000000000..0e34eb476615 --- /dev/null +++ b/tests/script-based-pre/check-quiet/assume.rs @@ -0,0 +1,16 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT + +#[kani::proof] +fn assume1() { + let i: i32 = kani::any(); + kani::assume(i < 10); + assert!(i < 20); +} + +#[kani::proof] +fn assume2() { + let i: u32 = kani::any(); + kani::assume(i < 10); + assert!(i < 20); +} diff --git a/tests/script-based-pre/check-quiet/check-quiet.expected b/tests/script-based-pre/check-quiet/check-quiet.expected new file mode 100644 index 000000000000..a08a54e8e695 --- /dev/null +++ b/tests/script-based-pre/check-quiet/check-quiet.expected @@ -0,0 +1 @@ +success: `--quiet` produced NO output \ No newline at end of file diff --git a/tests/script-based-pre/check-quiet/check-quiet.sh b/tests/script-based-pre/check-quiet/check-quiet.sh new file mode 100755 index 000000000000..2f6b15a67887 --- /dev/null +++ b/tests/script-based-pre/check-quiet/check-quiet.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +# Copyright Kani Contributors +# SPDX-License-Identifier: Apache-2.0 OR MIT + +# Checks that no output is produced if `--quiet` is used + +set -eu + +KANI_OUTPUT=`kani assume.rs --quiet | wc -l` + +if [[ ${KANI_OUTPUT} -ne 0 ]]; then + echo "error: \`--quiet\` produced some output" + exit 1 +else + echo "success: \`--quiet\` produced NO output" +fi diff 
--git a/tests/script-based-pre/check-quiet/config.yml b/tests/script-based-pre/check-quiet/config.yml new file mode 100644 index 000000000000..1ab22cbed6ba --- /dev/null +++ b/tests/script-based-pre/check-quiet/config.yml @@ -0,0 +1,4 @@ +# Copyright Kani Contributors +# SPDX-License-Identifier: Apache-2.0 OR MIT +script: check-quiet.sh +expected: check-quiet.expected diff --git a/tests/ui/LoopContractsSynthesizer/main_signed/expected b/tests/ui/LoopContractsSynthesizer/main_signed/expected new file mode 100644 index 000000000000..34c886c358cb --- /dev/null +++ b/tests/ui/LoopContractsSynthesizer/main_signed/expected @@ -0,0 +1 @@ +VERIFICATION:- SUCCESSFUL diff --git a/tests/ui/LoopContractsSynthesizer/main_signed/main_signed.rs b/tests/ui/LoopContractsSynthesizer/main_signed/main_signed.rs new file mode 100644 index 000000000000..aa49e7c93d95 --- /dev/null +++ b/tests/ui/LoopContractsSynthesizer/main_signed/main_signed.rs @@ -0,0 +1,18 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT + +// kani-flags: --enable-unstable --synthesize-loop-contracts + +// Check if goto-synthesizer is correctly called, and synthesizes the required +// loop invariants. 
+ +#[kani::proof] +fn main() { + let mut y: i32 = kani::any_where(|i| *i > 0); + + while y > 0 { + y = y - 1; + } + + assert!(y == 0); +} diff --git a/tests/ui/LoopContractsSynthesizer/main_unsigned/expected b/tests/ui/LoopContractsSynthesizer/main_unsigned/expected new file mode 100644 index 000000000000..34c886c358cb --- /dev/null +++ b/tests/ui/LoopContractsSynthesizer/main_unsigned/expected @@ -0,0 +1 @@ +VERIFICATION:- SUCCESSFUL diff --git a/tests/ui/LoopContractsSynthesizer/main_unsigned/main_unsigned.rs b/tests/ui/LoopContractsSynthesizer/main_unsigned/main_unsigned.rs new file mode 100644 index 000000000000..e2e272f1819c --- /dev/null +++ b/tests/ui/LoopContractsSynthesizer/main_unsigned/main_unsigned.rs @@ -0,0 +1,18 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT + +// kani-flags: --enable-unstable --synthesize-loop-contracts + +// Check if goto-synthesizer is correctly called, and synthesizes the required +// loop invariants. + +#[kani::proof] +fn main() { + let mut x: u64 = kani::any_where(|i| *i > 1); + + while x > 1 { + x = x - 1; + } + + assert!(x == 1); +} diff --git a/tests/ui/derive-arbitrary/non_arbitrary_field/expected b/tests/ui/derive-arbitrary/non_arbitrary_field/expected index e73c04c24d94..17a014736f5c 100644 --- a/tests/ui/derive-arbitrary/non_arbitrary_field/expected +++ b/tests/ui/derive-arbitrary/non_arbitrary_field/expected @@ -2,5 +2,3 @@ error[E0277]: the trait bound `NotArbitrary: kani::Arbitrary` is not satisfied |\ | not_arbitrary: NotArbitrary,\ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `kani::Arbitrary` is not implemented for `NotArbitrary` - -Error: "Failed to compile crate." 
diff --git a/tests/ui/derive-arbitrary/non_arbitrary_param/expected b/tests/ui/derive-arbitrary/non_arbitrary_param/expected index 1775ceb9f078..e74643f3bdb6 100644 --- a/tests/ui/derive-arbitrary/non_arbitrary_param/expected +++ b/tests/ui/derive-arbitrary/non_arbitrary_param/expected @@ -3,5 +3,3 @@ error[E0277]: the trait bound `Void: kani::Arbitrary` is not satisfied |\ | let _wrapper: Wrapper = kani::any();\ | ^^^^^^^^^ the trait `kani::Arbitrary` is not implemented for `Void`\ - -Error: "Failed to compile crate." diff --git a/tests/ui/derive-arbitrary/phantom_data/expected b/tests/ui/derive-arbitrary/phantom_data/expected new file mode 100644 index 000000000000..34c886c358cb --- /dev/null +++ b/tests/ui/derive-arbitrary/phantom_data/expected @@ -0,0 +1 @@ +VERIFICATION:- SUCCESSFUL diff --git a/tests/ui/derive-arbitrary/phantom_data/test.rs b/tests/ui/derive-arbitrary/phantom_data/test.rs new file mode 100644 index 000000000000..d5fc95552df8 --- /dev/null +++ b/tests/ui/derive-arbitrary/phantom_data/test.rs @@ -0,0 +1,24 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT + +//! Check that Kani can automatically derive `Arbitrary` on a struct that has +//! 
`std::marker::PhantomData` + +#[derive(kani::Arbitrary)] +struct Foo { + x: i32, + _f: std::marker::PhantomData, +} + +impl Foo { + fn new(v: i32) -> Self { + Self { x: v, _f: std::marker::PhantomData } + } +} + +#[kani::proof] +fn main() { + let x = kani::any(); + let f: Foo = Foo::new(x); + assert_eq!(f.x, x); +} diff --git a/tests/ui/derive-arbitrary/phantom_pinned/expected b/tests/ui/derive-arbitrary/phantom_pinned/expected new file mode 100644 index 000000000000..34c886c358cb --- /dev/null +++ b/tests/ui/derive-arbitrary/phantom_pinned/expected @@ -0,0 +1 @@ +VERIFICATION:- SUCCESSFUL diff --git a/tests/ui/derive-arbitrary/phantom_pinned/test.rs b/tests/ui/derive-arbitrary/phantom_pinned/test.rs new file mode 100644 index 000000000000..a4290eb64cac --- /dev/null +++ b/tests/ui/derive-arbitrary/phantom_pinned/test.rs @@ -0,0 +1,24 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT + +//! Check that Kani can automatically derive `Arbitrary` on a struct that has +//! `std::marker::PhantomPinned` + +#[derive(kani::Arbitrary)] +struct Foo { + x: i32, + _f: std::marker::PhantomPinned, +} + +impl Foo { + fn new(v: i32) -> Self { + Self { x: v, _f: std::marker::PhantomPinned } + } +} + +#[kani::proof] +fn check_arbitrary_phantom_pinned() { + let x = kani::any(); + let f: Foo = Foo::new(x); + assert_eq!(f.x, x); +} diff --git a/tests/ui/derive-arbitrary/union/expected b/tests/ui/derive-arbitrary/union/expected index 90953c69e7a5..3acea286d7ca 100644 --- a/tests/ui/derive-arbitrary/union/expected +++ b/tests/ui/derive-arbitrary/union/expected @@ -10,5 +10,3 @@ note: `#[derive(Arbitrary)]` cannot be used for unions such as `Wrapper` | union Wrapper {\ | ^^^^^^^\ = note: this error originates in the derive macro `kani::Arbitrary` - -Error: "Failed to compile crate." 
diff --git a/tests/ui/function-stubbing-error/expected b/tests/ui/function-stubbing-error/expected index cd7fa2cc4982..91c37681759f 100644 --- a/tests/ui/function-stubbing-error/expected +++ b/tests/ui/function-stubbing-error/expected @@ -1,4 +1,5 @@ -Attribute `kani::stub` takes two path arguments; found 3 -Attribute `kani::stub` takes two path arguments; found 1 -Attribute `kani::stub` takes two path arguments; found argument that is not a path -error: aborting due to 4 previous errors \ No newline at end of file +error: attribute `kani::stub` takes two path arguments; found 3 +error: attribute `kani::stub` takes two path arguments; found argument that is not a path +error: attribute `kani::stub` takes two path arguments; found 1 +error: aborting due to 4 previous errors + diff --git a/tests/ui/invalid-harnesses/expected b/tests/ui/invalid-harnesses/expected index 399c642386aa..bc24569a9522 100644 --- a/tests/ui/invalid-harnesses/expected +++ b/tests/ui/invalid-harnesses/expected @@ -1,16 +1,16 @@ -warning: Duplicate attribute\ +warning: duplicate attribute\ invalid.rs:\ |\ | #[kani::proof]\ | ^^^^^^^^^^^^^^ -error: Functions used as harnesses can not have any arguments.\ +error: functions used as harnesses cannot have any arguments\ invalid.rs:\ |\ | #[kani::proof] | ^^^^^^^^^^^^^^ -error: The proof attribute cannot be applied to generic functions.\ +error: the `proof` attribute cannot be applied to generic functions\ invalid.rs:\ |\ | #[kani::proof]\ diff --git a/tests/ui/logging/warning/expected b/tests/ui/logging/warning/expected index 51553d0c9e0a..49552eae9143 100644 --- a/tests/ui/logging/warning/expected +++ b/tests/ui/logging/warning/expected @@ -1 +1 @@ -warning: Duplicate attribute +warning: duplicate attribute diff --git a/tests/ui/mir-linker/generic-harness/expected b/tests/ui/mir-linker/generic-harness/expected index 5621a9a68433..0798bb9e99a3 100644 --- a/tests/ui/mir-linker/generic-harness/expected +++ 
b/tests/ui/mir-linker/generic-harness/expected @@ -1 +1 @@ -error: The proof attribute cannot be applied to generic functions. +error: the `proof` attribute cannot be applied to generic functions diff --git a/tests/ui/multiple-harnesses/expected b/tests/ui/multiple-harnesses/check_all/expected similarity index 100% rename from tests/ui/multiple-harnesses/expected rename to tests/ui/multiple-harnesses/check_all/expected diff --git a/tests/ui/multiple-harnesses/test.rs b/tests/ui/multiple-harnesses/check_all/test.rs similarity index 100% rename from tests/ui/multiple-harnesses/test.rs rename to tests/ui/multiple-harnesses/check_all/test.rs diff --git a/tests/ui/multiple-harnesses/check_some/expected b/tests/ui/multiple-harnesses/check_some/expected new file mode 100644 index 000000000000..e4069333d590 --- /dev/null +++ b/tests/ui/multiple-harnesses/check_some/expected @@ -0,0 +1,3 @@ +Checking harness check_first_harness... +Checking harness check_second_harness... +Complete - 2 successfully verified harnesses, 0 failures, 2 total. diff --git a/tests/ui/multiple-harnesses/check_some/select_harnesses.rs b/tests/ui/multiple-harnesses/check_some/select_harnesses.rs new file mode 100644 index 000000000000..d2e24c4ccb34 --- /dev/null +++ b/tests/ui/multiple-harnesses/check_some/select_harnesses.rs @@ -0,0 +1,19 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT +// kani-flags: --harness check_first_harness --harness check_second_harness +//! Ensure that we can select multiple harnesses at a time. +#[kani::proof] +fn check_first_harness() { + assert!(1 == 1); +} + +#[kani::proof] +fn check_second_harness() { + assert!(2 == 2); +} + +/// A harness that will fail verification if it is run. 
+#[kani::proof] +fn ignore_third_harness() { + assert!(3 == 2); +} diff --git a/tests/ui/multiple-harnesses/multiple_matches/expected b/tests/ui/multiple-harnesses/multiple_matches/expected new file mode 100644 index 000000000000..7aadd93cdb53 --- /dev/null +++ b/tests/ui/multiple-harnesses/multiple_matches/expected @@ -0,0 +1,5 @@ +Checking harness second::verify_harness... +Checking harness second::verify_blah... +Checking harness second::verify_foo... +Checking harness first::check_foo... +Complete - 4 successfully verified harnesses, 0 failures, 4 total. diff --git a/tests/ui/multiple-harnesses/multiple_matches/select_harnesses.rs b/tests/ui/multiple-harnesses/multiple_matches/select_harnesses.rs new file mode 100644 index 000000000000..250bfdef7597 --- /dev/null +++ b/tests/ui/multiple-harnesses/multiple_matches/select_harnesses.rs @@ -0,0 +1,39 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT +// kani-flags: --harness second --harness foo +//! Ensure that the set of harnesses run is the union of all arguments. + +mod first { + #[kani::proof] + fn check_foo() { + assert!(1 == 1); + } + + #[kani::proof] + fn check_blah() { + assert!(2 == 2); + } + + /// A harness that will fail verification if it is run. 
+#[kani::proof] + fn ignore_third_harness() { + assert!(3 == 2); + } +} + +mod second { + #[kani::proof] + fn verify_foo() { + assert!(1 == 1); + } + + #[kani::proof] + fn verify_blah() { + assert!(2 == 2); + } + + #[kani::proof] + fn verify_harness() { + assert!(3 == 3); + } +} diff --git a/tests/ui/multiple-harnesses/no_matching_harness/expected b/tests/ui/multiple-harnesses/no_matching_harness/expected new file mode 100644 index 000000000000..d0eb27af10f8 --- /dev/null +++ b/tests/ui/multiple-harnesses/no_matching_harness/expected @@ -0,0 +1 @@ +error: no harnesses matched the harness filters: `non_existing`, `invalid` diff --git a/tests/ui/multiple-harnesses/no_matching_harness/non_matching.rs b/tests/ui/multiple-harnesses/no_matching_harness/non_matching.rs new file mode 100644 index 000000000000..2c3e5d5ffa88 --- /dev/null +++ b/tests/ui/multiple-harnesses/no_matching_harness/non_matching.rs @@ -0,0 +1,10 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT +// kani-flags: --harness non_existing --harness invalid +//! Check that we error out when none of the harness filters match any harness + +/// A harness that will fail verification if it is run. +#[kani::proof] +fn ignored_harness() { + assert!(3 == 2); +} diff --git a/tests/ui/multiple-harnesses/some_matching_harnesses/expected b/tests/ui/multiple-harnesses/some_matching_harnesses/expected new file mode 100644 index 000000000000..08b81d02e429 --- /dev/null +++ b/tests/ui/multiple-harnesses/some_matching_harnesses/expected @@ -0,0 +1,3 @@ +Checking harness existing_harness... +Checking harness existing... +Complete - 2 successfully verified harnesses, 0 failures, 2 total. 
diff --git a/tests/ui/multiple-harnesses/some_matching_harnesses/subset.rs b/tests/ui/multiple-harnesses/some_matching_harnesses/subset.rs new file mode 100644 index 000000000000..662ca3a192a8 --- /dev/null +++ b/tests/ui/multiple-harnesses/some_matching_harnesses/subset.rs @@ -0,0 +1,19 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT +// kani-flags: --harness existing --harness non_existing --harness invalid +//! Check that we just ignore non-matching filters +#[kani::proof] +fn existing() { + assert!(1 == 1); +} + +#[kani::proof] +fn existing_harness() { + assert!(2 == 2); +} + +/// A harness that will fail verification if it is run. +#[kani::proof] +fn ignored_harness() { + assert!(3 == 2); +} diff --git a/tests/ui/multiple-proof-attributes/expected b/tests/ui/multiple-proof-attributes/expected index 64b3d70b9bf2..5907806a163f 100644 --- a/tests/ui/multiple-proof-attributes/expected +++ b/tests/ui/multiple-proof-attributes/expected @@ -1,4 +1,4 @@ -warning: Duplicate attribute\ +warning: duplicate attribute\ main.rs:\ |\ | #[kani::proof]\ diff --git a/tests/ui/solver-attribute/cadical/expected b/tests/ui/solver-attribute/cadical/expected new file mode 100644 index 000000000000..aca99d1dd185 --- /dev/null +++ b/tests/ui/solver-attribute/cadical/expected @@ -0,0 +1,2 @@ +Solving with CaDiCaL +VERIFICATION:- SUCCESSFUL diff --git a/tests/ui/solver-attribute/cadical/test.rs b/tests/ui/solver-attribute/cadical/test.rs new file mode 100644 index 000000000000..d8e897f923fb --- /dev/null +++ b/tests/ui/solver-attribute/cadical/test.rs @@ -0,0 +1,14 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT + +//! 
Checks that `cadical` is a valid argument to `kani::solver` + +#[kani::proof] +#[kani::solver(cadical)] +fn check() { + let mut a = [2, 3, 1]; + a.sort(); + assert_eq!(a[0], 1); + assert_eq!(a[1], 2); + assert_eq!(a[2], 3); +} diff --git a/tests/ui/solver-attribute/invalid/expected b/tests/ui/solver-attribute/invalid/expected new file mode 100644 index 000000000000..53f6b87bf547 --- /dev/null +++ b/tests/ui/solver-attribute/invalid/expected @@ -0,0 +1,6 @@ +error: invalid argument for `#[kani::solver]` attribute, expected one of the supported solvers (e.g. `kissat`) or a SAT solver binary (e.g. `bin=""`)\ +test.rs:\ +|\ +| #[kani::solver(123)]\ +| ^^^^^^^^^^^^^^^^^^^^ +error: aborting due to previous error diff --git a/tests/ui/solver-attribute/invalid/test.rs b/tests/ui/solver-attribute/invalid/test.rs new file mode 100644 index 000000000000..79b654cac9fe --- /dev/null +++ b/tests/ui/solver-attribute/invalid/test.rs @@ -0,0 +1,6 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT + +#[kani::proof] +#[kani::solver(123)] +fn check() {} diff --git a/tests/ui/solver-attribute/multiple-args/expected b/tests/ui/solver-attribute/multiple-args/expected new file mode 100644 index 000000000000..64e1a5468fc3 --- /dev/null +++ b/tests/ui/solver-attribute/multiple-args/expected @@ -0,0 +1,6 @@ +error: the `#[kani::solver]` attribute expects a single argument. 
Got 2 arguments.\ +test.rs:\ +|\ +| #[kani::solver(kissat, minisat)]\ +| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +error: aborting due to previous error diff --git a/tests/ui/solver-attribute/multiple-args/test.rs b/tests/ui/solver-attribute/multiple-args/test.rs new file mode 100644 index 000000000000..feaed0e539c5 --- /dev/null +++ b/tests/ui/solver-attribute/multiple-args/test.rs @@ -0,0 +1,6 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT + +#[kani::proof] +#[kani::solver(kissat, minisat)] +fn check() {} diff --git a/tests/ui/solver-attribute/multiple-attrs/expected b/tests/ui/solver-attribute/multiple-attrs/expected new file mode 100644 index 000000000000..1287dedaaab1 --- /dev/null +++ b/tests/ui/solver-attribute/multiple-attrs/expected @@ -0,0 +1,6 @@ +error: only one '#[kani::solver]' attribute is allowed per harness\ +test.rs:\ +|\ +| #[kani::solver(kissat)]\ +| ^^^^^^^^^^^^^^^^^^^^^^^ +error: aborting due to previous error diff --git a/tests/ui/solver-attribute/multiple-attrs/test.rs b/tests/ui/solver-attribute/multiple-attrs/test.rs new file mode 100644 index 000000000000..7cbe496c6cd6 --- /dev/null +++ b/tests/ui/solver-attribute/multiple-attrs/test.rs @@ -0,0 +1,7 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT + +#[kani::proof] +#[kani::solver(kissat)] +#[kani::solver(kissat)] +fn check() {} diff --git a/tests/ui/solver-attribute/no-arg/expected b/tests/ui/solver-attribute/no-arg/expected new file mode 100644 index 000000000000..42cadb93b477 --- /dev/null +++ b/tests/ui/solver-attribute/no-arg/expected @@ -0,0 +1,6 @@ +error: the `#[kani::solver]` attribute expects a single argument. 
Got 0 arguments.\ +test.rs:\ +|\ +| #[kani::solver]\ +| ^^^^^^^^^^^^^^^ +error: aborting due to previous error diff --git a/tests/ui/solver-attribute/no-arg/test.rs b/tests/ui/solver-attribute/no-arg/test.rs new file mode 100644 index 000000000000..93fa4df96170 --- /dev/null +++ b/tests/ui/solver-attribute/no-arg/test.rs @@ -0,0 +1,6 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT + +#[kani::proof] +#[kani::solver] +fn check() {} diff --git a/tests/ui/solver-attribute/not-found/expected b/tests/ui/solver-attribute/not-found/expected new file mode 100644 index 000000000000..0b8344b57997 --- /dev/null +++ b/tests/ui/solver-attribute/not-found/expected @@ -0,0 +1 @@ +error: the specified solver "non_existing_solver" was not found in path diff --git a/tests/ui/solver-attribute/not-found/test.rs b/tests/ui/solver-attribute/not-found/test.rs new file mode 100644 index 000000000000..0bcaf1a81ed8 --- /dev/null +++ b/tests/ui/solver-attribute/not-found/test.rs @@ -0,0 +1,8 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT + +//! 
Checks that Kani errors out if specified solver binary is not found + +#[kani::proof] +#[kani::solver(bin = "non_existing_solver")] +fn check() {} diff --git a/tests/ui/solver-attribute/unknown/expected b/tests/ui/solver-attribute/unknown/expected new file mode 100644 index 000000000000..7d3bf6d61ef3 --- /dev/null +++ b/tests/ui/solver-attribute/unknown/expected @@ -0,0 +1,6 @@ +error: unknown solver `foo`\ +test.rs:\ +|\ +| #[kani::solver(foo)]\ +| ^^^^^^^^^^^^^^^^^^^^ +error: aborting due to previous error diff --git a/tests/ui/solver-attribute/unknown/test.rs b/tests/ui/solver-attribute/unknown/test.rs new file mode 100644 index 000000000000..a1e98f0bb1fb --- /dev/null +++ b/tests/ui/solver-attribute/unknown/test.rs @@ -0,0 +1,6 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT + +#[kani::proof] +#[kani::solver(foo)] +fn check() {} diff --git a/tests/ui/solver-option/bin/expected b/tests/ui/solver-option/bin/expected new file mode 100644 index 000000000000..9f3b67b8a52a --- /dev/null +++ b/tests/ui/solver-option/bin/expected @@ -0,0 +1,2 @@ +Solving with External SAT solver +VERIFICATION:- SUCCESSFUL diff --git a/tests/ui/solver-option/bin/test.rs b/tests/ui/solver-option/bin/test.rs new file mode 100644 index 000000000000..3529deb0eea9 --- /dev/null +++ b/tests/ui/solver-option/bin/test.rs @@ -0,0 +1,12 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT +// kani-flags: --solver bin=kissat + +//! 
Checks that `--solver` accepts `bin=` + +#[kani::proof] +fn check_solver_option() { + let a: [i32; 5] = kani::any(); + let s = &a[..]; + assert_eq!(a, s); +} diff --git a/tests/ui/solver-option/cadical/expected b/tests/ui/solver-option/cadical/expected new file mode 100644 index 000000000000..aca99d1dd185 --- /dev/null +++ b/tests/ui/solver-option/cadical/expected @@ -0,0 +1,2 @@ +Solving with CaDiCaL +VERIFICATION:- SUCCESSFUL diff --git a/tests/ui/solver-option/cadical/test.rs b/tests/ui/solver-option/cadical/test.rs new file mode 100644 index 000000000000..a7b6e1304bf3 --- /dev/null +++ b/tests/ui/solver-option/cadical/test.rs @@ -0,0 +1,12 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT +// kani-flags: --solver cadical + +//! Checks that the `cadical` is supported as an argument to `--solver` + +#[kani::proof] +fn check_solver_option() { + let v = vec![kani::any(), 2]; + let v_copy = v.clone(); + assert_eq!(v, v_copy); +} diff --git a/tests/ui/solver-option/invalid/expected b/tests/ui/solver-option/invalid/expected new file mode 100644 index 000000000000..a709dc72109f --- /dev/null +++ b/tests/ui/solver-option/invalid/expected @@ -0,0 +1 @@ +error: invalid value 'foo=bar' for '--solver ' diff --git a/tests/ui/solver-option/invalid/test.rs b/tests/ui/solver-option/invalid/test.rs new file mode 100644 index 000000000000..c3fbcb03c5b8 --- /dev/null +++ b/tests/ui/solver-option/invalid/test.rs @@ -0,0 +1,8 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT +// kani-flags: --solver foo=bar + +//! 
Checks that `--solver` rejects an invalid argument + +#[kani::proof] +fn check_solver_option() {} diff --git a/tests/ui/solver-option/kissat/expected b/tests/ui/solver-option/kissat/expected new file mode 100644 index 000000000000..9f3b67b8a52a --- /dev/null +++ b/tests/ui/solver-option/kissat/expected @@ -0,0 +1,2 @@ +Solving with External SAT solver +VERIFICATION:- SUCCESSFUL diff --git a/tests/ui/solver-option/kissat/test.rs b/tests/ui/solver-option/kissat/test.rs new file mode 100644 index 000000000000..0b1403132ae3 --- /dev/null +++ b/tests/ui/solver-option/kissat/test.rs @@ -0,0 +1,13 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT +// kani-flags: --solver kissat + +//! Checks that the solver option overrides the solver attribute + +#[kani::proof] +#[kani::solver(minisat)] +fn check_solver_option() { + let v = vec![kani::any(), 3]; + let v_copy = v.clone(); + assert_eq!(v, v_copy); +} diff --git a/tests/ui/solver-option/minisat/expected b/tests/ui/solver-option/minisat/expected new file mode 100644 index 000000000000..3c0e3ef0d079 --- /dev/null +++ b/tests/ui/solver-option/minisat/expected @@ -0,0 +1,2 @@ +Solving with MiniSAT +VERIFICATION:- SUCCESSFUL diff --git a/tests/ui/solver-option/minisat/test.rs b/tests/ui/solver-option/minisat/test.rs new file mode 100644 index 000000000000..b92a4cd1b6c6 --- /dev/null +++ b/tests/ui/solver-option/minisat/test.rs @@ -0,0 +1,12 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT +// kani-flags: --solver minisat + +//! 
Checks that `--solver minisat` is accepted + +#[kani::proof] +fn check_solver_option_minisat() { + let x: i32 = kani::any(); + let y: i32 = kani::any(); + kani::cover!(x == y && x == -789); +} diff --git a/tests/ui/stubbing-unsupported-multiple-harness/expected b/tests/ui/stubbing-unsupported-multiple-harness/expected new file mode 100644 index 000000000000..dd5048054254 --- /dev/null +++ b/tests/ui/stubbing-unsupported-multiple-harness/expected @@ -0,0 +1 @@ +error: Failed to apply stubs. Harnesses with stubs must be verified separately. Found: `check_no_stub`, `check_stub_bar`, `check_stub_foo` diff --git a/tests/ui/stubbing-unsupported-multiple-harness/stub_harnesses.rs b/tests/ui/stubbing-unsupported-multiple-harness/stub_harnesses.rs new file mode 100644 index 000000000000..f83dc309277e --- /dev/null +++ b/tests/ui/stubbing-unsupported-multiple-harness/stub_harnesses.rs @@ -0,0 +1,34 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT +// +// kani-flags: --harness check --enable-unstable --enable-stubbing +// +//! This tests whether we provide a user friendly error if more than one harness has stubs + +fn foo(b: bool) { + assert!(b); +} + +fn bar(b: bool) { + assert!(!b); +} + +/// Harness should succeed if stub has been applied and fail otherwise. +#[kani::proof] +#[kani::stub(foo, bar)] +fn check_stub_foo() { + foo(false) +} + +/// Harness should succeed if stub has been applied and fail otherwise. 
+#[kani::proof] +#[kani::stub(bar, foo)] +fn check_stub_bar() { + bar(true) +} + +#[kani::proof] +fn check_no_stub() { + foo(true); + bar(false); +} diff --git a/tests/ui/stubbing/invalid-path/expected b/tests/ui/stubbing/invalid-path/expected new file mode 100644 index 000000000000..5584cde0871b --- /dev/null +++ b/tests/ui/stubbing/invalid-path/expected @@ -0,0 +1,4 @@ +error: failed to resolve `crate::mod_a::method_a::invalid`: expected module, found function `mod_a::method_a`\ +invalid.rs:\ +|\ +| #[cfg_attr(kani, kani::stub(crate::mod_a::method_a::invalid, noop))]\ diff --git a/tests/ui/stubbing/invalid-path/invalid.rs b/tests/ui/stubbing/invalid-path/invalid.rs new file mode 100644 index 000000000000..d24c757fd0cd --- /dev/null +++ b/tests/ui/stubbing/invalid-path/invalid.rs @@ -0,0 +1,27 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT +// +// kani-flags: --harness invalid_stub --enable-unstable --enable-stubbing + +pub mod mod_a { + use crate::mod_b::noop; + + /// This method always fail. + pub fn method_a() { + noop(); + panic!(); + } +} + +pub mod mod_b { + pub use crate::mod_a::method_a; + + #[cfg_attr(kani, kani::proof)] + #[cfg_attr(kani, kani::stub(crate::mod_a::method_a::invalid, noop))] + pub fn invalid_stub() { + method_a(); + } + + /// This method always succeed. 
+ pub fn noop() {} +} diff --git a/tests/ui/stubbing/stubbing-flag/expected b/tests/ui/stubbing/stubbing-flag/expected new file mode 100644 index 000000000000..34c886c358cb --- /dev/null +++ b/tests/ui/stubbing/stubbing-flag/expected @@ -0,0 +1 @@ +VERIFICATION:- SUCCESSFUL diff --git a/tests/ui/stubbing-flag/main.rs b/tests/ui/stubbing/stubbing-flag/main.rs similarity index 100% rename from tests/ui/stubbing-flag/main.rs rename to tests/ui/stubbing/stubbing-flag/main.rs diff --git a/tests/ui/stubbing-trait-validation/expected b/tests/ui/stubbing/stubbing-trait-validation/expected similarity index 100% rename from tests/ui/stubbing-trait-validation/expected rename to tests/ui/stubbing/stubbing-trait-validation/expected diff --git a/tests/ui/stubbing-trait-validation/trait_mismatch.rs b/tests/ui/stubbing/stubbing-trait-validation/trait_mismatch.rs similarity index 100% rename from tests/ui/stubbing-trait-validation/trait_mismatch.rs rename to tests/ui/stubbing/stubbing-trait-validation/trait_mismatch.rs diff --git a/tests/ui/stubbing-type-validation/expected b/tests/ui/stubbing/stubbing-type-validation/expected similarity index 100% rename from tests/ui/stubbing-type-validation/expected rename to tests/ui/stubbing/stubbing-type-validation/expected diff --git a/tests/ui/stubbing-type-validation/type_mismatch.rs b/tests/ui/stubbing/stubbing-type-validation/type_mismatch.rs similarity index 100% rename from tests/ui/stubbing-type-validation/type_mismatch.rs rename to tests/ui/stubbing/stubbing-type-validation/type_mismatch.rs diff --git a/tests/ui/unwind-multiple-arguments/expected b/tests/ui/unwind-multiple-arguments/expected index ed05be175837..dcd5fc606b86 100644 --- a/tests/ui/unwind-multiple-arguments/expected +++ b/tests/ui/unwind-multiple-arguments/expected @@ -1 +1 @@ -error: Exactly one Unwind Argument as Integer accepted +error: invalid argument for `unwind` attribute, expected an integer diff --git a/tests/ui/unwind-without-proof/expected 
b/tests/ui/unwind-without-proof/expected index de8a986153b8..684da6b85607 100644 --- a/tests/ui/unwind-without-proof/expected +++ b/tests/ui/unwind-without-proof/expected @@ -1 +1 @@ -error: The unwind attribute also requires the '#[kani::proof]' attribute +error: the `unwind` attribute also requires the '#[kani::proof]' attribute diff --git a/tools/bookrunner/Cargo.toml b/tools/bookrunner/Cargo.toml index f98b4ec72a83..600ba419e492 100644 --- a/tools/bookrunner/Cargo.toml +++ b/tools/bookrunner/Cargo.toml @@ -15,7 +15,7 @@ rustdoc = { path = "librustdoc" } walkdir = "2.3.2" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" -toml = "0.5" +toml = "0.7" [package.metadata.rust-analyzer] # This package uses rustc crates. diff --git a/tools/build-kani/Cargo.toml b/tools/build-kani/Cargo.toml index 490b9aeea245..d436d4be8a2c 100644 --- a/tools/build-kani/Cargo.toml +++ b/tools/build-kani/Cargo.toml @@ -3,7 +3,7 @@ [package] name = "build-kani" -version = "0.19.0" +version = "0.23.0" edition = "2021" description = "Builds Kani, Sysroot and release bundle." 
license = "MIT OR Apache-2.0" @@ -12,5 +12,5 @@ publish = false [dependencies] anyhow = "1" cargo_metadata = "0.15.0" -clap = { version = "4", features=["derive"] } +clap = { version = "4.1.3", features=["derive"] } which = "4" diff --git a/tools/build-kani/src/main.rs b/tools/build-kani/src/main.rs index 756aae2a4c43..0f95fb23819c 100644 --- a/tools/build-kani/src/main.rs +++ b/tools/build-kani/src/main.rs @@ -10,9 +10,7 @@ mod parser; mod sysroot; -use crate::sysroot::{ - build_bin, build_lib, build_lib_legacy, kani_sysroot_legacy_lib, kani_sysroot_lib, -}; +use crate::sysroot::{build_bin, build_lib, kani_sysroot_lib}; use anyhow::{bail, Result}; use clap::Parser; use std::{ffi::OsString, path::Path, process::Command}; @@ -23,7 +21,6 @@ fn main() -> Result<()> { match args.subcommand { parser::Commands::BuildDev(build_parser) => { build_lib(); - build_lib_legacy(); build_bin(&build_parser.args); } parser::Commands::Bundle(bundle_parser) => { @@ -76,7 +73,6 @@ fn prebundle(dir: &Path) -> Result<()> { build_bin(&["--release"]); // And that libraries have been built too. build_lib(); - build_lib_legacy(); Ok(()) } @@ -104,16 +100,16 @@ fn bundle_kani(dir: &Path) -> Result<()> { // 4. Pre-compiled library files cp_dir(&kani_sysroot_lib(), dir)?; - cp_dir(&kani_sysroot_legacy_lib(), dir)?; // 5. Record the exact toolchain we use std::fs::write(dir.join("rust-toolchain-version"), env!("RUSTUP_TOOLCHAIN"))?; - // 5. Include a licensing note + // 6. Include a licensing note cp(Path::new("tools/build-kani/license-notes.txt"), dir)?; Ok(()) } + /// Copy CBMC files into `dir` fn bundle_cbmc(dir: &Path) -> Result<()> { // In an effort to avoid creating new places where we must specify the exact version diff --git a/tools/build-kani/src/sysroot.rs b/tools/build-kani/src/sysroot.rs index ce34f6185358..38a047b2ff64 100644 --- a/tools/build-kani/src/sysroot.rs +++ b/tools/build-kani/src/sysroot.rs @@ -5,7 +5,6 @@ //! In this folder, you can find the following folders: //! 
- `bin/`: Where all Kani binaries will be located. //! - `lib/`: Kani libraries as well as rust standard libraries. -//! - `legacy-lib/`: Kani libraries built based on the the toolchain standard libraries. //! //! Rustc expects the sysroot to have a specific folder layout: //! `{SYSROOT}/rustlib//lib/` @@ -50,15 +49,6 @@ pub fn kani_sysroot_lib() -> PathBuf { path_buf!(kani_sysroot(), "lib") } -/// Returns the path to where Kani pre-compiled library are stored. -/// -/// The legacy libraries are compiled on the top of rustup sysroot. Using it results in missing -/// symbols. This is still needed though because when we use the rust monomorphizer as our -/// reachability algorithm, the resulting boundaries are different than the new sysroot. -pub fn kani_sysroot_legacy_lib() -> PathBuf { - path_buf!(kani_sysroot(), "legacy-lib") -} - /// Returns the path to where Kani's pre-compiled binaries are stored. pub fn kani_sysroot_bin() -> PathBuf { path_buf!(kani_sysroot(), "bin") @@ -197,44 +187,6 @@ fn build_artifacts(cargo_cmd: &mut Child) -> Vec { .collect() } -/// Build Kani libraries using the regular rust toolchain standard libraries. -/// We should be able to remove this once the MIR linker is stable. -pub fn build_lib_legacy() { - // Run cargo build with -Z build-std - let target_dir = env!("KANI_LEGACY_LIBS"); - let args = [ - "build", - "-p", - "std", - "-p", - "kani", - "-p", - "kani_macros", - "--target-dir", - target_dir, - "--message-format", - "json-diagnostic-rendered-ansi", - ]; - let mut child = Command::new("cargo") - .env("CARGO_ENCODED_RUSTFLAGS", ["--cfg=kani"].join("\x1f")) - .args(args) - .stdout(Stdio::piped()) - .spawn() - .expect("Failed to build Kani libraries."); - - // Collect the build artifacts. - let artifacts = build_artifacts(&mut child); - let _ = child.wait().expect("Couldn't get cargo's exit status"); - - // Create sysroot folder. 
- let legacy_lib = kani_sysroot_legacy_lib(); - legacy_lib.exists().then(|| fs::remove_dir_all(&legacy_lib)); - fs::create_dir_all(&legacy_lib).expect(&format!("Failed to create {legacy_lib:?}")); - - // Copy Kani libraries to inside the legacy-lib folder. - copy_libs(&artifacts, &legacy_lib, &is_kani_lib); -} - /// Extra arguments to be given to `cargo build` while building Kani's binaries. /// Note that the following arguments are always provided: /// ```bash diff --git a/tools/compiletest/src/common.rs b/tools/compiletest/src/common.rs index 890e06260121..949726030b7a 100644 --- a/tools/compiletest/src/common.rs +++ b/tools/compiletest/src/common.rs @@ -134,8 +134,17 @@ pub struct Config { /// Timeout duration for each test. pub timeout: Option, + /// Whether we will abort execution when a failure occurs. + /// When set to false, this will execute the entire test suite regardless of any failure. + pub fail_fast: bool, + /// Whether we will run the tests or not. pub dry_run: bool, + + /// Whether we should update expected tests when there is a mismatch. This is helpful for + /// updating multiple tests. Users should still manually edit the files after to only keep + /// relevant expectations. + pub fix_expected: bool, } #[derive(Debug, Clone)] diff --git a/tools/compiletest/src/main.rs b/tools/compiletest/src/main.rs index eb268436c23f..98a12f3d1ea3 100644 --- a/tools/compiletest/src/main.rs +++ b/tools/compiletest/src/main.rs @@ -86,7 +86,10 @@ pub fn parse_config(args: Vec) -> Config { .optflag("h", "help", "show this message") .optopt("", "edition", "default Rust edition", "EDITION") .optopt("", "timeout", "the timeout for each test in seconds", "TIMEOUT") + .optflag("", "no-fail-fast", "run all tests regardless of failure") .optflag("", "dry-run", "don't actually run the tests") + .optflag("", "fix-expected", + "override all expected files that did not match the output. 
Tests will NOT fail when there is a mismatch") ; let (argv0, args_) = args.split_first().unwrap(); @@ -157,7 +160,9 @@ pub fn parse_config(args: Vec) -> Config { color, edition: matches.opt_str("edition"), force_rerun: matches.opt_present("force-rerun"), + fail_fast: !matches.opt_present("no-fail-fast"), dry_run: matches.opt_present("dry-run"), + fix_expected: matches.opt_present("fix-expected"), timeout, } } @@ -176,6 +181,9 @@ pub fn log_config(config: &Config) { logv(c, format!("verbose: {}", config.verbose)); logv(c, format!("quiet: {}", config.quiet)); logv(c, format!("timeout: {:?}", config.timeout)); + logv(c, format!("fail-fast: {:?}", config.fail_fast)); + logv(c, format!("dry-run: {:?}", config.dry_run)); + logv(c, format!("fix-expected: {:?}", config.fix_expected)); logv( c, format!( @@ -285,7 +293,7 @@ pub fn test_opts(config: &Config) -> test::TestOpts { list: false, options: test::Options::new(), time_options: None, - fail_fast: true, + fail_fast: config.fail_fast, force_run_in_process: false, } } diff --git a/tools/compiletest/src/runtest.rs b/tools/compiletest/src/runtest.rs index 1119297270bb..59b79a24d8d9 100644 --- a/tools/compiletest/src/runtest.rs +++ b/tools/compiletest/src/runtest.rs @@ -16,7 +16,7 @@ use crate::{fatal_error, json}; use std::env; use std::fs::{self, create_dir_all}; -use std::path::PathBuf; +use std::path::{Path, PathBuf}; use std::process::{Command, ExitStatus, Output, Stdio}; use std::str; @@ -279,8 +279,7 @@ impl<'test> TestCx<'test> { } let proc_res = self.compose_and_run(cargo); - let expected = fs::read_to_string(self.testpaths.file.clone()).unwrap(); - self.verify_output(&proc_res, &expected); + self.verify_output(&proc_res, &self.testpaths.file); // TODO: We should probably be checking the exit status somehow // See https://github.com/model-checking/kani/issues/1895 @@ -334,7 +333,7 @@ impl<'test> TestCx<'test> { } // Check if the `expected` file exists, and load its contents into `expected_output` - let 
expected_output = if let Some(expected_path) = exec_config.expected { + let expected_path = if let Some(expected_path) = exec_config.expected { let expected_rel_path = PathBuf::from(expected_path); let expected_path = self.testpaths.file.join(expected_rel_path); if !expected_path.exists() { @@ -344,7 +343,7 @@ impl<'test> TestCx<'test> { ); fatal_error(&err_msg); } - Some(fs::read_to_string(expected_path).unwrap()) + Some(expected_path) } else { None }; @@ -355,8 +354,8 @@ impl<'test> TestCx<'test> { let proc_res = self.compose_and_run(script_path_cmd); // Compare with expected output if it was provided - if let Some(output) = expected_output { - self.verify_output(&proc_res, &output); + if let Some(path) = expected_path { + self.verify_output(&proc_res, &path); } // Compare with exit code (0 if it wasn't provided) @@ -376,9 +375,8 @@ impl<'test> TestCx<'test> { /// the expected output in `expected` file. fn run_expected_test(&self) { let proc_res = self.run_kani(); - let expected = - fs::read_to_string(self.testpaths.file.parent().unwrap().join("expected")).unwrap(); - self.verify_output(&proc_res, &expected); + let expected_path = self.testpaths.file.parent().unwrap().join("expected"); + self.verify_output(&proc_res, &expected_path); } /// Runs Kani with stub implementations of various data structures. @@ -397,20 +395,35 @@ impl<'test> TestCx<'test> { /// Print an error if the verification output does not contain the expected /// lines. 
- fn verify_output(&self, proc_res: &ProcRes, expected: &str) { + fn verify_output(&self, proc_res: &ProcRes, expected_path: &Path) { // Include the output from stderr here for cases where there are exceptions + let expected = fs::read_to_string(expected_path).unwrap(); let output = proc_res.stdout.to_string() + &proc_res.stderr; - if let Some(lines) = TestCx::contains_lines( + let diff = TestCx::contains_lines( &output.split('\n').collect::>(), expected.split('\n').collect(), - ) { - self.fatal_proc_rec( - &format!( - "test failed: expected output to contain the line(s):\n{}", - lines.join("\n") - ), - proc_res, - ); + ); + match (diff, self.config.fix_expected) { + (None, _) => { /* Test passed. Do nothing*/ } + (Some(_), true) => { + // Fix output but still fail the test so users know which ones were updated + fs::write(expected_path, output) + .expect(&format!("Failed to update file {}", expected_path.display())); + self.fatal_proc_rec( + &format!("updated `{}` file, please review", expected_path.display()), + proc_res, + ) + } + (Some(lines), false) => { + // Throw an error + self.fatal_proc_rec( + &format!( + "test failed: expected output to contain the line(s):\n{}", + lines.join("\n") + ), + proc_res, + ); + } } }