diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml
index 2d493753e5..888bf120ca 100644
--- a/.github/workflows/bitcoin-tests.yml
+++ b/.github/workflows/bitcoin-tests.yml
@@ -89,7 +89,8 @@ jobs:
           - tests::nakamoto_integrations::miner_writes_proposed_block_to_stackerdb
           - tests::nakamoto_integrations::correct_burn_outs
           - tests::nakamoto_integrations::vote_for_aggregate_key_burn_op
-          - tests::nakamoto_integrations::follower_bootup
+          - tests::nakamoto_integrations::follower_bootup_simple
+          - tests::nakamoto_integrations::follower_bootup_custom_chain_id
           - tests::nakamoto_integrations::forked_tenure_is_ignored
           - tests::nakamoto_integrations::nakamoto_attempt_time
           - tests::nakamoto_integrations::skip_mining_long_tx
@@ -122,7 +123,9 @@ jobs:
           - tests::signer::v0::signer_set_rollover
           - tests::signer::v0::signing_in_0th_tenure_of_reward_cycle
           - tests::signer::v0::continue_after_tenure_extend
-          - tests::signer::v0::tenure_extend_after_idle
+          - tests::signer::v0::tenure_extend_after_idle_signers
+          - tests::signer::v0::tenure_extend_after_idle_miner
+          - tests::signer::v0::tenure_extend_succeeds_after_rejected_attempt
           - tests::signer::v0::stx_transfers_dont_effect_idle_timeout
           - tests::signer::v0::idle_tenure_extend_active_mining
           - tests::signer::v0::multiple_miners_with_custom_chain_id
@@ -130,6 +133,12 @@ jobs:
           - tests::signer::v0::continue_after_fast_block_no_sortition
           - tests::signer::v0::block_validation_response_timeout
           - tests::signer::v0::tenure_extend_after_bad_commit
+          - tests::signer::v0::block_proposal_max_age_rejections
+          - tests::signer::v0::global_acceptance_depends_on_block_announcement
+          - tests::signer::v0::no_reorg_due_to_successive_block_validation_ok
+          - tests::signer::v0::incoming_signers_ignore_block_proposals
+          - tests::signer::v0::outgoing_signers_ignore_block_proposals
+          - tests::signer::v0::injected_signatures_are_ignored_across_boundaries
           - tests::nakamoto_integrations::burn_ops_integration_test
           - tests::nakamoto_integrations::check_block_heights
           - tests::nakamoto_integrations::clarity_burn_state
@@ -140,6 +149,7 @@ jobs:
           - tests::nakamoto_integrations::mock_mining
           - tests::nakamoto_integrations::multiple_miners
           - tests::nakamoto_integrations::follower_bootup_across_multiple_cycles
+          - tests::nakamoto_integrations::nakamoto_lockup_events
           - tests::nakamoto_integrations::utxo_check_on_startup_panic
           - tests::nakamoto_integrations::utxo_check_on_startup_recover
           - tests::nakamoto_integrations::v3_signer_api_endpoint
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 1c59f23e8d..661f2e3746 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -164,6 +164,38 @@ jobs:
       - check-release
     uses: ./.github/workflows/stacks-core-tests.yml

+  ## Checks to run on built binaries
+  ##
+  ## Runs when:
+  ##   - it is a release run
+  ## or:
+  ##   - it is not a release run
+  ##     and any of:
+  ##       - this workflow is called manually
+  ##       - PR is opened
+  ##       - PR added to merge queue
+  ##       - commit to either (development, next, master) branch
+  stacks-core-build-tests:
+    if: |
+      needs.check-release.outputs.is_release == 'true' || (
+        github.event_name == 'workflow_dispatch' ||
+        github.event_name == 'pull_request' ||
+        github.event_name == 'merge_group' ||
+        (
+          contains('
+            refs/heads/master
+            refs/heads/develop
+            refs/heads/next
+          ', github.event.pull_request.head.ref) &&
+          github.event_name == 'push'
+        )
+      )
+    name: Stacks Core Build Tests
+    needs:
+      - rustfmt
+      - check-release
+    uses: ./.github/workflows/core-build-tests.yml
+
   bitcoin-tests:
     if: |
       needs.check-release.outputs.is_release == 'true' || (
diff --git a/.github/workflows/clippy.yml b/.github/workflows/clippy.yml
new file mode 100644
index 0000000000..1ba4825527
--- /dev/null
+++ b/.github/workflows/clippy.yml
@@ -0,0 +1,40 @@
+## Perform Clippy checks - currently set to defaults
+##   https://github.com/rust-lang/rust-clippy#usage
+##   https://rust-lang.github.io/rust-clippy/master/index.html
+##
+name: Clippy Checks
+
+# Only run when:
+#   - PRs are (re)opened against develop branch
+on:
+  pull_request:
+    branches:
+      - develop
+    types:
+      - opened
+      - reopened
+      - synchronize
+
+jobs:
+  clippy_check:
+    name: Clippy Check
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout the latest code
+        id: git_checkout
+        uses: actions/checkout@v3
+      - name: Define Rust Toolchain
+        id: define_rust_toolchain
+        run: echo "RUST_TOOLCHAIN=$(cat ./rust-toolchain)" >> $GITHUB_ENV
+      - name: Setup Rust Toolchain
+        id: setup_rust_toolchain
+        uses: actions-rust-lang/setup-rust-toolchain@v1
+        with:
+          toolchain: ${{ env.RUST_TOOLCHAIN }}
+          components: clippy
+      - name: Clippy
+        id: clippy
+        uses: actions-rs/clippy-check@v1
+        with:
+          token: ${{ secrets.GITHUB_TOKEN }}
+          args: -p libstackerdb -p stacks-signer -p pox-locking -p clarity -p libsigner -p stacks-common --no-deps --tests --all-features -- -D warnings
\ No newline at end of file
diff --git a/.github/workflows/core-build-tests.yml b/.github/workflows/core-build-tests.yml
new file mode 100644
index 0000000000..393e2ff6b0
--- /dev/null
+++ b/.github/workflows/core-build-tests.yml
@@ -0,0 +1,33 @@
+name: Core build tests
+
+# Only run when:
+#   - PRs are (re)opened against develop branch
+on:
+  workflow_call:
+
+jobs:
+  check-consts:
+    name: Check the constants from stacks-inspect
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout the latest code
+        id: git_checkout
+        uses: actions/checkout@v3
+      - name: Define Rust Toolchain
+        id: define_rust_toolchain
+        run: echo "RUST_TOOLCHAIN=$(cat ./rust-toolchain)" >> $GITHUB_ENV
+      - name: Setup Rust Toolchain
+        id: setup_rust_toolchain
+        uses: actions-rust-lang/setup-rust-toolchain@v1
+        with:
+          toolchain: ${{ env.RUST_TOOLCHAIN }}
+      - name: Build the binaries
+        id: build
+        run: |
+          cargo build
+      - name: Dump constants JSON
+        id: consts-dump
+        run: cargo run --bin stacks-inspect -- dump-consts | tee out.json
+      - name: Set expected constants JSON
+        id: expects-json
+        run: diff out.json ./sample/expected_consts.json
diff --git a/.github/workflows/p2p-tests.yml b/.github/workflows/p2p-tests.yml
index 1c33eca0fb..81790bdc12 100644
--- a/.github/workflows/p2p-tests.yml
+++ b/.github/workflows/p2p-tests.yml
@@ -43,10 +43,10 @@ jobs:
           - net::tests::convergence::test_walk_star_15_org_biased
           - net::tests::convergence::test_walk_inbound_line_15
           - net::api::tests::postblock_proposal::test_try_make_response
-          - net::server::tests::test_http_10_threads_getinfo
-          - net::server::tests::test_http_10_threads_getblock
-          - net::server::tests::test_http_too_many_clients
-          - net::server::tests::test_http_slow_client
+          - net::server::test::test_http_10_threads_getinfo
+          - net::server::test::test_http_10_threads_getblock
+          - net::server::test::test_http_too_many_clients
+          - net::server::test::test_http_slow_client
     steps:
       ## Setup test environment
       - name: Setup Test Environment
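The new `core-build-tests.yml` job above pins consensus-critical constants by diffing the output of `cargo run --bin stacks-inspect -- dump-consts` against the checked-in `sample/expected_consts.json`. A sketch of the same golden-file idea in Rust — assuming a `serde_json` dependency, and comparing parsed values rather than raw bytes as the workflow's `diff` step does:

```rust
use std::fs;

/// Golden-file check: parse both documents as JSON and compare structurally,
/// so formatting changes don't trip the check (the workflow's `diff` step is
/// stricter and compares bytes).
fn consts_match(actual_path: &str, expected_path: &str) -> std::io::Result<bool> {
    let actual: serde_json::Value = serde_json::from_str(&fs::read_to_string(actual_path)?)
        .expect("dumped constants are not valid JSON");
    let expected: serde_json::Value = serde_json::from_str(&fs::read_to_string(expected_path)?)
        .expect("expected constants are not valid JSON");
    Ok(actual == expected)
}

fn main() -> std::io::Result<()> {
    // Fail the build (non-zero exit) when consensus constants drift.
    if !consts_match("out.json", "sample/expected_consts.json")? {
        eprintln!("constants drifted from sample/expected_consts.json");
        std::process::exit(1);
    }
    Ok(())
}
```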
diff --git a/.github/workflows/stacks-core-tests.yml b/.github/workflows/stacks-core-tests.yml
index 98eb5cf92c..457a2aaefd 100644
--- a/.github/workflows/stacks-core-tests.yml
+++ b/.github/workflows/stacks-core-tests.yml
@@ -18,55 +18,6 @@ concurrency:
   cancel-in-progress: ${{ github.event_name == 'pull_request' }}

 jobs:
-  # Full genesis test with code coverage
-  full-genesis:
-    name: Full Genesis Test
-    runs-on: ubuntu-latest
-    strategy:
-      ## Continue with the test matrix even if we've had a failure
-      fail-fast: false
-      ## Run a maximum of 2 concurrent tests from the test matrix
-      max-parallel: 2
-      matrix:
-        test-name:
-          - neon_integrations::bitcoind_integration_test
-    steps:
-      ## Setup test environment
-      - name: Setup Test Environment
-        id: setup_tests
-        uses: stacks-network/actions/stacks-core/testenv@main
-        with:
-          genesis: true
-          btc-version: "25.0"
-
-      ## Run test matrix using restored cache of archive file
-      ##   - Test will timeout after env.TEST_TIMEOUT minutes
-      - name: Run Tests
-        id: run_tests
-        timeout-minutes: ${{ fromJSON(env.TEST_TIMEOUT) }}
-        uses: stacks-network/actions/stacks-core/run-tests@main
-        with:
-          test-name: ${{ matrix.test-name }}
-          threads: 1
-          archive-file: ~/genesis_archive.tar.zst
-
-      ## Upload code coverage file
-      - name: Code Coverage
-        id: codecov
-        uses: stacks-network/actions/codecov@main
-        with:
-          test-name: large_genesis
-          filename: ./lcov.info
-
-      - name: Status Output
-        run: |
-          echo "run_tests: ${{ steps.run_tests.outputs.status }}"
-          echo "codecov: ${{ steps.codecov.outputs.status }}"
-
-      - name: Check Failures
-        if: steps.run_tests.outputs.status == 'failure' || steps.codecov.outputs.status == 'failure'
-        run: exit 1
-
   # Unit tests with code coverage
   unit-tests:
     name: Unit Tests
@@ -186,7 +137,6 @@ jobs:
     runs-on: ubuntu-latest
     if: always()
     needs:
-      - full-genesis
       - open-api-validation
       - core-contracts-clarinet-test
     steps:
diff --git a/CHANGELOG.md b/CHANGELOG.md
index d19470075d..8e2fc5c172 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,12 +5,23 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project
 adheres to the versioning scheme outlined in the [README.md](README.md).

-## [Unreleased]
+## [3.1.0.0.3]

 ### Added

+- Added `tenure_timeout_secs` to the miner for determining when a time-based tenure extend should be attempted.
+- Added configuration option `block_proposal_max_age_secs` under `[connection_options]` to prevent processing stale block proposals.
+
 ### Changed

+- The RPC endpoint `/v3/block_proposal` will no longer evaluate block proposals more than `block_proposal_max_age_secs` old.
+- When a transaction is dropped due to replace-by-fee, the `/drop_mempool_tx` event observer payload now includes `new_txid`, which is the transaction that replaced this dropped transaction. When a transaction is dropped for other reasons, `new_txid` is `null`. [#5381](https://github.com/stacks-network/stacks-core/pull/5381)
+- Nodes will assume that all PoX anchor blocks exist by default, and stall initial block download indefinitely to await their arrival (#5502).
+
+### Fixed
+
+- Signers no longer accept messages for blocks from different reward cycles (#5662).
+
 ## [3.1.0.0.2]

 ### Added
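The changelog entries above describe an age gate: proposals older than `block_proposal_max_age_secs` are rejected before validation is even attempted. A self-contained sketch of such a check, assuming the proposal carries a Unix timestamp in seconds (the type and field names are illustrative, not the node's actual ones):

```rust
use std::time::{SystemTime, UNIX_EPOCH};

/// Hypothetical stand-in for a block proposal that records when it was made.
struct BlockProposal {
    timestamp_secs: u64,
}

/// Reject proposals older than `max_age_secs`, mirroring the
/// `block_proposal_max_age_secs` connection option described above.
fn is_stale(proposal: &BlockProposal, max_age_secs: u64) -> bool {
    let now = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock is before the Unix epoch")
        .as_secs();
    now.saturating_sub(proposal.timestamp_secs) > max_age_secs
}

fn main() {
    let proposal = BlockProposal { timestamp_secs: 0 }; // very old
    assert!(is_stale(&proposal, 600));
}
```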
diff --git a/Cargo.lock b/Cargo.lock
index 8a3769b6a8..3b05c44ef1 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -525,12 +525,6 @@ version = "1.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223"

-[[package]]
-name = "cast"
-version = "0.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
-
 [[package]]
 name = "cc"
 version = "1.0.83"
@@ -581,17 +575,6 @@ dependencies = [
  "generic-array 0.14.7",
 ]

-[[package]]
-name = "clap"
-version = "2.34.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c"
-dependencies = [
- "bitflags 1.3.2",
- "textwrap",
- "unicode-width",
-]
-
 [[package]]
 name = "clap"
 version = "4.5.0"
@@ -637,15 +620,15 @@ name = "clarity"
 version = "0.0.1"
 dependencies = [
  "assert-json-diff 1.1.0",
- "hashbrown",
+ "hashbrown 0.14.3",
  "integer-sqrt",
  "lazy_static",
  "mutants",
  "rand 0.8.5",
  "rand_chacha 0.3.1",
  "regex",
- "rstest 0.17.0",
- "rstest_reuse 0.5.0",
+ "rstest",
+ "rstest_reuse",
  "rusqlite",
  "serde",
  "serde_derive",
@@ -750,61 +733,6 @@ dependencies = [
  "cfg-if 1.0.0",
 ]

-[[package]]
-name = "criterion"
-version = "0.3.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b01d6de93b2b6c65e17c634a26653a29d107b3c98c607c765bf38d041531cd8f"
-dependencies = [
- "atty",
- "cast",
- "clap 2.34.0",
- "criterion-plot",
- "csv",
- "itertools",
- "lazy_static",
- "num-traits",
- "oorandom",
- "plotters",
- "rayon",
- "regex",
- "serde",
- "serde_cbor",
- "serde_derive",
- "serde_json",
- "tinytemplate",
- "walkdir",
-]
-
-[[package]]
-name = "criterion-plot"
-version = "0.4.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2673cc8207403546f45f5fd319a974b1e6983ad1a3ee7e6041650013be041876"
-dependencies = [
- "cast",
- "itertools",
-]
-
-[[package]]
-name = "crossbeam-deque"
-version = "0.8.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d"
-dependencies = [
- "crossbeam-epoch",
- "crossbeam-utils",
-]
-
-[[package]]
-name = "crossbeam-epoch"
-version = "0.9.18"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"
-dependencies = [
- "crossbeam-utils",
-]
-
 [[package]]
 name = "crossbeam-utils"
 version = "0.8.19"
@@ -831,27 +759,6 @@ dependencies = [
  "subtle",
 ]

-[[package]]
-name = "csv"
-version = "1.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ac574ff4d437a7b5ad237ef331c17ccca63c46479e5b5453eb8e10bb99a759fe"
-dependencies = [
- "csv-core",
- "itoa",
- "ryu",
- "serde",
-]
-
-[[package]]
-name = "csv-core"
-version = "0.1.11"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5efa2b3d7902f4b634a20cae3c9c4e6209dc4779feb6863329607560143efa70"
-dependencies = [
- "memchr",
-]
-
 [[package]]
 name = "ctr"
 version = "0.6.0"
@@ -1009,12 +916,6 @@ dependencies = [
  "zeroize",
 ]

-[[package]]
-name = "either"
-version = "1.10.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "11157ac094ffbdde99aa67b23417ebdd801842852b500e395a45a9c0aac03e4a"
-
 [[package]]
 name = "encoding_rs"
 version = "0.8.33"
@@ -1394,12 +1295,6 @@ dependencies = [
  "tracing",
 ]

-[[package]]
-name = "half"
-version = "1.8.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7"
-
 [[package]]
 name = "hashbrown"
 version = "0.14.3"
@@ -1411,13 +1306,19 @@ dependencies = [
  "serde",
 ]

+[[package]]
+name = "hashbrown"
+version = "0.15.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289"
+
 [[package]]
 name = "hashlink"
 version = "0.9.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af"
 dependencies = [
- "hashbrown",
+ "hashbrown 0.14.3",
 ]

 [[package]]
@@ -1683,12 +1584,12 @@ dependencies = [

 [[package]]
 name = "indexmap"
-version = "2.2.3"
+version = "2.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "233cf39063f058ea2caae4091bf4a3ef70a653afbc026f5c4a4135d114e3c177"
+checksum = "62f822373a4fe84d4bb149bf54e584a7f4abec90e072ed49cda0edea5b95471f"
 dependencies = [
  "equivalent",
- "hashbrown",
+ "hashbrown 0.15.2",
 ]

 [[package]]
@@ -1741,15 +1642,6 @@ version = "2.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3"

-[[package]]
-name = "itertools"
-version = "0.10.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
-dependencies = [
- "either",
-]
-
 [[package]]
 name = "itoa"
 version = "1.0.10"
@@ -1841,7 +1733,7 @@ name = "libsigner"
 version = "0.0.1"
 dependencies = [
  "clarity",
- "hashbrown",
+ "hashbrown 0.14.3",
  "lazy_static",
  "libc",
  "libstackerdb",
@@ -2141,12 +2033,6 @@ version = "1.19.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92"

-[[package]]
-name = "oorandom"
-version = "11.1.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575"
-
 [[package]]
 name = "opaque-debug"
 version = "0.3.0"
@@ -2194,17 +2080,6 @@ version = "2.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"

-[[package]]
-name = "pest"
-version = "2.7.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "219c0dcc30b6a27553f9cc242972b67f75b60eb0db71f0b5462f38b058c41546"
-dependencies = [
- "memchr",
- "thiserror",
- "ucd-trie",
-]
-
 [[package]]
 name = "pico-args"
 version = "0.5.0"
@@ -2276,34 +2151,6 @@ version = "3.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "626dec3cac7cc0e1577a2ec3fc496277ec2baa084bebad95bb6fdbfae235f84c"

-[[package]]
-name = "plotters"
-version = "0.3.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d2c224ba00d7cadd4d5c660deaf2098e5e80e07846537c51f9cfa4be50c1fd45"
-dependencies = [
- "num-traits",
- "plotters-backend",
- "plotters-svg",
- "wasm-bindgen", - "web-sys", -] - -[[package]] -name = "plotters-backend" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e76628b4d3a7581389a35d5b6e2139607ad7c75b17aed325f210aa91f4a9609" - -[[package]] -name = "plotters-svg" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38f6d39893cca0701371e3c27294f09797214b86f1fb951b89ade8ec04e2abab" -dependencies = [ - "plotters-backend", -] - [[package]] name = "polling" version = "2.8.0" @@ -2334,15 +2181,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "polynomial" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27abb6e4638dcecc65a92b50d7f1d87dd6dea987ba71db987b6bf881f4877e9d" -dependencies = [ - "num-traits", -] - [[package]] name = "polyval" version = "0.4.5" @@ -2524,26 +2362,6 @@ dependencies = [ "rand_core 0.5.1", ] -[[package]] -name = "rayon" -version = "1.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa7237101a77a10773db45d62004a272517633fbcc3df19d96455ede1122e051" -dependencies = [ - "either", - "rayon-core", -] - -[[package]] -name = "rayon-core" -version = "1.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" -dependencies = [ - "crossbeam-deque", - "crossbeam-utils", -] - [[package]] name = "redox_syscall" version = "0.4.1" @@ -2621,7 +2439,7 @@ checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" name = "relay-server" version = "0.0.1" dependencies = [ - "hashbrown", + "hashbrown 0.14.3", ] [[package]] @@ -2665,21 +2483,6 @@ dependencies = [ "winreg", ] -[[package]] -name = "ring" -version = "0.16.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" -dependencies = [ - "cc", - "libc", - "once_cell", - "spin 0.5.2", - "untrusted 0.7.1", - "web-sys", - "winapi 0.3.9", -] - [[package]] name = "ring" version = "0.17.7" @@ -2690,7 +2493,7 @@ dependencies = [ "getrandom 0.2.12", "libc", "spin 0.9.8", - "untrusted 0.9.0", + "untrusted", "windows-sys 0.48.0", ] @@ -2718,19 +2521,6 @@ dependencies = [ "libc", ] -[[package]] -name = "rstest" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2288c66aeafe3b2ed227c981f364f9968fa952ef0b30e84ada4486e7ee24d00a" -dependencies = [ - "cfg-if 1.0.0", - "proc-macro2", - "quote", - "rustc_version 0.4.0", - "syn 1.0.109", -] - [[package]] name = "rstest" version = "0.17.0" @@ -2757,17 +2547,6 @@ dependencies = [ "unicode-ident", ] -[[package]] -name = "rstest_reuse" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32c6cfaae58c048728261723a72b80a0aa9f3768e9a7da3b302a24d262525219" -dependencies = [ - "quote", - "rustc_version 0.3.3", - "syn 1.0.109", -] - [[package]] name = "rstest_reuse" version = "0.5.0" @@ -2810,15 +2589,6 @@ dependencies = [ "semver 0.9.0", ] -[[package]] -name = "rustc_version" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" -dependencies = [ - "semver 0.11.0", -] - [[package]] name = "rustc_version" version = "0.4.0" @@ -2862,7 +2632,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba" dependencies = [ "log", - "ring 0.17.7", + "ring", "rustls-webpki", "sct", ] @@ -2882,8 +2652,8 @@ version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "ring 0.17.7", - "untrusted 0.9.0", + "ring", + "untrusted", ] [[package]] @@ -2898,15 +2668,6 @@ version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c" -[[package]] -name = "same-file" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" -dependencies = [ - "winapi-util", -] - [[package]] name = "scoped-tls" version = "1.0.1" @@ -2925,8 +2686,8 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" dependencies = [ - "ring 0.17.7", - "untrusted 0.9.0", + "ring", + "untrusted", ] [[package]] @@ -2954,16 +2715,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" dependencies = [ - "semver-parser 0.7.0", -] - -[[package]] -name = "semver" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" -dependencies = [ - "semver-parser 0.10.2", + "semver-parser", ] [[package]] @@ -2978,15 +2730,6 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" -[[package]] -name = "semver-parser" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" -dependencies = [ - "pest", -] - [[package]] name = "serde" version = "1.0.196" @@ -2996,16 +2739,6 @@ dependencies = [ "serde_derive", ] -[[package]] -name = "serde_cbor" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5" -dependencies = [ - "half", - "serde", -] - [[package]] name = "serde_derive" version = "1.0.196" @@ -3266,26 +2999,21 @@ dependencies = [ name = "stacks-common" version = "0.0.1" dependencies = [ - "assert-json-diff 1.1.0", "chrono", "curve25519-dalek 2.0.0", "ed25519-dalek", - "hashbrown", + "hashbrown 0.14.3", "lazy_static", "libc", "nix", - "percent-encoding", "rand 0.8.5", "rand_core 0.6.4", "ripemd", - "rstest 0.11.0", - "rstest_reuse 0.1.3", "rusqlite", "secp256k1", "serde", "serde_derive", "serde_json", - "serde_stacker", "sha2 0.10.8", "sha3", "slog", @@ -3305,7 +3033,7 @@ dependencies = [ "base64 0.12.3", "chrono", "clarity", - "hashbrown", + "hashbrown 0.14.3", "http-types", "lazy_static", "libc", @@ -3317,7 +3045,6 @@ dependencies = [ "rand_core 0.6.4", "regex", "reqwest", - "ring 0.16.20", "rusqlite", "serde", "serde_derive", @@ -3332,7 +3059,6 @@ dependencies = [ "tikv-jemallocator", "tiny_http", "tokio", - "toml", "tracing", "tracing-subscriber", "url", @@ -3344,14 +3070,13 @@ name = "stacks-signer" version = "0.0.1" dependencies = [ "backoff", - "clap 4.5.0", + "clap", "clarity", - "hashbrown", + 
"hashbrown 0.14.3", "lazy_static", "libsigner", "libstackerdb", "num-traits", - "polynomial", "prometheus", "rand 0.8.5", "rand_core 0.6.4", @@ -3382,10 +3107,9 @@ dependencies = [ "assert-json-diff 1.1.0", "chrono", "clarity", - "criterion", "curve25519-dalek 2.0.0", "ed25519-dalek", - "hashbrown", + "hashbrown 0.14.3", "integer-sqrt", "lazy_static", "libc", @@ -3402,8 +3126,8 @@ dependencies = [ "regex", "ripemd", "rlimit", - "rstest 0.17.0", - "rstest_reuse 0.5.0", + "rstest", + "rstest_reuse", "rusqlite", "secp256k1", "serde", @@ -3421,6 +3145,7 @@ dependencies = [ "stx-genesis", "tikv-jemallocator", "time 0.2.27", + "toml", "url", "winapi 0.3.9", ] @@ -3582,15 +3307,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "textwrap" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" -dependencies = [ - "unicode-width", -] - [[package]] name = "thiserror" version = "1.0.65" @@ -3724,16 +3440,6 @@ dependencies = [ "log", ] -[[package]] -name = "tinytemplate" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" -dependencies = [ - "serde", - "serde_json", -] - [[package]] name = "tinyvec" version = "1.6.0" @@ -3921,12 +3627,6 @@ version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" -[[package]] -name = "ucd-trie" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" - [[package]] name = "unicase" version = "2.7.0" @@ -3957,12 +3657,6 @@ dependencies = [ "tinyvec", ] -[[package]] -name = "unicode-width" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85" - [[package]] name = "universal-hash" version = "0.4.0" @@ -3973,12 +3667,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "untrusted" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" - [[package]] name = "untrusted" version = "0.9.0" @@ -4039,16 +3727,6 @@ version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3c4517f54858c779bbcbf228f4fca63d121bf85fbecb2dc578cdf4a39395690" -[[package]] -name = "walkdir" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee" -dependencies = [ - "same-file", - "winapi-util", -] - [[package]] name = "want" version = "0.3.1" @@ -4211,15 +3889,6 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" -[[package]] -name = "winapi-util" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" -dependencies = [ - "winapi 0.3.9", -] - [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" diff --git a/Cargo.toml b/Cargo.toml index c00c223c47..194e946ef4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,8 @@ rand = "0.8" rand_chacha = 
"0.3.1" tikv-jemallocator = "0.5.4" rusqlite = { version = "0.31.0", features = ["blob", "serde_json", "i128_blob", "bundled", "trace"] } -thiserror = { version = "1.0.65" } +thiserror = "1.0.65" +toml = "0.5.6" # Use a bit more than default optimization for # dev builds to speed up test execution diff --git a/README.md b/README.md index 0279b25116..5e0aa26dbe 100644 --- a/README.md +++ b/README.md @@ -82,8 +82,7 @@ cargo nextest run You can observe the state machine in action locally by running: ```bash -cd testnet/stacks-node -cargo run --bin stacks-node -- start --config ./conf/testnet-follower-conf.toml +cargo run --bin stacks-node -- start --config ./sample/conf/testnet-follower-conf.toml ``` _On Windows, many tests will fail if the line endings aren't `LF`. Please ensure that you have git's `core.autocrlf` set to `input` when you clone the repository to avoid any potential issues. This is due to the Clarity language currently being sensitive to line endings._ diff --git a/clarity/src/vm/analysis/analysis_db.rs b/clarity/src/vm/analysis/analysis_db.rs index 1bef2834a8..36e1f8c970 100644 --- a/clarity/src/vm/analysis/analysis_db.rs +++ b/clarity/src/vm/analysis/analysis_db.rs @@ -50,11 +50,11 @@ impl<'a> AnalysisDatabase<'a> { self.begin(); let result = f(self).or_else(|e| { self.roll_back() - .map_err(|e| CheckErrors::Expects(format!("{e:?}")).into())?; + .map_err(|e| CheckErrors::Expects(format!("{e:?}")))?; Err(e) })?; self.commit() - .map_err(|e| CheckErrors::Expects(format!("{e:?}")).into())?; + .map_err(|e| CheckErrors::Expects(format!("{e:?}")))?; Ok(result) } @@ -130,9 +130,9 @@ impl<'a> AnalysisDatabase<'a> { .map_err(|_| CheckErrors::Expects("Bad data deserialized from DB".into())) }) .transpose()? - .and_then(|mut x| { + .map(|mut x| { x.canonicalize_types(epoch); - Some(x) + x })) } diff --git a/clarity/src/vm/analysis/arithmetic_checker/mod.rs b/clarity/src/vm/analysis/arithmetic_checker/mod.rs index aa69f650f0..429907b4c6 100644 --- a/clarity/src/vm/analysis/arithmetic_checker/mod.rs +++ b/clarity/src/vm/analysis/arithmetic_checker/mod.rs @@ -68,7 +68,7 @@ impl std::fmt::Display for Error { } } -impl<'a> ArithmeticOnlyChecker<'a> { +impl ArithmeticOnlyChecker<'_> { pub fn check_contract_cost_eligible(contract_analysis: &mut ContractAnalysis) { let is_eligible = ArithmeticOnlyChecker::run(contract_analysis).is_ok(); contract_analysis.is_cost_contract_eligible = is_eligible; diff --git a/clarity/src/vm/analysis/contract_interface_builder/mod.rs b/clarity/src/vm/analysis/contract_interface_builder/mod.rs index 6d91f33b1c..4e0aa9a0cb 100644 --- a/clarity/src/vm/analysis/contract_interface_builder/mod.rs +++ b/clarity/src/vm/analysis/contract_interface_builder/mod.rs @@ -276,7 +276,7 @@ impl ContractInterfaceFunction { outputs: ContractInterfaceFunctionOutput { type_f: match function_type { FunctionType::Fixed(FixedFunction { returns, .. }) => { - ContractInterfaceAtomType::from_type_signature(&returns) + ContractInterfaceAtomType::from_type_signature(returns) } _ => return Err(CheckErrors::Expects( "Contract functions should only have fixed function return types!" @@ -287,7 +287,7 @@ impl ContractInterfaceFunction { }, args: match function_type { FunctionType::Fixed(FixedFunction { args, .. 
}) => { - ContractInterfaceFunctionArg::from_function_args(&args) + ContractInterfaceFunctionArg::from_function_args(args) } _ => { return Err(CheckErrors::Expects( diff --git a/clarity/src/vm/analysis/errors.rs b/clarity/src/vm/analysis/errors.rs index f86308f8d9..5c3f68c7f9 100644 --- a/clarity/src/vm/analysis/errors.rs +++ b/clarity/src/vm/analysis/errors.rs @@ -207,10 +207,10 @@ impl CheckErrors { /// Does this check error indicate that the transaction should be /// rejected? pub fn rejectable(&self) -> bool { - match &self { - CheckErrors::SupertypeTooLarge | CheckErrors::Expects(_) => true, - _ => false, - } + matches!( + self, + CheckErrors::SupertypeTooLarge | CheckErrors::Expects(_) + ) } } @@ -323,7 +323,7 @@ pub fn check_arguments_at_most(expected: usize, args: &[T]) -> Result<(), Che } } -fn formatted_expected_types(expected_types: &Vec) -> String { +fn formatted_expected_types(expected_types: &[TypeSignature]) -> String { let mut expected_types_joined = format!("'{}'", expected_types[0]); if expected_types.len() > 2 { diff --git a/clarity/src/vm/analysis/mod.rs b/clarity/src/vm/analysis/mod.rs index d563dce6e8..8dde917df9 100644 --- a/clarity/src/vm/analysis/mod.rs +++ b/clarity/src/vm/analysis/mod.rs @@ -17,7 +17,6 @@ pub mod analysis_db; pub mod arithmetic_checker; pub mod contract_interface_builder; -#[allow(clippy::result_large_err)] pub mod errors; pub mod read_only_checker; pub mod trait_checker; @@ -52,7 +51,7 @@ pub fn mem_type_check( epoch: StacksEpochId, ) -> CheckResult<(Option, ContractAnalysis)> { let contract_identifier = QualifiedContractIdentifier::transient(); - let mut contract = build_ast_with_rules( + let contract = build_ast_with_rules( &contract_identifier, snippet, &mut (), @@ -68,7 +67,7 @@ pub fn mem_type_check( let cost_tracker = LimitedCostTracker::new_free(); match run_analysis( &QualifiedContractIdentifier::transient(), - &mut contract, + &contract, &mut analysis_db, false, cost_tracker, @@ -120,6 +119,7 @@ pub fn type_check( .map_err(|(e, _cost_tracker)| e) } +#[allow(clippy::too_many_arguments)] pub fn run_analysis( contract_identifier: &QualifiedContractIdentifier, expressions: &[SymbolicExpression], diff --git a/clarity/src/vm/analysis/read_only_checker/mod.rs b/clarity/src/vm/analysis/read_only_checker/mod.rs index 006b4f0cfe..f60ce11a44 100644 --- a/clarity/src/vm/analysis/read_only_checker/mod.rs +++ b/clarity/src/vm/analysis/read_only_checker/mod.rs @@ -50,7 +50,7 @@ pub struct ReadOnlyChecker<'a, 'b> { clarity_version: ClarityVersion, } -impl<'a, 'b> AnalysisPass for ReadOnlyChecker<'a, 'b> { +impl AnalysisPass for ReadOnlyChecker<'_, '_> { fn run_pass( epoch: &StacksEpochId, contract_analysis: &mut ContractAnalysis, @@ -250,13 +250,12 @@ impl<'a, 'b> ReadOnlyChecker<'a, 'b> { Ok(result) } - /// Checks the native function application of the function named by the - /// string `function` to `args` to determine whether it is read-only - /// compliant. + /// Checks the native function application of the function named by the string `function` + /// to `args` to determine whether it is read-only compliant. /// /// - Returns `None` if there is no native function named `function`. - /// - If there is such a native function, returns `true` iff this function application is - /// read-only. + /// - If there is such a native function, returns `true` iff this function + /// application is read-only. 
diff --git a/clarity/src/vm/analysis/read_only_checker/mod.rs b/clarity/src/vm/analysis/read_only_checker/mod.rs
index 006b4f0cfe..f60ce11a44 100644
--- a/clarity/src/vm/analysis/read_only_checker/mod.rs
+++ b/clarity/src/vm/analysis/read_only_checker/mod.rs
@@ -50,7 +50,7 @@ pub struct ReadOnlyChecker<'a, 'b> {
     clarity_version: ClarityVersion,
 }

-impl<'a, 'b> AnalysisPass for ReadOnlyChecker<'a, 'b> {
+impl AnalysisPass for ReadOnlyChecker<'_, '_> {
     fn run_pass(
         epoch: &StacksEpochId,
         contract_analysis: &mut ContractAnalysis,
@@ -250,13 +250,12 @@ impl<'a, 'b> ReadOnlyChecker<'a, 'b> {
         Ok(result)
     }

-    /// Checks the native function application of the function named by the
-    /// string `function` to `args` to determine whether it is read-only
-    /// compliant.
+    /// Checks the native function application of the function named by the string `function`
+    /// to `args` to determine whether it is read-only compliant.
     ///
     /// - Returns `None` if there is no native function named `function`.
-    /// - If there is such a native function, returns `true` iff this function application is
-    ///   read-only.
+    /// - If there is such a native function, returns `true` iff this function
+    ///   application is read-only.
     ///
     /// # Errors
     /// - Contract parsing errors
@@ -414,15 +413,15 @@ impl<'a, 'b> ReadOnlyChecker<'a, 'b> {
         }
     }

-    /// Checks the native and user-defined function applications implied by `expressions`. The
-    /// first expression is used as the function name, and the tail expressions are used as the
-    /// arguments.
+    /// Checks the native and user-defined function applications implied by `expressions`.
+    ///
+    /// The first expression is used as the function name, and the tail expressions are used as the arguments.
     ///
     /// Returns `true` iff the function application is read-only.
     ///
     /// # Errors
     /// - `CheckErrors::NonFunctionApplication` if there is no first expression, or if the first
-    /// expression is not a `ClarityName`.
+    ///   expression is not a `ClarityName`.
     /// - `CheckErrors::UnknownFunction` if the first expression does not name a known function.
     fn check_expression_application_is_read_only(
         &mut self,
diff --git a/clarity/src/vm/analysis/type_checker/contexts.rs b/clarity/src/vm/analysis/type_checker/contexts.rs
index 936cc47bc4..356ebf5944 100644
--- a/clarity/src/vm/analysis/type_checker/contexts.rs
+++ b/clarity/src/vm/analysis/type_checker/contexts.rs
@@ -92,7 +92,7 @@ impl TypeMap {
     }
 }

-impl<'a> TypingContext<'a> {
+impl TypingContext<'_> {
     pub fn new(epoch: StacksEpochId, clarity_version: ClarityVersion) -> TypingContext<'static> {
         TypingContext {
             epoch,
diff --git a/clarity/src/vm/analysis/type_checker/mod.rs b/clarity/src/vm/analysis/type_checker/mod.rs
index b4f6557c2e..36aa2519cc 100644
--- a/clarity/src/vm/analysis/type_checker/mod.rs
+++ b/clarity/src/vm/analysis/type_checker/mod.rs
@@ -55,7 +55,7 @@ impl FunctionType {
             | StacksEpochId::Epoch30
             | StacksEpochId::Epoch31 => self.check_args_2_1(accounting, args, clarity_version),
             StacksEpochId::Epoch10 => {
-                return Err(CheckErrors::Expects("Epoch10 is not supported".into()).into())
+                Err(CheckErrors::Expects("Epoch10 is not supported".into()).into())
             }
         }
     }
@@ -81,17 +81,14 @@ impl FunctionType {
                 self.check_args_by_allowing_trait_cast_2_1(db, clarity_version, func_args)
             }
             StacksEpochId::Epoch10 => {
-                return Err(CheckErrors::Expects("Epoch10 is not supported".into()).into())
+                Err(CheckErrors::Expects("Epoch10 is not supported".into()).into())
             }
         }
     }
 }

 fn is_reserved_word_v3(word: &str) -> bool {
-    match word {
-        "block-height" => true,
-        _ => false,
-    }
+    word == "block-height"
 }

 /// Is this a reserved word that should trigger an analysis error for the given
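The `is_reserved_word_v3` and `rejectable` hunks nearby collapse single-arm `match` expressions into a direct comparison and a `matches!` call. Both rewrites are behavior-preserving; a minimal sketch of the equivalence:

```rust
enum Word {
    BlockHeight,
    Other,
}

/// Before: a match whose arms only produce `true`/`false`.
fn is_reserved_before(w: &Word) -> bool {
    match w {
        Word::BlockHeight => true,
        _ => false,
    }
}

/// After: the same predicate as one expression. With several variants,
/// `matches!(w, A | B)` keeps the or-pattern form the `rejectable` hunk uses.
fn is_reserved_after(w: &Word) -> bool {
    matches!(w, Word::BlockHeight)
}

fn main() {
    for w in [Word::BlockHeight, Word::Other] {
        assert_eq!(is_reserved_before(&w), is_reserved_after(&w));
    }
}
```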
diff --git a/clarity/src/vm/analysis/type_checker/v2_05/mod.rs b/clarity/src/vm/analysis/type_checker/v2_05/mod.rs
index 2b913a3ac9..77083b88cf 100644
--- a/clarity/src/vm/analysis/type_checker/v2_05/mod.rs
+++ b/clarity/src/vm/analysis/type_checker/v2_05/mod.rs
@@ -239,10 +239,7 @@ impl FunctionType {
                 Ok(TypeSignature::BoolType)
             }
             FunctionType::Binary(_, _, _) => {
-                return Err(CheckErrors::Expects(
-                    "Binary type should not be reached in 2.05".into(),
-                )
-                .into())
+                Err(CheckErrors::Expects("Binary type should not be reached in 2.05".into()).into())
             }
         }
     }
@@ -286,8 +283,8 @@ impl FunctionType {
                 )?;
             }
             (expected_type, value) => {
-                if !expected_type.admits(&StacksEpochId::Epoch2_05, &value)? {
-                    let actual_type = TypeSignature::type_of(&value)?;
+                if !expected_type.admits(&StacksEpochId::Epoch2_05, value)? {
+                    let actual_type = TypeSignature::type_of(value)?;
                     return Err(
                         CheckErrors::TypeError(expected_type.clone(), actual_type).into()
                     );
@@ -438,41 +435,39 @@ impl<'a, 'b> TypeChecker<'a, 'b> {
         context: &TypingContext,
         expected_type: &TypeSignature,
     ) -> TypeResult {
-        match (&expr.expr, expected_type) {
-            (
-                LiteralValue(Value::Principal(PrincipalData::Contract(ref contract_identifier))),
-                TypeSignature::TraitReferenceType(trait_identifier),
-            ) => {
-                let contract_to_check = self
-                    .db
-                    .load_contract(&contract_identifier, &StacksEpochId::Epoch2_05)?
-                    .ok_or(CheckErrors::NoSuchContract(contract_identifier.to_string()))?;
-
-                let contract_defining_trait = self
-                    .db
-                    .load_contract(
-                        &trait_identifier.contract_identifier,
-                        &StacksEpochId::Epoch2_05,
-                    )?
-                    .ok_or(CheckErrors::NoSuchContract(
-                        trait_identifier.contract_identifier.to_string(),
-                    ))?;
-
-                let trait_definition = contract_defining_trait
-                    .get_defined_trait(&trait_identifier.name)
-                    .ok_or(CheckErrors::NoSuchTrait(
-                        trait_identifier.contract_identifier.to_string(),
-                        trait_identifier.name.to_string(),
-                    ))?;
-
-                contract_to_check.check_trait_compliance(
+        if let (
+            LiteralValue(Value::Principal(PrincipalData::Contract(ref contract_identifier))),
+            TypeSignature::TraitReferenceType(trait_identifier),
+        ) = (&expr.expr, expected_type)
+        {
+            let contract_to_check = self
+                .db
+                .load_contract(contract_identifier, &StacksEpochId::Epoch2_05)?
+                .ok_or(CheckErrors::NoSuchContract(contract_identifier.to_string()))?;
+
+            let contract_defining_trait = self
+                .db
+                .load_contract(
+                    &trait_identifier.contract_identifier,
                     &StacksEpochId::Epoch2_05,
-                    trait_identifier,
-                    trait_definition,
-                )?;
-                return Ok(expected_type.clone());
-            }
-            (_, _) => {}
+                )?
+                .ok_or(CheckErrors::NoSuchContract(
+                    trait_identifier.contract_identifier.to_string(),
+                ))?;
+
+            let trait_definition = contract_defining_trait
+                .get_defined_trait(&trait_identifier.name)
+                .ok_or(CheckErrors::NoSuchTrait(
+                    trait_identifier.contract_identifier.to_string(),
+                    trait_identifier.name.to_string(),
+                ))?;
+
+            contract_to_check.check_trait_compliance(
+                &StacksEpochId::Epoch2_05,
+                trait_identifier,
+                trait_definition,
+            )?;
+            return Ok(expected_type.clone());
         }

         let actual_type = self.type_check(expr, context)?;
diff --git a/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs b/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs
index 201c307986..3c5ab99029 100644
--- a/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs
+++ b/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs
@@ -776,8 +776,7 @@ impl TypedNativeFunction {
             | ReplaceAt | GetStacksBlockInfo | GetTenureInfo => {
                 return Err(CheckErrors::Expects(
                     "Clarity 2+ keywords should not show up in 2.05".into(),
-                )
-                .into())
+                ))
             }
         };
diff --git a/clarity/src/vm/analysis/type_checker/v2_1/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/mod.rs
index 7caf775c19..7899b3e27d 100644
--- a/clarity/src/vm/analysis/type_checker/v2_1/mod.rs
+++ b/clarity/src/vm/analysis/type_checker/v2_1/mod.rs
@@ -247,7 +247,7 @@ impl FunctionType {
                         Err(CheckErrors::IncorrectArgumentCount(arg_types.len(), arg_index).into()),
                     );
                 }
-                return (None, Ok(None));
+                (None, Ok(None))
             }
             // For the following function types, the visitor will just
             // tell the processor that any results greater than len 1 or 2
@@ -260,7 +260,7 @@ impl FunctionType {
                         Err(CheckErrors::IncorrectArgumentCount(1, arg_index).into()),
                     );
                 }
-                return (None, Ok(None));
+                (None, Ok(None))
             }
             FunctionType::ArithmeticBinary
             | FunctionType::ArithmeticComparison
@@ -271,7 +271,7 @@ impl FunctionType {
                         Err(CheckErrors::IncorrectArgumentCount(2, arg_index).into()),
                     );
                 }
-                return (None, Ok(None));
+                (None, Ok(None))
             }
         }
     }
@@ -576,8 +576,8 @@ impl FunctionType {
                 )?;
             }
             (expected_type, value) => {
-                if !expected_type.admits(&StacksEpochId::Epoch21, &value)? {
-                    let actual_type = TypeSignature::type_of(&value)?;
+                if !expected_type.admits(&StacksEpochId::Epoch21, value)? {
+                    let actual_type = TypeSignature::type_of(value)?;
                     return Err(
                         CheckErrors::TypeError(expected_type.clone(), actual_type).into()
                     );
@@ -854,7 +854,7 @@ fn clarity2_inner_type_check_type(
         TypeSignature::CallableType(CallableSubtype::Trait(expected_trait_id)),
     ) => {
         let contract_to_check = match db
-            .load_contract(&contract_identifier, &StacksEpochId::Epoch21)?
+            .load_contract(contract_identifier, &StacksEpochId::Epoch21)?
         {
             Some(contract) => {
                 runtime_cost(
@@ -1014,7 +1014,7 @@ impl<'a, 'b> TypeChecker<'a, 'b> {
         build_type_map: bool,
     ) -> TypeChecker<'a, 'b> {
         Self {
-            epoch: epoch.clone(),
+            epoch: *epoch,
             db,
             cost_track,
             contract_context: ContractContext::new(contract_identifier.clone(), *clarity_version),
@@ -1240,6 +1240,7 @@ impl<'a, 'b> TypeChecker<'a, 'b> {
             .cloned()
     }

+    #[allow(clippy::unnecessary_lazy_evaluations)]
    fn type_check_define_function(
         &mut self,
         signature: &[SymbolicExpression],
@@ -1440,41 +1441,39 @@ impl<'a, 'b> TypeChecker<'a, 'b> {
         context: &TypingContext,
         expected_type: &TypeSignature,
     ) -> TypeResult {
-        match (&expr.expr, expected_type) {
-            (
-                LiteralValue(Value::Principal(PrincipalData::Contract(ref contract_identifier))),
-                TypeSignature::CallableType(CallableSubtype::Trait(trait_identifier)),
-            ) => {
-                let contract_to_check = self
-                    .db
-                    .load_contract(&contract_identifier, &StacksEpochId::Epoch21)?
-                    .ok_or(CheckErrors::NoSuchContract(contract_identifier.to_string()))?;
-
-                let contract_defining_trait = self
-                    .db
-                    .load_contract(
-                        &trait_identifier.contract_identifier,
-                        &StacksEpochId::Epoch21,
-                    )?
-                    .ok_or(CheckErrors::NoSuchContract(
-                        trait_identifier.contract_identifier.to_string(),
-                    ))?;
-
-                let trait_definition = contract_defining_trait
-                    .get_defined_trait(&trait_identifier.name)
-                    .ok_or(CheckErrors::NoSuchTrait(
-                        trait_identifier.contract_identifier.to_string(),
-                        trait_identifier.name.to_string(),
-                    ))?;
-
-                contract_to_check.check_trait_compliance(
+        if let (
+            LiteralValue(Value::Principal(PrincipalData::Contract(ref contract_identifier))),
+            TypeSignature::CallableType(CallableSubtype::Trait(trait_identifier)),
+        ) = (&expr.expr, expected_type)
+        {
+            let contract_to_check = self
+                .db
+                .load_contract(contract_identifier, &StacksEpochId::Epoch21)?
+                .ok_or(CheckErrors::NoSuchContract(contract_identifier.to_string()))?;
+
+            let contract_defining_trait = self
+                .db
+                .load_contract(
+                    &trait_identifier.contract_identifier,
                     &StacksEpochId::Epoch21,
-                    trait_identifier,
-                    &trait_definition,
-                )?;
-                return Ok(expected_type.clone());
-            }
-            (_, _) => {}
+                )?
+                .ok_or(CheckErrors::NoSuchContract(
+                    trait_identifier.contract_identifier.to_string(),
+                ))?;
+
+            let trait_definition = contract_defining_trait
+                .get_defined_trait(&trait_identifier.name)
+                .ok_or(CheckErrors::NoSuchTrait(
+                    trait_identifier.contract_identifier.to_string(),
+                    trait_identifier.name.to_string(),
+                ))?;
+
+            contract_to_check.check_trait_compliance(
+                &StacksEpochId::Epoch21,
+                trait_identifier,
+                trait_definition,
+            )?;
+            return Ok(expected_type.clone());
         }

         let actual_type = self.type_check(expr, context)?;
diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/conversions.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/conversions.rs
index 9876062241..95fe6f9bf9 100644
--- a/clarity/src/vm/analysis/type_checker/v2_1/natives/conversions.rs
+++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/conversions.rs
@@ -7,9 +7,10 @@ use crate::vm::analysis::CheckError;
 use crate::vm::types::{BufferLength, SequenceSubtype, TypeSignature};
 use crate::vm::SymbolicExpression;

-/// to-consensus-buff? admits exactly one argument:
-/// * the Clarity value to serialize
-/// it returns an `(optional (buff x))` where `x` is the maximum possible
+/// `to-consensus-buff?` admits exactly one argument:
+///   * the Clarity value to serialize
+///
+/// It returns an `(optional (buff x))`, where `x` is the maximum possible
 /// consensus buffer length based on the inferred type of the supplied value.
 pub fn check_special_to_consensus_buff(
     checker: &mut TypeChecker,
@@ -25,10 +26,11 @@ pub fn check_special_to_consensus_buff(
         .map_err(CheckError::from)
 }

-/// from-consensus-buff? admits exactly two arguments:
-/// * a type signature indicating the expected return type `t1`
-/// * a buffer (of up to max length)
-/// it returns an `(optional t1)`
+/// `from-consensus-buff?` admits exactly two arguments:
+///   * a type signature indicating the expected return type `t1`
+///   * a buffer (of up to max length)
+///
+/// It returns an `(optional t1)`
 pub fn check_special_from_consensus_buff(
     checker: &mut TypeChecker,
     args: &[SymbolicExpression],
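The doc comments above pin down the shape of the consensus-buffer conversions: `to-consensus-buff?` yields `(optional (buff x))` with `x` derived from the input type, and `from-consensus-buff?` yields `(optional t1)`. A toy sketch of that "serialize only if it fits the type bound" contract — the byte encoding here is invented, not Clarity's consensus serialization:

```rust
/// Toy stand-in for a Clarity value whose type implies a size bound.
enum Val {
    UInt(u128),
    Buff(Vec<u8>),
}

/// Serialize only when the result fits the type's maximum size, returning
/// `None` otherwise -- the shape of `to-consensus-buff?`'s
/// `(optional (buff x))` result.
fn to_buff(v: &Val, max_len: usize) -> Option<Vec<u8>> {
    let bytes = match v {
        Val::UInt(n) => n.to_be_bytes().to_vec(),
        Val::Buff(b) => b.clone(),
    };
    (bytes.len() <= max_len).then_some(bytes)
}

fn main() {
    assert!(to_buff(&Val::UInt(1), 16).is_some());
    assert!(to_buff(&Val::Buff(vec![0u8; 32]), 16).is_none());
}
```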
diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs
index b576277a5b..7769652d25 100644
--- a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs
+++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs
@@ -79,7 +79,7 @@ fn check_special_list_cons(
         });
         costs.push(cost);

-        if let Some(cur_size) = entries_size.clone() {
+        if let Some(cur_size) = entries_size {
             entries_size = cur_size.checked_add(checked.size()?);
         }
         if let Some(cur_size) = entries_size {
@@ -263,6 +263,7 @@ pub fn check_special_tuple_cons(
     Ok(TypeSignature::TupleType(tuple_signature))
 }

+#[allow(clippy::unnecessary_lazy_evaluations)]
 fn check_special_let(
     checker: &mut TypeChecker,
     args: &[SymbolicExpression],
@@ -1016,7 +1017,7 @@ impl TypedNativeFunction {
                 /// The return type of `principal-destruct` is a Response, in which the success
                 /// and error types are the same.
                 fn parse_principal_basic_type() -> Result<TupleTypeSignature, CheckErrors> {
-                    Ok(TupleTypeSignature::try_from(vec![
+                    TupleTypeSignature::try_from(vec![
                         ("version".into(), BUFF_1.clone()),
                         ("hash-bytes".into(), BUFF_20.clone()),
                         (
@@ -1032,7 +1033,7 @@ impl TypedNativeFunction {
                             "FAIL: PrincipalDestruct failed to initialize type signature"
                                 .into(),
                         )
-                    })?)
+                    })
                 }
                 TypeSignature::ResponseType(Box::new((
                     parse_principal_basic_type()?.into(),
diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/options.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/options.rs
index 6a097a8cd6..772bdd32a4 100644
--- a/clarity/src/vm/analysis/type_checker/v2_1/natives/options.rs
+++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/options.rs
@@ -274,6 +274,7 @@ pub fn check_special_unwrap_err(
     inner_unwrap_err(input, checker)
 }

+#[allow(clippy::unnecessary_lazy_evaluations)]
 fn eval_with_new_binding(
     body: &SymbolicExpression,
     bind_name: ClarityName,
diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs
index 12597c88fa..498b52dcb0 100644
--- a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs
+++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs
@@ -3411,7 +3411,7 @@ fn test_trait_args() {
         },
         TraitIdentifier {
             name: ClarityName::from("trait-bar"),
-            contract_identifier: contract_identifier,
+            contract_identifier,
         },
     )];
diff --git a/clarity/src/vm/ast/definition_sorter/mod.rs b/clarity/src/vm/ast/definition_sorter/mod.rs
index a5a551298c..bd611851b6 100644
--- a/clarity/src/vm/ast/definition_sorter/mod.rs
+++ b/clarity/src/vm/ast/definition_sorter/mod.rs
@@ -420,7 +420,7 @@ impl Graph {
         let list = self
             .adjacency_list
             .get_mut(src_expr_index)
-            .ok_or_else(|| ParseErrors::InterpreterFailure)?;
+            .ok_or(ParseErrors::InterpreterFailure)?;
         list.push(dst_expr_index);
         Ok(())
     }
@@ -491,7 +491,7 @@ impl GraphWalker {
     fn get_cycling_dependencies(
         &mut self,
         graph: &Graph,
-        sorted_indexes: &Vec<usize>,
+        sorted_indexes: &[usize],
     ) -> Option<Vec<usize>> {
         let mut tainted: HashSet<usize> = HashSet::new();
diff --git a/clarity/src/vm/ast/errors.rs b/clarity/src/vm/ast/errors.rs
index c1a0914b5f..56f8e40f86 100644
--- a/clarity/src/vm/ast/errors.rs
+++ b/clarity/src/vm/ast/errors.rs
@@ -113,10 +113,7 @@ impl ParseError {
     }

     pub fn rejectable(&self) -> bool {
-        match self.err {
-            ParseErrors::InterpreterFailure => true,
-            _ => false,
-        }
+        matches!(self.err, ParseErrors::InterpreterFailure)
     }

     pub fn has_pre_expression(&self) -> bool {
diff --git a/clarity/src/vm/ast/parser/v1.rs b/clarity/src/vm/ast/parser/v1.rs
index 5c2715e9f7..4cef2e5411 100644
--- a/clarity/src/vm/ast/parser/v1.rs
+++ b/clarity/src/vm/ast/parser/v1.rs
@@ -219,9 +219,7 @@ fn inner_lex(input: &str, max_nesting: u64) -> ParseResult {
         if !args.is_empty() {
             self.probe_for_generics(
-                args[1..].to_vec().into_iter(),
+                args[1..].iter().copied(),
                 &mut referenced_traits,
                 false,
             )?;
diff --git a/clarity/src/vm/ast/types.rs b/clarity/src/vm/ast/types.rs
index aedd31eae3..2071130131 100644
--- a/clarity/src/vm/ast/types.rs
+++ b/clarity/src/vm/ast/types.rs
@@ -96,6 +96,10 @@ impl PreExpressionsDrain {
     pub fn len(&self) -> usize {
         self.len
     }
+
+    pub fn is_empty(&self) -> bool {
+        self.len == 0
+    }
 }

 impl Iterator for PreExpressionsDrain {
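The trait-probing hunk above swaps `args[1..].to_vec().into_iter()` for `args[1..].iter().copied()`, iterating the borrowed slice instead of cloning it into a temporary `Vec`. A small sketch of why the two are interchangeable for a by-value iterator consumer:

```rust
fn sum(items: impl Iterator<Item = u32>) -> u32 {
    items.sum()
}

fn main() {
    let args = vec![1u32, 2, 3, 4];

    // Before: clone the tail of the slice into a fresh Vec just to iterate it.
    let allocating = sum(args[1..].to_vec().into_iter());

    // After: iterate the borrowed slice, copying each element on the fly.
    let borrowed = sum(args[1..].iter().copied());

    assert_eq!(allocating, borrowed);
}
```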
diff --git a/clarity/src/vm/callables.rs b/clarity/src/vm/callables.rs
index 9cd991ec97..4691025a8d 100644
--- a/clarity/src/vm/callables.rs
+++ b/clarity/src/vm/callables.rs
@@ -37,6 +37,7 @@ use crate::vm::types::{
 };
 use crate::vm::{eval, Environment, LocalContext, Value};

+#[allow(clippy::type_complexity)]
 pub enum CallableType {
     UserFunction(DefinedFunction),
     NativeFunction(&'static str, NativeHandle, ClarityCostFunction),
@@ -244,7 +245,11 @@ impl DefinedFunction {
                 )
                 .into());
             }
-            if let Some(_) = context.variables.insert(name.clone(), value.clone()) {
+            if context
+                .variables
+                .insert(name.clone(), value.clone())
+                .is_some()
+            {
                 return Err(CheckErrors::NameAlreadyUsed(name.to_string()).into());
             }
         }
@@ -286,7 +291,7 @@ impl DefinedFunction {
                 }
             }

-            if let Some(_) = context.variables.insert(name.clone(), cast_value) {
+            if context.variables.insert(name.clone(), cast_value).is_some() {
                 return Err(CheckErrors::NameAlreadyUsed(name.to_string()).into());
             }
         }
@@ -323,7 +328,7 @@ impl DefinedFunction {
                 self.name.to_string(),
             ))?;

-        let args = self.arg_types.iter().map(|a| a.clone()).collect();
+        let args = self.arg_types.to_vec();
         if !expected_sig.check_args_trait_compliance(epoch, args)? {
             return Err(
                 CheckErrors::BadTraitImplementation(trait_name, self.name.to_string()).into(),
@@ -393,16 +398,12 @@ impl CallableType {
 }

 impl FunctionIdentifier {
     fn new_native_function(name: &str) -> FunctionIdentifier {
         let identifier = format!("_native_:{}", name);
-        FunctionIdentifier {
-            identifier: identifier,
-        }
+        FunctionIdentifier { identifier }
     }

     fn new_user_function(name: &str, context: &str) -> FunctionIdentifier {
         let identifier = format!("{}:{}", context, name);
-        FunctionIdentifier {
-            identifier: identifier,
-        }
+        FunctionIdentifier { identifier }
     }
 }

@@ -636,12 +637,9 @@ mod test {
     let cast_list = clarity2_implicit_cast(&list_opt_ty, &list_opt_contract).unwrap();
     let items = cast_list.expect_list().unwrap();
     for item in items {
-        match item.expect_optional().unwrap() {
-            Some(cast_opt) => {
-                let cast_trait = cast_opt.expect_callable().unwrap();
-                assert_eq!(&cast_trait.trait_identifier.unwrap(), &trait_identifier);
-            }
-            None => (),
+        if let Some(cast_opt) = item.expect_optional().unwrap() {
+            let cast_trait = cast_opt.expect_callable().unwrap();
+            assert_eq!(&cast_trait.trait_identifier.unwrap(), &trait_identifier);
         }
     }
diff --git a/clarity/src/vm/clarity.rs b/clarity/src/vm/clarity.rs
index 11145ab11a..1e503d1425 100644
--- a/clarity/src/vm/clarity.rs
+++ b/clarity/src/vm/clarity.rs
@@ -113,6 +113,7 @@ pub trait ClarityConnection {
         self.with_clarity_db_readonly_owned(|mut db| (to_do(&mut db), db))
     }

+    #[allow(clippy::too_many_arguments)]
     fn with_readonly_clarity_env(
         &mut self,
         mainnet: bool,
@@ -151,12 +152,15 @@ pub trait ClarityConnection {
 pub trait TransactionConnection: ClarityConnection {
     /// Do something with this connection's Clarity environment that can be aborted
-    /// with `abort_call_back`.
+    /// with `abort_call_back`.
+    ///
     /// This returns the return value of `to_do`:
-    /// * the generic term `R`
-    /// * the asset changes during `to_do` in an `AssetMap`
-    /// * the Stacks events during the transaction
-    /// and a `bool` value which is `true` if the `abort_call_back` caused the changes to abort
+    ///   * the generic term `R`
+    ///   * the asset changes during `to_do` in an `AssetMap`
+    ///   * the Stacks events during the transaction
+    ///
+    /// and a `bool` value which is `true` if the `abort_call_back` caused the changes to abort.
+    ///
     /// If `to_do` returns an `Err` variant, then the changes are aborted.
     fn with_abort_callback(
         &mut self,
@@ -197,14 +201,14 @@ pub trait TransactionConnection: ClarityConnection {
                 ast_rules,
             );

-            let mut contract_ast = match ast_result {
+            let contract_ast = match ast_result {
                 Ok(x) => x,
                 Err(e) => return (cost_track, Err(e.into())),
             };

             let result = analysis::run_analysis(
                 identifier,
-                &mut contract_ast.expressions,
+                &contract_ast.expressions,
                 db,
                 false,
                 cost_track,
@@ -272,7 +276,7 @@ pub trait TransactionConnection: ClarityConnection {
             },
             |_, _| false,
         )
-        .and_then(|(value, assets, events, _)| Ok((value, assets, events)))
+        .map(|(value, assets, events, _)| (value, assets, events))
     }

     /// Execute a contract call in the current block.
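The rewritten `with_abort_callback` doc comment spells out a four-part result: the closure's value, the asset changes in an `AssetMap`, the Stacks events, and a flag recording whether `abort_call_back` vetoed the changes. A toy sketch of that calling convention (invented types; the real trait threads a Clarity transaction through and actually rolls back storage):

```rust
/// Toy stand-ins for the asset map and event list described above.
type Assets = Vec<String>;
type Events = Vec<String>;

/// Run `to_do`, then let `abort` inspect the side effects and decide whether
/// they should be rolled back. The trailing `bool` is `true` when `abort`
/// vetoed the changes; an `Err` from `to_do` also aborts.
fn with_abort_callback<R, E>(
    to_do: impl FnOnce() -> Result<(R, Assets, Events), E>,
    abort: impl FnOnce(&Assets) -> bool,
) -> Result<(R, Assets, Events, bool), E> {
    let (value, assets, events) = to_do()?;
    let aborted = abort(&assets);
    // A real implementation would commit or roll back storage here.
    Ok((value, assets, events, aborted))
}

fn main() {
    let result: Result<(u32, Assets, Events, bool), ()> = with_abort_callback(
        || Ok((42, vec!["alice spent 10".to_string()], Vec::new())),
        |assets| assets.len() > 5, // abort on suspiciously many transfers
    );
    let (value, _assets, _events, aborted) = result.unwrap();
    assert_eq!(value, 42);
    assert!(!aborted);
}
```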
-                write!(f, "{} transfered [", principal)?;
+                write!(f, "{principal} transfered [")?;
                 for t in transfer {
-                    write!(f, "{}, ", t)?;
+                    write!(f, "{t}, ")?;
                 }
-                write!(f, "] {}\n", asset)?;
+                writeln!(f, "] {asset}")?;
             }
         }
         for (principal, stx_amount) in self.stx_map.iter() {
-            write!(f, "{} spent {} microSTX\n", principal, stx_amount)?;
+            writeln!(f, "{principal} spent {stx_amount} microSTX")?;
         }
         for (principal, stx_burn_amount) in self.burn_map.iter() {
-            write!(f, "{} burned {} microSTX\n", principal, stx_burn_amount)?;
+            writeln!(f, "{principal} burned {stx_burn_amount} microSTX")?;
         }
         write!(f, "]")
     }
@@ -493,13 +486,13 @@ impl fmt::Display for AssetMap {

 impl EventBatch {
     pub fn new() -> EventBatch {
-        EventBatch { events: vec![] }
+        EventBatch::default()
     }
 }

 impl<'a, 'hooks> OwnedEnvironment<'a, 'hooks> {
     #[cfg(any(test, feature = "testing"))]
-    pub fn new(database: ClarityDatabase<'a>, epoch: StacksEpochId) -> OwnedEnvironment<'a, '_> {
+    pub fn new(database: ClarityDatabase<'a>, epoch: StacksEpochId) -> OwnedEnvironment<'a, 'a> {
         OwnedEnvironment {
             context: GlobalContext::new(
                 false,
@@ -513,7 +506,7 @@ impl<'a, 'hooks> OwnedEnvironment<'a, 'hooks> {
     }

     #[cfg(any(test, feature = "testing"))]
-    pub fn new_toplevel(mut database: ClarityDatabase<'a>) -> OwnedEnvironment<'a, '_> {
+    pub fn new_toplevel(mut database: ClarityDatabase<'a>) -> OwnedEnvironment<'a, 'a> {
         database.begin();
         let epoch = database.get_clarity_epoch_version().unwrap();
         let version = ClarityVersion::default_for_epoch(epoch);
@@ -540,7 +533,7 @@ impl<'a, 'hooks> OwnedEnvironment<'a, 'hooks> {
         mut database: ClarityDatabase<'a>,
         epoch: StacksEpochId,
         use_mainnet: bool,
-    ) -> OwnedEnvironment<'a, '_> {
+    ) -> OwnedEnvironment<'a, 'a> {
         use crate::vm::tests::test_only_mainnet_to_chain_id;
         let cost_track = LimitedCostTracker::new_max_limit(&mut database, epoch, use_mainnet)
             .expect("FAIL: problem instantiating cost tracking");
@@ -557,7 +550,7 @@ impl<'a, 'hooks> OwnedEnvironment<'a, 'hooks> {
         chain_id: u32,
         database: ClarityDatabase<'a>,
         epoch_id: StacksEpochId,
-    ) -> OwnedEnvironment<'a, '_> {
+    ) -> OwnedEnvironment<'a, 'a> {
         OwnedEnvironment {
             context: GlobalContext::new(
                 mainnet,
@@ -576,7 +569,7 @@ impl<'a, 'hooks> OwnedEnvironment<'a, 'hooks> {
         database: ClarityDatabase<'a>,
         cost_tracker: LimitedCostTracker,
         epoch_id: StacksEpochId,
-    ) -> OwnedEnvironment<'a, '_> {
+    ) -> OwnedEnvironment<'a, 'a> {
         OwnedEnvironment {
             context: GlobalContext::new(mainnet, chain_id, database, cost_tracker, epoch_id),
             call_stack: CallStack::new(),
@@ -614,12 +607,11 @@ impl<'a, 'hooks> OwnedEnvironment<'a, 'hooks> {
         self.begin();

         let result = {
-            let mut initial_context = initial_context.unwrap_or(ContractContext::new(
+            let initial_context = initial_context.unwrap_or(ContractContext::new(
                 QualifiedContractIdentifier::transient(),
                 ClarityVersion::Clarity1,
             ));
-            let mut exec_env =
-                self.get_exec_environment(Some(sender), sponsor, &mut initial_context);
+            let mut exec_env = self.get_exec_environment(Some(sender), sponsor, &initial_context);
             f(&mut exec_env)
         };
@@ -737,7 +729,7 @@ impl<'a, 'hooks> OwnedEnvironment<'a, 'hooks> {
                 let mut snapshot = env
                     .global_context
                     .database
-                    .get_stx_balance_snapshot(&recipient)
+                    .get_stx_balance_snapshot(recipient)
                     .unwrap();

                 snapshot.credit(amount).unwrap();
@@ -949,7 +941,7 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> {
         program: &str,
         rules: ast::ASTRules,
     ) -> Result<Value> {
-        let clarity_version = self.contract_context.clarity_version.clone();
+        let clarity_version = self.contract_context.clarity_version;

         let parsed = ast::build_ast_with_rules(
             contract_identifier,
@@ -961,7 +953,7 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> {
         )?
         .expressions;

-        if parsed.len() < 1 {
+        if parsed.is_empty() {
             return Err(RuntimeErrorType::ParseError(
                 "Expected a program of at least length 1".to_string(),
             )
@@ -981,7 +973,7 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> {

         let result = {
             let mut nested_env = Environment::new(
-                &mut self.global_context,
+                self.global_context,
                 &contract.contract_context,
                 self.call_stack,
                 self.sender.clone(),
@@ -1008,7 +1000,7 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> {

     pub fn eval_raw_with_rules(&mut self, program: &str, rules: ast::ASTRules) -> Result<Value> {
         let contract_id = QualifiedContractIdentifier::transient();
-        let clarity_version = self.contract_context.clarity_version.clone();
+        let clarity_version = self.contract_context.clarity_version;

         let parsed = ast::build_ast_with_rules(
             &contract_id,
@@ -1020,15 +1012,14 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> {
         )?
         .expressions;

-        if parsed.len() < 1 {
+        if parsed.is_empty() {
             return Err(RuntimeErrorType::ParseError(
                 "Expected a program of at least length 1".to_string(),
             )
             .into());
         }
         let local_context = LocalContext::new();
-        let result = { eval(&parsed[0], self, &local_context) };
-        result
+        eval(&parsed[0], self, &local_context)
     }

     #[cfg(any(test, feature = "testing"))]
@@ -1150,7 +1141,7 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> {
             Ok(value) => {
                 if let Some(handler) = self.global_context.database.get_cc_special_cases_handler() {
                     handler(
-                        &mut self.global_context,
+                        self.global_context,
                         self.sender.as_ref(),
                         self.sponsor.as_ref(),
                         contract_identifier,
@@ -1185,7 +1176,7 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> {

         let result = {
             let mut nested_env = Environment::new(
-                &mut self.global_context,
+                self.global_context,
                 next_contract_context,
                 self.call_stack,
                 self.sender.clone(),
@@ -1240,7 +1231,7 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> {
         contract_content: &str,
         ast_rules: ASTRules,
     ) -> Result<()> {
-        let clarity_version = self.contract_context.clarity_version.clone();
+        let clarity_version = self.contract_context.clarity_version;

         let contract_ast = ast::build_ast_with_rules(
             &contract_identifier,
@@ -1254,7 +1245,7 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> {
             contract_identifier,
             clarity_version,
             &contract_ast,
-            &contract_content,
+            contract_content,
         )
     }

@@ -1299,7 +1290,7 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> {
             contract_identifier.clone(),
             contract_content,
             self.sponsor.clone(),
-            &mut self.global_context,
+            self.global_context,
             contract_version,
         );
         self.drop_memory(memory_use)?;
@@ -1546,7 +1537,7 @@ impl<'a, 'hooks> GlobalContext<'a, 'hooks> {
         database: ClarityDatabase<'a>,
         cost_track: LimitedCostTracker,
         epoch_id: StacksEpochId,
-    ) -> GlobalContext {
+    ) -> GlobalContext<'a, 'hooks> {
         GlobalContext {
             database,
             cost_track,
@@ -1561,7 +1552,7 @@ impl<'a, 'hooks> GlobalContext<'a, 'hooks> {
     }

     pub fn is_top_level(&self) -> bool {
-        self.asset_maps.len() == 0
+        self.asset_maps.is_empty()
     }

     fn get_asset_map(&mut self) -> Result<&mut AssetMap> {
@@ -1841,6 +1832,12 @@ impl ContractContext {
     }
 }

+impl Default for LocalContext<'_> {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
 impl<'a> LocalContext<'a> {
     pub fn new() -> LocalContext<'a> {
         LocalContext {
@@ -1898,6 +1895,12 @@ impl<'a> LocalContext<'a> {
     }
 }

+impl Default for CallStack {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
 impl CallStack {
     pub fn new() -> CallStack {
         CallStack {
@@ -1946,10 +1949,10 @@ impl CallStack {
             }
             Ok(())
         } else {
-            return Err(InterpreterError::InterpreterError(
+            Err(InterpreterError::InterpreterError(
                 "Tried to remove item from empty call stack.".to_string(),
             )
-            .into());
+            .into())
         }
     }

@@ -2149,8 +2152,8 @@ mod test {
         // not simply rollback the tx and squelch the error as includable.
         let e = env
             .stx_transfer(
-                &PrincipalData::from(u1.clone()),
-                &PrincipalData::from(u2.clone()),
+                &PrincipalData::from(u1),
+                &PrincipalData::from(u2),
                 1000,
                 &BuffData::empty(),
             )
diff --git a/clarity/src/vm/costs/mod.rs b/clarity/src/vm/costs/mod.rs
index 2113ec6479..a3c7fa7140 100644
--- a/clarity/src/vm/costs/mod.rs
+++ b/clarity/src/vm/costs/mod.rs
@@ -46,9 +46,9 @@ type Result<T> = std::result::Result<T, CostErrors>;
 pub const CLARITY_MEMORY_LIMIT: u64 = 100 * 1000 * 1000;

 // TODO: factor out into a boot lib?
-pub const COSTS_1_NAME: &'static str = "costs";
-pub const COSTS_2_NAME: &'static str = "costs-2";
-pub const COSTS_3_NAME: &'static str = "costs-3";
+pub const COSTS_1_NAME: &str = "costs";
+pub const COSTS_2_NAME: &str = "costs-2";
+pub const COSTS_3_NAME: &str = "costs-3";

 lazy_static! {
     static ref COST_TUPLE_TYPE_SIGNATURE: TypeSignature = {
@@ -254,6 +254,7 @@ pub struct TrackerData {
 }

 #[derive(Clone)]
+#[allow(clippy::large_enum_variant)]
 pub enum LimitedCostTracker {
     Limited(TrackerData),
     Free,
@@ -334,11 +335,7 @@ pub enum CostErrors {

 impl CostErrors {
     fn rejectable(&self) -> bool {
-        match self {
-            CostErrors::InterpreterFailure => true,
-            CostErrors::Expect(_) => true,
-            _ => false,
-        }
+        matches!(self, CostErrors::InterpreterFailure | CostErrors::Expect(_))
     }
 }

@@ -650,7 +647,7 @@ fn load_cost_functions(
             continue;
         }
         for arg in &cost_func_type.args {
-            if &arg.signature != &TypeSignature::UIntType {
+            if arg.signature != TypeSignature::UIntType {
                 warn!("Confirmed cost proposal invalid: contains non uint argument";
                     "confirmed_proposal_id" => confirmed_proposal,
                 );
@@ -872,7 +869,7 @@ impl TrackerData {
                 .map_err(|e| CostErrors::Expect(e.to_string()))?;
         }

-        return Ok(());
+        Ok(())
     }
 }

@@ -884,7 +881,7 @@ impl LimitedCostTracker {
         }
     }
     #[allow(clippy::panic)]
-    pub fn set_total(&mut self, total: ExecutionCost) -> () {
+    pub fn set_total(&mut self, total: ExecutionCost) {
         // used by the miner to "undo" the cost of a transaction when trying to pack a block.
         match self {
             Self::Limited(ref mut data) => data.total = total,
@@ -982,8 +979,7 @@ fn compute_cost(
         .cost_contracts
         .get_mut(&cost_function_reference.contract_id)
         .ok_or(CostErrors::CostComputationFailed(format!(
-            "CostFunction not found: {}",
-            &cost_function_reference
+            "CostFunction not found: {cost_function_reference}"
         )))?;

     let mut program = vec![SymbolicExpression::atom(
@@ -1050,7 +1046,7 @@ impl CostTracker for LimitedCostTracker {
         match self {
             Self::Free => {
                 // tracker is free, return zero!
-                return Ok(ExecutionCost::ZERO);
+                Ok(ExecutionCost::ZERO)
             }
             Self::Limited(ref mut data) => {
                 if cost_function == ClarityCostFunction::Unimplemented {
@@ -1062,8 +1058,7 @@ impl CostTracker for LimitedCostTracker {
                     .cost_function_references
                     .get(&cost_function)
                     .ok_or(CostErrors::CostComputationFailed(format!(
-                        "CostFunction not defined: {}",
-                        &cost_function
+                        "CostFunction not defined: {cost_function}"
                     )))?
                     .clone();
@@ -1177,20 +1172,16 @@ pub trait CostOverflowingMath<T> {

 impl CostOverflowingMath<u64> for u64 {
     fn cost_overflow_mul(self, other: u64) -> Result<u64> {
-        self.checked_mul(other)
-            .ok_or_else(|| CostErrors::CostOverflow)
+        self.checked_mul(other).ok_or(CostErrors::CostOverflow)
     }
     fn cost_overflow_add(self, other: u64) -> Result<u64> {
-        self.checked_add(other)
-            .ok_or_else(|| CostErrors::CostOverflow)
+        self.checked_add(other).ok_or(CostErrors::CostOverflow)
     }
     fn cost_overflow_sub(self, other: u64) -> Result<u64> {
-        self.checked_sub(other)
-            .ok_or_else(|| CostErrors::CostOverflow)
+        self.checked_sub(other).ok_or(CostErrors::CostOverflow)
     }
     fn cost_overflow_div(self, other: u64) -> Result<u64> {
-        self.checked_div(other)
-            .ok_or_else(|| CostErrors::CostOverflow)
+        self.checked_div(other).ok_or(CostErrors::CostOverflow)
     }
 }

@@ -1207,7 +1198,7 @@ impl ExecutionCost {
     pub fn proportion_largest_dimension(&self, numerator: &ExecutionCost) -> u64 {
         // max() should always return because there are > 0 elements
         #[allow(clippy::expect_used)]
-        [
+        *[
             numerator.runtime / cmp::max(1, self.runtime / 100),
             numerator.write_length / cmp::max(1, self.write_length / 100),
             numerator.write_count / cmp::max(1, self.write_count / 100),
@@ -1217,7 +1208,6 @@ impl ExecutionCost {
         .iter()
         .max()
         .expect("BUG: should find maximum")
-        .clone()
     }

     /// Returns the dot product of this execution cost with `resolution`/block_limit
diff --git a/clarity/src/vm/coverage.rs b/clarity/src/vm/coverage.rs
index be8a647e9c..862c035f98 100644
--- a/clarity/src/vm/coverage.rs
+++ b/clarity/src/vm/coverage.rs
@@ -26,6 +26,12 @@ struct CoverageFileInfo {
     coverage: HashMap<String, Vec<u64>>,
 }

+impl Default for CoverageReporter {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
 impl CoverageReporter {
     pub fn new() -> CoverageReporter {
         CoverageReporter {
diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs
index 4f6f3f7781..cbb8bcb4de 100644
--- a/clarity/src/vm/database/clarity_db.rs
+++ b/clarity/src/vm/database/clarity_db.rs
@@ -82,7 +82,7 @@ impl TryFrom<&str> for StoreType {

     fn try_from(value: &str) -> core::result::Result<StoreType, String> {
         use self::StoreType::*;

-        let hex_value = u8::from_str_radix(value, 10).map_err(|e| e.to_string())?;
+        let hex_value = value.parse::<u8>().map_err(|e| e.to_string())?;
         match hex_value {
             0x00 => Ok(DataMap),
             0x01 => Ok(Variable),
@@ -506,7 +506,7 @@ impl<'a> ClarityDatabase<'a> {
     }

     pub fn put_data<T: ClaritySerializable>(&mut self, key: &str, value: &T) -> Result<()> {
-        self.store.put_data(&key, &value.serialize())
+        self.store.put_data(key, &value.serialize())
     }

     /// Like `put()`, but returns the serialized byte size of the stored value
@@ -516,7 +516,7 @@ impl<'a> ClarityDatabase<'a> {
         value: &T,
     ) -> Result<u64> {
         let serialized = value.serialize();
-        self.store.put_data(&key, &serialized)?;
+        self.store.put_data(key, &serialized)?;
         Ok(byte_len_of_serialization(&serialized))
     }

@@ -568,7 +568,7 @@ impl<'a> ClarityDatabase<'a> {
         let size = serialized.len() as u64;
         let hex_serialized = to_hex(serialized.as_slice());

-        self.store.put_data(&key, &hex_serialized)?;
+        self.store.put_data(key, &hex_serialized)?;

         Ok(pre_sanitized_size.unwrap_or(size))
     }

@@ -755,16 +755,14 @@ impl<'a> ClarityDatabase<'a> {
         &mut self,
         contract_identifier: &QualifiedContractIdentifier,
     ) -> Result<Option<ContractAnalysis>> {
-        let x_opt = self
-            .store
+        self.store
             .get_metadata(contract_identifier, AnalysisDatabase::storage_key())
             // treat NoSuchContract error thrown by get_metadata as an Option::None --
             // the analysis will propagate that as a CheckError anyways.
-            .ok();
-        match x_opt.flatten() {
-            None => Ok(None),
-            Some(x) => ContractAnalysis::deserialize(&x).map(|out| Some(out)),
-        }
+            .ok()
+            .flatten()
+            .map(|x| ContractAnalysis::deserialize(&x))
+            .transpose()
     }

     pub fn get_contract_size(
@@ -978,7 +976,7 @@ impl<'a> ClarityDatabase<'a> {
 }

 // Get block information

-impl<'a> ClarityDatabase<'a> {
+impl ClarityDatabase<'_> {
     /// Returns the ID of a *Stacks* block, by a *Stacks* block height.
     ///
     /// Fails if `block_height` >= the "currently" under construction Stacks block height.
@@ -1066,7 +1064,7 @@ impl<'a> ClarityDatabase<'a> {
         let query_tip = self.get_index_block_header_hash(current_height.saturating_sub(1))?;
         Ok(self
             .headers_db
-            .get_stacks_height_for_tenure_height(&query_tip, tenure_height.into()))
+            .get_stacks_height_for_tenure_height(&query_tip, tenure_height))
     }

     /// Get the last-known burnchain block height.
@@ -1158,7 +1156,7 @@ impl<'a> ClarityDatabase<'a> {
     ///    This is the highest Stacks block in this fork whose consensus hash is known.
     /// 3. Resolve the parent StacksBlockId to its consensus hash
     /// 4. Resolve the consensus hash to the associated SortitionId
-    /// In Epoch 3+:
+    ///    In Epoch 3+:
     /// 1. Get the SortitionId of the current Stacks tip
     fn get_sortition_id_for_stacks_tip(&mut self) -> Result<Option<SortitionId>> {
         if !self
@@ -1276,8 +1274,7 @@ impl<'a> ClarityDatabase<'a> {
                 InterpreterError::Expect(
                     "FATAL: no winning burnchain token spend record for block".into(),
                 )
-            })?
-            .into())
+            })?)
     }

     pub fn get_miner_spend_total(&mut self, block_height: u32) -> Result<u128> {
@@ -1294,8 +1291,7 @@ impl<'a> ClarityDatabase<'a> {
                 InterpreterError::Expect(
                     "FATAL: no total burnchain token spend record for block".into(),
                 )
-            })?
-            .into())
+            })?)
     }

     pub fn get_block_reward(&mut self, block_height: u32) -> Result<Option<u128>> {
@@ -1316,7 +1312,6 @@ impl<'a> ClarityDatabase<'a> {
         let reward: u128 = self
             .headers_db
             .get_tokens_earned_for_block(&id_bhh, &epoch)
-            .map(|x| x.into())
             .ok_or_else(|| {
                 InterpreterError::Expect("FATAL: matured block has no recorded reward".into())
             })?;
@@ -1337,7 +1332,7 @@ impl<'a> ClarityDatabase<'a> {

 // poison-microblock

-impl<'a> ClarityDatabase<'a> {
+impl ClarityDatabase<'_> {
     pub fn make_microblock_pubkey_height_key(pubkey_hash: &Hash160) -> String {
         format!("microblock-pubkey-hash::{}", pubkey_hash)
     }
@@ -1360,6 +1355,7 @@ impl<'a> ClarityDatabase<'a> {
         self.store.get_cc_special_cases_handler()
     }

+    #[allow(clippy::unnecessary_fallible_conversions)]
     pub fn insert_microblock_poison(
         &mut self,
         height: u32,
@@ -1451,11 +1447,11 @@ impl<'a> ClarityDatabase<'a> {
                     if let PrincipalData::Standard(principal_data) = reporter_principal {
                         Ok((principal_data, seq))
                     } else {
-                        return Err(InterpreterError::Expect(
+                        Err(InterpreterError::Expect(
                             "BUG: poison-microblock report principal is not a standard principal"
                                 .into(),
                         )
-                        .into());
+                        .into())
                     }
                 })
                 .transpose()
@@ -1472,7 +1468,7 @@ fn map_no_contract_as_none<T>(res: Result<Option<T>>) -> Result<Option<T>> {
 }

 // Variable Functions...
-impl<'a> ClarityDatabase<'a> {
+impl ClarityDatabase<'_> {
     pub fn create_variable(
         &mut self,
         contract_identifier: &QualifiedContractIdentifier,
@@ -1605,7 +1601,7 @@ impl<'a> ClarityDatabase<'a> {
 }

 // Data Map Functions
-impl<'a> ClarityDatabase<'a> {
+impl ClarityDatabase<'_> {
     pub fn create_map(
         &mut self,
         contract_identifier: &QualifiedContractIdentifier,
@@ -1951,7 +1947,7 @@ impl<'a> ClarityDatabase<'a> {

 // Asset Functions

-impl<'a> ClarityDatabase<'a> {
+impl ClarityDatabase<'_> {
     pub fn create_fungible_token(
         &mut self,
         contract_identifier: &QualifiedContractIdentifier,
@@ -2294,19 +2290,13 @@ impl<'a> ClarityDatabase<'a> {
         let key = ClarityDatabase::make_key_for_account_balance(principal);
         debug!("Fetching account balance"; "principal" => %principal.to_string());
         let result = self.get_data(&key)?;
-        Ok(match result {
-            None => STXBalance::zero(),
-            Some(balance) => balance,
-        })
+        Ok(result.unwrap_or_default())
     }

     pub fn get_account_nonce(&mut self, principal: &PrincipalData) -> Result<u64> {
         let key = ClarityDatabase::make_key_for_account_nonce(principal);
         let result = self.get_data(&key)?;
-        Ok(match result {
-            None => 0,
-            Some(nonce) => nonce,
-        })
+        Ok(result.unwrap_or_default())
     }

     pub fn set_account_nonce(&mut self, principal: &PrincipalData, nonce: u64) -> Result<()> {
@@ -2316,7 +2306,7 @@ impl<'a> ClarityDatabase<'a> {
 }

 // access burnchain state
-impl<'a> ClarityDatabase<'a> {
+impl ClarityDatabase<'_> {
     pub fn get_burn_block_height(&self, sortition_id: &SortitionId) -> Option<u32> {
         self.burn_state_db.get_burn_block_height(sortition_id)
     }
@@ -2328,7 +2318,7 @@ impl<'a> ClarityDatabase<'a> {
     }

     pub fn get_stacks_epoch_for_block(&self, id_bhh: &StacksBlockId) -> Result<StacksEpoch> {
-        let burn_block = self.get_burnchain_block_height(&id_bhh).ok_or_else(|| {
+        let burn_block = self.get_burnchain_block_height(id_bhh).ok_or_else(|| {
             InterpreterError::Expect(format!(
                 "FATAL: no burnchain block height found for Stacks block {}",
                 id_bhh
diff --git a/clarity/src/vm/database/key_value_wrapper.rs b/clarity/src/vm/database/key_value_wrapper.rs
index c444aa553e..eecbe092ea 100644
--- a/clarity/src/vm/database/key_value_wrapper.rs
+++ b/clarity/src/vm/database/key_value_wrapper.rs
@@ -76,11 +76,11 @@ fn rollback_value_check(value: &String, check: &RollbackValueCheck) {
     assert_eq!(value, check)
 }
 #[cfg(feature = "rollback_value_check")]
-fn rollback_edits_push<T>(edits: &mut Vec<(T, RollbackValueCheck)>, key: T, value: &String)
+fn rollback_edits_push<T>(edits: &mut Vec<(T, RollbackValueCheck)>, key: T, value: &str)
 where
     T: Eq + Hash + Clone,
 {
-    edits.push((key, value.clone()));
+    edits.push((key, value.to_owned()));
 }
 // this function is used to check the lookup map when committing at the "bottom" of the
 // wrapper -- i.e., when committing to the underlying store.
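The key_value_wrapper.rs hunks here all apply one borrowing idiom: helpers take `&str` rather than `&String`, and an owned copy is made explicitly with `to_owned()` only where the collection actually stores it. A minimal standalone sketch of the pattern (hypothetical names, not part of this patch):

    // Accepting &str lets callers pass a String, a &String (via deref
    // coercion), or a string literal; &String adds only a double indirection.
    fn push_edit(edits: &mut Vec<(String, String)>, key: String, value: &str) {
        edits.push((key, value.to_owned()));
    }

Because `&String` coerces to `&str`, existing call sites compile unchanged after the signature loosens.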
@@ -88,7 +88,7 @@ where
 fn rollback_check_pre_bottom_commit<T>(
     edits: Vec<(T, RollbackValueCheck)>,
     lookup_map: &mut HashMap<T, Vec<String>>,
-) -> Vec<(T, String)>
+) -> Result<Vec<(T, String)>, InterpreterError>
 where
     T: Eq + Hash + Clone,
 {
@@ -96,10 +96,10 @@ where
         edit_history.reverse();
     }
     for (key, value) in edits.iter() {
-        rollback_lookup_map(key, &value, lookup_map);
+        let _ = rollback_lookup_map(key, value, lookup_map);
     }
     assert!(lookup_map.is_empty());
-    edits
+    Ok(edits)
 }

 /// Result structure for fetched values from the
@@ -205,7 +205,7 @@ where
 }

 impl<'a> RollbackWrapper<'a> {
-    pub fn new(store: &'a mut dyn ClarityBackingStore) -> RollbackWrapper {
+    pub fn new(store: &'a mut dyn ClarityBackingStore) -> RollbackWrapper<'a> {
         RollbackWrapper {
             store,
             lookup_map: HashMap::new(),
@@ -218,7 +218,7 @@ impl<'a> RollbackWrapper<'a> {
     pub fn from_persisted_log(
         store: &'a mut dyn ClarityBackingStore,
         log: RollbackWrapperPersistedLog,
-    ) -> RollbackWrapper {
+    ) -> RollbackWrapper<'a> {
         RollbackWrapper {
             store,
             lookup_map: log.lookup_map,
@@ -283,7 +283,7 @@ impl<'a> RollbackWrapper<'a> {
             // stack is empty, committing to the backing store
             let all_edits =
                 rollback_check_pre_bottom_commit(last_item.edits, &mut self.lookup_map)?;
-            if all_edits.len() > 0 {
+            if !all_edits.is_empty() {
                 self.store.put_all_data(all_edits).map_err(|e| {
                     InterpreterError::Expect(format!(
                         "ERROR: Failed to commit data to sql store: {e:?}"
@@ -295,7 +295,7 @@ impl<'a> RollbackWrapper<'a> {
                 last_item.metadata_edits,
                 &mut self.metadata_lookup_map,
             )?;
-            if metadata_edits.len() > 0 {
+            if !metadata_edits.is_empty() {
                 self.store.put_all_metadata(metadata_edits).map_err(|e| {
                     InterpreterError::Expect(format!(
                         "ERROR: Failed to commit data to sql store: {e:?}"
@@ -316,12 +316,12 @@ fn inner_put_data<T>(
 ) where
     T: Eq + Hash + Clone,
 {
-    let key_edit_deque = lookup_map.entry(key.clone()).or_insert_with(|| Vec::new());
+    let key_edit_deque = lookup_map.entry(key.clone()).or_default();
     rollback_edits_push(edits, key, &value);
     key_edit_deque.push(value);
 }

-impl<'a> RollbackWrapper<'a> {
+impl RollbackWrapper<'_> {
     pub fn put_data(&mut self, key: &str, value: &str) -> InterpreterResult<()> {
         let current = self.stack.last_mut().ok_or_else(|| {
             InterpreterError::Expect(
@@ -329,12 +329,13 @@ impl<'a> RollbackWrapper<'a> {
             )
         })?;

-        Ok(inner_put_data(
+        inner_put_data(
             &mut self.lookup_map,
             &mut current.edits,
             key.to_string(),
             value.to_string(),
-        ))
+        );
+        Ok(())
     }

     ///
@@ -347,13 +348,12 @@ impl<'a> RollbackWrapper<'a> {
         bhh: StacksBlockId,
         query_pending_data: bool,
     ) -> InterpreterResult<()> {
-        self.store.set_block_hash(bhh).map(|x| {
+        self.store.set_block_hash(bhh).inspect(|_| {
            // use and_then so that query_pending_data is only set once set_block_hash succeeds
            // this doesn't matter in practice, because a set_block_hash failure always aborts
            // the transaction with a runtime error (destroying its environment), but it's much
            // better practice to do this, especially if the abort behavior changes in the future.
             self.query_pending_data = query_pending_data;
-            x
         })
     }

@@ -501,12 +501,13 @@ impl<'a> RollbackWrapper<'a> {

         let metadata_key = (contract.clone(), key.to_string());

-        Ok(inner_put_data(
+        inner_put_data(
             &mut self.metadata_lookup_map,
             &mut current.metadata_edits,
             metadata_key,
             value.to_string(),
-        ))
+        );
+        Ok(())
     }

     // Throws a NoSuchContract error if contract doesn't exist,
diff --git a/clarity/src/vm/database/mod.rs b/clarity/src/vm/database/mod.rs
index d16d944d55..a9c2182806 100644
--- a/clarity/src/vm/database/mod.rs
+++ b/clarity/src/vm/database/mod.rs
@@ -13,7 +13,6 @@
 //
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.
-
 use hashbrown::HashMap;
 #[cfg(feature = "canonical")]
 pub use sqlite::MemoryBackingStore;
diff --git a/clarity/src/vm/database/structures.rs b/clarity/src/vm/database/structures.rs
index e4fab929bd..b88420ff6a 100644
--- a/clarity/src/vm/database/structures.rs
+++ b/clarity/src/vm/database/structures.rs
@@ -257,7 +257,7 @@ impl ClaritySerializable for STXBalance {

 impl ClarityDeserializable<STXBalance> for STXBalance {
     fn deserialize(input: &str) -> Result<Self> {
-        let bytes = hex_bytes(&input).map_err(|_| {
+        let bytes = hex_bytes(input).map_err(|_| {
             InterpreterError::Expect("STXBalance deserialization: failed decoding bytes.".into())
         })?;
         let result = if bytes.len() == STXBalance::unlocked_and_v1_size {
@@ -555,7 +555,7 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> {
             );
         }

-        if !(self.balance.amount_locked() <= new_total_locked) {
+        if self.balance.amount_locked() > new_total_locked {
             return Err(InterpreterError::Expect(
                 "FATAL: account must lock more after `increase_lock_v2`".into(),
             )
@@ -623,7 +623,7 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> {
         }

         // caller needs to have checked this
-        if !(amount_to_lock > 0) {
+        if amount_to_lock == 0 {
             return Err(InterpreterError::Expect("BUG: cannot lock 0 tokens".into()).into());
         }

@@ -980,6 +980,12 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> {
     }
 }

+impl Default for STXBalance {
+    fn default() -> Self {
+        STXBalance::zero()
+    }
+}
+
 // NOTE: do _not_ add mutation methods to this struct. Put them in STXBalanceSnapshot!
 impl STXBalance {
     pub const unlocked_and_v1_size: usize = 40;
diff --git a/clarity/src/vm/diagnostic.rs b/clarity/src/vm/diagnostic.rs
index 81939237d7..164875151f 100644
--- a/clarity/src/vm/diagnostic.rs
+++ b/clarity/src/vm/diagnostic.rs
@@ -66,24 +66,26 @@ impl Diagnostic {
 impl fmt::Display for Diagnostic {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         write!(f, "{:?}", self.level)?;
-        if self.spans.len() == 1 {
-            write!(
+        match self.spans.len().cmp(&1) {
+            std::cmp::Ordering::Equal => write!(
                 f,
                 " (line {}, column {})",
                 self.spans[0].start_line, self.spans[0].start_column
-            )?;
-        } else if self.spans.len() > 1 {
-            let lines: Vec<String> = self
-                .spans
-                .iter()
-                .map(|s| format!("line: {}", s.start_line))
-                .collect();
-            write!(f, " ({})", lines.join(", "))?;
+            )?,
+            std::cmp::Ordering::Greater => {
+                let lines: Vec<String> = self
+                    .spans
+                    .iter()
+                    .map(|s| format!("line: {}", s.start_line))
+                    .collect();
+                write!(f, " ({})", lines.join(", "))?;
+            }
+            _ => {}
         }
         write!(f, ": {}.", &self.message)?;
         if let Some(suggestion) = &self.suggestion {
-            write!(f, "\n{}", suggestion)?;
+            write!(f, "\n{suggestion}")?;
         }
-        write!(f, "\n")
+        writeln!(f)
     }
 }
diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs
index b23e356dea..5b2302a9b2 100644
--- a/clarity/src/vm/docs/mod.rs
+++ b/clarity/src/vm/docs/mod.rs
@@ -814,19 +814,19 @@ pub fn get_output_type_string(function_type: &FunctionType) -> String {
         FunctionType::Binary(left, right, ref out_sig) => match out_sig {
             FunctionReturnsSignature::Fixed(out_type) => format!("{}", out_type),
             FunctionReturnsSignature::TypeOfArgAtPosition(pos) => {
-                let arg_sig: &FunctionArgSignature;
-                match pos {
-                    0 => arg_sig = left,
-                    1 => arg_sig = right,
-                    _ => panic!("Index out of range: TypeOfArgAtPosition for FunctionType::Binary can only handle two arguments, zero-indexed (0 or 1).")
-                }
+                let arg_sig = match pos {
+                    0 => left,
+                    1 => right,
+                    _ => panic!("Index out of range: TypeOfArgAtPosition for FunctionType::Binary can only handle two arguments, zero-indexed (0 or 1).")
+                };
+
                 match arg_sig {
-                    FunctionArgSignature::Single(arg_type) => format!("{}", arg_type),
-                    FunctionArgSignature::Union(arg_types) => {
-                        let out_types: Vec<String> =
-                            arg_types.iter().map(|x| format!("{}", x)).collect();
-                        out_types.join(" | ")
-                    }
+                    FunctionArgSignature::Single(arg_type) => arg_type.to_string(),
+                    FunctionArgSignature::Union(arg_types) => arg_types
+                        .iter()
+                        .map(ToString::to_string)
+                        .collect::<Vec<_>>()
+                        .join(" | "),
                 }
             }
         },
@@ -835,15 +835,12 @@ pub fn get_output_type_string(function_type: &FunctionType) -> String {

 pub fn get_signature(function_name: &str, function_type: &FunctionType) -> Option<String> {
     if let FunctionType::Fixed(FixedFunction { ref args, .. }) = function_type {
-        let in_names: Vec<String> = args
-            .iter()
-            .map(|x| format!("{}", x.name.as_str()))
-            .collect();
+        let in_names: Vec<String> = args.iter().map(|x| x.name.to_string()).collect();
         let arg_examples = in_names.join(" ");
         Some(format!(
             "({}{}{})",
             function_name,
-            if arg_examples.len() == 0 { "" } else { " " },
+            if arg_examples.is_empty() { "" } else { " " },
             arg_examples
         ))
     } else {
@@ -860,7 +857,7 @@ fn make_for_simple_native(
 ) -> FunctionAPI {
     let (input_type, output_type) = {
         if let TypedNativeFunction::Simple(SimpleNativeFunction(function_type)) =
-            TypedNativeFunction::type_native_function(&function)
+            TypedNativeFunction::type_native_function(function)
                 .expect("Failed to type a native function")
         {
             let input_type = get_input_type_string(&function_type);
@@ -877,8 +874,8 @@ fn make_for_simple_native(
     FunctionAPI {
         name: api.name.map_or(name, |x| x.to_string()),
         snippet: api.snippet.to_string(),
-        input_type: input_type,
-        output_type: output_type,
+        input_type,
+        output_type,
         signature: api.signature.to_string(),
         description: api.description.to_string(),
         example: api.example.to_string(),
@@ -1402,7 +1399,7 @@ You _may not_ use this function to call a public function defined in the current
 function returns _err_, any database changes resulting from calling `contract-call?` are aborted.
 If the function returns _ok_, database changes occurred.",
     example: "
-;; instantiate the sample-contracts/tokens.clar contract first!
+;; instantiate the sample/contracts/tokens.clar contract first!
 (as-contract (contract-call? .tokens mint! u19)) ;; Returns (ok u19)"
 };

@@ -2526,35 +2523,35 @@ pub fn make_api_reference(function: &NativeFunctions) -> FunctionAPI {
     use crate::vm::functions::NativeFunctions::*;
     let name = function.get_name();
     match function {
-        Add => make_for_simple_native(&ADD_API, &function, name),
-        ToUInt => make_for_simple_native(&TO_UINT_API, &function, name),
-        ToInt => make_for_simple_native(&TO_INT_API, &function, name),
-        Subtract => make_for_simple_native(&SUB_API, &function, name),
-        Multiply => make_for_simple_native(&MUL_API, &function, name),
-        Divide => make_for_simple_native(&DIV_API, &function, name),
-        BuffToIntLe => make_for_simple_native(&BUFF_TO_INT_LE_API, &function, name),
-        BuffToUIntLe => make_for_simple_native(&BUFF_TO_UINT_LE_API, &function, name),
-        BuffToIntBe => make_for_simple_native(&BUFF_TO_INT_BE_API, &function, name),
-        BuffToUIntBe => make_for_simple_native(&BUFF_TO_UINT_BE_API, &function, name),
-        IsStandard => make_for_simple_native(&IS_STANDARD_API, &function, name),
-        PrincipalDestruct => make_for_simple_native(&PRINCPIPAL_DESTRUCT_API, &function, name),
-        PrincipalConstruct => make_for_special(&PRINCIPAL_CONSTRUCT_API, &function),
-        StringToInt => make_for_simple_native(&STRING_TO_INT_API, &function, name),
-        StringToUInt => make_for_simple_native(&STRING_TO_UINT_API, &function, name),
-        IntToAscii => make_for_simple_native(&INT_TO_ASCII_API, &function, name),
-        IntToUtf8 => make_for_simple_native(&INT_TO_UTF8_API, &function, name),
-        CmpGeq => make_for_simple_native(&GEQ_API, &function, name),
-        CmpLeq => make_for_simple_native(&LEQ_API, &function, name),
-        CmpLess => make_for_simple_native(&LESS_API, &function, name),
-        CmpGreater => make_for_simple_native(&GREATER_API, &function, name),
-        Modulo => make_for_simple_native(&MOD_API, &function, name),
-        Power => make_for_simple_native(&POW_API, &function, name),
-        Sqrti => make_for_simple_native(&SQRTI_API, &function, name),
-        Log2 => make_for_simple_native(&LOG2_API, &function, name),
-        BitwiseXor => make_for_simple_native(&XOR_API, &function, name),
-        And => make_for_simple_native(&AND_API, &function, name),
-        Or => make_for_simple_native(&OR_API, &function, name),
-        Not => make_for_simple_native(&NOT_API, &function, name),
+        Add => make_for_simple_native(&ADD_API, function, name),
+        ToUInt => make_for_simple_native(&TO_UINT_API, function, name),
+        ToInt => make_for_simple_native(&TO_INT_API, function, name),
+        Subtract => make_for_simple_native(&SUB_API, function, name),
+        Multiply => make_for_simple_native(&MUL_API, function, name),
+        Divide => make_for_simple_native(&DIV_API, function, name),
+        BuffToIntLe => make_for_simple_native(&BUFF_TO_INT_LE_API, function, name),
+        BuffToUIntLe => make_for_simple_native(&BUFF_TO_UINT_LE_API, function, name),
+        BuffToIntBe => make_for_simple_native(&BUFF_TO_INT_BE_API, function, name),
+        BuffToUIntBe => make_for_simple_native(&BUFF_TO_UINT_BE_API, function, name),
+        IsStandard => make_for_simple_native(&IS_STANDARD_API, function, name),
+        PrincipalDestruct => make_for_simple_native(&PRINCPIPAL_DESTRUCT_API, function, name),
+        PrincipalConstruct => make_for_special(&PRINCIPAL_CONSTRUCT_API, function),
+        StringToInt => make_for_simple_native(&STRING_TO_INT_API, function, name),
+        StringToUInt => make_for_simple_native(&STRING_TO_UINT_API, function, name),
+        IntToAscii => make_for_simple_native(&INT_TO_ASCII_API, function, name),
+        IntToUtf8 => make_for_simple_native(&INT_TO_UTF8_API, function, name),
+        CmpGeq => make_for_simple_native(&GEQ_API, function, name),
+        CmpLeq => make_for_simple_native(&LEQ_API, function, name),
+        CmpLess => make_for_simple_native(&LESS_API, function, name),
+        CmpGreater => make_for_simple_native(&GREATER_API, function, name),
+        Modulo => make_for_simple_native(&MOD_API, function, name),
+        Power => make_for_simple_native(&POW_API, function, name),
+        Sqrti => make_for_simple_native(&SQRTI_API, function, name),
+        Log2 => make_for_simple_native(&LOG2_API, function, name),
+        BitwiseXor => make_for_simple_native(&XOR_API, function, name),
+        And => make_for_simple_native(&AND_API, function, name),
+        Or => make_for_simple_native(&OR_API, function, name),
+        Not => make_for_simple_native(&NOT_API, function, name),
         Equals => make_for_special(&EQUALS_API, function),
         If => make_for_special(&IF_API, function),
         Let => make_for_special(&LET_API, function),
@@ -2620,20 +2617,20 @@ pub fn make_api_reference(function: &NativeFunctions) -> FunctionAPI {
         BurnAsset => make_for_special(&BURN_ASSET, function),
         GetTokenSupply => make_for_special(&GET_TOKEN_SUPPLY, function),
         AtBlock => make_for_special(&AT_BLOCK, function),
-        GetStxBalance => make_for_simple_native(&STX_GET_BALANCE, &function, name),
-        StxGetAccount => make_for_simple_native(&STX_GET_ACCOUNT, &function, name),
+        GetStxBalance => make_for_simple_native(&STX_GET_BALANCE, function, name),
+        StxGetAccount => make_for_simple_native(&STX_GET_ACCOUNT, function, name),
         StxTransfer => make_for_special(&STX_TRANSFER, function),
         StxTransferMemo => make_for_special(&STX_TRANSFER_MEMO, function),
-        StxBurn => make_for_simple_native(&STX_BURN, &function, name),
+        StxBurn => make_for_simple_native(&STX_BURN, function, name),
         ToConsensusBuff => make_for_special(&TO_CONSENSUS_BUFF, function),
         FromConsensusBuff => make_for_special(&FROM_CONSENSUS_BUFF, function),
         ReplaceAt => make_for_special(&REPLACE_AT, function),
-        BitwiseXor2 => make_for_simple_native(&BITWISE_XOR_API, &function, name),
-        BitwiseAnd => make_for_simple_native(&BITWISE_AND_API, &function, name),
-        BitwiseOr => make_for_simple_native(&BITWISE_OR_API, &function, name),
-        BitwiseNot => make_for_simple_native(&BITWISE_NOT_API, &function, name),
-        BitwiseLShift => make_for_simple_native(&BITWISE_LEFT_SHIFT_API, &function, name),
-        BitwiseRShift => make_for_simple_native(&BITWISE_RIGHT_SHIFT_API, &function, name),
+        BitwiseXor2 => make_for_simple_native(&BITWISE_XOR_API, function, name),
+        BitwiseAnd => make_for_simple_native(&BITWISE_AND_API, function, name),
+        BitwiseOr => make_for_simple_native(&BITWISE_OR_API, function, name),
+        BitwiseNot => make_for_simple_native(&BITWISE_NOT_API, function, name),
+        BitwiseLShift => make_for_simple_native(&BITWISE_LEFT_SHIFT_API, function, name),
+        BitwiseRShift => make_for_simple_native(&BITWISE_RIGHT_SHIFT_API, function, name),
     }
 }

@@ -2726,7 +2723,7 @@ fn make_all_api_reference() -> ReferenceAPIs {
         .filter_map(make_keyword_reference)
         .collect();

-    keywords.sort_by(|x, y| x.name.cmp(&y.name));
+    keywords.sort_by_key(|x| x.name);

     ReferenceAPIs {
         functions,
@@ -2737,10 +2734,9 @@ fn make_all_api_reference() -> ReferenceAPIs {
 #[allow(clippy::expect_used)]
 pub fn make_json_api_reference() -> String {
     let api_out = make_all_api_reference();
-    format!(
-        "{}",
-        serde_json::to_string(&api_out).expect("Failed to serialize documentation")
-    )
+    serde_json::to_string(&api_out)
+        .expect("Failed to serialize documentation")
+        .to_string()
 }

 #[cfg(test)]
@@ -2777,7 +2773,7 @@ mod test {
     const DOC_HEADER_DB: DocHeadersDB = DocHeadersDB {};

     impl MemoryBackingStore {
-        pub fn as_docs_clarity_db<'a>(&'a mut self) -> ClarityDatabase<'a> {
+        pub fn as_docs_clarity_db(&mut self) -> ClarityDatabase<'_> {
             ClarityDatabase::new(self, &DOC_HEADER_DB, &DOC_POX_STATE_DB)
         }
     }
@@ -3001,13 +2997,13 @@ mod test {
         let mut current_segment: String = "".into();
         for line in program.lines() {
             current_segment.push_str(line);
-            current_segment.push_str("\n");
+            current_segment.push('\n');
             if line.contains(";;") && line.contains("Returns ") {
                 segments.push(current_segment);
                 current_segment = "".into();
             }
         }
-        if current_segment.len() > 0 {
+        if !current_segment.is_empty() {
             segments.push(current_segment);
         }

@@ -3067,7 +3063,7 @@ mod test {
                 .type_map
                 .as_ref()
                 .unwrap()
-                .get_type_expected(&analysis.expressions.last().unwrap())
+                .get_type_expected(analysis.expressions.last().unwrap())
                 .cloned(),
         );
     }
@@ -3135,7 +3131,7 @@ mod test {
     fn test_examples() {
         // Execute test examples against the latest version of Clarity
         let apis = make_all_api_reference();
-        let token_contract_content = include_str!("../../../../sample-contracts/tokens.clar");
+        let token_contract_content = include_str!("../../../../sample/contracts/tokens.clar");
         for func_api in apis.functions.iter() {
             if func_api.name == "at-block" {
                 eprintln!("Skipping at-block, because it cannot be evaluated without a MARF");
@@ -3162,7 +3158,7 @@ mod test {
             let mut analysis_db = store.as_analysis_db();
             let mut parsed = ast::build_ast(
                 &contract_id,
-                &token_contract_content,
+                token_contract_content,
                 &mut (),
                 ClarityVersion::latest(),
                 StacksEpochId::latest(),
@@ -3178,7 +3174,7 @@ mod test {
                 &StacksEpochId::latest(),
                 &ClarityVersion::latest(),
             )
-            .expect("Failed to type check sample-contracts/tokens");
+            .expect("Failed to type check sample/contracts/tokens");
         }

         {
@@ -3201,7 +3197,7 @@ mod test {
                 &StacksEpochId::latest(),
                 &ClarityVersion::latest(),
             )
-            .expect("Failed to type check sample-contracts/tokens");
+            .expect("Failed to type check sample/contracts/tokens");
         }

         let conn = store.as_docs_clarity_db();
@@ -3232,7 +3228,7 @@ mod test {
             env.initialize_contract(
                 contract_id,
-                &token_contract_content,
+                token_contract_content,
                 None,
                 ASTRules::PrecheckSize,
             )
diff --git a/clarity/src/vm/errors.rs b/clarity/src/vm/errors.rs
index b3b0ca5fea..911465d4ba 100644
--- a/clarity/src/vm/errors.rs
+++ b/clarity/src/vm/errors.rs
@@ -37,6 +37,7 @@ pub struct IncomparableError<T> {
 }

 #[derive(Debug)]
+#[allow(clippy::large_enum_variant)]
 pub enum Error {
     /// UncheckedErrors are errors that *should* be caught by the
     /// TypeChecker and other check passes. Test executions may
@@ -117,7 +118,7 @@ pub type InterpreterResult<R> = Result<R, Error>;

 impl<T: PartialEq> PartialEq<IncomparableError<T>> for IncomparableError<T> {
     fn eq(&self, _other: &IncomparableError<T>) -> bool {
-        return false;
+        false
     }
 }

@@ -137,19 +138,16 @@ impl fmt::Display for Error {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         match self {
             Error::Runtime(ref err, ref stack) => {
-                match err {
-                    _ => write!(f, "{}", err),
-                }?;
-
+                write!(f, "{err}")?;
                 if let Some(ref stack_trace) = stack {
-                    write!(f, "\n Stack Trace: \n")?;
+                    writeln!(f, "\n Stack Trace: ")?;
                     for item in stack_trace.iter() {
-                        write!(f, "{}\n", item)?;
+                        writeln!(f, "{item}")?;
                     }
                 }
                 Ok(())
             }
-            _ => write!(f, "{:?}", self),
+            _ => write!(f, "{self:?}"),
         }
     }
 }
@@ -226,9 +224,9 @@ impl From<Error> for () {
     fn from(err: Error) -> Self {}
 }

-impl Into<Value> for ShortReturnType {
-    fn into(self) -> Value {
-        match self {
+impl From<ShortReturnType> for Value {
+    fn from(val: ShortReturnType) -> Self {
+        match val {
             ShortReturnType::ExpectedValue(v) => v,
             ShortReturnType::AssertionFailed(v) => v,
         }
diff --git a/clarity/src/vm/functions/assets.rs b/clarity/src/vm/functions/assets.rs
index 0d004a846a..3dca730928 100644
--- a/clarity/src/vm/functions/assets.rs
+++ b/clarity/src/vm/functions/assets.rs
@@ -210,6 +210,7 @@ pub fn special_stx_transfer_memo(
     }
 }

+#[allow(clippy::unnecessary_fallible_conversions)]
 pub fn special_stx_account(
     args: &[SymbolicExpression],
     env: &mut Environment,
@@ -286,10 +287,7 @@ pub fn special_stx_burn(
     env.add_memory(TypeSignature::PrincipalType.size()? as u64)?;
     env.add_memory(STXBalance::unlocked_and_v1_size as u64)?;

-    let mut burner_snapshot = env
-        .global_context
-        .database
-        .get_stx_balance_snapshot(&from)?;
+    let mut burner_snapshot = env.global_context.database.get_stx_balance_snapshot(from)?;
     if !burner_snapshot.can_transfer(amount)? {
         return clarity_ecode!(StxErrorCodes::NOT_ENOUGH_BALANCE);
     }
diff --git a/clarity/src/vm/functions/conversions.rs b/clarity/src/vm/functions/conversions.rs
index 090f0d2107..142c1308eb 100644
--- a/clarity/src/vm/functions/conversions.rs
+++ b/clarity/src/vm/functions/conversions.rs
@@ -57,13 +57,13 @@ pub fn buff_to_int_generic(
             > BufferLength::try_from(16_u32)
                 .map_err(|_| InterpreterError::Expect("Failed to construct".into()))?
         {
-            return Err(CheckErrors::TypeValueError(
+            Err(CheckErrors::TypeValueError(
                 SequenceType(BufferType(BufferLength::try_from(16_u32).map_err(
                     |_| InterpreterError::Expect("Failed to construct".into()),
                 )?)),
                 value,
             )
-            .into());
+            .into())
         } else {
             let mut transfer_buffer = [0u8; 16];
             let original_slice = sequence_data.as_slice();
@@ -82,15 +82,13 @@ pub fn buff_to_int_generic(
                 Ok(value)
             }
         }
-        _ => {
-            return Err(CheckErrors::TypeValueError(
-                SequenceType(BufferType(BufferLength::try_from(16_u32).map_err(
-                    |_| InterpreterError::Expect("Failed to construct".into()),
-                )?)),
-                value,
-            )
-            .into())
-        }
+        _ => Err(CheckErrors::TypeValueError(
+            SequenceType(BufferType(BufferLength::try_from(16_u32).map_err(
+                |_| InterpreterError::Expect("Failed to construct".into()),
+            )?)),
+            value,
+        )
+        .into()),
     }
 }
diff --git a/clarity/src/vm/functions/crypto.rs b/clarity/src/vm/functions/crypto.rs
index dd55f3a56f..86d92283ca 100644
--- a/clarity/src/vm/functions/crypto.rs
+++ b/clarity/src/vm/functions/crypto.rs
@@ -126,8 +126,8 @@ pub fn special_principal_of(
             pubkey_to_address_v1(pub_key)?
         };
         let principal = addr.to_account_principal();
-        return Ok(Value::okay(Value::Principal(principal))
-            .map_err(|_| InterpreterError::Expect("Failed to construct ok".into()))?);
+        Ok(Value::okay(Value::Principal(principal))
+            .map_err(|_| InterpreterError::Expect("Failed to construct ok".into()))?)
     } else {
         Ok(Value::err_uint(1))
     }
@@ -169,17 +169,14 @@ pub fn special_secp256k1_recover(
         _ => return Err(CheckErrors::TypeValueError(BUFF_65.clone(), param1).into()),
     };

-    match secp256k1_recover(&message, &signature).map_err(|_| CheckErrors::InvalidSecp65k1Signature)
-    {
-        Ok(pubkey) => {
-            return Ok(Value::okay(
-                Value::buff_from(pubkey.to_vec())
-                    .map_err(|_| InterpreterError::Expect("Failed to construct buff".into()))?,
-            )
-            .map_err(|_| InterpreterError::Expect("Failed to construct ok".into()))?)
-        }
-        _ => return Ok(Value::err_uint(1)),
-    };
+    match secp256k1_recover(message, signature).map_err(|_| CheckErrors::InvalidSecp65k1Signature) {
+        Ok(pubkey) => Ok(Value::okay(
+            Value::buff_from(pubkey.to_vec())
+                .map_err(|_| InterpreterError::Expect("Failed to construct buff".into()))?,
+        )
+        .map_err(|_| InterpreterError::Expect("Failed to construct ok".into()))?),
+        _ => Ok(Value::err_uint(1)),
+    }
 }

 pub fn special_secp256k1_verify(
diff --git a/clarity/src/vm/functions/database.rs b/clarity/src/vm/functions/database.rs
index ff14507ead..12fb1cd3da 100644
--- a/clarity/src/vm/functions/database.rs
+++ b/clarity/src/vm/functions/database.rs
@@ -730,14 +730,12 @@ pub fn special_delete_entry_v205(
 /// - `miner-spend-winner` returns the number of satoshis spent by the winning miner for the block at `block-height`
 /// - `miner-spend-total` returns the total number of satoshis spent by all miners for the block at `block-height`
 /// - `block-reward` returns the block reward for the block at `block-height`
-///
 /// # Errors:
 /// - CheckErrors::IncorrectArgumentCount if there aren't 2 arguments.
 /// - CheckErrors::GetStacksBlockInfoExpectPropertyName if `args[0]` isn't a ClarityName.
 /// - CheckErrors::NoSuchStacksBlockInfoProperty if `args[0]` isn't a StacksBlockInfoProperty.
 /// - CheckErrors::TypeValueError if `args[1]` isn't a `uint`.
-
 pub fn special_get_block_info(
     args: &[SymbolicExpression],
     env: &mut Environment,
diff --git a/clarity/src/vm/functions/mod.rs b/clarity/src/vm/functions/mod.rs
index 6482493a29..a8971b3fa0 100644
--- a/clarity/src/vm/functions/mod.rs
+++ b/clarity/src/vm/functions/mod.rs
@@ -79,7 +79,6 @@ mod boolean;
 mod conversions;
 mod crypto;
 mod database;
-#[allow(clippy::result_large_err)]
 pub mod define;
 mod options;
 pub mod principals;
diff --git a/clarity/src/vm/functions/options.rs b/clarity/src/vm/functions/options.rs
index 26829618af..e3305395a5 100644
--- a/clarity/src/vm/functions/options.rs
+++ b/clarity/src/vm/functions/options.rs
@@ -212,7 +212,7 @@ pub fn special_match(
     match input {
         Value::Response(data) => special_match_resp(data, &args[1..], env, context),
         Value::Optional(data) => special_match_opt(data, &args[1..], env, context),
-        _ => return Err(CheckErrors::BadMatchInput(TypeSignature::type_of(&input)?).into()),
+        _ => Err(CheckErrors::BadMatchInput(TypeSignature::type_of(&input)?).into()),
     }
 }

diff --git a/clarity/src/vm/mod.rs b/clarity/src/vm/mod.rs
index ff991f5513..8680c06224 100644
--- a/clarity/src/vm/mod.rs
+++ b/clarity/src/vm/mod.rs
@@ -13,6 +13,7 @@
 //
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.
+#![allow(clippy::result_large_err)]

 pub mod diagnostic;
 pub mod errors;
@@ -172,33 +173,31 @@ fn lookup_variable(name: &str, context: &LocalContext, env: &mut Environment) ->
             name
         ))
         .into())
+    } else if let Some(value) = variables::lookup_reserved_variable(name, context, env)? {
+        Ok(value)
     } else {
-        if let Some(value) = variables::lookup_reserved_variable(name, context, env)? {
+        runtime_cost(
+            ClarityCostFunction::LookupVariableDepth,
+            env,
+            context.depth(),
+        )?;
+        if let Some(value) = context.lookup_variable(name) {
+            runtime_cost(ClarityCostFunction::LookupVariableSize, env, value.size()?)?;
+            Ok(value.clone())
+        } else if let Some(value) = env.contract_context.lookup_variable(name).cloned() {
+            runtime_cost(ClarityCostFunction::LookupVariableSize, env, value.size()?)?;
+            let (value, _) =
+                Value::sanitize_value(env.epoch(), &TypeSignature::type_of(&value)?, value)
+                    .ok_or_else(|| CheckErrors::CouldNotDetermineType)?;
             Ok(value)
-        } else {
-            runtime_cost(
-                ClarityCostFunction::LookupVariableDepth,
-                env,
-                context.depth(),
-            )?;
-            if let Some(value) = context.lookup_variable(name) {
-                runtime_cost(ClarityCostFunction::LookupVariableSize, env, value.size()?)?;
-                Ok(value.clone())
-            } else if let Some(value) = env.contract_context.lookup_variable(name).cloned() {
-                runtime_cost(ClarityCostFunction::LookupVariableSize, env, value.size()?)?;
-                let (value, _) =
-                    Value::sanitize_value(env.epoch(), &TypeSignature::type_of(&value)?, value)
-                        .ok_or_else(|| CheckErrors::CouldNotDetermineType)?;
-                Ok(value)
-            } else if let Some(callable_data) = context.lookup_callable_contract(name) {
-                if env.contract_context.get_clarity_version() < &ClarityVersion::Clarity2 {
-                    Ok(callable_data.contract_identifier.clone().into())
-                } else {
-                    Ok(Value::CallableContract(callable_data.clone()))
-                }
+        } else if let Some(callable_data) = context.lookup_callable_contract(name) {
+            if env.contract_context.get_clarity_version() < &ClarityVersion::Clarity2 {
+                Ok(callable_data.contract_identifier.clone().into())
             } else {
-                Err(CheckErrors::UndefinedVariable(name.to_string()).into())
+                Ok(Value::CallableContract(callable_data.clone()))
             }
+        } else {
+            Err(CheckErrors::UndefinedVariable(name.to_string()).into())
         }
     }
 }
@@ -238,11 +237,7 @@ pub fn apply(
     // only enough to do recursion detection.

     // do recursion check on user functions.
-    let track_recursion = match function {
-        CallableType::UserFunction(_) => true,
-        _ => false,
-    };
-
+    let track_recursion = matches!(function, CallableType::UserFunction(_));
     if track_recursion && env.call_stack.contains(&identifier) {
         return Err(CheckErrors::CircularReference(vec![identifier.to_string()]).into());
     }
@@ -311,9 +306,9 @@ pub fn apply(
     }
 }

-pub fn eval<'a>(
+pub fn eval(
     exp: &SymbolicExpression,
-    env: &'a mut Environment,
+    env: &mut Environment,
     context: &LocalContext,
 ) -> Result<Value> {
     use crate::vm::representations::SymbolicExpressionType::{
@@ -329,7 +324,7 @@ pub fn eval(

     let res = match exp.expr {
         AtomValue(ref value) | LiteralValue(ref value) => Ok(value.clone()),
-        Atom(ref value) => lookup_variable(&value, context, env),
+        Atom(ref value) => lookup_variable(value, context, env),
         List(ref children) => {
             let (function_variable, rest) = children
                 .split_first()
@@ -338,8 +333,8 @@ pub fn eval(
             let function_name = function_variable
                 .match_atom()
                 .ok_or(CheckErrors::BadFunctionName)?;
-            let f = lookup_function(&function_name, env)?;
-            apply(&f, &rest, env, context)
+            let f = lookup_function(function_name, env)?;
+            apply(&f, rest, env, context)
         }
         TraitReference(_, _) | Field(_) => {
             return Err(InterpreterError::BadSymbolicRepresentation(
@@ -360,13 +355,8 @@ pub fn eval(
 }

 pub fn is_reserved(name: &str, version: &ClarityVersion) -> bool {
-    if let Some(_result) = functions::lookup_reserved_functions(name, version) {
-        true
-    } else if variables::is_reserved_name(name, version) {
-        true
-    } else {
-        false
-    }
+    functions::lookup_reserved_functions(name, version).is_some()
+        || variables::is_reserved_name(name, version)
 }

 /// This function evaluates a list of expressions, sharing a global context.
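The `apply` and `is_reserved` hunks above show the two boolean rewrites this patch leans on repeatedly: a `match` that only classifies one variant becomes `matches!`, and an if/else-if ladder that only ever yields `true`/`false` collapses into a single boolean expression. A small illustrative sketch with toy types (not from the codebase):

    enum Callable {
        UserFunction(String),
        Native(&'static str),
    }

    // matches! expands to a match that returns true for the listed pattern.
    fn track_recursion(f: &Callable) -> bool {
        matches!(f, Callable::UserFunction(_))
    }

    // The ladder `if lookup.is_some() { true } else if reserved { true }
    // else { false }` is exactly this disjunction.
    fn is_reserved(lookup: Option<u32>, reserved: bool) -> bool {
        lookup.is_some() || reserved
    }

Both forms compile to the same code; the win is purely in readability and in satisfying clippy's `match_like_matches_macro` and `needless_bool` lints.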
@@ -629,7 +619,7 @@ mod test {
             func_body,
             DefineType::Private,
             &"do_work".into(),
-            &"",
+            "",
         );

         let context = LocalContext::new();
diff --git a/clarity/src/vm/representations.rs b/clarity/src/vm/representations.rs
index c80e3c7467..0f779b479f 100644
--- a/clarity/src/vm/representations.rs
+++ b/clarity/src/vm/representations.rs
@@ -125,8 +125,8 @@ impl StacksMessageCodec for ClarityName {

 impl StacksMessageCodec for ContractName {
     fn consensus_serialize<W: Write>(&self, fd: &mut W) -> Result<(), codec_error> {
-        if self.as_bytes().len() < CONTRACT_MIN_NAME_LENGTH as usize
-            || self.as_bytes().len() > CONTRACT_MAX_NAME_LENGTH as usize
+        if self.as_bytes().len() < CONTRACT_MIN_NAME_LENGTH
+            || self.as_bytes().len() > CONTRACT_MAX_NAME_LENGTH
         {
             return Err(codec_error::SerializeError(format!(
                 "Failed to serialize contract name: too short or too long: {}",
diff --git a/clarity/src/vm/test_util/mod.rs b/clarity/src/vm/test_util/mod.rs
index 295909859f..861c88ad0a 100644
--- a/clarity/src/vm/test_util/mod.rs
+++ b/clarity/src/vm/test_util/mod.rs
@@ -70,7 +70,7 @@ pub fn execute_on_network(s: &str, use_mainnet: bool) -> Value {

 pub fn symbols_from_values(vec: Vec<Value>) -> Vec<SymbolicExpression> {
     vec.into_iter()
-        .map(|value| SymbolicExpression::atom_value(value))
+        .map(SymbolicExpression::atom_value)
         .collect()
 }

diff --git a/clarity/src/vm/tests/assets.rs b/clarity/src/vm/tests/assets.rs
index e42f2c59da..e332f72d46 100644
--- a/clarity/src/vm/tests/assets.rs
+++ b/clarity/src/vm/tests/assets.rs
@@ -1006,7 +1006,7 @@ fn test_simple_naming_system(
         _ => panic!(),
     };

-    let mut placeholder_context =
+    let placeholder_context =
         ContractContext::new(QualifiedContractIdentifier::transient(), version);

     let tokens_contract_id =
@@ -1107,7 +1107,7 @@ fn test_simple_naming_system(
     assert!(is_committed(&result));

     {
-        let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context);
+        let mut env = owned_env.get_exec_environment(None, None, &placeholder_context);
         assert_eq!(
             env.eval_read_only(&names_contract_id.clone(), "(nft-get-owner? names 1)")
                 .unwrap(),
@@ -1378,7 +1378,7 @@ fn test_simple_naming_system(
     assert_eq!(asset_map.to_table().len(), 0);

     {
-        let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context);
+        let mut env = owned_env.get_exec_environment(None, None, &placeholder_context);
         assert_eq!(
             env.eval_read_only(&names_contract_id.clone(), "(nft-get-owner? names 5)")
                 .unwrap(),
diff --git a/clarity/src/vm/tests/contracts.rs b/clarity/src/vm/tests/contracts.rs
index 9cb5aea4b1..94433958c4 100644
--- a/clarity/src/vm/tests/contracts.rs
+++ b/clarity/src/vm/tests/contracts.rs
@@ -119,7 +119,7 @@ fn test_get_block_info_eval(
         Ok(Value::none()),
     ];

-    let mut placeholder_context = ContractContext::new(
+    let placeholder_context = ContractContext::new(
         QualifiedContractIdentifier::transient(),
         ClarityVersion::Clarity2,
     );
@@ -138,7 +138,7 @@ fn test_get_block_info_eval(
         )
         .unwrap();

-        let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context);
+        let mut env = owned_env.get_exec_environment(None, None, &placeholder_context);
         eprintln!("{}", contracts[i]);
         let eval_result = env.eval_read_only(&contract_identifier, "(test-func)");
         match expected[i] {
@@ -172,13 +172,13 @@ fn test_contract_caller(epoch: StacksEpochId, mut env_factory: MemoryEnvironment
         (as-contract (contract-call? .contract-a get-caller)))";
.contract-a get-caller)))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("contract-a").unwrap(), contract_a, @@ -200,7 +200,7 @@ fn test_contract_caller(epoch: StacksEpochId, mut env_factory: MemoryEnvironment let mut env = owned_env.get_exec_environment( Some(p1.clone().expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -312,7 +312,7 @@ fn test_tx_sponsor(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGener .expect_principal() .unwrap(); let p2 = execute("'SM2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQVX8X0G"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); @@ -324,11 +324,8 @@ fn test_tx_sponsor(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGener }; { - let mut env = owned_env.get_exec_environment( - Some(p1.clone()), - sponsor.clone(), - &mut placeholder_context, - ); + let mut env = + owned_env.get_exec_environment(Some(p1.clone()), sponsor.clone(), &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("contract-a").unwrap(), contract_a, @@ -345,11 +342,8 @@ fn test_tx_sponsor(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGener // Sponsor is equal to some(principal) in this code block. { - let mut env = owned_env.get_exec_environment( - Some(p1.clone()), - sponsor.clone(), - &mut placeholder_context, - ); + let mut env = + owned_env.get_exec_environment(Some(p1.clone()), sponsor.clone(), &placeholder_context); tx_sponsor_contract_asserts(&mut env, sponsor); } @@ -357,7 +351,7 @@ fn test_tx_sponsor(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGener { let sponsor = None; let mut env = - owned_env.get_exec_environment(Some(p1), sponsor.clone(), &mut placeholder_context); + owned_env.get_exec_environment(Some(p1), sponsor.clone(), &placeholder_context); tx_sponsor_contract_asserts(&mut env, sponsor); } } @@ -381,13 +375,13 @@ fn test_fully_qualified_contract_call( (as-contract (contract-call? 
.contract-a get-caller)))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("contract-a").unwrap(), contract_a, @@ -409,7 +403,7 @@ fn test_fully_qualified_contract_call( let mut env = owned_env.get_exec_environment( Some(p1.clone().expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -520,13 +514,13 @@ fn test_simple_naming_system(epoch: StacksEpochId, mut env_factory: MemoryEnviro let name_hash_expensive_0 = execute("(hash160 1)"); let name_hash_expensive_1 = execute("(hash160 2)"); let name_hash_cheap_0 = execute("(hash160 100001)"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); let contract_identifier = QualifiedContractIdentifier::local("tokens").unwrap(); env.initialize_contract(contract_identifier, tokens_contract, ASTRules::PrecheckSize) @@ -541,7 +535,7 @@ fn test_simple_naming_system(epoch: StacksEpochId, mut env_factory: MemoryEnviro let mut env = owned_env.get_exec_environment( Some(p2.clone().expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert!(is_err_code( @@ -560,7 +554,7 @@ fn test_simple_naming_system(epoch: StacksEpochId, mut env_factory: MemoryEnviro let mut env = owned_env.get_exec_environment( Some(p1.clone().expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert!(is_committed( &env.execute_contract( @@ -588,7 +582,7 @@ fn test_simple_naming_system(epoch: StacksEpochId, mut env_factory: MemoryEnviro let mut env = owned_env.get_exec_environment( Some(p2.clone().expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert!(is_err_code( &env.execute_contract( @@ -607,7 +601,7 @@ fn test_simple_naming_system(epoch: StacksEpochId, mut env_factory: MemoryEnviro let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert!(is_committed( &env.execute_contract( @@ -625,7 +619,7 @@ fn test_simple_naming_system(epoch: StacksEpochId, mut env_factory: MemoryEnviro let mut env = owned_env.get_exec_environment( Some(p2.clone().expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert!(is_committed( &env.execute_contract( @@ -690,7 +684,7 @@ fn test_simple_contract_call(epoch: StacksEpochId, mut env_factory: MemoryEnviro (contract-call? 
.factorial-contract compute 8008)) "; - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); @@ -698,7 +692,7 @@ fn test_simple_contract_call(epoch: StacksEpochId, mut env_factory: MemoryEnviro let mut env = owned_env.get_exec_environment( Some(get_principal().expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); let contract_identifier = QualifiedContractIdentifier::local("factorial-contract").unwrap(); @@ -776,12 +770,12 @@ fn test_aborts(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGenerator (contract-call? .contract-1 modify-data 105 105) (err 1))) "; - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); let contract_identifier = QualifiedContractIdentifier::local("contract-1").unwrap(); env.initialize_contract(contract_identifier, contract_1, ASTRules::PrecheckSize) @@ -890,12 +884,12 @@ fn test_aborts(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGenerator fn test_factorial_contract(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGenerator) { let mut owned_env = env_factory.get_env(epoch); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); let contract_identifier = QualifiedContractIdentifier::local("factorial").unwrap(); env.initialize_contract( @@ -1092,9 +1086,9 @@ fn test_cc_stack_depth( 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1)) (bar) "; - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); let contract_identifier = QualifiedContractIdentifier::local("c-foo").unwrap(); env.initialize_contract(contract_identifier, contract_one, ASTRules::PrecheckSize) @@ -1133,9 +1127,9 @@ fn test_cc_trait_stack_depth( (bar .c-foo) "; - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); let contract_identifier = QualifiedContractIdentifier::local("c-foo").unwrap(); env.initialize_contract(contract_identifier, contract_one, ASTRules::PrecheckSize) @@ -1156,7 +1150,7 @@ fn test_eval_with_non_existing_contract( ) { let mut owned_env = env_factory.get_env(epoch); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); @@ -1164,7 +1158,7 @@ fn test_eval_with_non_existing_contract( let mut env = owned_env.get_exec_environment( Some(get_principal().expect_principal().unwrap()), None, - &mut 
placeholder_context, + &placeholder_context, ); let result = env.eval_read_only( diff --git a/clarity/src/vm/tests/mod.rs b/clarity/src/vm/tests/mod.rs index 5fa58b507b..cada7e973b 100644 --- a/clarity/src/vm/tests/mod.rs +++ b/clarity/src/vm/tests/mod.rs @@ -36,7 +36,7 @@ mod traits; mod variables; #[cfg(any(test, feature = "testing"))] -impl<'a, 'hooks> OwnedEnvironment<'a, 'hooks> { +impl OwnedEnvironment<'_, '_> { pub fn set_tenure_height(&mut self, tenure_height: u32) { self.context.database.begin(); self.context diff --git a/clarity/src/vm/tests/principals.rs b/clarity/src/vm/tests/principals.rs index 44f3447bad..98db149273 100644 --- a/clarity/src/vm/tests/principals.rs +++ b/clarity/src/vm/tests/principals.rs @@ -711,7 +711,7 @@ fn test_principal_construct_good() { data: Box::new(Value::Principal(PrincipalData::Contract( QualifiedContractIdentifier::new( StandardPrincipalData(22, transfer_buffer), - "hello-world".try_into().unwrap() + "hello-world".into() ) ))) }), @@ -735,7 +735,7 @@ fn test_principal_construct_good() { data: Box::new(Value::Principal(PrincipalData::Contract( QualifiedContractIdentifier::new( StandardPrincipalData(20, transfer_buffer), - "hello-world".try_into().unwrap() + "hello-world".into() ) ))) }), @@ -799,7 +799,7 @@ fn test_principal_construct_good() { data: Box::new(Value::Principal(PrincipalData::Contract( QualifiedContractIdentifier::new( StandardPrincipalData(26, transfer_buffer), - "hello-world".try_into().unwrap() + "hello-world".into() ) ))) }), @@ -823,7 +823,7 @@ fn test_principal_construct_good() { data: Box::new(Value::Principal(PrincipalData::Contract( QualifiedContractIdentifier::new( StandardPrincipalData(21, transfer_buffer), - "hello-world".try_into().unwrap() + "hello-world".into() ) ))) }), @@ -854,7 +854,7 @@ fn create_principal_from_strings( // contract principal requested Value::Principal(PrincipalData::Contract(QualifiedContractIdentifier::new( StandardPrincipalData(version_array[0], principal_array), - name.try_into().unwrap(), + name.into(), ))) } else { // standard principal requested diff --git a/clarity/src/vm/tests/simple_apply_eval.rs b/clarity/src/vm/tests/simple_apply_eval.rs index d9e52c0222..f6dbd87090 100644 --- a/clarity/src/vm/tests/simple_apply_eval.rs +++ b/clarity/src/vm/tests/simple_apply_eval.rs @@ -73,7 +73,7 @@ fn test_simple_let(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId (+ z y)) x))"; let contract_id = QualifiedContractIdentifier::transient(); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); if let Ok(parsed_program) = parse(&contract_id, program, version, epoch) { let context = LocalContext::new(); @@ -84,7 +84,7 @@ fn test_simple_let(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId Ok(Value::Int(7)), eval( &parsed_program[0], - &mut env.get_exec_environment(None, None, &mut placeholder_context), + &mut env.get_exec_environment(None, None, &placeholder_context), &context ) ); diff --git a/clarity/src/vm/tests/traits.rs b/clarity/src/vm/tests/traits.rs index 97c4292b0d..d3fcfb7779 100644 --- a/clarity/src/vm/tests/traits.rs +++ b/clarity/src/vm/tests/traits.rs @@ -40,11 +40,11 @@ fn test_dynamic_dispatch_by_defining_trait( let target_contract = "(define-public (get-1 (x uint)) (ok u1))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = 
owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -66,7 +66,7 @@ fn test_dynamic_dispatch_by_defining_trait( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -98,11 +98,11 @@ fn test_dynamic_dispatch_pass_trait_nested_in_let( let target_contract = "(define-public (get-1 (x uint)) (ok u1))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -124,7 +124,7 @@ fn test_dynamic_dispatch_pass_trait_nested_in_let( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -155,11 +155,11 @@ fn test_dynamic_dispatch_pass_trait( let target_contract = "(define-public (get-1 (x uint)) (ok u1))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -181,7 +181,7 @@ fn test_dynamic_dispatch_pass_trait( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -211,11 +211,11 @@ fn test_dynamic_dispatch_intra_contract_call( (define-public (get-1 (x uint)) (ok u1))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("contract-defining-trait").unwrap(), contract_defining_trait, @@ -237,7 +237,7 @@ fn test_dynamic_dispatch_intra_contract_call( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); let err_result = env .execute_contract( @@ -270,11 +270,11 @@ fn test_dynamic_dispatch_by_implementing_imported_trait( (define-public (get-1 (x uint)) (ok u1))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, 
&placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("contract-defining-trait").unwrap(), contract_defining_trait, @@ -302,7 +302,7 @@ fn test_dynamic_dispatch_by_implementing_imported_trait( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -335,11 +335,11 @@ fn test_dynamic_dispatch_by_implementing_imported_trait_mul_funcs( (define-public (get-2 (x uint)) (ok u2))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("contract-defining-trait").unwrap(), contract_defining_trait, @@ -367,7 +367,7 @@ fn test_dynamic_dispatch_by_implementing_imported_trait_mul_funcs( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -397,11 +397,11 @@ fn test_dynamic_dispatch_by_importing_trait( let target_contract = "(define-public (get-1 (x uint)) (ok u1))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("contract-defining-trait").unwrap(), contract_defining_trait, @@ -429,7 +429,7 @@ fn test_dynamic_dispatch_by_importing_trait( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -466,11 +466,11 @@ fn test_dynamic_dispatch_including_nested_trait( let target_nested_contract = "(define-public (get-a (x uint)) (ok u99))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("contract-defining-nested-trait").unwrap(), contract_defining_nested_trait, @@ -513,7 +513,7 @@ fn test_dynamic_dispatch_including_nested_trait( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -542,11 +542,11 @@ fn test_dynamic_dispatch_mismatched_args( let target_contract = "(define-public (get-1 (x int)) (ok u1))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); 
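The bulk of the test churn in these files is one mechanical refactor: `get_exec_environment` now takes the placeholder `ContractContext` by shared reference, so every `let mut placeholder_context` binding drops its `mut`. A minimal standalone sketch of why the shared borrow suffices, using toy stand-ins rather than the real clarity types:

```rust
// Toy stand-ins for ContractContext / OwnedEnvironment; the real types
// live in clarity::vm and carry far more state.
struct Context {
    name: String,
}

struct Env<'a> {
    // The execution environment only reads the placeholder context, so a
    // shared borrow is enough. Taking `&'a mut Context` would force every
    // caller to declare its binding `mut` for no benefit.
    context: &'a Context,
}

struct OwnedEnv;

impl OwnedEnv {
    // After the refactor: `&Context` instead of `&mut Context`.
    fn get_exec_environment<'a>(&'a mut self, context: &'a Context) -> Env<'a> {
        Env { context }
    }
}

fn main() {
    // No `mut` needed on the context binding anymore.
    let placeholder_context = Context { name: "transient".into() };
    let mut owned_env = OwnedEnv;
    let env = owned_env.get_exec_environment(&placeholder_context);
    println!("context: {}", env.context.name);
}
```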
env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -568,7 +568,7 @@ fn test_dynamic_dispatch_mismatched_args( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); let err_result = env .execute_contract( @@ -599,11 +599,11 @@ fn test_dynamic_dispatch_mismatched_returned( let target_contract = "(define-public (get-1 (x uint)) (ok 1))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -625,7 +625,7 @@ fn test_dynamic_dispatch_mismatched_returned( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); let err_result = env .execute_contract( @@ -659,11 +659,11 @@ fn test_reentrant_dynamic_dispatch( "(define-public (get-1 (x uint)) (contract-call? .dispatching-contract wrapped-get-1 .target-contract))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -685,7 +685,7 @@ fn test_reentrant_dynamic_dispatch( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); let err_result = env .execute_contract( @@ -716,11 +716,11 @@ fn test_readwrite_dynamic_dispatch( let target_contract = "(define-read-only (get-1 (x uint)) (ok u1))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -742,7 +742,7 @@ fn test_readwrite_dynamic_dispatch( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); let err_result = env .execute_contract( @@ -773,11 +773,11 @@ fn test_readwrite_violation_dynamic_dispatch( let target_contract = "(define-public (get-1 (x uint)) (ok u1))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( 
QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -799,7 +799,7 @@ fn test_readwrite_violation_dynamic_dispatch( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); let err_result = env .execute_contract( @@ -837,11 +837,11 @@ fn test_bad_call_with_trait( (contract-call? .dispatch wrapped-get-1 contract))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("defun").unwrap(), contract_defining_trait, @@ -872,7 +872,7 @@ fn test_bad_call_with_trait( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -905,11 +905,11 @@ fn test_good_call_with_trait( (contract-call? .dispatch wrapped-get-1 .implem))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("defun").unwrap(), contract_defining_trait, @@ -940,7 +940,7 @@ fn test_good_call_with_trait( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -974,11 +974,11 @@ fn test_good_call_2_with_trait( (contract-call? 
.dispatch wrapped-get-1 contract))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("defun").unwrap(), contract_defining_trait, @@ -1012,7 +1012,7 @@ fn test_good_call_2_with_trait( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( @@ -1045,11 +1045,11 @@ fn test_dynamic_dispatch_pass_literal_principal_as_trait_in_user_defined_functio (define-public (get-1 (x uint)) (ok u1))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("contract-defining-trait").unwrap(), contract_defining_trait, @@ -1077,7 +1077,7 @@ fn test_dynamic_dispatch_pass_literal_principal_as_trait_in_user_defined_functio let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -1108,11 +1108,11 @@ fn test_contract_of_value( (define-public (get-1 (x uint)) (ok u99))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("defun").unwrap(), contract_defining_trait, @@ -1141,7 +1141,7 @@ fn test_contract_of_value( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( @@ -1175,11 +1175,11 @@ fn test_contract_of_no_impl( (define-public (get-1 (x uint)) (ok u99))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("defun").unwrap(), contract_defining_trait, @@ -1208,7 +1208,7 @@ fn test_contract_of_no_impl( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( @@ -1240,11 +1240,11 @@ fn test_return_trait_with_contract_of_wrapped_in_begin( let target_contract = "(define-public (get-1 (x uint)) (ok u1))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = 
owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -1266,7 +1266,7 @@ fn test_return_trait_with_contract_of_wrapped_in_begin( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -1297,11 +1297,11 @@ fn test_return_trait_with_contract_of_wrapped_in_let( let target_contract = "(define-public (get-1 (x uint)) (ok u1))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -1323,7 +1323,7 @@ fn test_return_trait_with_contract_of_wrapped_in_let( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -1352,11 +1352,11 @@ fn test_return_trait_with_contract_of( let target_contract = "(define-public (get-1 (x uint)) (ok u1))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -1378,7 +1378,7 @@ fn test_return_trait_with_contract_of( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -1414,13 +1414,13 @@ fn test_pass_trait_to_subtrait(epoch: StacksEpochId, mut env_factory: MemoryEnvi (define-public (get-2 (a uint)) (ok a))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -1443,7 +1443,7 @@ fn test_pass_trait_to_subtrait(epoch: StacksEpochId, mut env_factory: MemoryEnvi let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -1476,13 +1476,13 @@ fn test_embedded_trait(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentG let target_contract = "(define-public (echo (a uint)) (ok a))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( 
QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -1506,7 +1506,7 @@ fn test_embedded_trait(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentG let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -1549,13 +1549,13 @@ fn test_pass_embedded_trait_to_subtrait_optional( (define-public (get-2 (a uint)) (ok a))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -1578,7 +1578,7 @@ fn test_pass_embedded_trait_to_subtrait_optional( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -1621,13 +1621,13 @@ fn test_pass_embedded_trait_to_subtrait_ok( (define-public (get-2 (a uint)) (ok a))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -1650,7 +1650,7 @@ fn test_pass_embedded_trait_to_subtrait_ok( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -1693,13 +1693,13 @@ fn test_pass_embedded_trait_to_subtrait_err( (define-public (get-2 (a uint)) (ok a))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -1722,7 +1722,7 @@ fn test_pass_embedded_trait_to_subtrait_err( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -1765,13 +1765,13 @@ fn test_pass_embedded_trait_to_subtrait_list( (define-public (get-2 (a uint)) (ok a))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = 
ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -1794,7 +1794,7 @@ fn test_pass_embedded_trait_to_subtrait_list( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -1840,13 +1840,13 @@ fn test_pass_embedded_trait_to_subtrait_list_option( (define-public (get-2 (a uint)) (ok a))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -1869,7 +1869,7 @@ fn test_pass_embedded_trait_to_subtrait_list_option( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -1915,13 +1915,13 @@ fn test_pass_embedded_trait_to_subtrait_option_list( (define-public (get-2 (a uint)) (ok a))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -1944,7 +1944,7 @@ fn test_pass_embedded_trait_to_subtrait_option_list( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -1976,13 +1976,13 @@ fn test_let_trait(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGenera let target_contract = "(define-public (echo (a uint)) (ok a))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -2005,7 +2005,7 @@ fn test_let_trait(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGenera let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -2041,13 +2041,13 @@ fn test_let3_trait(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGener let target_contract = "(define-public (echo (a 
uint)) (ok a))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -2070,7 +2070,7 @@ fn test_let3_trait(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGener let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -2102,13 +2102,13 @@ fn test_pass_principal_literal_to_trait( let target_contract = "(define-public (get-1 (a uint)) (ok a))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -2131,7 +2131,7 @@ fn test_pass_principal_literal_to_trait( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( diff --git a/clarity/src/vm/tests/variables.rs b/clarity/src/vm/tests/variables.rs index 5b392bb678..e862aeb0df 100644 --- a/clarity/src/vm/tests/variables.rs +++ b/clarity/src/vm/tests/variables.rs @@ -36,13 +36,13 @@ fn test_block_height( ) { let contract = "(define-read-only (test-func) block-height)"; - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); let mut owned_env = tl_env_factory.get_env(epoch); let contract_identifier = QualifiedContractIdentifier::local("test-contract").unwrap(); - let mut exprs = parse(&contract_identifier, &contract, version, epoch).unwrap(); + let mut exprs = parse(&contract_identifier, contract, version, epoch).unwrap(); let mut marf = MemoryBackingStore::new(); let mut db = marf.as_analysis_db(); let analysis = db.execute(|db| { @@ -70,7 +70,7 @@ fn test_block_height( ASTRules::PrecheckSize, ); - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); // Call the function let eval_result = env.eval_read_only(&contract_identifier, "(test-func)"); @@ -94,13 +94,13 @@ fn test_stacks_block_height( ) { let contract = "(define-read-only (test-func) stacks-block-height)"; - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); let mut owned_env = tl_env_factory.get_env(epoch); let contract_identifier = QualifiedContractIdentifier::local("test-contract").unwrap(); - let mut exprs = parse(&contract_identifier, &contract, version, epoch).unwrap(); + let mut exprs = parse(&contract_identifier, contract, version, epoch).unwrap(); let mut marf = MemoryBackingStore::new(); let mut db = marf.as_analysis_db(); let 
analysis = db.execute(|db| { @@ -128,7 +128,7 @@ fn test_stacks_block_height( ASTRules::PrecheckSize, ); - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); // Call the function let eval_result = env.eval_read_only(&contract_identifier, "(test-func)"); @@ -154,13 +154,13 @@ fn test_tenure_height( ) { let contract = "(define-read-only (test-func) tenure-height)"; - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); let mut owned_env = tl_env_factory.get_env(epoch); let contract_identifier = QualifiedContractIdentifier::local("test-contract").unwrap(); - let mut exprs = parse(&contract_identifier, &contract, version, epoch).unwrap(); + let mut exprs = parse(&contract_identifier, contract, version, epoch).unwrap(); let mut marf = MemoryBackingStore::new(); let mut db = marf.as_analysis_db(); let analysis = db.execute(|db| { @@ -188,7 +188,7 @@ fn test_tenure_height( ASTRules::PrecheckSize, ); - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); // Call the function let eval_result = env.eval_read_only(&contract_identifier, "(test-func)"); @@ -213,6 +213,7 @@ enum WhenError { } #[cfg(test)] +#[allow(clippy::type_complexity)] fn expect_contract_error( version: ClarityVersion, epoch: StacksEpochId, @@ -226,13 +227,13 @@ fn expect_contract_error( )], expected_success: Value, ) { - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::local(name).unwrap(), version); let mut owned_env = tl_env_factory.get_env(epoch); let contract_identifier = QualifiedContractIdentifier::local(name).unwrap(); - let mut exprs = parse(&contract_identifier, &contract, version, epoch).unwrap(); + let mut exprs = parse(&contract_identifier, contract, version, epoch).unwrap(); let mut marf = MemoryBackingStore::new(); let mut db = marf.as_analysis_db(); let analysis = db.execute(|db| { @@ -280,7 +281,7 @@ fn expect_contract_error( } } - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); // Call the function let eval_result = env.eval_read_only(&contract_identifier, "(test-func)"); diff --git a/clarity/src/vm/tooling/mod.rs b/clarity/src/vm/tooling/mod.rs index f218b2ccab..5b89145588 100644 --- a/clarity/src/vm/tooling/mod.rs +++ b/clarity/src/vm/tooling/mod.rs @@ -21,7 +21,7 @@ pub fn mem_type_check( epoch: StacksEpochId, ) -> CheckResult<(Option, ContractAnalysis)> { let contract_identifier = QualifiedContractIdentifier::transient(); - let mut contract = build_ast_with_rules( + let contract = build_ast_with_rules( &contract_identifier, snippet, &mut (), @@ -37,7 +37,7 @@ pub fn mem_type_check( let cost_tracker = LimitedCostTracker::new_free(); match run_analysis( &QualifiedContractIdentifier::transient(), - &mut contract, + &contract, &mut analysis_db, false, cost_tracker, @@ -51,7 +51,7 @@ pub fn mem_type_check( .type_map .as_ref() .unwrap() - .get_type_expected(&x.expressions.last().unwrap()) + .get_type_expected(x.expressions.last().unwrap()) .cloned(); Ok((first_type, x)) } diff --git a/clarity/src/vm/types/mod.rs b/clarity/src/vm/types/mod.rs index e1837ee034..ef4b565834 100644 --- a/clarity/src/vm/types/mod.rs +++ b/clarity/src/vm/types/mod.rs 
@@ -14,9 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#[allow(clippy::result_large_err)] pub mod serialization; -#[allow(clippy::result_large_err)] pub mod signatures; use std::collections::btree_map::Entry; @@ -279,6 +277,10 @@ impl SequenceData { } } + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + pub fn element_at(self, index: usize) -> Result> { if self.len() <= index { return Ok(None); @@ -613,7 +615,7 @@ pub trait SequencedValue { fn atom_values(&mut self) -> Result> { self.drained_items() .iter() - .map(|item| Ok(SymbolicExpression::atom_value(Self::to_value(&item)?))) + .map(|item| Ok(SymbolicExpression::atom_value(Self::to_value(item)?))) .collect() } } @@ -751,11 +753,11 @@ define_named_enum!(TenureInfoProperty { impl OptionalData { pub fn type_signature(&self) -> std::result::Result { let type_result = match self.data { - Some(ref v) => TypeSignature::new_option(TypeSignature::type_of(&v)?), + Some(ref v) => TypeSignature::new_option(TypeSignature::type_of(v)?), None => TypeSignature::new_option(TypeSignature::NoType), }; type_result.map_err(|_| { - CheckErrors::Expects("Should not have constructed too large of a type.".into()).into() + CheckErrors::Expects("Should not have constructed too large of a type.".into()) }) } } @@ -773,7 +775,7 @@ impl ResponseData { ), }; type_result.map_err(|_| { - CheckErrors::Expects("Should not have constructed too large of a type.".into()).into() + CheckErrors::Expects("Should not have constructed too large of a type.".into()) }) } } @@ -1265,6 +1267,10 @@ impl ListData { .map_err(|_| InterpreterError::Expect("Data length should be valid".into()).into()) } + pub fn is_empty(&self) -> bool { + self.data.is_empty() + } + fn append(&mut self, epoch: &StacksEpochId, other_seq: ListData) -> Result<()> { let entry_type_a = self.type_signature.get_list_item_type(); let entry_type_b = other_seq.type_signature.get_list_item_type(); diff --git a/clarity/src/vm/types/serialization.rs b/clarity/src/vm/types/serialization.rs index 7dcda788a8..48030519c8 100644 --- a/clarity/src/vm/types/serialization.rs +++ b/clarity/src/vm/types/serialization.rs @@ -782,14 +782,12 @@ impl Value { expected_type.unwrap(), )); } - } else { - if len as u64 != tuple_type.len() { - // unwrap is safe because of the match condition - #[allow(clippy::unwrap_used)] - return Err(SerializationError::DeserializeExpected( - expected_type.unwrap(), - )); - } + } else if u64::from(len) != tuple_type.len() { + // unwrap is safe because of the match condition + #[allow(clippy::unwrap_used)] + return Err(SerializationError::DeserializeExpected( + expected_type.unwrap(), + )); } Some(tuple_type) } @@ -1344,7 +1342,7 @@ impl ClaritySerializable for u32 { impl ClarityDeserializable for u32 { fn deserialize(input: &str) -> Result { - let bytes = hex_bytes(&input).map_err(|_| { + let bytes = hex_bytes(input).map_err(|_| { InterpreterError::Expect("u32 deserialization: failed decoding bytes.".into()) })?; assert_eq!(bytes.len(), 4); @@ -1419,13 +1417,10 @@ pub mod tests { } fn test_bad_expectation(v: Value, e: TypeSignature) { - assert!( - match Value::try_deserialize_hex(&v.serialize_to_hex().unwrap(), &e, false).unwrap_err() - { - SerializationError::DeserializeExpected(_) => true, - _ => false, - } - ) + assert!(matches!( + Value::try_deserialize_hex(&v.serialize_to_hex().unwrap(), &e, false).unwrap_err(), + SerializationError::DeserializeExpected(_) + )); } #[test] @@ -1704,40 +1699,37 @@ pub mod tests { ); 
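The assertion rewrites here and just below all apply the same clippy-driven idiom: an `assert!` wrapping a `match` whose arms only yield `true`/`false` collapses into `assert!(matches!(..))`. A self-contained illustration, with a toy error enum standing in for `SerializationError`:

```rust
// Toy error enum standing in for SerializationError.
#[derive(Debug)]
enum SerErr {
    DeserializeExpected(&'static str),
    BadTypePrefix(u8),
}

fn main() {
    let err = SerErr::DeserializeExpected("tuple");
    println!("checking {err:?}");

    // Before: a match used purely as a boolean predicate
    // (clippy::match_like_matches_macro fires on this shape).
    assert!(match err {
        SerErr::DeserializeExpected(_) => true,
        _ => false,
    });

    // After: matches! states the same predicate directly.
    let err = SerErr::BadTypePrefix(0xff);
    println!("checking {err:?}");
    assert!(matches!(err, SerErr::BadTypePrefix(_)));
}
```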
// field number not equal to expectations - assert!(match Value::try_deserialize_hex( - &t_3.serialize_to_hex().unwrap(), - &TypeSignature::type_of(&t_1).unwrap(), - false - ) - .unwrap_err() - { - SerializationError::DeserializeExpected(_) => true, - _ => false, - }); + assert!(matches!( + Value::try_deserialize_hex( + &t_3.serialize_to_hex().unwrap(), + &TypeSignature::type_of(&t_1).unwrap(), + false + ) + .unwrap_err(), + SerializationError::DeserializeExpected(_) + )); // field type mismatch - assert!(match Value::try_deserialize_hex( - &t_2.serialize_to_hex().unwrap(), - &TypeSignature::type_of(&t_1).unwrap(), - false - ) - .unwrap_err() - { - SerializationError::DeserializeExpected(_) => true, - _ => false, - }); + assert!(matches!( + Value::try_deserialize_hex( + &t_2.serialize_to_hex().unwrap(), + &TypeSignature::type_of(&t_1).unwrap(), + false + ) + .unwrap_err(), + SerializationError::DeserializeExpected(_) + )); // field not-present in expected - assert!(match Value::try_deserialize_hex( - &t_1.serialize_to_hex().unwrap(), - &TypeSignature::type_of(&t_4).unwrap(), - false - ) - .unwrap_err() - { - SerializationError::DeserializeExpected(_) => true, - _ => false, - }); + assert!(matches!( + Value::try_deserialize_hex( + &t_1.serialize_to_hex().unwrap(), + &TypeSignature::type_of(&t_4).unwrap(), + false + ) + .unwrap_err(), + SerializationError::DeserializeExpected(_) + )); } #[apply(test_clarity_versions)] diff --git a/clarity/src/vm/types/signatures.rs b/clarity/src/vm/types/signatures.rs index b3984c5251..a85c56ff3e 100644 --- a/clarity/src/vm/types/signatures.rs +++ b/clarity/src/vm/types/signatures.rs @@ -589,9 +589,7 @@ impl TypeSignature { | StacksEpochId::Epoch25 | StacksEpochId::Epoch30 | StacksEpochId::Epoch31 => self.admits_type_v2_1(other), - StacksEpochId::Epoch10 => { - return Err(CheckErrors::Expects("epoch 1.0 not supported".into())) - } + StacksEpochId::Epoch10 => Err(CheckErrors::Expects("epoch 1.0 not supported".into())), } } @@ -678,16 +676,12 @@ impl TypeSignature { } } NoType => Err(CheckErrors::CouldNotDetermineType), - CallableType(_) => { - return Err(CheckErrors::Expects( - "CallableType should not be used in epoch v2.0".into(), - )) - } - ListUnionType(_) => { - return Err(CheckErrors::Expects( - "ListUnionType should not be used in epoch v2.0".into(), - )) - } + CallableType(_) => Err(CheckErrors::Expects( + "CallableType should not be used in epoch v2.0".into(), + )), + ListUnionType(_) => Err(CheckErrors::Expects( + "ListUnionType should not be used in epoch v2.0".into(), + )), _ => Ok(other == self), } } @@ -1162,9 +1156,7 @@ impl TypeSignature { | StacksEpochId::Epoch25 | StacksEpochId::Epoch30 | StacksEpochId::Epoch31 => Self::least_supertype_v2_1(a, b), - StacksEpochId::Epoch10 => { - return Err(CheckErrors::Expects("epoch 1.0 not supported".into())) - } + StacksEpochId::Epoch10 => Err(CheckErrors::Expects("epoch 1.0 not supported".into())), } } @@ -1455,8 +1447,7 @@ impl TypeSignature { // Checks if resulting type signature is of valid size. pub fn construct_parent_list_type(args: &[Value]) -> Result { - let children_types: Result> = - args.iter().map(|x| TypeSignature::type_of(x)).collect(); + let children_types: Result> = args.iter().map(TypeSignature::type_of).collect(); TypeSignature::parent_list_type(&children_types?) } @@ -1660,7 +1651,7 @@ impl TypeSignature { ) -> Result> { let mut trait_signature: BTreeMap = BTreeMap::new(); let functions_types = type_args - .get(0) + .first() .ok_or_else(|| CheckErrors::InvalidTypeDescription)? 
.match_list() .ok_or(CheckErrors::DefineTraitBadSignature)?; @@ -1682,11 +1673,10 @@ impl TypeSignature { let fn_args_exprs = args[1] .match_list() .ok_or(CheckErrors::DefineTraitBadSignature)?; - let mut fn_args = Vec::with_capacity(fn_args_exprs.len()); - for arg_type in fn_args_exprs.into_iter() { - let arg_t = TypeSignature::parse_type_repr(epoch, arg_type, accounting)?; - fn_args.push(arg_t); - } + let fn_args = fn_args_exprs + .iter() + .map(|arg_type| TypeSignature::parse_type_repr(epoch, arg_type, accounting)) + .collect::>()?; // Extract function's type return - must be a response let fn_return = match TypeSignature::parse_type_repr(epoch, &args[2], accounting) { @@ -1766,7 +1756,6 @@ impl TypeSignature { "FAIL: .size() overflowed on too large of a type. construction should have failed!" .into(), ) - .into() }) } @@ -1885,9 +1874,8 @@ impl TupleTypeSignature { } pub fn size(&self) -> Result { - self.inner_size()?.ok_or_else(|| { - CheckErrors::Expects("size() overflowed on a constructed type.".into()).into() - }) + self.inner_size()? + .ok_or_else(|| CheckErrors::Expects("size() overflowed on a constructed type.".into())) } fn max_depth(&self) -> u8 { diff --git a/contrib/helm/stacks-blockchain/README.md b/contrib/helm/stacks-blockchain/README.md index 6cbd1fd957..c54107cdc4 100644 --- a/contrib/helm/stacks-blockchain/README.md +++ b/contrib/helm/stacks-blockchain/README.md @@ -86,7 +86,7 @@ The following tables lists the configurable parameters of the stacks-blockchain | node.volumeMounts | Additional volumeMounts for the node | [] | | node.extraContainers | Additional containers to run alongside the node. Useful if adding a sidecar | [] | | node.initContainers | Containers which are run before the node container is started | [] | -| config | More configs can be added than what's shown below.All children fields under the node, burnchain, and ustx_balance fields will be converted from YAML to valid TOML format in the Configmap.

For info on more available config fields, please reference to our [example config files located here](https://github.com/blockstack/stacks-blockchain/tree/master/testnet/stacks-node/conf). | | +| config | More configs can be added than what's shown below. All children fields under the node, burnchain, and ustx_balance fields will be converted from YAML to valid TOML format in the Configmap.<br>

For info on more available config fields, please refer to our [example config files located here](https://github.com/blockstack/stacks-blockchain/tree/master/sample/conf). | | | config.node.rpc_bind | | 0.0.0.0:20443 | | config.node.p2p_bind | | 0.0.0.0:20444 | | config.node.seed | Replace with your private key if deploying a miner node | nil | diff --git a/contrib/helm/stacks-blockchain/values.yaml b/contrib/helm/stacks-blockchain/values.yaml index 2f46aadb12..efa64f2aba 100644 --- a/contrib/helm/stacks-blockchain/values.yaml +++ b/contrib/helm/stacks-blockchain/values.yaml @@ -212,7 +212,7 @@ node: ## ## For more info, please reference our docs and example config files: ## https://docs.stacks.co/docs/nodes-and-miners/run-a-node -## https://github.com/blockstack/stacks-blockchain/tree/master/testnet/stacks-node/conf +## https://github.com/blockstack/stacks-blockchain/tree/master/sample/conf ## config: # More configs can be added than what's shown below. diff --git a/docs/init.md b/docs/init.md index 5bf157e721..b07833c99e 100644 --- a/docs/init.md +++ b/docs/init.md @@ -14,7 +14,7 @@ The MacOS configuration assumes stacks-blockchain will be set up for the current ## Configuration -For an example configuration file that describes the configuration settings, see [mainnet-follower-conf.toml](../testnet/stacks-node/conf/mainnet-follower-conf.toml). +For an example configuration file that describes the configuration settings, see [mainnet-follower-conf.toml](../sample/conf/mainnet-follower-conf.toml). Available configuration options are [documented here](https://docs.stacks.co/stacks-in-depth/nodes-and-miners/stacks-node-configuration). ## Paths diff --git a/docs/mining.md b/docs/mining.md index 10f49c5620..dfdfdd7909 100644 --- a/docs/mining.md +++ b/docs/mining.md @@ -1,7 +1,7 @@ # Stacks Mining Stacks tokens (STX) are mined by transferring BTC via PoX. To run as a miner, -you should make sure to add the following config fields to your [config file](../testnet/stacks-node/conf/mainnet-miner-conf.toml): +you should make sure to add the following config fields to your [config file](../sample/conf/mainnet-miner-conf.toml): ```toml [node] diff --git a/docs/profiling.md b/docs/profiling.md index 4b8343aae9..f830d5af65 100644 --- a/docs/profiling.md +++ b/docs/profiling.md @@ -16,18 +16,18 @@ Note that all bash commands in this document are run from the [stacks-core repos Validating the config file using `stacks-node check-config`: ``` -$ cargo run -r -p stacks-node --bin stacks-node check-config --config testnet/stacks-node/conf/mainnet-mockminer-conf.toml +$ cargo run -r -p stacks-node --bin stacks-node check-config --config ./sample/conf/mainnet-follower-conf.toml INFO [1661276562.220137] [testnet/stacks-node/src/main.rs:82] [main] stacks-node 0.1.0 (:, release build, linux [x86_64]) -INFO [1661276562.220363] [testnet/stacks-node/src/main.rs:115] [main] Loading config at path testnet/stacks-node/conf/mainnet-mockminer-conf.toml +INFO [1661276562.220363] [testnet/stacks-node/src/main.rs:115] [main] Loading config at path ./sample/conf/mainnet-follower-conf.toml INFO [1661276562.233071] [testnet/stacks-node/src/main.rs:128] [main] Valid config! 
``` Enabling debug logging using environment variable `STACKS_LOG_DEBUG=1`: ``` -$ STACKS_LOG_DEBUG=1 cargo run -r -p stacks-node --bin stacks-node check-config --config testnet/stacks-node/conf/mainnet-mockminer-conf.toml +$ STACKS_LOG_DEBUG=1 cargo run -r -p stacks-node --bin stacks-node check-config --config ./sample/conf/mainnet-follower-conf.toml INFO [1661276562.220137] [testnet/stacks-node/src/main.rs:82] [main] stacks-node 0.1.0 (tip-mine:c90476aa8a+, release build, macos [aarch64]) -INFO [1661276562.220363] [testnet/stacks-node/src/main.rs:115] [main] Loading config at path testnet/stacks-node/conf/mainnet-mockminer-conf.toml +INFO [1661276562.220363] [testnet/stacks-node/src/main.rs:115] [main] Loading config at path ./sample/conf/mainnet-follower-conf.toml DEBG [1661276562.222450] [testnet/stacks-node/src/main.rs:118] [main] Loaded config file: ConfigFile { burnchain: Some(BurnchainConfigFile { chain: Some("bitcoin"), burn_fee_cap: Some(1), mode: Some("mainnet"), commit_anchor_block_within: None, peer_host: Some("localhost"), peer_port: Some(8333), rpc_port: Some(8332), rpc_ssl: None, username: Some("btcuser"), password: Some("btcpass"), timeout: None, magic_bytes: None, local_mining_public_key: None, process_exit_at_block_height: None, poll_time_secs: None, satoshis_per_byte: None, leader_key_tx_estimated_size: None, block_commit_tx_estimated_size: None, rbf_fee_increment: None, max_rbf: None, epochs: None }), node: Some(NodeConfigFile { name: None, seed: None, deny_nodes: None, working_dir: Some("/Users/igor/w/stacks-work/working_dir"), rpc_bind: Some("0.0.0.0:20443"), p2p_bind: Some("0.0.0.0:20444"), p2p_address: None, data_url: None, bootstrap_node: Some("02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a605923893@seed.mainnet.hiro.so:20444"), local_peer_seed: None, miner: Some(true), mock_mining: Some(true), mine_microblocks: None, microblock_frequency: None, max_microblocks: None, wait_time_for_microblocks: None, prometheus_bind: None, marf_cache_strategy: None, marf_defer_hashing: None, pox_sync_sample_secs: None, use_test_genesis_chainstate: None }), ustx_balance: None, events_observer: Some([EventObserverConfigFile { endpoint: "localhost:3700", events_keys: ["*"] }]), connection_options: None, fee_estimation: None, miner: None } INFO [1661276562.233071] [testnet/stacks-node/src/main.rs:128] [main] Valid config! 
``` @@ -35,9 +35,9 @@ INFO [1661276562.233071] [testnet/stacks-node/src/main.rs:128] [main] Valid conf Enabling json logging using environment variable `STACKS_LOG_JSON=1` and feature flag `slog_json`: ``` -$ STACKS_LOG_JSON=1 cargo run -F slog_json -r -p stacks-node --bin stacks-node check-config --config testnet/stacks-node/conf/mainnet-mockminer-conf.toml +$ STACKS_LOG_JSON=1 cargo run -F slog_json -r -p stacks-node --bin stacks-node check-config --config ./sample/conf/mainnet-follower-conf.toml {"msg":"stacks-node 0.1.0 (tip-mine:c90476aa8a+, release build, macos [aarch64])","level":"INFO","ts":"2022-08-23T12:44:28.072462-05:00","thread":"main","line":82,"file":"testnet/stacks-node/src/main.rs"} -{"msg":"Loading config at path testnet/stacks-node/conf/mainnet-mockminer-conf.toml","level":"INFO","ts":"2022-08-23T12:44:28.074238-05:00","thread":"main","line":115,"file":"testnet/stacks-node/src/main.rs"} +{"msg":"Loading config at path ./sample/conf/mainnet-follower-conf.toml","level":"INFO","ts":"2022-08-23T12:44:28.074238-05:00","thread":"main","line":115,"file":"testnet/stacks-node/src/main.rs"} {"msg":"Valid config!","level":"INFO","ts":"2022-08-23T12:44:28.089960-05:00","thread":"main","line":128,"file":"testnet/stacks-node/src/main.rs"} ``` @@ -56,18 +56,18 @@ $ export STACKS_SNAPSHOT_DIR=$STACKS_DIR/snapshot Download and extract an archived snapshot of mainnet working directory, provided by Hiro. ``` -$ wget -P $STACKS_DIR https://storage.googleapis.com/blockstack-publish/archiver-main/follower/mainnet-follower-latest.tar.gz -$ tar xzvf $STACKS_DIR/mainnet-follower-latest.tar.gz -C $STACKS_DIR +$ wget -P $STACKS_DIR https://archive.hiro.so/mainnet/stacks-blockchain/mainnet-stacks-blockchain-latest.tar.gz +$ tar xzvf $STACKS_DIR/mainnet-stacks-blockchain-latest.tar.gz -C $STACKS_DIR ``` We'll be using the `stacks-node` config file available at: -`testnet/stacks-node/conf/mocknet-miner-conf.toml` +`sample/conf/mainnet-mockminer-conf.toml` Note that, for convenience, the `stacks-node` binary uses the environment variable `$STACKS_WORKING_DIR` to override the working directory location in the config file. ``` -$ cargo run -r -p stacks-node --bin stacks-node start --config testnet/stacks-node/conf/mocknet-miner-conf.toml +$ cargo run -r -p stacks-node --bin stacks-node start --config ./sample/conf/mainnet-mockminer-conf.toml ``` The `stacks-node` process will receive blocks starting from the latest block available in the Hiro archive. @@ -97,7 +97,7 @@ $ cargo run -r -p stacks-node --bin stacks-events | tee $STACKS_DIR/events.log Run `stacks-node` with an event observer: ``` -$ STACKS_EVENT_OBSERVER=localhost:3700 cargo run -r -p stacks-node --bin stacks-node start --config testnet/stacks-node/conf/mocknet-miner-conf.toml +$ STACKS_EVENT_OBSERVER=localhost:3700 cargo run -r -p stacks-node --bin stacks-node start --config ./sample/conf/mainnet-mockminer-conf.toml ``` You should see output from `stacks-events` in `events.logs` similar to: diff --git a/docs/testnet.md b/docs/testnet.md index 13cabc41b4..41392a2899 100644 --- a/docs/testnet.md +++ b/docs/testnet.md @@ -1,6 +1,6 @@ # Stacks testnet -[`testnet-follower-conf.toml`](../testnet/stacks-node/conf/testnet-follower-conf.toml) is a configuration file that you can use for setting genesis balances or configuring event observers. 
You can grant an address an initial account balance by adding the following entries: +[`testnet-follower-conf.toml`](../sample/conf/testnet-follower-conf.toml) is a configuration file that you can use for setting genesis balances or configuring event observers. You can grant an address an initial account balance by adding the following entries: ``` [[ustx_balance]] @@ -27,7 +27,7 @@ cargo run --bin blockstack-cli generate-sk --testnet # } ``` -This keypair is already registered in the [`testnet-follower-conf.toml`](../testnet/stacks-node/conf/testnet-follower-conf.toml) file, so it can be used as presented here. +This keypair is already registered in the [`testnet-follower-conf.toml`](../sample/conf/testnet-follower-conf.toml) file, so it can be used as presented here. We will interact with the following simple contract `kv-store`. In our examples, we will assume this contract is saved locally to `./kv-store.clar`: diff --git a/libsigner/src/events.rs b/libsigner/src/events.rs index 1de0e34f09..52a77e2bb8 100644 --- a/libsigner/src/events.rs +++ b/libsigner/src/events.rs @@ -114,6 +114,13 @@ pub enum SignerEvent { /// the time at which this event was received by the signer's event processor received_time: SystemTime, }, + /// A new processed Stacks block was received from the node with the given block hash + NewBlock { + /// The block header hash for the newly processed stacks block + block_hash: Sha512Trunc256Sum, + /// The block height for the newly processed stacks block + block_height: u64, + }, } /// Trait to implement a stop-signaler for the event receiver thread. @@ -298,29 +305,25 @@ impl EventReceiver for SignerEventReceiver { &request.method(), ))); } + debug!("Processing {} event", request.url()); if request.url() == "/stackerdb_chunks" { - process_stackerdb_event(event_receiver.local_addr, request) - .map_err(|e| { - error!("Error processing stackerdb_chunks message"; "err" => ?e); - e - }) + process_event::(request) } else if request.url() == "/proposal_response" { - process_proposal_response(request) + process_event::(request) } else if request.url() == "/new_burn_block" { - process_new_burn_block_event(request) + process_event::(request) } else if request.url() == "/shutdown" { event_receiver.stop_signal.store(true, Ordering::SeqCst); - return Err(EventError::Terminated); + Err(EventError::Terminated) + } else if request.url() == "/new_block" { + process_event::(request) } else { let url = request.url().to_string(); - // `/new_block` is expected, but not specifically handled. do not log. 
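Every recognized endpoint above now routes through a single generic helper that deserializes the body into an endpoint-specific intermediate type and then converts it with `TryInto`. Below is a minimal, self-contained sketch of that pattern using stand-in types: the real helper is additionally generic over the signer event type, reads its body from an `HttpRequest`, and acks the dispatcher before parsing, and the `hex` crate here stands in for the codebase's own hex helpers.

```rust
use serde::de::DeserializeOwned;
use serde::Deserialize;

#[derive(Debug)]
enum EventError {
    Deserialize(String),
}

#[derive(Debug)]
enum SignerEvent {
    NewBlock { block_hash: Vec<u8>, block_height: u64 },
}

/// Deserialize a JSON body into the intermediate type `E`, then convert it
/// into a `SignerEvent` via the `TryInto` bound.
fn process_event<E>(body: &str) -> Result<SignerEvent, EventError>
where
    E: DeserializeOwned + TryInto<SignerEvent, Error = EventError>,
{
    let json_event: E = serde_json::from_str(body)
        .map_err(|e| EventError::Deserialize(format!("Could not decode body to JSON: {e:?}")))?;
    json_event.try_into()
}

#[derive(Debug, Deserialize)]
struct BlockEvent {
    block_hash: String,
    block_height: u64,
}

impl TryFrom<BlockEvent> for SignerEvent {
    type Error = EventError;

    fn try_from(ev: BlockEvent) -> Result<Self, EventError> {
        // Mirror the 0x-prefix handling: `get(2..)` drops the prefix (it only
        // fails on strings shorter than two bytes), then the remainder is
        // decoded as hex.
        let block_hash = ev
            .block_hash
            .get(2..)
            .ok_or_else(|| EventError::Deserialize("Hex string should be 0x prefixed".into()))
            .and_then(|h| {
                hex::decode(h)
                    .map_err(|e| EventError::Deserialize(format!("Invalid hex string: {e}")))
            })?;
        Ok(SignerEvent::NewBlock { block_hash, block_height: ev.block_height })
    }
}

fn main() {
    let body = r#"{"block_hash": "0xabcd", "block_height": 7}"#;
    println!("{:?}", process_event::<BlockEvent>(body).unwrap());
}
```

With this shape, supporting a new endpoint reduces to defining the intermediate struct and its `TryFrom` conversion, rather than writing a bespoke read/parse/ack function per endpoint.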
- if &url != "/new_block" { - debug!( - "[{:?}] next_event got request with unexpected url {}, return OK so other side doesn't keep sending this", - event_receiver.local_addr, - url - ); - } + debug!( + "[{:?}] next_event got request with unexpected url {}, return OK so other side doesn't keep sending this", + event_receiver.local_addr, + url + ); ack_dispatcher(request); Err(EventError::UnrecognizedEvent(url)) } @@ -385,12 +388,13 @@ fn ack_dispatcher(request: HttpRequest) { // TODO: add tests from mutation testing results #4835 #[cfg_attr(test, mutants::skip)] -/// Process a stackerdb event from the node -fn process_stackerdb_event( - local_addr: Option, - mut request: HttpRequest, -) -> Result, EventError> { +fn process_event(mut request: HttpRequest) -> Result, EventError> +where + T: SignerEventTrait, + E: serde::de::DeserializeOwned + TryInto, Error = EventError>, +{ let mut body = String::new(); + if let Err(e) = request.as_reader().read_to_string(&mut body) { error!("Failed to read body: {:?}", &e); ack_dispatcher(request); @@ -399,27 +403,12 @@ fn process_stackerdb_event( &e ))); } - - debug!("Got stackerdb_chunks event"; "chunks_event_body" => %body); - let event: StackerDBChunksEvent = serde_json::from_slice(body.as_bytes()) + // Regardless of whether we successfully deserialize, we should ack the dispatcher so they don't keep resending it + ack_dispatcher(request); + let json_event: E = serde_json::from_slice(body.as_bytes()) .map_err(|e| EventError::Deserialize(format!("Could not decode body to JSON: {:?}", &e)))?; - let event_contract_id = event.contract_id.clone(); - - let signer_event = match SignerEvent::try_from(event) { - Err(e) => { - info!( - "[{:?}] next_event got event from an unexpected contract id {}, return OK so other side doesn't keep sending this", - local_addr, - event_contract_id - ); - ack_dispatcher(request); - return Err(e); - } - Ok(x) => x, - }; - - ack_dispatcher(request); + let signer_event: SignerEvent = json_event.try_into()?; Ok(signer_event) } @@ -466,78 +455,69 @@ impl TryFrom for SignerEvent { } } -/// Process a proposal response from the node -fn process_proposal_response( - mut request: HttpRequest, -) -> Result, EventError> { - debug!("Got proposal_response event"); - let mut body = String::new(); - if let Err(e) = request.as_reader().read_to_string(&mut body) { - error!("Failed to read body: {:?}", &e); +impl TryFrom for SignerEvent { + type Error = EventError; - if let Err(e) = request.respond(HttpResponse::empty(200u16)) { - error!("Failed to respond to request: {:?}", &e); - } - return Err(EventError::MalformedRequest(format!( - "Failed to read body: {:?}", - &e - ))); + fn try_from(block_validate_response: BlockValidateResponse) -> Result { + Ok(SignerEvent::BlockValidationResponse( + block_validate_response, + )) } +} - let event: BlockValidateResponse = serde_json::from_slice(body.as_bytes()) - .map_err(|e| EventError::Deserialize(format!("Could not decode body to JSON: {:?}", &e)))?; +#[derive(Debug, Deserialize)] +struct BurnBlockEvent { + burn_block_hash: String, + burn_block_height: u64, + reward_recipients: Vec, + reward_slot_holders: Vec, + burn_amount: u64, +} - if let Err(e) = request.respond(HttpResponse::empty(200u16)) { - error!("Failed to respond to request: {:?}", &e); +impl TryFrom for SignerEvent { + type Error = EventError; + + fn try_from(burn_block_event: BurnBlockEvent) -> Result { + let burn_header_hash = burn_block_event + .burn_block_hash + .get(2..) 
+ .ok_or_else(|| EventError::Deserialize("Hex string should be 0x prefixed".into())) + .and_then(|hex| { + BurnchainHeaderHash::from_hex(hex) + .map_err(|e| EventError::Deserialize(format!("Invalid hex string: {e}"))) + })?; + + Ok(SignerEvent::NewBurnBlock { + burn_height: burn_block_event.burn_block_height, + received_time: SystemTime::now(), + burn_header_hash, + }) } +} - Ok(SignerEvent::BlockValidationResponse(event)) +#[derive(Debug, Deserialize)] +struct BlockEvent { + block_hash: String, + block_height: u64, } -/// Process a new burn block event from the node -fn process_new_burn_block_event( - mut request: HttpRequest, -) -> Result, EventError> { - debug!("Got burn_block event"); - let mut body = String::new(); - if let Err(e) = request.as_reader().read_to_string(&mut body) { - error!("Failed to read body: {:?}", &e); +impl TryFrom for SignerEvent { + type Error = EventError; - if let Err(e) = request.respond(HttpResponse::empty(200u16)) { - error!("Failed to respond to request: {:?}", &e); - } - return Err(EventError::MalformedRequest(format!( - "Failed to read body: {:?}", - &e - ))); - } - #[derive(Debug, Deserialize)] - struct TempBurnBlockEvent { - burn_block_hash: String, - burn_block_height: u64, - reward_recipients: Vec, - reward_slot_holders: Vec, - burn_amount: u64, - } - let temp: TempBurnBlockEvent = serde_json::from_slice(body.as_bytes()) - .map_err(|e| EventError::Deserialize(format!("Could not decode body to JSON: {:?}", &e)))?; - let burn_header_hash = temp - .burn_block_hash - .get(2..) - .ok_or_else(|| EventError::Deserialize("Hex string should be 0x prefixed".into())) - .and_then(|hex| { - BurnchainHeaderHash::from_hex(hex) - .map_err(|e| EventError::Deserialize(format!("Invalid hex string: {e}"))) - })?; - let event = SignerEvent::NewBurnBlock { - burn_height: temp.burn_block_height, - received_time: SystemTime::now(), - burn_header_hash, - }; - if let Err(e) = request.respond(HttpResponse::empty(200u16)) { - error!("Failed to respond to request: {:?}", &e); + fn try_from(block_event: BlockEvent) -> Result { + let block_hash: Sha512Trunc256Sum = block_event + .block_hash + .get(2..) 
+ .ok_or_else(|| EventError::Deserialize("Hex string should be 0x prefixed".into())) + .and_then(|hex| { + Sha512Trunc256Sum::from_hex(hex) + .map_err(|e| EventError::Deserialize(format!("Invalid hex string: {e}"))) + })?; + Ok(SignerEvent::NewBlock { + block_hash, + block_height: block_event.block_height, + }) } - Ok(event) } pub fn get_signers_db_signer_set_message_id(name: &str) -> Option<(u32, u32)> { diff --git a/libsigner/src/runloop.rs b/libsigner/src/runloop.rs index 0a5ed49a6d..40a097088e 100644 --- a/libsigner/src/runloop.rs +++ b/libsigner/src/runloop.rs @@ -120,9 +120,8 @@ impl, R, T: SignerEventTrait> RunningSigner { pub fn join(self) -> Option { debug!("Try join event loop..."); // wait for event receiver join - let _ = self.event_join.join().map_err(|thread_panic| { + let _ = self.event_join.join().inspect_err(|thread_panic| { error!("Event thread panicked with: '{:?}'", &thread_panic); - thread_panic }); info!("Event receiver thread joined"); @@ -131,9 +130,8 @@ impl, R, T: SignerEventTrait> RunningSigner { let result_opt = self .signer_join .join() - .map_err(|thread_panic| { + .inspect_err(|thread_panic| { error!("Event thread panicked with: '{:?}'", &thread_panic); - thread_panic }) .unwrap_or(None); diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index 087c4ba7a3..5f716cea2f 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -686,6 +686,22 @@ impl BlockResponse { BlockResponse::Rejected(rejection) => rejection.signer_signature_hash, } } + + /// Get the block accept data from the block response + pub fn as_block_accepted(&self) -> Option<&BlockAccepted> { + match self { + BlockResponse::Accepted(accepted) => Some(accepted), + _ => None, + } + } + + /// Get the block accept data from the block response + pub fn as_block_rejection(&self) -> Option<&BlockRejection> { + match self { + BlockResponse::Rejected(rejection) => Some(rejection), + _ => None, + } + } } impl StacksMessageCodec for BlockResponse { @@ -1237,7 +1253,7 @@ mod test { txs: vec![], }; let tx_merkle_root = { - let txid_vecs = block + let txid_vecs: Vec<_> = block .txs .iter() .map(|tx| tx.txid().as_bytes().to_vec()) diff --git a/pox-locking/Cargo.toml b/pox-locking/Cargo.toml index 4fbc9885dc..2ebca921d9 100644 --- a/pox-locking/Cargo.toml +++ b/pox-locking/Cargo.toml @@ -28,3 +28,4 @@ mutants = "0.0.3" [features] slog_json = ["stacks_common/slog_json", "clarity/slog_json"] +testing = [] diff --git a/testnet/stacks-node/conf/mainnet-follower-conf.toml b/sample/conf/mainnet-follower-conf.toml similarity index 100% rename from testnet/stacks-node/conf/mainnet-follower-conf.toml rename to sample/conf/mainnet-follower-conf.toml diff --git a/testnet/stacks-node/conf/mainnet-miner-conf.toml b/sample/conf/mainnet-miner-conf.toml similarity index 100% rename from testnet/stacks-node/conf/mainnet-miner-conf.toml rename to sample/conf/mainnet-miner-conf.toml diff --git a/testnet/stacks-node/conf/mainnet-mockminer-conf.toml b/sample/conf/mainnet-mockminer-conf.toml similarity index 100% rename from testnet/stacks-node/conf/mainnet-mockminer-conf.toml rename to sample/conf/mainnet-mockminer-conf.toml diff --git a/testnet/stacks-node/conf/mainnet-signer.toml b/sample/conf/mainnet-signer.toml similarity index 100% rename from testnet/stacks-node/conf/mainnet-signer.toml rename to sample/conf/mainnet-signer.toml diff --git a/testnet/stacks-node/conf/testnet-follower-conf.toml b/sample/conf/testnet-follower-conf.toml similarity index 97% rename from 
testnet/stacks-node/conf/testnet-follower-conf.toml rename to sample/conf/testnet-follower-conf.toml index c294a628b4..ced6f90669 100644 --- a/testnet/stacks-node/conf/testnet-follower-conf.toml +++ b/sample/conf/testnet-follower-conf.toml @@ -77,8 +77,8 @@ start_height = 6 [[burnchain.epochs]] epoch_name = "3.0" -start_height = 56_457 +start_height = 1900 [[burnchain.epochs]] epoch_name = "3.1" -start_height = 77_770 +start_height = 2000 diff --git a/testnet/stacks-node/conf/testnet-miner-conf.toml b/sample/conf/testnet-miner-conf.toml similarity index 97% rename from testnet/stacks-node/conf/testnet-miner-conf.toml rename to sample/conf/testnet-miner-conf.toml index 65f8cace68..854f982f1d 100644 --- a/testnet/stacks-node/conf/testnet-miner-conf.toml +++ b/sample/conf/testnet-miner-conf.toml @@ -73,8 +73,8 @@ start_height = 6 [[burnchain.epochs]] epoch_name = "3.0" -start_height = 56_457 +start_height = 1900 [[burnchain.epochs]] epoch_name = "3.1" -start_height = 77_770 +start_height = 2000 diff --git a/testnet/stacks-node/conf/testnet-signer.toml b/sample/conf/testnet-signer.toml similarity index 95% rename from testnet/stacks-node/conf/testnet-signer.toml rename to sample/conf/testnet-signer.toml index f4a9bc3b71..2fbff7a235 100644 --- a/testnet/stacks-node/conf/testnet-signer.toml +++ b/sample/conf/testnet-signer.toml @@ -75,4 +75,8 @@ start_height = 6 [[burnchain.epochs]] epoch_name = "3.0" -start_height = 56_457 +start_height = 1900 + +[[burnchain.epochs]] +epoch_name = "3.1" +start_height = 2000 diff --git a/sample-contracts/names.clar b/sample/contracts/names.clar similarity index 100% rename from sample-contracts/names.clar rename to sample/contracts/names.clar diff --git a/sample-contracts/tokens-ft-mint.clar b/sample/contracts/tokens-ft-mint.clar similarity index 100% rename from sample-contracts/tokens-ft-mint.clar rename to sample/contracts/tokens-ft-mint.clar diff --git a/sample-contracts/tokens-ft.clar b/sample/contracts/tokens-ft.clar similarity index 100% rename from sample-contracts/tokens-ft.clar rename to sample/contracts/tokens-ft.clar diff --git a/sample-contracts/tokens-mint.clar b/sample/contracts/tokens-mint.clar similarity index 100% rename from sample-contracts/tokens-mint.clar rename to sample/contracts/tokens-mint.clar diff --git a/sample-contracts/tokens.clar b/sample/contracts/tokens.clar similarity index 100% rename from sample-contracts/tokens.clar rename to sample/contracts/tokens.clar diff --git a/sample/expected_consts.json b/sample/expected_consts.json new file mode 100644 index 0000000000..2c6da73a25 --- /dev/null +++ b/sample/expected_consts.json @@ -0,0 +1,12 @@ +{ + "chain_id_mainnet": 1, + "chain_id_testnet": 2147483648, + "microstacks_per_stacks": 1000000, + "miner_reward_maturity": 100, + "network_id_mainnet": 385875968, + "network_id_testnet": 4278190080, + "peer_version_mainnet_major": 402653184, + "peer_version_testnet_major": 4207599104, + "signer_slots_per_user": 13, + "stacks_epoch_max": 9223372036854775807 +} diff --git a/stacks-common/Cargo.toml b/stacks-common/Cargo.toml index 81b4326d4c..a0b3e4f4c4 100644 --- a/stacks-common/Cargo.toml +++ b/stacks-common/Cargo.toml @@ -19,13 +19,11 @@ path = "./src/libcommon.rs" [dependencies] rand = { workspace = true } -serde = "1" +serde = { version = "1.0", features = ["derive"] } serde_derive = "1" -serde_stacker = "0.1" sha3 = "0.10.1" ripemd = "0.1.1" lazy_static = "1.4.0" -percent-encoding = "2.1.0" slog = { version = "2.5.2", features = [ "max_level_trace" ] } slog-term = "2.6.0" slog-json = { 
version = "2.3.0", optional = true } @@ -63,9 +61,6 @@ version = "0.2.23" features = ["std"] [dev-dependencies] -rstest = "0.11.0" -rstest_reuse = "0.1.3" -assert-json-diff = "1.0.0" rand_core = { workspace = true } [features] @@ -77,7 +72,6 @@ testing = ["canonical"] serde = [] bech32_std = [] bech32_strict = [] -strason = [] [target.'cfg(all(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64"), not(any(target_os="windows"))))'.dependencies] sha2 = { version = "0.10", features = ["asm"] } diff --git a/stacks-common/src/bitvec.rs b/stacks-common/src/bitvec.rs index 6602f62e5c..7c77e5da32 100644 --- a/stacks-common/src/bitvec.rs +++ b/stacks-common/src/bitvec.rs @@ -129,7 +129,7 @@ pub struct BitVecIter<'a, const MAX_SIZE: u16> { bitvec: &'a BitVec, } -impl<'a, const MAX_SIZE: u16> Iterator for BitVecIter<'a, MAX_SIZE> { +impl Iterator for BitVecIter<'_, MAX_SIZE> { type Item = bool; fn next(&mut self) -> Option { @@ -172,7 +172,7 @@ impl BitVec { } pub fn iter(&self) -> BitVecIter { - let byte = self.data.get(0); + let byte = self.data.first(); BitVecIter { index: 0, bitvec: self, @@ -184,6 +184,10 @@ impl BitVec { self.len } + pub fn is_empty(&self) -> bool { + self.len == 0 + } + /// Return the number of bytes needed to store `len` bits. fn data_len(len: u16) -> u16 { len / 8 + if len % 8 == 0 { 0 } else { 1 } diff --git a/stacks-common/src/deps_common/bech32/mod.rs b/stacks-common/src/deps_common/bech32/mod.rs index 655f2b1a82..8e7eca9540 100644 --- a/stacks-common/src/deps_common/bech32/mod.rs +++ b/stacks-common/src/deps_common/bech32/mod.rs @@ -201,7 +201,7 @@ impl<'a> Bech32Writer<'a> { } } -impl<'a> WriteBase32 for Bech32Writer<'a> { +impl WriteBase32 for Bech32Writer<'_> { type Err = fmt::Error; /// Writes a single 5 bit value of the data part @@ -211,7 +211,7 @@ impl<'a> WriteBase32 for Bech32Writer<'a> { } } -impl<'a> Drop for Bech32Writer<'a> { +impl Drop for Bech32Writer<'_> { fn drop(&mut self) { self.write_checksum() .expect("Unhandled error writing the checksum on drop.") diff --git a/stacks-common/src/deps_common/bitcoin/blockdata/opcodes.rs b/stacks-common/src/deps_common/bitcoin/blockdata/opcodes.rs index 5e628b06f8..6b70031f6f 100644 --- a/stacks-common/src/deps_common/bitcoin/blockdata/opcodes.rs +++ b/stacks-common/src/deps_common/bitcoin/blockdata/opcodes.rs @@ -606,7 +606,7 @@ impl All { Class::PushBytes(*self as u32) // 60 opcodes } else { - Class::Ordinary(unsafe { transmute(*self) }) + Class::Ordinary(unsafe { transmute::(*self) }) } } } diff --git a/stacks-common/src/deps_common/bitcoin/blockdata/script.rs b/stacks-common/src/deps_common/bitcoin/blockdata/script.rs index be5a6144c7..34ee5897c3 100644 --- a/stacks-common/src/deps_common/bitcoin/blockdata/script.rs +++ b/stacks-common/src/deps_common/bitcoin/blockdata/script.rs @@ -648,7 +648,7 @@ impl<'de> serde::Deserialize<'de> for Script { where E: serde::de::Error, { - let v: Vec = ::hex::decode(v).map_err(E::custom)?; + let v: Vec = crate::util::hash::hex_bytes(v).map_err(E::custom)?; Ok(Script::from(v)) } @@ -834,15 +834,18 @@ mod test { } #[test] - #[cfg(all(feature = "serde", feature = "strason"))] + #[cfg(feature = "serde")] fn script_json_serialize() { - use strason::Json; + use serde_json; let original = hex_script!("827651a0698faaa9a8a7a687"); - let json = Json::from_serialize(&original).unwrap(); - assert_eq!(json.to_bytes(), b"\"827651a0698faaa9a8a7a687\""); - assert_eq!(json.string(), Some("827651a0698faaa9a8a7a687")); - let des = json.into_deserialize().unwrap(); + let 
json_value = serde_json::to_value(&original).unwrap(); + assert_eq!( + serde_json::to_vec(&json_value).unwrap(), + b"\"827651a0698faaa9a8a7a687\"" + ); + assert_eq!(json_value.to_string(), "\"827651a0698faaa9a8a7a687\""); + let des = serde_json::from_value(json_value).unwrap(); assert_eq!(original, des); } diff --git a/stacks-common/src/deps_common/bitcoin/blockdata/transaction.rs b/stacks-common/src/deps_common/bitcoin/blockdata/transaction.rs index 1ece07c511..c2d4c4e0a2 100644 --- a/stacks-common/src/deps_common/bitcoin/blockdata/transaction.rs +++ b/stacks-common/src/deps_common/bitcoin/blockdata/transaction.rs @@ -342,7 +342,7 @@ impl Transaction { let mut script_len_bytes = serialize(&script_len).expect("FATAL: failed to encode varint"); length_script.append(&mut script_len_bytes); - length_script.extend_from_slice(&script_bytes); + length_script.extend_from_slice(script_bytes); length_script } } @@ -361,7 +361,7 @@ impl Transaction { let mut script_len_bytes = serialize(&script_len).expect("FATAL: failed to encode varint"); raw_vec.append(&mut script_len_bytes); - raw_vec.extend_from_slice(&script_bytes); + raw_vec.extend_from_slice(script_bytes); } Sha256dHash::from_data(&raw_vec) } else if sighash_type == SigHashType::Single && input_index < self.output.len() { diff --git a/stacks-common/src/deps_common/bitcoin/internal_macros.rs b/stacks-common/src/deps_common/bitcoin/internal_macros.rs index 2d0873498c..e74eebe0ad 100644 --- a/stacks-common/src/deps_common/bitcoin/internal_macros.rs +++ b/stacks-common/src/deps_common/bitcoin/internal_macros.rs @@ -106,16 +106,16 @@ macro_rules! user_enum { } #[cfg(feature = "serde")] - impl<'de> $crate::serde::Deserialize<'de> for $name { + impl<'de> serde::Deserialize<'de> for $name { #[inline] fn deserialize(deserializer: D) -> Result where - D: $crate::serde::Deserializer<'de>, + D: serde::Deserializer<'de>, { - use $crate::std::fmt::{self, Formatter}; + use std::fmt::{self, Formatter}; struct Visitor; - impl<'de> $crate::serde::de::Visitor<'de> for Visitor { + impl<'de> serde::de::Visitor<'de> for Visitor { type Value = $name; fn expecting(&self, formatter: &mut Formatter) -> fmt::Result { @@ -124,7 +124,7 @@ macro_rules! user_enum { fn visit_str(self, v: &str) -> Result where - E: $crate::serde::de::Error, + E: serde::de::Error, { static FIELDS: &'static [&'static str] = &[$(stringify!($txt)),*]; @@ -136,14 +136,14 @@ macro_rules! user_enum { fn visit_borrowed_str(self, v: &'de str) -> Result where - E: $crate::serde::de::Error, + E: serde::de::Error, { self.visit_str(v) } fn visit_string(self, v: String) -> Result where - E: $crate::serde::de::Error, + E: serde::de::Error, { self.visit_str(&v) } @@ -222,19 +222,19 @@ macro_rules! hex_script (($s:expr) => (crate::deps_common::bitcoin::blockdata::s macro_rules! 
serde_struct_impl { ($name:ident, $($fe:ident),*) => ( #[cfg(feature = "serde")] - impl<'de> $crate::serde::Deserialize<'de> for $name { + impl<'de> serde::Deserialize<'de> for $name { fn deserialize(deserializer: D) -> Result<$name, D::Error> where - D: $crate::serde::de::Deserializer<'de>, + D: serde::de::Deserializer<'de>, { - use $crate::std::fmt::{self, Formatter}; - use $crate::serde::de::IgnoredAny; + use std::fmt::{self, Formatter}; + use serde::de::IgnoredAny; #[allow(non_camel_case_types)] enum Enum { Unknown__Field, $($fe),* } struct EnumVisitor; - impl<'de> $crate::serde::de::Visitor<'de> for EnumVisitor { + impl<'de> serde::de::Visitor<'de> for EnumVisitor { type Value = Enum; fn expecting(&self, formatter: &mut Formatter) -> fmt::Result { @@ -243,7 +243,7 @@ macro_rules! serde_struct_impl { fn visit_str(self, v: &str) -> Result where - E: $crate::serde::de::Error, + E: serde::de::Error, { match v { $( @@ -254,7 +254,7 @@ macro_rules! serde_struct_impl { } } - impl<'de> $crate::serde::Deserialize<'de> for Enum { + impl<'de> serde::Deserialize<'de> for Enum { fn deserialize(deserializer: D) -> Result where D: ::serde::de::Deserializer<'de>, @@ -265,7 +265,7 @@ macro_rules! serde_struct_impl { struct Visitor; - impl<'de> $crate::serde::de::Visitor<'de> for Visitor { + impl<'de> serde::de::Visitor<'de> for Visitor { type Value = $name; fn expecting(&self, formatter: &mut Formatter) -> fmt::Result { @@ -274,9 +274,9 @@ macro_rules! serde_struct_impl { fn visit_map(self, mut map: A) -> Result where - A: $crate::serde::de::MapAccess<'de>, + A: serde::de::MapAccess<'de>, { - use $crate::serde::de::Error; + use serde::de::Error; $(let mut $fe = None;)* @@ -317,12 +317,12 @@ macro_rules! serde_struct_impl { } #[cfg(feature = "serde")] - impl<'de> $crate::serde::Serialize for $name { + impl<'de> serde::Serialize for $name { fn serialize(&self, serializer: S) -> Result where - S: $crate::serde::Serializer, + S: serde::Serializer, { - use $crate::serde::ser::SerializeStruct; + use serde::ser::SerializeStruct; // Only used to get the struct length. static FIELDS: &'static [&'static str] = &[$(stringify!($fe)),*]; diff --git a/stacks-common/src/deps_common/bitcoin/mod.rs b/stacks-common/src/deps_common/bitcoin/mod.rs index b70da5deb2..097bfa2b65 100644 --- a/stacks-common/src/deps_common/bitcoin/mod.rs +++ b/stacks-common/src/deps_common/bitcoin/mod.rs @@ -27,7 +27,6 @@ // Clippy flags #![allow(clippy::needless_range_loop)] // suggests making a big mess of array newtypes -#![allow(clippy::extend_from_slice)] // `extend_from_slice` only available since 1.6 // Coding conventions #![deny(non_upper_case_globals)] diff --git a/stacks-common/src/deps_common/bitcoin/network/encodable.rs b/stacks-common/src/deps_common/bitcoin/network/encodable.rs index f14ee1fb85..b6e372e62e 100644 --- a/stacks-common/src/deps_common/bitcoin/network/encodable.rs +++ b/stacks-common/src/deps_common/bitcoin/network/encodable.rs @@ -30,7 +30,7 @@ //! 
use std::hash::Hash; -use std::{mem, u32}; +use std::mem; use hashbrown::HashMap; diff --git a/stacks-common/src/deps_common/bitcoin/network/message_network.rs b/stacks-common/src/deps_common/bitcoin/network/message_network.rs index 4f913ee48f..0cf486ba85 100644 --- a/stacks-common/src/deps_common/bitcoin/network/message_network.rs +++ b/stacks-common/src/deps_common/bitcoin/network/message_network.rs @@ -22,7 +22,7 @@ use crate::deps_common::bitcoin::network::address::Address; use crate::deps_common::bitcoin::network::constants; use crate::util; -/// Some simple messages +// Some simple messages /// The `version` message #[derive(PartialEq, Eq, Clone, Debug)] diff --git a/stacks-common/src/deps_common/bitcoin/network/serialize.rs b/stacks-common/src/deps_common/bitcoin/network/serialize.rs index f33a347133..7fd75391c8 100644 --- a/stacks-common/src/deps_common/bitcoin/network/serialize.rs +++ b/stacks-common/src/deps_common/bitcoin/network/serialize.rs @@ -157,9 +157,9 @@ impl BitcoinHash for Vec { } /// Encode an object into a vector -pub fn serialize(data: &T) -> Result, Error> +pub fn serialize(data: &T) -> Result, Error> where - T: ConsensusEncodable>>>, + T: ConsensusEncodable>>> + ?Sized, { let mut encoder = RawEncoder::new(Cursor::new(vec![])); data.consensus_encode(&mut encoder)?; @@ -167,9 +167,9 @@ where } /// Encode an object into a hex-encoded string -pub fn serialize_hex(data: &T) -> Result +pub fn serialize_hex(data: &T) -> Result where - T: ConsensusEncodable>>>, + T: ConsensusEncodable>>> + ?Sized, { let serial = serialize(data)?; Ok(hex_encode(&serial[..])) diff --git a/stacks-common/src/deps_common/bitcoin/util/hash.rs b/stacks-common/src/deps_common/bitcoin/util/hash.rs index daa1de3360..e1a9455e99 100644 --- a/stacks-common/src/deps_common/bitcoin/util/hash.rs +++ b/stacks-common/src/deps_common/bitcoin/util/hash.rs @@ -450,7 +450,7 @@ pub fn bitcoin_merkle_root(data: Vec) -> Sha256dHash { bitcoin_merkle_root(next) } -impl<'a, T: BitcoinHash> MerkleRoot for &'a [T] { +impl MerkleRoot for &[T] { fn merkle_root(&self) -> Sha256dHash { bitcoin_merkle_root(self.iter().map(|obj| obj.bitcoin_hash()).collect()) } diff --git a/stacks-common/src/deps_common/ctrlc/mod.rs b/stacks-common/src/deps_common/ctrlc/mod.rs index 836ae7dfb6..70514b842c 100644 --- a/stacks-common/src/deps_common/ctrlc/mod.rs +++ b/stacks-common/src/deps_common/ctrlc/mod.rs @@ -7,7 +7,7 @@ // notice may not be copied, modified, or distributed except // according to those terms. -#[macro_use] +#![macro_use] mod error; mod platform; diff --git a/stacks-common/src/deps_common/httparse/mod.rs b/stacks-common/src/deps_common/httparse/mod.rs index 67ca2c52cd..b4c9250546 100644 --- a/stacks-common/src/deps_common/httparse/mod.rs +++ b/stacks-common/src/deps_common/httparse/mod.rs @@ -169,14 +169,14 @@ impl<'a> Bytes<'a> { } } -impl<'a> AsRef<[u8]> for Bytes<'a> { +impl AsRef<[u8]> for Bytes<'_> { #[inline] fn as_ref(&self) -> &[u8] { &self.slice_peek()[self.pos..] 
} } -impl<'a> Iterator for Bytes<'a> { +impl Iterator for Bytes<'_> { type Item = u8; #[inline] @@ -701,6 +701,7 @@ pub fn parse_headers<'b: 'h, 'h>( } #[inline] +#[allow(clippy::never_loop)] fn parse_headers_iter<'a>(headers: &mut &mut [Header<'a>], bytes: &mut Bytes<'a>) -> Result { let mut num_headers: usize = 0; let mut count: usize = 0; diff --git a/stacks-common/src/types/chainstate.rs b/stacks-common/src/types/chainstate.rs index 59347ed36a..630ce70c9d 100644 --- a/stacks-common/src/types/chainstate.rs +++ b/stacks-common/src/types/chainstate.rs @@ -1,4 +1,4 @@ -use std::fmt; +use std::fmt::{self, Display}; use std::io::{Read, Write}; use std::str::FromStr; @@ -48,7 +48,7 @@ impl TrieHash { /// TrieHash from bytes pub fn from_data(data: &[u8]) -> TrieHash { - if data.len() == 0 { + if data.is_empty() { return TrieHash::from_empty_data(); } @@ -62,7 +62,7 @@ impl TrieHash { } pub fn from_data_array>(data: &[B]) -> TrieHash { - if data.len() == 0 { + if data.is_empty() { return TrieHash::from_empty_data(); } @@ -78,6 +78,10 @@ impl TrieHash { } /// Convert to a String that can be used in e.g. sqlite + /// If we did not implement this seperate from Display, + /// we would use the stacks_common::util::hash::to_hex function + /// which is the unrolled version of this function. + #[allow(clippy::inherent_to_string_shadow_display)] pub fn to_string(&self) -> String { let s = format!("{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}", self.0[0], self.0[1], self.0[2], self.0[3], diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index 49fdfa84fd..93ebd17bc0 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -18,6 +18,7 @@ use std::cell::LazyCell; use std::cmp::Ordering; use std::fmt; use std::ops::{Deref, DerefMut, Index, IndexMut}; +use std::sync::LazyLock; #[cfg(feature = "canonical")] pub mod sqlite; @@ -121,22 +122,21 @@ pub struct CoinbaseInterval { pub effective_start_height: u64, } -/// From SIP-029: -/// -/// | Coinbase Interval | Bitcoin Height | Offset Height | Approx. Supply | STX Reward | Annual Inflation | -/// |--------------------|----------------|---------------------|------------------|------------|------------------| -/// | Current | - | - | 1,552,452,847 | 1000 | - | -/// | 1st | 945,000 | 278,950 | 1,627,352,847 | 500 (50%) | 3.23% | -/// | 2nd | 1,050,000 | 383,950 | 1,679,852,847 | 250 (50%) | 1.57% | -/// | 3rd | 1,260,000 | 593,950 | 1,732,352,847 | 125 (50%) | 0.76% | -/// | 4th | 1,470,000 | 803,950 | 1,758,602,847 | 62.5 (50%) | 0.37% | -/// | - | 2,197,560 | 1,531,510 | 1,804,075,347 | 62.5 (0%) | 0.18% | -/// -/// The above is for mainnet, which has a burnchain year of 52596 blocks and starts at burnchain height 666050. -/// The `Offset Height` column is simply the difference between `Bitcoin Height` and 666050. +// From SIP-029: +// +// | Coinbase Interval | Bitcoin Height | Offset Height | Approx. 
Supply | STX Reward | Annual Inflation | +// |--------------------|----------------|---------------------|------------------|------------|------------------| +// | Current | - | - | 1,552,452,847 | 1000 | - | +// | 1st | 945,000 | 278,950 | 1,627,352,847 | 500 (50%) | 3.23% | +// | 2nd | 1,050,000 | 383,950 | 1,679,852,847 | 250 (50%) | 1.57% | +// | 3rd | 1,260,000 | 593,950 | 1,732,352,847 | 125 (50%) | 0.76% | +// | 4th | 1,470,000 | 803,950 | 1,758,602,847 | 62.5 (50%) | 0.37% | +// | - | 2,197,560 | 1,531,510 | 1,804,075,347 | 62.5 (0%) | 0.18% | +// +// The above is for mainnet, which has a burnchain year of 52596 blocks and starts at burnchain height 666050. /// Mainnet coinbase intervals, as of SIP-029 -pub const COINBASE_INTERVALS_MAINNET: LazyCell<[CoinbaseInterval; 5]> = LazyCell::new(|| { +pub static COINBASE_INTERVALS_MAINNET: LazyLock<[CoinbaseInterval; 5]> = LazyLock::new(|| { let emissions_schedule = [ CoinbaseInterval { coinbase: 1_000 * u128::from(MICROSTACKS_PER_STACKS), @@ -164,7 +164,7 @@ pub const COINBASE_INTERVALS_MAINNET: LazyCell<[CoinbaseInterval; 5]> = LazyCell }); /// Testnet coinbase intervals, as of SIP-029 -pub const COINBASE_INTERVALS_TESTNET: LazyCell<[CoinbaseInterval; 5]> = LazyCell::new(|| { +pub static COINBASE_INTERVALS_TESTNET: LazyLock<[CoinbaseInterval; 5]> = LazyLock::new(|| { let emissions_schedule = [ CoinbaseInterval { coinbase: 1_000 * u128::from(MICROSTACKS_PER_STACKS), @@ -245,11 +245,11 @@ impl CoinbaseInterval { } let mut ht = intervals[0].effective_start_height; - for i in 1..intervals.len() { - if intervals[i].effective_start_height < ht { + for interval in intervals.iter().skip(1) { + if interval.effective_start_height < ht { return false; } - ht = intervals[i].effective_start_height; + ht = interval.effective_start_height; } true } @@ -537,14 +537,11 @@ impl StacksEpochId { | StacksEpochId::Epoch30 => { self.coinbase_reward_pre_sip029(first_burnchain_height, current_burnchain_height) } - StacksEpochId::Epoch31 => { - let cb = self.coinbase_reward_sip029( - mainnet, - first_burnchain_height, - current_burnchain_height, - ); - cb - } + StacksEpochId::Epoch31 => self.coinbase_reward_sip029( + mainnet, + first_burnchain_height, + current_burnchain_height, + ), } } } diff --git a/stacks-common/src/util/chunked_encoding.rs b/stacks-common/src/util/chunked_encoding.rs index 235a9d14e8..445ec5a831 100644 --- a/stacks-common/src/util/chunked_encoding.rs +++ b/stacks-common/src/util/chunked_encoding.rs @@ -316,7 +316,7 @@ impl HttpChunkedTransferReaderState { } } -impl<'a, R: Read> Read for HttpChunkedTransferReader<'a, R> { +impl Read for HttpChunkedTransferReader<'_, R> { /// Read a HTTP chunk-encoded stream. /// Returns number of decoded bytes (i.e. 
number of bytes copied to buf, as expected) fn read(&mut self, buf: &mut [u8]) -> io::Result { @@ -401,7 +401,7 @@ impl<'a, 'state, W: Write> HttpChunkedTransferWriter<'a, 'state, W> { } } -impl<'a, 'state, W: Write> Write for HttpChunkedTransferWriter<'a, 'state, W> { +impl Write for HttpChunkedTransferWriter<'_, '_, W> { fn write(&mut self, buf: &[u8]) -> io::Result { let mut written = 0; while written < buf.len() && !self.state.corked { @@ -526,7 +526,7 @@ mod test { #[test] fn test_http_chunked_encode() { - let tests = vec![ + let tests = [ // (chunk size, byte string, expected encoding) (10, "aaaaaaaaaabbbbbbbbbbccccccccccdddddddddd", "a\r\naaaaaaaaaa\r\na\r\nbbbbbbbbbb\r\na\r\ncccccccccc\r\na\r\ndddddddddd\r\n0\r\n\r\n"), (10, "aaaaaaaaaabbbbbbbbbbccccccccccdddddddddde", "a\r\naaaaaaaaaa\r\na\r\nbbbbbbbbbb\r\na\r\ncccccccccc\r\na\r\ndddddddddd\r\n1\r\ne\r\n0\r\n\r\n"), @@ -551,7 +551,7 @@ mod test { #[test] fn test_http_chunked_encode_multi() { - let tests = vec![ + let tests = [ // chunk size, sequence of writes, expected encoding (10, vec!["aaaaaaaaaa", "bbbbb", "bbbbb", "ccc", "ccc", "ccc", "c", "dd", "ddddd", "ddd"], "a\r\naaaaaaaaaa\r\na\r\nbbbbbbbbbb\r\na\r\ncccccccccc\r\na\r\ndddddddddd\r\n0\r\n\r\n"), (10, vec!["a", "a", "a", "a", "a", "a", "a", "a", "a", "a"], "a\r\naaaaaaaaaa\r\n0\r\n\r\n"), @@ -576,7 +576,7 @@ mod test { #[test] fn test_http_chunked_decode() { - let tests = vec![ + let tests = [ ("a\r\naaaaaaaaaa\r\na\r\nbbbbbbbbbb\r\na\r\ncccccccccc\r\na\r\ndddddddddd\r\n0\r\n\r\n", "aaaaaaaaaabbbbbbbbbbccccccccccdddddddddd"), ("A\r\naaaaaaaaaa\r\nA\r\nbbbbbbbbbb\r\nA\r\ncccccccccc\r\nA\r\ndddddddddd\r\n0\r\n\r\n", "aaaaaaaaaabbbbbbbbbbccccccccccdddddddddd"), ("1\r\na\r\n2\r\nbb\r\n3\r\nccc\r\n4\r\ndddd\r\n0\r\n\r\n", "abbcccdddd"), @@ -598,7 +598,7 @@ mod test { #[test] fn test_http_chunked_decode_multi() { - let tests = vec![ + let tests = [ (vec_u8(vec!["1\r\na", "\r\n", "0\r\n\r\n"]), "a"), (vec_u8(vec!["1\r\na\r", "\n0\r\n\r\n"]), "a"), (vec_u8(vec!["1\r\na\r\n", "0\r\n\r", "\n"]), "a"), @@ -694,7 +694,7 @@ mod test { #[test] fn test_http_chunked_decode_err() { - let tests = vec![ + let tests = [ ( "1; reallyreallyreallyreallylongextension;\r\na\r\n0\r\n\r\n", 1, diff --git a/stacks-common/src/util/hash.rs b/stacks-common/src/util/hash.rs index a5e4341b60..666e72c8e2 100644 --- a/stacks-common/src/util/hash.rs +++ b/stacks-common/src/util/hash.rs @@ -382,9 +382,11 @@ pub struct MerklePathPoint { pub type MerklePath = Vec>; /// Merkle tree implementation with tagged nodes: -/// * a leaf hash is H(0x00 + data) -/// * a node hash is H(0x01 + left.hash + right.hash) -/// An empty tree has root hash 0x00000...00000 +/// +/// * A leaf hash is `H(0x00 + data)` +/// * A node hash is `H(0x01 + left.hash + right.hash)` +/// +/// An empty tree has a root hash of `0x00000...00000`. /// /// NOTE: This is consensus-critical code, because it is used to generate the transaction Merkle /// tree roots in Stacks blocks. 
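The tagged construction described in the comment above is straightforward to sketch. An illustrative example, assuming SHA-512/256 (the `sha2` crate's `Sha512_256`, matching the 32-byte `Sha512Trunc256Sum` digests used elsewhere in this diff) as the hash function; the actual `MerkleTree` is generic over its hasher:

```rust
use sha2::{Digest, Sha512_256};

/// Leaf hash: H(0x00 || data). The 0x00 tag ensures a leaf can never be
/// reinterpreted as an interior node.
fn leaf_hash(data: &[u8]) -> [u8; 32] {
    let mut hasher = Sha512_256::new();
    hasher.update([0x00u8]);
    hasher.update(data);
    hasher.finalize().into()
}

/// Node hash: H(0x01 || left || right).
fn node_hash(left: &[u8; 32], right: &[u8; 32]) -> [u8; 32] {
    let mut hasher = Sha512_256::new();
    hasher.update([0x01u8]);
    hasher.update(left);
    hasher.update(right);
    hasher.finalize().into()
}

fn main() {
    let root = node_hash(&leaf_hash(b"a"), &leaf_hash(b"b"));
    println!("{}", root.iter().map(|b| format!("{b:02x}")).collect::<String>());
}
```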
@@ -396,7 +398,7 @@ where MerkleTree { nodes: vec![] } } - pub fn new(data: &Vec>) -> MerkleTree { + pub fn new(data: &[Vec]) -> MerkleTree { if data.is_empty() { return MerkleTree { nodes: vec![] }; } diff --git a/stacks-common/src/util/log.rs b/stacks-common/src/util/log.rs index 534f3f9969..b0ac704f0c 100644 --- a/stacks-common/src/util/log.rs +++ b/stacks-common/src/util/log.rs @@ -224,8 +224,7 @@ fn make_logger() -> Logger { let decorator = get_decorator(); let atty = isatty(Stream::Stderr); let drain = TermFormat::new(decorator, pretty_print, debug, atty); - let logger = Logger::root(drain.ignore_res(), o!()); - logger + Logger::root(drain.ignore_res(), o!()) } } diff --git a/stacks-common/src/util/macros.rs b/stacks-common/src/util/macros.rs index b1b26ee014..4e332179e6 100644 --- a/stacks-common/src/util/macros.rs +++ b/stacks-common/src/util/macros.rs @@ -47,8 +47,8 @@ macro_rules! define_named_enum { $($Variant),*, } impl $Name { - pub const ALL: &'static [$Name] = &[$($Name::$Variant),*]; - pub const ALL_NAMES: &'static [&'static str] = &[$($VarName),*]; + pub const ALL: &[$Name] = &[$($Name::$Variant),*]; + pub const ALL_NAMES: &[&str] = &[$($VarName),*]; pub fn lookup_by_name(name: &str) -> Option { match name { @@ -113,8 +113,8 @@ macro_rules! define_versioned_named_enum_internal { } impl $Name { - pub const ALL: &'static [$Name] = &[$($Name::$Variant),*]; - pub const ALL_NAMES: &'static [&'static str] = &[$($VarName),*]; + pub const ALL: &[$Name] = &[$($Name::$Variant),*]; + pub const ALL_NAMES: &[&str] = &[$($VarName),*]; pub fn lookup_by_name(name: &str) -> Option { match name { @@ -538,7 +538,7 @@ macro_rules! impl_byte_array_newtype { /// Instantiates from a vector of bytes #[allow(dead_code)] - pub fn from_vec(inp: &Vec) -> Option<$thing> { + pub fn from_vec(inp: &[u8]) -> Option<$thing> { match inp.len() { $len => { let mut ret = [0; $len]; @@ -552,7 +552,7 @@ macro_rules! impl_byte_array_newtype { /// Instantiates from a big-endian vector of bytes, converting to host byte order #[allow(dead_code)] - pub fn from_vec_be(b: &Vec) -> Option<$thing> { + pub fn from_vec_be(b: &[u8]) -> Option<$thing> { match b.len() { $len => { let mut ret = [0; $len]; diff --git a/stacks-common/src/util/mod.rs b/stacks-common/src/util/mod.rs index a9dfc47806..95ca7eeec0 100644 --- a/stacks-common/src/util/mod.rs +++ b/stacks-common/src/util/mod.rs @@ -31,10 +31,33 @@ pub mod vrf; use std::collections::HashMap; use std::fs::File; use std::io::{BufReader, BufWriter, Write}; -use std::path::Path; +use std::path::{Path, PathBuf}; use std::time::{SystemTime, UNIX_EPOCH}; use std::{error, fmt, thread, time}; +/// Given a relative path inside the Cargo workspace, return the absolute path +pub fn cargo_workspace
<P>
(relative_path: P) -> PathBuf +where + P: AsRef, +{ + let output = std::process::Command::new(env!("CARGO")) + .arg("locate-project") + .arg("--workspace") + .arg("--message-format=plain") + .output() + .expect("Failed to run command"); + let cargo_toml = std::str::from_utf8(&output.stdout) + .expect("Failed to parse utf8") + .trim(); + Path::new(cargo_toml) + .parent() + .expect("Failed to get parent directory") + .join(relative_path) +} + +#[cfg(any(test, feature = "testing"))] +pub mod tests; + pub fn get_epoch_time_secs() -> u64 { let start = SystemTime::now(); let since_the_epoch = start diff --git a/stacks-common/src/util/pipe.rs b/stacks-common/src/util/pipe.rs index bb7482f949..86d92abd61 100644 --- a/stacks-common/src/util/pipe.rs +++ b/stacks-common/src/util/pipe.rs @@ -25,10 +25,11 @@ use crate::util::log; /// Inter-thread pipe for streaming messages, built on channels. /// Used mainly in conjunction with networking. -/// * The read endpoint lives inside the connection, and will consume data from another thread to -/// be sent out on the network. -/// * The write endpoint gets fed into calls to consensus_serialize(), to be sent out on the -/// network. +/// +/// * The read endpoint lives inside the connection and will consume data from another thread +/// to be sent out on the network. +/// * The write endpoint gets fed into calls to `consensus_serialize()` to be sent out on the +/// network. #[derive(Debug)] pub struct PipeRead { input: Receiver>, @@ -187,7 +188,7 @@ impl PipeWrite { } fn write_or_buffer(&mut self, buf: &[u8]) -> io::Result { - if buf.len() == 0 { + if buf.is_empty() { return Ok(0); } diff --git a/stacks-common/src/util/retry.rs b/stacks-common/src/util/retry.rs index d296e4ae79..e7f6c0b140 100644 --- a/stacks-common/src/util/retry.rs +++ b/stacks-common/src/util/retry.rs @@ -61,7 +61,7 @@ impl<'a, R: Read> RetryReader<'a, R> { } } -impl<'a, R: Read> Read for RetryReader<'a, R> { +impl Read for RetryReader<'_, R> { fn read(&mut self, buf: &mut [u8]) -> io::Result { let nr_buf = if self.i < self.buf.len() { // consume from inner buffer @@ -98,7 +98,7 @@ impl<'a, R: Read> BoundReader<'a, R> { } } -impl<'a, R: Read> Read for BoundReader<'a, R> { +impl Read for BoundReader<'_, R> { fn read(&mut self, buf: &mut [u8]) -> io::Result { let intended_read = self .read_so_far @@ -134,7 +134,7 @@ impl<'a, R: Read> LogReader<'a, R> { } } -impl<'a, R: Read> Read for LogReader<'a, R> { +impl Read for LogReader<'_, R> { fn read(&mut self, buf: &mut [u8]) -> io::Result { let nr = self.fd.read(buf)?; let read = buf[0..nr].to_vec(); diff --git a/stacks-common/src/util/secp256k1.rs b/stacks-common/src/util/secp256k1.rs index c3b80acac5..5c64838855 100644 --- a/stacks-common/src/util/secp256k1.rs +++ b/stacks-common/src/util/secp256k1.rs @@ -72,10 +72,10 @@ impl MessageSignature { #[cfg(any(test, feature = "testing"))] // test method for generating place-holder data - pub fn from_raw(sig: &Vec) -> MessageSignature { + pub fn from_raw(sig: &[u8]) -> MessageSignature { let mut buf = [0u8; 65]; if sig.len() < 65 { - buf.copy_from_slice(&sig[..]); + buf.copy_from_slice(sig); } else { buf.copy_from_slice(&sig[..65]); } diff --git a/stacks-common/src/util/tests.rs b/stacks-common/src/util/tests.rs new file mode 100644 index 0000000000..b87e913718 --- /dev/null +++ b/stacks-common/src/util/tests.rs @@ -0,0 +1,99 @@ +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU 
General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::sync::{Arc, Mutex}; +/// `TestFlag` is a thread-safe utility designed for managing shared state in testing scenarios. It wraps +/// a value of type `T` inside an `Arc>>`, allowing you to set and retrieve a value +/// across different parts of your codebase while ensuring thread safety. +/// +/// This structure is particularly useful when: +/// - You need a global or static variable in tests. +/// - You want to control the execution of custom test code paths by setting and checking a shared value. +/// +/// # Type Parameter +/// - `T`: The type of the value managed by the `TestFlag`. It must implement the `Default` and `Clone` traits. +/// +/// # Examples +/// +/// ```rust +/// use stacks_common::util::tests::TestFlag; +/// use std::sync::{Arc, Mutex}; +/// +/// // Create a TestFlag instance +/// let test_flag = TestFlag::default(); +/// +/// // Set a value in the test flag +/// test_flag.set("test_value".to_string()); +/// +/// // Retrieve the value +/// assert_eq!(test_flag.get(), "test_value".to_string()); +/// +/// // Reset the value to default +/// test_flag.set("".to_string()); +/// assert_eq!(test_flag.get(), "".to_string()); +/// ``` +#[derive(Clone)] +pub struct TestFlag(pub Arc>>); + +impl Default for TestFlag { + fn default() -> Self { + Self(Arc::new(Mutex::new(None))) + } +} + +impl TestFlag { + /// Sets the value of the test flag. + /// + /// This method updates the value stored inside the `TestFlag`, replacing any existing value. + /// + /// # Arguments + /// - `value`: The new value to set for the `TestFlag`. + /// + /// # Examples + /// + /// ```rust + /// let test_flag = TestFlag::default(); + /// test_flag.set(42); + /// assert_eq!(test_flag.get(), 42); + /// ``` + pub fn set(&self, value: T) { + *self.0.lock().unwrap() = Some(value); + } + + /// Retrieves the current value of the test flag. + /// + /// If no value has been set, this method returns the default value for the type `T`. + /// + /// # Returns + /// - The current value of the test flag, or the default value of `T` if none has been set. + /// + /// # Examples + /// + /// ```rust + /// let test_flag = TestFlag::default(); + /// + /// // Get the default value + /// assert_eq!(test_flag.get(), 0); // For T = i32, default is 0 + /// + /// // Set a value + /// test_flag.set(123); + /// + /// // Get the updated value + /// assert_eq!(test_flag.get(), 123); + /// ``` + pub fn get(&self) -> T { + self.0.lock().unwrap().clone().unwrap_or_default().clone() + } +} diff --git a/stacks-common/src/util/vrf.rs b/stacks-common/src/util/vrf.rs index ddfdedfaa8..0c2b2c3dad 100644 --- a/stacks-common/src/util/vrf.rs +++ b/stacks-common/src/util/vrf.rs @@ -158,10 +158,7 @@ impl VRFPublicKey { // that's what the docs say to do! 
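// Decompression failing means the bytes are not a valid curve point, in
// which case key construction bails out early with `None`.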
let checked_pubkey = CompressedEdwardsY(pubkey_slice); - if checked_pubkey.decompress().is_none() { - // invalid - return None; - } + checked_pubkey.decompress()?; let key = ed25519_dalek::VerifyingKey::from_bytes(&pubkey_slice).ok()?; Some(VRFPublicKey(key)) @@ -432,7 +429,7 @@ impl VRF { /// * its public key (an ed25519 curve point) /// * a new private key derived from the hash of the private key /// * a truncated hash of the private key - /// Idea borroed from Algorand (https://github.com/algorand/libsodium/blob/draft-irtf-cfrg-vrf-03/src/libsodium/crypto_vrf/ietfdraft03/prove.c) + /// Idea borrowed from Algorand (https://github.com/algorand/libsodium/blob/draft-irtf-cfrg-vrf-03/src/libsodium/crypto_vrf/ietfdraft03/prove.c) fn expand_privkey(secret: &VRFPrivateKey) -> (VRFPublicKey, ed25519_Scalar, [u8; 32]) { let mut hasher = Sha512::new(); let mut h = [0u8; 64]; diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md index dc5507b065..e634d73172 100644 --- a/stacks-signer/CHANGELOG.md +++ b/stacks-signer/CHANGELOG.md @@ -11,6 +11,24 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ## Changed +## [3.1.0.0.3.0] + +## Added + +- Introduced the `block_proposal_max_age_secs` configuration option for signers, enabling them to automatically ignore block proposals that exceed the specified age in seconds. + +## Changed +- Improvements to the stale signer cleanup logic: deletes the prior signer if it has no remaining unprocessed blocks in its database +- Signers now listen to new block events from the stacks node to determine whether a block has been successfully appended to the chain tip + +# [3.1.0.0.2.1] + +## Added + +## Changed + +- Prevent old reward cycle signers from processing block validation response messages that do not apply to blocks from their cycle. 
+ # [3.1.0.0.2.1] ## Added diff --git a/stacks-signer/Cargo.toml b/stacks-signer/Cargo.toml index 3beba641f2..eb58164a6e 100644 --- a/stacks-signer/Cargo.toml +++ b/stacks-signer/Cargo.toml @@ -40,7 +40,7 @@ stacks-common = { path = "../stacks-common" } stackslib = { path = "../stackslib" } thiserror = { workspace = true } tiny_http = { version = "0.12", optional = true } -toml = "0.5.6" +toml = { workspace = true } tracing = "0.1.37" tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } rand = { workspace = true } @@ -49,7 +49,7 @@ rusqlite = { workspace = true, features = ["functions"] } [dev-dependencies] clarity = { path = "../clarity", features = ["testing"] } -polynomial = "0.2.6" +stacks-common = { path = "../stacks-common", features = ["testing"] } num-traits = "0.2.18" [dependencies.serde_json] @@ -62,4 +62,4 @@ features = ["serde", "recovery"] [features] monitoring_prom = ["libsigner/monitoring_prom", "prometheus", "tiny_http"] -testing = [] \ No newline at end of file +testing = [] diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index f2f042dffb..31454c96b6 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -176,7 +176,7 @@ enum ProposedBy<'a> { CurrentSortition(&'a SortitionState), } -impl<'a> ProposedBy<'a> { +impl ProposedBy<'_> { pub fn state(&self) -> &SortitionState { match self { ProposedBy::LastSortition(x) => x, @@ -367,7 +367,7 @@ impl SortitionsView { tenure_extend.burn_view_consensus_hash != sortition_consensus_hash; let extend_timestamp = signer_db.calculate_tenure_extend_timestamp( self.config.tenure_idle_timeout, - &block, + block, false, ); let epoch_time = get_epoch_time_secs(); @@ -505,7 +505,7 @@ impl SortitionsView { /// Get the last block from the given tenure /// Returns the last locally accepted block if it is not timed out, otherwise it will return the last globally accepted block. - fn get_tenure_last_block_info( + pub fn get_tenure_last_block_info( consensus_hash: &ConsensusHash, signer_db: &SignerDb, tenure_last_block_proposal_timeout: Duration, @@ -517,7 +517,7 @@ impl SortitionsView { if let Some(local_info) = last_locally_accepted_block { if let Some(signed_over_time) = local_info.signed_self { - if signed_over_time + tenure_last_block_proposal_timeout.as_secs() + if signed_over_time.saturating_add(tenure_last_block_proposal_timeout.as_secs()) > get_epoch_time_secs() { // The last locally accepted block is not timed out, return it @@ -539,7 +539,7 @@ impl SortitionsView { /// /// The rationale here is that the signer DB can be out-of-sync with the node. For example, /// the signer may have been added to an already-running node. 
- fn check_tenure_change_confirms_parent( + pub fn check_tenure_change_confirms_parent( tenure_change: &TenureChangePayload, block: &NakamotoBlock, signer_db: &mut SignerDb, diff --git a/stacks-signer/src/cli.rs b/stacks-signer/src/cli.rs index 4e9067498d..7b666d3762 100644 --- a/stacks-signer/src/cli.rs +++ b/stacks-signer/src/cli.rs @@ -41,11 +41,9 @@ use stacks_common::types::chainstate::StacksPrivateKey; extern crate alloc; -#[derive(Parser, Debug)] -#[command(author, version, about)] -#[command(long_version = VERSION_STRING.as_str())] - /// The CLI arguments for the stacks signer +#[derive(Parser, Debug)] +#[command(author, version, about, long_version = VERSION_STRING.as_str())] pub struct Cli { /// Subcommand action to take #[command(subcommand)] diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index ba55bd9810..bdaa368567 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -414,6 +414,7 @@ pub(crate) mod tests { tenure_last_block_proposal_timeout: config.tenure_last_block_proposal_timeout, block_proposal_validation_timeout: config.block_proposal_validation_timeout, tenure_idle_timeout: config.tenure_idle_timeout, + block_proposal_max_age_secs: config.block_proposal_max_age_secs, } } diff --git a/stacks-signer/src/client/stackerdb.rs b/stacks-signer/src/client/stackerdb.rs index 934686d1c2..0316976a4c 100644 --- a/stacks-signer/src/client/stackerdb.rs +++ b/stacks-signer/src/client/stackerdb.rs @@ -270,7 +270,7 @@ mod tests { txs: vec![], }; let tx_merkle_root = { - let txid_vecs = block + let txid_vecs: Vec<_> = block .txs .iter() .map(|tx| tx.txid().as_bytes().to_vec()) diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index 18412bc5f2..c100703fc9 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -21,6 +21,7 @@ use std::path::PathBuf; use std::time::Duration; use blockstack_lib::chainstate::stacks::TransactionVersion; +use blockstack_lib::net::connection::DEFAULT_BLOCK_PROPOSAL_MAX_AGE_SECS; use clarity::util::hash::to_hex; use libsigner::SignerEntries; use serde::Deserialize; @@ -138,6 +139,8 @@ pub struct SignerConfig { pub block_proposal_validation_timeout: Duration, /// How much idle time must pass before allowing a tenure extend pub tenure_idle_timeout: Duration, + /// The maximum age of a block proposal in seconds that will be processed by the signer + pub block_proposal_max_age_secs: u64, } /// The parsed configuration for the signer @@ -176,6 +179,8 @@ pub struct GlobalConfig { pub block_proposal_validation_timeout: Duration, /// How much idle time must pass before allowing a tenure extend pub tenure_idle_timeout: Duration, + /// The maximum age of a block proposal that will be processed by the signer + pub block_proposal_max_age_secs: u64, } /// Internal struct for loading up the config file @@ -213,6 +218,8 @@ struct RawConfigFile { pub block_proposal_validation_timeout_ms: Option, /// How much idle time (in seconds) must pass before a tenure extend is allowed pub tenure_idle_timeout_secs: Option, + /// The maximum age of a block proposal (in secs) that will be processed by the signer. 
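/// Scans the tenure's blocks in descending stacks height, filtered to a
/// single block state.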
+ pub block_proposal_max_age_secs: Option, } impl RawConfigFile { @@ -310,6 +317,10 @@ impl TryFrom for GlobalConfig { .unwrap_or(TENURE_IDLE_TIMEOUT_SECS), ); + let block_proposal_max_age_secs = raw_data + .block_proposal_max_age_secs + .unwrap_or(DEFAULT_BLOCK_PROPOSAL_MAX_AGE_SECS); + Ok(Self { node_host: raw_data.node_host, endpoint, @@ -326,6 +337,7 @@ impl TryFrom for GlobalConfig { tenure_last_block_proposal_timeout, block_proposal_validation_timeout, tenure_idle_timeout, + block_proposal_max_age_secs, }) } } diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 11faadf871..69dc2dd843 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -26,6 +26,8 @@ use stacks_common::{debug, error, info, warn}; use crate::chainstate::SortitionsView; use crate::client::{retry_with_exponential_backoff, ClientError, StacksClient}; use crate::config::{GlobalConfig, SignerConfig}; +#[cfg(any(test, feature = "testing"))] +use crate::v0::tests::TEST_SKIP_SIGNER_CLEANUP; use crate::Signer as SignerTrait; #[derive(thiserror::Error, Debug)] @@ -46,6 +48,8 @@ pub struct StateInfo { pub runloop_state: State, /// the current reward cycle info pub reward_cycle_info: Option, + /// The current running signers reward cycles + pub running_signers: Vec, } /// The signer result that can be sent across threads @@ -286,6 +290,7 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo tenure_last_block_proposal_timeout: self.config.tenure_last_block_proposal_timeout, block_proposal_validation_timeout: self.config.block_proposal_validation_timeout, tenure_idle_timeout: self.config.tenure_idle_timeout, + block_proposal_max_age_secs: self.config.block_proposal_max_age_secs, })) } @@ -420,26 +425,23 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo } fn cleanup_stale_signers(&mut self, current_reward_cycle: u64) { + #[cfg(any(test, feature = "testing"))] + if TEST_SKIP_SIGNER_CLEANUP.get() { + warn!("Skipping signer cleanup due to testing directive."); + return; + } let mut to_delete = Vec::new(); for (idx, signer) in &mut self.stacks_signers { let reward_cycle = signer.reward_cycle(); - let next_reward_cycle = reward_cycle.wrapping_add(1); - let stale = match next_reward_cycle.cmp(¤t_reward_cycle) { - std::cmp::Ordering::Less => true, // We are more than one reward cycle behind, so we are stale - std::cmp::Ordering::Equal => { - // We are the next reward cycle, so check if we were registered and have any pending blocks to process - match signer { - ConfiguredSigner::RegisteredSigner(signer) => { - !signer.has_unprocessed_blocks() - } - _ => true, - } + if reward_cycle >= current_reward_cycle { + // We are either the current or a future reward cycle, so we are not stale. 
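+ /// Defaults to `DEFAULT_BLOCK_PROPOSAL_MAX_AGE_SECS` when unset; proposals
+ /// older than this are ignored rather than validated.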
+ continue; + } + if let ConfiguredSigner::RegisteredSigner(signer) = signer { + if !signer.has_unprocessed_blocks() { + debug!("{signer}: Signer's tenure has completed."); + to_delete.push(*idx); } - std::cmp::Ordering::Greater => false, // We are the current reward cycle, so we are not stale - }; - if stale { - debug!("{signer}: Signer's tenure has completed."); - to_delete.push(*idx); } } for idx in to_delete { @@ -474,6 +476,11 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> if let Err(e) = res.send(vec![StateInfo { runloop_state: self.state, reward_cycle_info: self.current_reward_cycle_info, + running_signers: self + .stacks_signers + .values() + .map(|s| s.reward_cycle()) + .collect(), } .into()]) { diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 9c4c348f8e..67321c7218 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -244,18 +244,10 @@ impl BlockInfo { } match state { BlockState::Unprocessed => false, - BlockState::LocallyAccepted => { - matches!( - prev_state, - BlockState::Unprocessed | BlockState::LocallyAccepted - ) - } - BlockState::LocallyRejected => { - matches!( - prev_state, - BlockState::Unprocessed | BlockState::LocallyRejected - ) - } + BlockState::LocallyAccepted | BlockState::LocallyRejected => !matches!( + prev_state, + BlockState::GloballyRejected | BlockState::GloballyAccepted + ), BlockState::GloballyAccepted => !matches!(prev_state, BlockState::GloballyRejected), BlockState::GloballyRejected => !matches!(prev_state, BlockState::GloballyAccepted), } @@ -942,12 +934,8 @@ impl SignerDb { block_sighash: &Sha512Trunc256Sum, ts: u64, ) -> Result<(), DBError> { - let qry = "UPDATE blocks SET broadcasted = ?1, block_info = json_set(block_info, '$.state', ?2), state = ?2 WHERE signer_signature_hash = ?3"; - let args = params![ - u64_to_sql(ts)?, - BlockState::GloballyAccepted.to_string(), - block_sighash - ]; + let qry = "UPDATE blocks SET broadcasted = ?1 WHERE signer_signature_hash = ?2"; + let args = params![u64_to_sql(ts)?, block_sighash]; debug!("Marking block {} as broadcasted at {}", block_sighash, ts); self.db.execute(qry, args)?; @@ -972,22 +960,6 @@ impl SignerDb { Ok(Some(broadcasted)) } - /// Get the current state of a given block in the database - pub fn get_block_state( - &self, - block_sighash: &Sha512Trunc256Sum, - ) -> Result, DBError> { - let qry = "SELECT state FROM blocks WHERE signer_signature_hash = ?1 LIMIT 1"; - let args = params![block_sighash]; - let state_opt: Option = query_row(&self.db, qry, args)?; - let Some(state) = state_opt else { - return Ok(None); - }; - Ok(Some( - BlockState::try_from(state.as_str()).map_err(|_| DBError::Corruption)?, - )) - } - /// Return the start time (epoch time in seconds) and the processing time in milliseconds of the tenure (idenfitied by consensus_hash). 
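+ // A signer is deleted only when its reward cycle is strictly in the
+ // past *and* it has no unprocessed blocks left in its database.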
fn get_tenure_times(&self, tenure: &ConsensusHash) -> Result<(u64, u64), DBError> { let query = "SELECT tenure_change, proposed_time, validation_time_ms FROM blocks WHERE consensus_hash = ?1 AND state = ?2 ORDER BY stacks_height DESC"; @@ -1145,13 +1117,6 @@ mod tests { .expect("Unable to get block from db"); assert_eq!(BlockInfo::from(block_proposal_2.clone()), block_info); - // test getting the block state - let block_state = db - .get_block_state(&block_proposal_1.block.header.signer_signature_hash()) - .unwrap() - .expect("Unable to get block state from db"); - - assert_eq!(block_state, BlockInfo::from(block_proposal_1.clone()).state); } #[test] @@ -1394,14 +1359,7 @@ mod tests { .expect("Unable to get block from db") .expect("Unable to get block from db") .state, - BlockState::GloballyAccepted - ); - assert_eq!( - db.get_last_globally_accepted_block(&block_info_1.block.header.consensus_hash) - .unwrap() - .unwrap() - .signer_signature_hash(), - block_info_1.block.header.signer_signature_hash() + BlockState::Unprocessed ); db.insert_block(&block_info_1) .expect("Unable to insert block into db a second time"); @@ -1428,7 +1386,14 @@ mod tests { assert_eq!(block.state, BlockState::LocallyAccepted); assert!(!block.check_state(BlockState::Unprocessed)); assert!(block.check_state(BlockState::LocallyAccepted)); - assert!(!block.check_state(BlockState::LocallyRejected)); + assert!(block.check_state(BlockState::LocallyRejected)); + assert!(block.check_state(BlockState::GloballyAccepted)); + assert!(block.check_state(BlockState::GloballyRejected)); + + block.move_to(BlockState::LocallyRejected).unwrap(); + assert!(!block.check_state(BlockState::Unprocessed)); + assert!(block.check_state(BlockState::LocallyAccepted)); + assert!(block.check_state(BlockState::LocallyRejected)); assert!(block.check_state(BlockState::GloballyAccepted)); assert!(block.check_state(BlockState::GloballyRejected)); @@ -1440,15 +1405,8 @@ mod tests { assert!(block.check_state(BlockState::GloballyAccepted)); assert!(!block.check_state(BlockState::GloballyRejected)); - // Must manually override as will not be able to move from GloballyAccepted to LocallyAccepted - block.state = BlockState::LocallyRejected; - assert!(!block.check_state(BlockState::Unprocessed)); - assert!(!block.check_state(BlockState::LocallyAccepted)); - assert!(block.check_state(BlockState::LocallyRejected)); - assert!(block.check_state(BlockState::GloballyAccepted)); - assert!(block.check_state(BlockState::GloballyRejected)); - - block.move_to(BlockState::GloballyRejected).unwrap(); + // Must manually override as will not be able to move from GloballyAccepted to GloballyRejected + block.state = BlockState::GloballyRejected; assert!(!block.check_state(BlockState::Unprocessed)); assert!(!block.check_state(BlockState::LocallyAccepted)); assert!(!block.check_state(BlockState::LocallyRejected)); diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index 2037a25def..92b7a6ed53 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -452,8 +452,12 @@ fn check_sortition_timeout() { fs::create_dir_all(signer_db_dir).unwrap(); let mut signer_db = SignerDb::new(signer_db_path).unwrap(); + let block_sk = StacksPrivateKey::from_seed(&[0, 1]); + let block_pk = StacksPublicKey::from_private(&block_sk); + let block_pkh = Hash160::from_node_public_key(&block_pk); + let mut sortition = SortitionState { - miner_pkh: Hash160([0; 20]), + miner_pkh: block_pkh, miner_pubkey: None, prior_sortition: 
ConsensusHash([0; 20]), parent_tenure_id: ConsensusHash([0; 20]), diff --git a/stacks-signer/src/v0/mod.rs b/stacks-signer/src/v0/mod.rs index 520fb36ca1..34b363311e 100644 --- a/stacks-signer/src/v0/mod.rs +++ b/stacks-signer/src/v0/mod.rs @@ -17,6 +17,10 @@ /// The signer module for processing events pub mod signer; +#[cfg(any(test, feature = "testing"))] +/// Test specific functions for the signer module +pub mod tests; + use libsigner::v0::messages::SignerMessage; use crate::v0::signer::Signer; diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 5bf8976784..fb52394771 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -24,7 +24,7 @@ use blockstack_lib::net::api::postblock_proposal::{ use blockstack_lib::util_lib::db::Error as DBError; use clarity::types::chainstate::StacksPrivateKey; use clarity::types::{PrivateKey, StacksEpochId}; -use clarity::util::hash::MerkleHashFunc; +use clarity::util::hash::{MerkleHashFunc, Sha512Trunc256Sum}; use clarity::util::secp256k1::Secp256k1PublicKey; use libsigner::v0::messages::{ BlockAccepted, BlockRejection, BlockResponse, MessageSlotID, MockProposal, MockSignature, @@ -44,29 +44,13 @@ use crate::runloop::SignerResult; use crate::signerdb::{BlockInfo, BlockState, SignerDb}; use crate::Signer as SignerTrait; -#[cfg(any(test, feature = "testing"))] -/// A global variable that can be used to reject all block proposals if the signer's public key is in the provided list -pub static TEST_REJECT_ALL_BLOCK_PROPOSAL: std::sync::Mutex< - Option<Vec<StacksPublicKey>>, -> = std::sync::Mutex::new(None); - -#[cfg(any(test, feature = "testing"))] -/// A global variable that can be used to ignore block proposals if the signer's public key is in the provided list -pub static TEST_IGNORE_ALL_BLOCK_PROPOSALS: std::sync::Mutex< - Option<Vec<StacksPublicKey>>, -> = std::sync::Mutex::new(None); - -#[cfg(any(test, feature = "testing"))] -/// Pause the block broadcast -pub static TEST_PAUSE_BLOCK_BROADCAST: std::sync::Mutex<Option<bool>> = std::sync::Mutex::new(None); - -#[cfg(any(test, feature = "testing"))] -/// Skip broadcasting the block to the network -pub static TEST_SKIP_BLOCK_BROADCAST: std::sync::Mutex<Option<bool>> = std::sync::Mutex::new(None); - /// The stacks signer registered for the reward cycle #[derive(Debug)] pub struct Signer { + /// The private key of the signer + #[cfg(any(test, feature = "testing"))] + pub private_key: StacksPrivateKey, + #[cfg(not(any(test, feature = "testing")))] /// The private key of the signer private_key: StacksPrivateKey, /// The stackerdb client @@ -92,6 +76,8 @@ pub struct Signer { pub block_proposal_validation_timeout: Duration, /// The current submitted block proposal and its submission time pub submitted_block_proposal: Option<(BlockProposal, Instant)>, + /// Maximum age of a block proposal in seconds before it is dropped without processing + pub block_proposal_max_age_secs: u64, } impl std::fmt::Display for Signer { @@ -126,6 +112,7 @@ impl SignerTrait<SignerMessage> for Signer { Some(SignerEvent::BlockValidationResponse(_)) | Some(SignerEvent::MinerMessages(..)) | Some(SignerEvent::NewBurnBlock { .. }) + | Some(SignerEvent::NewBlock { .. }) | Some(SignerEvent::StatusCheck) | None => None, Some(SignerEvent::SignerMessages(msg_parity, ..)) => Some(u64::from(*msg_parity) % 2), @@ -141,6 +128,17 @@ impl SignerTrait<SignerMessage> for Signer { debug!("{self}: No event received"); return; }; + if self.reward_cycle > current_reward_cycle + && !matches!( + event, + SignerEvent::StatusCheck | SignerEvent::NewBurnBlock { ..
} + ) + { + // The reward cycle has not yet started for this signer instance + // Do not process any events other than status checks or new burn blocks + debug!("{self}: Signer reward cycle has not yet started. Ignoring event."); + return; + } match event { SignerEvent::BlockValidationResponse(block_validate_response) => { debug!("{self}: Received a block proposal result from the stacks node..."); @@ -168,21 +166,8 @@ match message { SignerMessage::BlockProposal(block_proposal) => { #[cfg(any(test, feature = "testing"))] - if let Some(public_keys) = - &*TEST_IGNORE_ALL_BLOCK_PROPOSALS.lock().unwrap() - { - if public_keys.contains( - &stacks_common::types::chainstate::StacksPublicKey::from_private( - &self.private_key, - ), - ) { - warn!("{self}: Ignoring block proposal due to testing directive"; - "block_id" => %block_proposal.block.block_id(), - "height" => block_proposal.block.header.chain_length, - "consensus_hash" => %block_proposal.block.header.consensus_hash - ); - continue; - } + if self.test_ignore_all_block_proposals(block_proposal) { + continue; } self.handle_block_proposal( stacks_client, @@ -246,6 +231,33 @@ }); *sortition_state = None; } + SignerEvent::NewBlock { + block_hash, + block_height, + } => { + debug!( + "{self}: Received a new block event."; + "block_hash" => %block_hash, + "block_height" => block_height + ); + if let Ok(Some(mut block_info)) = self + .signer_db + .block_lookup(block_hash) + .inspect_err(|e| warn!("{self}: Failed to load block state: {e:?}")) + { + if block_info.state == BlockState::GloballyAccepted { + // We have already globally accepted this block. Do nothing. + return; + } + if let Err(e) = block_info.mark_globally_accepted() { + warn!("{self}: Failed to mark block as globally accepted: {e:?}"); + return; + } + if let Err(e) = self.signer_db.insert_block(&block_info) { + warn!("{self}: Failed to update block state to globally accepted: {e:?}"); + } + } + } } } @@ -284,6 +296,7 @@ impl From<SignerConfig> for Signer { proposal_config, submitted_block_proposal: None, block_proposal_validation_timeout: signer_config.block_proposal_validation_timeout, + block_proposal_max_age_secs: signer_config.block_proposal_max_age_secs, } } } @@ -296,36 +309,113 @@ impl Signer { let valid = block_info.valid?; let response = if valid { debug!("{self}: Accepting block {}", block_info.block.block_id()); - let signature = self - .private_key - .sign(block_info.signer_signature_hash().bits()) - .expect("Failed to sign block"); - BlockResponse::accepted( - block_info.signer_signature_hash(), - signature, - self.signer_db.calculate_tenure_extend_timestamp( - self.proposal_config.tenure_idle_timeout, - &block_info.block, - true, - ), - ) + self.create_block_acceptance(&block_info.block) } else { debug!("{self}: Rejecting block {}", block_info.block.block_id()); - BlockResponse::rejected( - block_info.signer_signature_hash(), - RejectCode::RejectedInPriorRound, - &self.private_key, - self.mainnet, - self.signer_db.calculate_tenure_extend_timestamp( - self.proposal_config.tenure_idle_timeout, - &block_info.block, - false, - ), - ) + self.create_block_rejection(RejectCode::RejectedInPriorRound, &block_info.block) }; Some(response) } + /// Create a block acceptance response for a block + pub fn create_block_acceptance(&self, block: &NakamotoBlock) -> BlockResponse { + let signature = self + .private_key + .sign(block.header.signer_signature_hash().bits()) + .expect("Failed to sign block"); + BlockResponse::accepted( +
block.header.signer_signature_hash(), + signature, + self.signer_db.calculate_tenure_extend_timestamp( + self.proposal_config.tenure_idle_timeout, + block, + true, + ), + ) + } + /// Create a block rejection response for a block with the given reject code + pub fn create_block_rejection( + &self, + reject_code: RejectCode, + block: &NakamotoBlock, + ) -> BlockResponse { + BlockResponse::rejected( + block.header.signer_signature_hash(), + reject_code, + &self.private_key, + self.mainnet, + self.signer_db.calculate_tenure_extend_timestamp( + self.proposal_config.tenure_idle_timeout, + block, + false, + ), + ) + } + /// Check if block should be rejected based on sortition state + /// Will return a BlockResponse::Rejection if the block is invalid, None otherwise. + fn check_block_against_sortition_state( + &mut self, + stacks_client: &StacksClient, + sortition_state: &mut Option<SortitionsView>, + block: &NakamotoBlock, + miner_pubkey: &Secp256k1PublicKey, + ) -> Option<BlockResponse> { + let signer_signature_hash = block.header.signer_signature_hash(); + let block_id = block.block_id(); + // Get sortition view if we don't have it + if sortition_state.is_none() { + *sortition_state = + SortitionsView::fetch_view(self.proposal_config.clone(), stacks_client) + .inspect_err(|e| { + warn!( + "{self}: Failed to update sortition view: {e:?}"; + "signer_sighash" => %signer_signature_hash, + "block_id" => %block_id, + ) + }) + .ok(); + } + + // Check if proposal can be rejected now if not valid against sortition view + if let Some(sortition_state) = sortition_state { + match sortition_state.check_proposal( + stacks_client, + &mut self.signer_db, + block, + miner_pubkey, + true, + ) { + // Error validating block + Err(e) => { + warn!( + "{self}: Error checking block proposal: {e:?}"; + "signer_sighash" => %signer_signature_hash, + "block_id" => %block_id, + ); + Some(self.create_block_rejection(RejectCode::ConnectivityIssues, block)) + } + // Block proposal is bad + Ok(false) => { + warn!( + "{self}: Block proposal invalid"; + "signer_sighash" => %signer_signature_hash, + "block_id" => %block_id, + ); + Some(self.create_block_rejection(RejectCode::SortitionViewMismatch, block)) + } + // Block proposal passed check, still don't know if valid + Ok(true) => None, + } + } else { + warn!( + "{self}: Cannot validate block, no sortition view"; + "signer_sighash" => %signer_signature_hash, + "block_id" => %block_id, + ); + Some(self.create_block_rejection(RejectCode::NoSortitionView, block)) + } + } + /// Handle block proposal messages submitted to signers stackerdb fn handle_block_proposal( &mut self, @@ -344,14 +434,28 @@ return; } + if block_proposal + .block + .header + .timestamp + .saturating_add(self.block_proposal_max_age_secs) + < get_epoch_time_secs() + { + // Block is too old. Drop it with a warning. Don't even bother broadcasting to the node. + warn!("{self}: Received a block proposal that is more than {} secs old. Ignoring...", self.block_proposal_max_age_secs; + "signer_sighash" => %block_proposal.block.header.signer_signature_hash(), + "block_id" => %block_proposal.block.block_id(), + "block_height" => block_proposal.block.header.chain_length, + "burn_height" => block_proposal.burn_height, + "timestamp" => block_proposal.block.header.timestamp, + ); + return; + } + // TODO: should add a check to ignore an old burn block height if we know it's outdated. Would require us to store the burn block height we last saw on the side.
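// Sketch (not part of this patch): the age gate above is a pure predicate over the
// proposal's header timestamp, the configured maximum age, and the current epoch time;
// saturating_add keeps an absurdly large u64 timestamp from wrapping around `now`.
fn proposal_too_old(header_timestamp: u64, max_age_secs: u64, now_secs: u64) -> bool {
    header_timestamp.saturating_add(max_age_secs) < now_secs
}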
// the signer needs to be able to determine whether or not the block they're about to sign would conflict with an already-signed Stacks block let signer_signature_hash = block_proposal.block.header.signer_signature_hash(); - if let Some(block_info) = self - .signer_db - .block_lookup(&signer_signature_hash) - .expect("Failed to connect to signer DB") - { + if let Some(block_info) = self.block_lookup_by_reward_cycle(&signer_signature_hash) { let Some(block_response) = self.determine_response(&block_info) else { // We are still waiting for a response for this block. Do nothing. debug!("{self}: Received a block proposal for a block we are already validating."; @@ -385,7 +489,10 @@ impl Signer { "burn_height" => block_proposal.burn_height, ); crate::monitoring::increment_block_proposals_received(); + #[cfg(any(test, feature = "testing"))] let mut block_info = BlockInfo::from(block_proposal.clone()); + #[cfg(not(any(test, feature = "testing")))] + let block_info = BlockInfo::from(block_proposal.clone()); // Get sortition view if we don't have it if sortition_state.is_none() { @@ -402,83 +509,19 @@ impl Signer { } // Check if proposal can be rejected now if not valid against sortition view - let block_response = if let Some(sortition_state) = sortition_state { - match sortition_state.check_proposal( - stacks_client, - &mut self.signer_db, - &block_proposal.block, - miner_pubkey, - true, - ) { - // Error validating block - Err(e) => { - warn!( - "{self}: Error checking block proposal: {e:?}"; - "signer_sighash" => %signer_signature_hash, - "block_id" => %block_proposal.block.block_id(), - ); - Some(BlockResponse::rejected( - block_proposal.block.header.signer_signature_hash(), - RejectCode::ConnectivityIssues, - &self.private_key, - self.mainnet, - self.signer_db.calculate_tenure_extend_timestamp( - self.proposal_config.tenure_idle_timeout, - &block_proposal.block, - false, - ), - )) - } - // Block proposal is bad - Ok(false) => { - warn!( - "{self}: Block proposal invalid"; - "signer_sighash" => %signer_signature_hash, - "block_id" => %block_proposal.block.block_id(), - ); - Some(BlockResponse::rejected( - block_proposal.block.header.signer_signature_hash(), - RejectCode::SortitionViewMismatch, - &self.private_key, - self.mainnet, - self.signer_db.calculate_tenure_extend_timestamp( - self.proposal_config.tenure_idle_timeout, - &block_proposal.block, - false, - ), - )) - } - // Block proposal passed check, still don't know if valid - Ok(true) => None, - } - } else { - warn!( - "{self}: Cannot validate block, no sortition view"; - "signer_sighash" => %signer_signature_hash, - "block_id" => %block_proposal.block.block_id(), - ); - Some(BlockResponse::rejected( - block_proposal.block.header.signer_signature_hash(), - RejectCode::NoSortitionView, - &self.private_key, - self.mainnet, - self.signer_db.calculate_tenure_extend_timestamp( - self.proposal_config.tenure_idle_timeout, - &block_proposal.block, - false, - ), - )) - }; + let block_response = self.check_block_against_sortition_state( + stacks_client, + sortition_state, + &block_proposal.block, + miner_pubkey, + ); #[cfg(any(test, feature = "testing"))] let block_response = self.test_reject_block_proposal(block_proposal, &mut block_info, block_response); if let Some(block_response) = block_response { - // We know proposal is invalid. Send rejection message, do not do further validation - if let Err(e) = block_info.mark_locally_rejected() { - warn!("{self}: Failed to mark block as locally rejected: {e:?}",); - }; + // We know proposal is invalid. 
Send rejection message, do not do further validation and do not store it. debug!("{self}: Broadcasting a block response to stacks node: {block_response:?}"); let res = self .stackerdb @@ -504,6 +547,8 @@ "block_height" => block_proposal.block.header.chain_length, "burn_height" => block_proposal.burn_height, ); + #[cfg(any(test, feature = "testing"))] + self.test_stall_block_validation_submission(); match stacks_client.submit_block_for_validation(block_info.block.clone()) { Ok(_) => { self.submitted_block_proposal = @@ -543,6 +588,83 @@ } } } + + /// WARNING: This is an incomplete check. Do NOT call this function BEFORE check_proposal or block proposal validation has succeeded. + /// + /// Re-verify a block's chain length against the last signed block within signerdb. + /// This is required in case a block has been approved since the initial checks of the block validation endpoint. + fn check_block_against_signer_db_state( + &mut self, + stacks_client: &StacksClient, + proposed_block: &NakamotoBlock, + ) -> Option<BlockResponse> { + let signer_signature_hash = proposed_block.header.signer_signature_hash(); + let proposed_block_consensus_hash = proposed_block.header.consensus_hash; + // If this is a tenure change block, ensure that it confirms the correct number of blocks from the parent tenure. + if let Some(tenure_change) = proposed_block.get_tenure_change_tx_payload() { + // Ensure that the tenure change block confirms the expected parent block + match SortitionsView::check_tenure_change_confirms_parent( + tenure_change, + proposed_block, + &mut self.signer_db, + stacks_client, + self.proposal_config.tenure_last_block_proposal_timeout, + ) { + Ok(true) => {} + Ok(false) => { + return Some( + self.create_block_rejection( + RejectCode::SortitionViewMismatch, + proposed_block, + ), + ) + } + Err(e) => { + warn!("{self}: Error checking block proposal: {e}"; + "signer_sighash" => %signer_signature_hash, + "block_id" => %proposed_block.block_id() + ); + return Some( + self.create_block_rejection(RejectCode::ConnectivityIssues, proposed_block), + ); + } + } + } + + // Ensure that the block is the last block in the chain of its current tenure. + match self + .signer_db + .get_last_accepted_block(&proposed_block_consensus_hash) + { + Ok(Some(last_block_info)) => { + if proposed_block.header.chain_length <= last_block_info.block.header.chain_length { + warn!( + "Miner's block proposal does not confirm as many blocks as we expect"; + "proposed_block_consensus_hash" => %proposed_block_consensus_hash, + "proposed_block_signer_sighash" => %signer_signature_hash, + "proposed_chain_length" => proposed_block.header.chain_length, + "expected_at_least" => last_block_info.block.header.chain_length + 1, + ); + return Some(self.create_block_rejection( + RejectCode::SortitionViewMismatch, + proposed_block, + )); + } + } + Ok(_) => {} + Err(e) => { + warn!("{self}: Failed to check block against signer db: {e}"; + "signer_sighash" => %signer_signature_hash, + "block_id" => %proposed_block.block_id() + ); + return Some( + self.create_block_rejection(RejectCode::ConnectivityIssues, proposed_block), + ); + } + } + None + } + /// Handle the block validate ok response.
Returns our block response if we have one fn handle_block_validate_ok( &mut self, @@ -562,66 +684,65 @@ self.submitted_block_proposal = None; } // For mutability reasons, we need to take the block_info out of the map and add it back after processing - let mut block_info = match self.signer_db.block_lookup(&signer_signature_hash) { - Ok(Some(block_info)) => { - if block_info.reward_cycle != self.reward_cycle { - // We are not signing for this reward cycle. Ignore the block. - debug!( - "{self}: Received a block validation response for a different reward cycle. Ignore it."; - "requested_reward_cycle" => block_info.reward_cycle, - ); - return None; + let Some(mut block_info) = self.block_lookup_by_reward_cycle(&signer_signature_hash) else { + // We have not seen this block before. Why are we getting a response for it? + debug!("{self}: Received a block validate response for a block we have not seen before. Ignoring..."); + return None; + }; + if block_info.is_locally_finalized() { + debug!("{self}: Received block validation for a block that is already marked as {}. Ignoring...", block_info.state); + return None; + } + + if let Some(block_response) = + self.check_block_against_signer_db_state(stacks_client, &block_info.block) + { + // The signer db state has changed. We no longer view this block as valid. Override the validation response. + if let Err(e) = block_info.mark_locally_rejected() { + if !block_info.has_reached_consensus() { + warn!("{self}: Failed to mark block as locally rejected: {e:?}"); } - if block_info.is_locally_finalized() { - debug!("{self}: Received block validation for a block that is already marked as {}. Ignoring...", block_info.state); + }; + debug!("{self}: Broadcasting a block response to stacks node: {block_response:?}"); + let res = self + .stackerdb + .send_message_with_retry::<SignerMessage>(block_response.into()); + + match res { + Err(e) => warn!("{self}: Failed to send block rejection to stacker-db: {e:?}"), + Ok(ack) if !ack.accepted => warn!( + "{self}: Block rejection not accepted by stacker-db: {:?}", + ack.reason + ), + Ok(_) => debug!("{self}: Block rejection accepted by stacker-db"), + } + self.signer_db + .insert_block(&block_info) + .unwrap_or_else(|e| self.handle_insert_block_error(e)); + None + } else { + if let Err(e) = block_info.mark_locally_accepted(false) { + if !block_info.has_reached_consensus() { + warn!("{self}: Failed to mark block as locally accepted: {e:?}",); + return None; } - block_info - } - Ok(None) => { - // We have not seen this block before. Why are we getting a response for it? - debug!("{self}: Received a block validate response for a block we have not seen before.
Ignoring..."); - return None; - } - Err(e) => { - error!("{self}: Failed to lookup block in signer db: {e:?}",); - return None; - } - }; - if let Err(e) = block_info.mark_locally_accepted(false) { - if !block_info.has_reached_consensus() { - warn!("{self}: Failed to mark block as locally accepted: {e:?}",); - return None; + block_info.signed_self.get_or_insert(get_epoch_time_secs()); } - block_info.signed_self.get_or_insert(get_epoch_time_secs()); - } - // Record the block validation time but do not consider stx transfers or boot contract calls - block_info.validation_time_ms = if block_validate_ok.cost.is_zero() { - Some(0) - } else { - Some(block_validate_ok.validation_time_ms) - }; - - let signature = self - .private_key - .sign(&signer_signature_hash.0) - .expect("Failed to sign block"); + // Record the block validation time but do not consider stx transfers or boot contract calls + block_info.validation_time_ms = if block_validate_ok.cost.is_zero() { + Some(0) + } else { + Some(block_validate_ok.validation_time_ms) + }; - self.signer_db - .insert_block(&block_info) - .unwrap_or_else(|e| self.handle_insert_block_error(e)); - let accepted = BlockAccepted::new( - block_info.signer_signature_hash(), - signature, - self.signer_db.calculate_tenure_extend_timestamp( - self.proposal_config.tenure_idle_timeout, - &block_info.block, - true, - ), - ); - // have to save the signature _after_ the block info - self.handle_block_signature(stacks_client, &accepted); - Some(BlockResponse::Accepted(accepted)) + self.signer_db + .insert_block(&block_info) + .unwrap_or_else(|e| self.handle_insert_block_error(e)); + let block_response = self.create_block_acceptance(&block_info.block); + // have to save the signature _after_ the block info + self.handle_block_signature(stacks_client, block_response.as_block_accepted()?); + Some(block_response) + } } /// Handle the block validate reject response. Returns our block response if we have one @@ -641,32 +762,15 @@ impl Signer { { self.submitted_block_proposal = None; } - let mut block_info = match self.signer_db.block_lookup(&signer_signature_hash) { - Ok(Some(block_info)) => { - if block_info.reward_cycle != self.reward_cycle { - // We are not signing for this reward cycle. Ignore the block. - debug!( - "{self}: Received a block validation response for a different reward cycle. Ignore it."; - "requested_reward_cycle" => block_info.reward_cycle, - ); - return None; - } - if block_info.is_locally_finalized() { - debug!("{self}: Received block validation for a block that is already marked as {}. Ignoring...", block_info.state); - return None; - } - block_info - } - Ok(None) => { - // We have not seen this block before. Why are we getting a response for it? - debug!("{self}: Received a block validate response for a block we have not seen before. Ignoring..."); - return None; - } - Err(e) => { - error!("{self}: Failed to lookup block in signer db: {e:?}"); - return None; - } + let Some(mut block_info) = self.block_lookup_by_reward_cycle(&signer_signature_hash) else { + // We have not seen this block before. Why are we getting a response for it? + debug!("{self}: Received a block validate response for a block we have not seen before. Ignoring..."); + return None; }; + if block_info.is_locally_finalized() { + debug!("{self}: Received block validation for a block that is already marked as {}. 
Ignoring...", block_info.state); + return None; + } if let Err(e) = block_info.mark_locally_rejected() { if !block_info.has_reached_consensus() { warn!("{self}: Failed to mark block as locally rejected: {e:?}",); @@ -742,9 +846,7 @@ impl Signer { // For mutability reasons, we need to take the block_info out of the map and add it back after processing let mut block_info = match self.signer_db.block_lookup(&signature_sighash) { Ok(Some(block_info)) => { - if block_info.state == BlockState::GloballyRejected - || block_info.state == BlockState::GloballyAccepted - { + if block_info.has_reached_consensus() { // The block has already reached consensus. return; } @@ -771,19 +873,12 @@ impl Signer { "signer_sighash" => %signature_sighash, "block_id" => %block_proposal.block.block_id(), ); - let rejection = BlockResponse::rejected( - block_proposal.block.header.signer_signature_hash(), - RejectCode::ConnectivityIssues, - &self.private_key, - self.mainnet, - self.signer_db.calculate_tenure_extend_timestamp( - self.proposal_config.tenure_idle_timeout, - &block_proposal.block, - false, - ), - ); + let rejection = + self.create_block_rejection(RejectCode::ConnectivityIssues, &block_proposal.block); if let Err(e) = block_info.mark_locally_rejected() { - warn!("{self}: Failed to mark block as locally rejected: {e:?}",); + if !block_info.has_reached_consensus() { + warn!("{self}: Failed to mark block as locally rejected: {e:?}"); + } }; debug!("{self}: Broadcasting a block response to stacks node: {rejection:?}"); let res = self @@ -828,25 +923,16 @@ impl Signer { let block_hash = &rejection.signer_signature_hash; let signature = &rejection.signature; - let mut block_info = match self.signer_db.block_lookup(block_hash) { - Ok(Some(block_info)) => { - if block_info.state == BlockState::GloballyRejected - || block_info.state == BlockState::GloballyAccepted - { - debug!("{self}: Received block rejection for a block that is already marked as {}. Ignoring...", block_info.state); - return; - } - block_info - } - Ok(None) => { - debug!("{self}: Received block rejection for a block we have not seen before. Ignoring..."); - return; - } - Err(e) => { - warn!("{self}: Failed to load block state: {e:?}",); - return; - } + let Some(mut block_info) = self.block_lookup_by_reward_cycle(block_hash) else { + debug!( + "{self}: Received block rejection for a block we have not seen before. Ignoring..." + ); + return; }; + if block_info.has_reached_consensus() { + debug!("{self}: Received block rejection for a block that is already marked as {}. Ignoring...", block_info.state); + return; + } // recover public key let Ok(public_key) = rejection.recover_public_key() else { @@ -901,7 +987,7 @@ impl Signer { // Not enough rejection signatures to make a decision return; } - debug!("{self}: {total_reject_weight}/{total_weight} signers voteed to reject the block {block_hash}"); + debug!("{self}: {total_reject_weight}/{total_weight} signers voted to reject the block {block_hash}"); if let Err(e) = block_info.mark_globally_rejected() { warn!("{self}: Failed to mark block as globally rejected: {e:?}",); } @@ -932,23 +1018,15 @@ impl Signer { "{self}: Received a block-accept signature: ({block_hash}, {signature}, {})", metadata.server_version ); - - // Have we already processed this block? - match self.signer_db.get_block_state(block_hash) { - Ok(Some(state)) => { - if state == BlockState::GloballyAccepted || state == BlockState::GloballyRejected { - debug!("{self}: Received block signature for a block that is already marked as {}. 
Ignoring...", state); - return; - } - } - Ok(None) => { - debug!("{self}: Received block signature for a block we have not seen before. Ignoring..."); - return; - } - Err(e) => { - warn!("{self}: Failed to load block state: {e:?}",); - return; - } + let Some(mut block_info) = self.block_lookup_by_reward_cycle(block_hash) else { + debug!( + "{self}: Received block signature for a block we have not seen before. Ignoring..." + ); + return; + }; + if block_info.has_reached_consensus() { + debug!("{self}: Received block signature for a block that is already marked as {}. Ignoring...", block_info.state); + return; } // recover public key @@ -1016,14 +1094,8 @@ impl Signer { } // have enough signatures to broadcast! - let Ok(Some(mut block_info)) = self.signer_db.block_lookup(block_hash).inspect_err(|e| { - warn!("{self}: Failed to load block {block_hash}: {e:?})"); - }) else { - warn!("{self}: No such block {block_hash}"); - return; - }; // move block to LOCALLY accepted state. - // We only mark this GLOBALLY accepted if we manage to broadcast it... + // It is only considered globally accepted IFF we receive a new block event confirming it OR see the chain tip of the node advance to it. if let Err(e) = block_info.mark_locally_accepted(true) { // Do not abort as we should still try to store the block signature threshold warn!("{self}: Failed to mark block as locally accepted: {e:?}"); @@ -1036,22 +1108,8 @@ impl Signer { panic!("{self} Failed to write block to signerdb: {e}"); }); #[cfg(any(test, feature = "testing"))] - { - if *TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap() == Some(true) { - // Do an extra check just so we don't log EVERY time. - warn!("Block broadcast is stalled due to testing directive."; - "block_id" => %block_info.block.block_id(), - "height" => block_info.block.header.chain_length, - ); - while *TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap() == Some(true) { - std::thread::sleep(std::time::Duration::from_millis(10)); - } - info!("Block validation is no longer stalled due to testing directive."; - "block_id" => %block_info.block.block_id(), - "height" => block_info.block.header.chain_length, - ); - } - } + self.test_pause_block_broadcast(&block_info); + self.broadcast_signed_block(stacks_client, block_info.block, &addrs_to_sigs); if self .submitted_block_proposal @@ -1099,71 +1157,6 @@ impl Signer { } } - #[cfg(any(test, feature = "testing"))] - fn test_skip_block_broadcast(&self, block: &NakamotoBlock) -> bool { - if *TEST_SKIP_BLOCK_BROADCAST.lock().unwrap() == Some(true) { - let block_hash = block.header.signer_signature_hash(); - warn!( - "{self}: Skipping block broadcast due to testing directive"; - "block_id" => %block.block_id(), - "height" => block.header.chain_length, - "consensus_hash" => %block.header.consensus_hash - ); - - if let Err(e) = self - .signer_db - .set_block_broadcasted(&block_hash, get_epoch_time_secs()) - { - warn!("{self}: Failed to set block broadcasted for {block_hash}: {e:?}"); - } - return true; - } - false - } - - #[cfg(any(test, feature = "testing"))] - fn test_reject_block_proposal( - &mut self, - block_proposal: &BlockProposal, - block_info: &mut BlockInfo, - block_response: Option, - ) -> Option { - let Some(public_keys) = &*TEST_REJECT_ALL_BLOCK_PROPOSAL.lock().unwrap() else { - return block_response; - }; - if public_keys.contains( - &stacks_common::types::chainstate::StacksPublicKey::from_private(&self.private_key), - ) { - warn!("{self}: Rejecting block proposal automatically due to testing directive"; - "block_id" => 
%block_proposal.block.block_id(), - "height" => block_proposal.block.header.chain_length, - "consensus_hash" => %block_proposal.block.header.consensus_hash - ); - if let Err(e) = block_info.mark_locally_rejected() { - warn!("{self}: Failed to mark block as locally rejected: {e:?}",); - }; - // We must insert the block into the DB to prevent subsequent repeat proposals being accepted (should reject - // as invalid since we rejected in a prior round if this crops up again) - // in case this is the first time we saw this block. Safe to do since this is testing case only. - self.signer_db - .insert_block(block_info) - .unwrap_or_else(|e| self.handle_insert_block_error(e)); - Some(BlockResponse::rejected( - block_proposal.block.header.signer_signature_hash(), - RejectCode::TestingDirective, - &self.private_key, - self.mainnet, - self.signer_db.calculate_tenure_extend_timestamp( - self.proposal_config.tenure_idle_timeout, - &block_proposal.block, - false, - ), - )) - } else { - None - } - } - /// Send a mock signature to stackerdb to prove we are still alive fn mock_sign(&mut self, mock_proposal: MockProposal) { info!("{self}: Mock signing mock proposal: {mock_proposal:?}"); @@ -1178,8 +1171,28 @@ } /// Helper for logging insert_block error - fn handle_insert_block_error(&self, e: DBError) { + pub fn handle_insert_block_error(&self, e: DBError) { error!("{self}: Failed to insert block into signer-db: {e:?}"); panic!("{self} Failed to write block to signerdb: {e}"); } + + /// Helper for getting the block info from the db while accounting for the reward cycle + pub fn block_lookup_by_reward_cycle( + &self, + block_hash: &Sha512Trunc256Sum, + ) -> Option<BlockInfo> { + let block_info = self + .signer_db + .block_lookup(block_hash) + .inspect_err(|e| { + error!("{self}: Failed to lookup block hash {block_hash} in signer db: {e:?}"); + }) + .ok() + .flatten()?; + if block_info.reward_cycle == self.reward_cycle { + Some(block_info) + } else { + None + } + } } diff --git a/stacks-signer/src/v0/tests.rs b/stacks-signer/src/v0/tests.rs new file mode 100644 index 0000000000..6109ee4f6c --- /dev/null +++ b/stacks-signer/src/v0/tests.rs @@ -0,0 +1,152 @@ +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <http://www.gnu.org/licenses/>.
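// Usage sketch, not code from this patch: it assumes TestFlag exposes the set()/get()
// pair that the call sites below rely on. An integration test flips a directive, drives
// the scenario, then restores the default so later tests are unaffected:
//
//     TEST_SKIP_SIGNER_CLEANUP.set(true);   // run loop now skips stale-signer cleanup
//     // ... drive the signer under test ...
//     TEST_SKIP_SIGNER_CLEANUP.set(false);  // restore default behavior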
+ +use std::sync::LazyLock; + +use blockstack_lib::chainstate::nakamoto::NakamotoBlock; +use libsigner::v0::messages::{BlockResponse, RejectCode}; +use libsigner::BlockProposal; +use slog::{slog_info, slog_warn}; +use stacks_common::types::chainstate::StacksPublicKey; +use stacks_common::util::get_epoch_time_secs; +use stacks_common::util::tests::TestFlag; +use stacks_common::{info, warn}; + +use super::signer::Signer; +use crate::signerdb::BlockInfo; + +/// A global variable that can be used to reject all block proposals if the signer's public key is in the provided list +pub static TEST_REJECT_ALL_BLOCK_PROPOSAL: LazyLock<TestFlag<Vec<StacksPublicKey>>> = + LazyLock::new(TestFlag::default); + +/// A global variable that can be used to ignore block proposals if the signer's public key is in the provided list +pub static TEST_IGNORE_ALL_BLOCK_PROPOSALS: LazyLock<TestFlag<Vec<StacksPublicKey>>> = + LazyLock::new(TestFlag::default); + +/// A global variable that can be used to pause broadcasting the block to the network +pub static TEST_PAUSE_BLOCK_BROADCAST: LazyLock<TestFlag<bool>> = LazyLock::new(TestFlag::default); + +/// A global variable that can be used to skip broadcasting the block to the network +pub static TEST_SKIP_BLOCK_BROADCAST: LazyLock<TestFlag<bool>> = LazyLock::new(TestFlag::default); + +/// A global variable that can be used to pause the block validation submission +pub static TEST_STALL_BLOCK_VALIDATION_SUBMISSION: LazyLock<TestFlag<bool>> = + LazyLock::new(TestFlag::default); + +/// A global variable that can be used to prevent signer cleanup +pub static TEST_SKIP_SIGNER_CLEANUP: LazyLock<TestFlag<bool>> = LazyLock::new(TestFlag::default); + +impl Signer { + /// Skip the block broadcast if the TEST_SKIP_BLOCK_BROADCAST flag is set + pub fn test_skip_block_broadcast(&self, block: &NakamotoBlock) -> bool { + if TEST_SKIP_BLOCK_BROADCAST.get() { + let block_hash = block.header.signer_signature_hash(); + warn!( + "{self}: Skipping block broadcast due to testing directive"; + "block_id" => %block.block_id(), + "height" => block.header.chain_length, + "consensus_hash" => %block.header.consensus_hash + ); + + if let Err(e) = self + .signer_db + .set_block_broadcasted(&block_hash, get_epoch_time_secs()) + { + warn!("{self}: Failed to set block broadcasted for {block_hash}: {e:?}"); + } + return true; + } + false + } + + /// Reject block proposals if the TEST_REJECT_ALL_BLOCK_PROPOSAL flag is set for the signer's public key + pub fn test_reject_block_proposal( + &mut self, + block_proposal: &BlockProposal, + block_info: &mut BlockInfo, + block_response: Option<BlockResponse>, + ) -> Option<BlockResponse> { + let public_keys = TEST_REJECT_ALL_BLOCK_PROPOSAL.get(); + if public_keys.contains( + &stacks_common::types::chainstate::StacksPublicKey::from_private(&self.private_key), + ) { + warn!("{self}: Rejecting block proposal automatically due to testing directive"; + "block_id" => %block_proposal.block.block_id(), + "height" => block_proposal.block.header.chain_length, + "consensus_hash" => %block_proposal.block.header.consensus_hash + ); + if let Err(e) = block_info.mark_locally_rejected() { + if !block_info.has_reached_consensus() { + warn!("{self}: Failed to mark block as locally rejected: {e:?}"); + } + }; + // We must insert the block into the DB to prevent subsequent repeat proposals being accepted (should reject + // as invalid since we rejected in a prior round if this crops up again) + // in case this is the first time we saw this block. Safe to do since this is testing case only.
+ self.signer_db + .insert_block(block_info) + .unwrap_or_else(|e| self.handle_insert_block_error(e)); + Some(self.create_block_rejection(RejectCode::TestingDirective, &block_proposal.block)) + } else { + block_response + } + } + + /// Pause the block broadcast if the TEST_PAUSE_BLOCK_BROADCAST flag is set + pub fn test_pause_block_broadcast(&self, block_info: &BlockInfo) { + if TEST_PAUSE_BLOCK_BROADCAST.get() { + // Do an extra check just so we don't log EVERY time. + warn!("{self}: Block broadcast is stalled due to testing directive."; + "block_id" => %block_info.block.block_id(), + "height" => block_info.block.header.chain_length, + ); + while TEST_PAUSE_BLOCK_BROADCAST.get() { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + info!("{self}: Block validation is no longer stalled due to testing directive."; + "block_id" => %block_info.block.block_id(), + "height" => block_info.block.header.chain_length, + ); + } + } + + /// Ignore block proposals if the TEST_IGNORE_ALL_BLOCK_PROPOSALS flag is set for the signer's public key + pub fn test_ignore_all_block_proposals(&self, block_proposal: &BlockProposal) -> bool { + let public_keys = TEST_IGNORE_ALL_BLOCK_PROPOSALS.get(); + if public_keys.contains( + &stacks_common::types::chainstate::StacksPublicKey::from_private(&self.private_key), + ) { + warn!("{self}: Ignoring block proposal due to testing directive"; + "block_id" => %block_proposal.block.block_id(), + "height" => block_proposal.block.header.chain_length, + "consensus_hash" => %block_proposal.block.header.consensus_hash + ); + return true; + } + false + } + + /// Stall the block validation submission if the TEST_STALL_BLOCK_VALIDATION_SUBMISSION flag is set + pub fn test_stall_block_validation_submission(&self) { + if TEST_STALL_BLOCK_VALIDATION_SUBMISSION.get() { + // Do an extra check just so we don't log EVERY time. + warn!("{self}: Block validation submission is stalled due to testing directive"); + while TEST_STALL_BLOCK_VALIDATION_SUBMISSION.get() { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + warn!("{self}: Block validation submission is no longer stalled due to testing directive. 
Continuing..."); + } + } +} diff --git a/stackslib/Cargo.toml b/stackslib/Cargo.toml index edd58c6161..cf0ae6c1f8 100644 --- a/stackslib/Cargo.toml +++ b/stackslib/Cargo.toml @@ -12,7 +12,7 @@ keywords = [ "stacks", "stx", "bitcoin", "crypto", "blockstack", "decentralized" readme = "README.md" resolver = "2" edition = "2021" -rust-version = "1.61" +rust-version = "1.80" [lib] name = "blockstack_lib" @@ -58,6 +58,7 @@ libstackerdb = { path = "../libstackerdb" } siphasher = "0.3.7" hashbrown = { workspace = true } rusqlite = { workspace = true } +toml = { workspace = true } [target.'cfg(not(any(target_os = "macos",target_os="windows", target_arch = "arm" )))'.dependencies] tikv-jemallocator = {workspace = true} @@ -92,7 +93,6 @@ features = ["std"] [dev-dependencies] assert-json-diff = "1.0.0" -criterion = "0.3.5" stdext = "0.3.1" stx-genesis = { path = "../stx-genesis"} clarity = { features = ["default", "testing"], path = "../clarity" } diff --git a/stackslib/conf b/stackslib/conf new file mode 120000 index 0000000000..f5306ce0c2 --- /dev/null +++ b/stackslib/conf @@ -0,0 +1 @@ +../sample/conf \ No newline at end of file diff --git a/stackslib/src/blockstack_cli.rs b/stackslib/src/blockstack_cli.rs index dbecb0393d..afb80b2f47 100644 --- a/stackslib/src/blockstack_cli.rs +++ b/stackslib/src/blockstack_cli.rs @@ -48,6 +48,7 @@ use clarity::vm::{ClarityName, ClarityVersion, ContractName, Value}; use stacks_common::address::{b58, AddressHashMode}; use stacks_common::codec::{Error as CodecError, StacksMessageCodec}; use stacks_common::types::chainstate::StacksAddress; +use stacks_common::util::cargo_workspace; use stacks_common::util::hash::{hex_bytes, to_hex}; use stacks_common::util::retry::LogReader; @@ -374,7 +375,7 @@ fn handle_contract_publish( ) -> Result { let mut args = args_slice.to_vec(); - if args.len() >= 1 && args[0] == "-h" { + if !args.is_empty() && args[0] == "-h" { return Err(CliError::Message(format!("USAGE:\n {}", PUBLISH_USAGE))); } if args.len() != 5 { @@ -432,7 +433,7 @@ fn handle_contract_call( clarity_version: ClarityVersion, ) -> Result { let mut args = args_slice.to_vec(); - if args.len() >= 1 && args[0] == "-h" { + if !args.is_empty() && args[0] == "-h" { return Err(CliError::Message(format!("USAGE:\n {}", CALL_USAGE))); } if args.len() < 6 { @@ -517,7 +518,7 @@ fn handle_token_transfer( chain_id: u32, ) -> Result { let mut args = args_slice.to_vec(); - if args.len() >= 1 && args[0] == "-h" { + if !args.is_empty() && args[0] == "-h" { return Err(CliError::Message(format!( "USAGE:\n {}", TOKEN_TRANSFER_USAGE @@ -574,7 +575,7 @@ fn handle_token_transfer( } fn generate_secret_key(args: &[String], version: TransactionVersion) -> Result { - if args.len() >= 1 && args[0] == "-h" { + if !args.is_empty() && args[0] == "-h" { return Err(CliError::Message(format!("USAGE:\n {}", GENERATE_USAGE))); } @@ -605,7 +606,7 @@ fn generate_secret_key(args: &[String], version: TransactionVersion) -> Result Result { - if (args.len() >= 1 && args[0] == "-h") || args.len() != 1 { + if (!args.is_empty() && args[0] == "-h") || args.len() != 1 { return Err(CliError::Message(format!("USAGE:\n {}", ADDRESSES_USAGE))); } @@ -644,7 +645,7 @@ fn get_addresses(args: &[String], version: TransactionVersion) -> Result Result { - if (args.len() >= 1 && args[0] == "-h") || args.len() != 1 { + if (!args.is_empty() && args[0] == "-h") || args.len() != 1 { return Err(CliError::Message(format!( "Usage: {}\n", DECODE_TRANSACTION_USAGE @@ -682,7 +683,7 @@ fn decode_transaction(args: &[String], _version: 
TransactionVersion) -> Result Result { - if (args.len() >= 1 && args[0] == "-h") || args.len() != 1 { + if (!args.is_empty() && args[0] == "-h") || args.len() != 1 { return Err(CliError::Message(format!( "Usage: {}\n", DECODE_HEADER_USAGE @@ -721,7 +722,7 @@ fn decode_header(args: &[String], _version: TransactionVersion) -> Result Result { - if (args.len() >= 1 && args[0] == "-h") || args.len() != 1 { + if (!args.is_empty() && args[0] == "-h") || args.len() != 1 { return Err(CliError::Message(format!( "Usage: {}\n", DECODE_BLOCK_USAGE @@ -758,7 +759,7 @@ fn decode_block(args: &[String], _version: TransactionVersion) -> Result Result { - if (args.len() >= 1 && args[0] == "-h") || args.len() != 1 { + if (!args.is_empty() && args[0] == "-h") || args.len() != 1 { return Err(CliError::Message(format!( "Usage: {}\n", DECODE_MICROBLOCK_USAGE @@ -797,7 +798,7 @@ fn decode_microblock(args: &[String], _version: TransactionVersion) -> Result Result { - if (args.len() >= 1 && args[0] == "-h") || args.len() != 1 { + if (!args.is_empty() && args[0] == "-h") || args.len() != 1 { return Err(CliError::Message(format!( "Usage: {}\n", DECODE_MICROBLOCKS_USAGE @@ -917,7 +918,9 @@ mod test { "1", "0", "foo-contract", - "../sample-contracts/tokens.clar", + &cargo_workspace("sample/contracts/tokens.clar") + .display() + .to_string(), ]; assert!(main_handler(to_string_vec(&publish_args)).is_ok()); @@ -928,7 +931,9 @@ mod test { "1", "0", "foo-contract", - "../sample-contracts/non-existent-tokens.clar", + &cargo_workspace("sample/contracts/non-existent-tokens.clar") + .display() + .to_string(), ]; assert!(format!( diff --git a/stackslib/src/burnchains/affirmation.rs b/stackslib/src/burnchains/affirmation.rs index fc7398c9ff..88ad745800 100644 --- a/stackslib/src/burnchains/affirmation.rs +++ b/stackslib/src/burnchains/affirmation.rs @@ -378,6 +378,10 @@ impl AffirmationMap { self.affirmations.len() } + pub fn is_empty(&self) -> bool { + self.affirmations.is_empty() + } + pub fn as_slice(&self) -> &[AffirmationMapEntry] { &self.affirmations } @@ -876,7 +880,7 @@ fn inner_find_heaviest_block_commit_ptr( test_debug!("ancestors = {:?}", &ancestors); test_debug!("ancestor_confirmations = {:?}", &ancestor_confirmations); - if ancestor_confirmations.len() == 0 { + if ancestor_confirmations.is_empty() { // empty prepare phase test_debug!("Prepare-phase has no block-commits"); return None; diff --git a/stackslib/src/burnchains/bitcoin/address.rs b/stackslib/src/burnchains/bitcoin/address.rs index bc5ab4b459..24e0ef8f9d 100644 --- a/stackslib/src/burnchains/bitcoin/address.rs +++ b/stackslib/src/burnchains/bitcoin/address.rs @@ -79,8 +79,8 @@ pub const ADDRESS_VERSION_TESTNET_SINGLESIG: u8 = 111; pub const ADDRESS_VERSION_TESTNET_MULTISIG: u8 = 196; // segwit hrps -pub const SEGWIT_MAINNET_HRP: &'static str = "bc"; -pub const SEGWIT_TESTNET_HRP: &'static str = "tb"; +pub const SEGWIT_MAINNET_HRP: &str = "bc"; +pub const SEGWIT_TESTNET_HRP: &str = "tb"; // segwit witnes versions pub const SEGWIT_V0: u8 = 0; @@ -234,8 +234,8 @@ impl LegacyBitcoinAddress { payload_bytes.copy_from_slice(b); Ok(LegacyBitcoinAddress { - network_id: network_id, - addrtype: addrtype, + network_id, + addrtype, bytes: Hash160(payload_bytes), }) } @@ -317,7 +317,7 @@ impl SegwitBitcoinAddress { None }?; - if quintets.len() == 0 || quintets.len() > 65 { + if quintets.is_empty() || quintets.len() > 65 { test_debug!("Invalid prog length: {}", quintets.len()); return None; } @@ -436,8 +436,8 @@ impl BitcoinAddress { my_bytes.copy_from_slice(b); 
Ok(BitcoinAddress::Legacy(LegacyBitcoinAddress { - network_id: network_id, - addrtype: addrtype, + network_id, + addrtype, bytes: Hash160(my_bytes), })) } @@ -478,7 +478,7 @@ impl BitcoinAddress { my_bytes.copy_from_slice(b); Some(BitcoinAddress::Legacy(LegacyBitcoinAddress { - network_id: network_id, + network_id, addrtype: LegacyBitcoinAddressType::PublicKeyHash, bytes: Hash160(my_bytes), })) @@ -492,7 +492,7 @@ impl BitcoinAddress { my_bytes.copy_from_slice(b); Some(BitcoinAddress::Legacy(LegacyBitcoinAddress { - network_id: network_id, + network_id, addrtype: LegacyBitcoinAddressType::ScriptHash, bytes: Hash160(my_bytes), })) diff --git a/stackslib/src/burnchains/bitcoin/bits.rs b/stackslib/src/burnchains/bitcoin/bits.rs index afeaefc0dc..4198bf3278 100644 --- a/stackslib/src/burnchains/bitcoin/bits.rs +++ b/stackslib/src/burnchains/bitcoin/bits.rs @@ -39,7 +39,7 @@ use crate::chainstate::stacks::{ }; /// Parse a script into its structured constituent opcodes and data and collect them -pub fn parse_script<'a>(script: &'a Script) -> Vec<Instruction<'a>> { +pub fn parse_script(script: &Script) -> Vec<Instruction> { // we will have to accept non-minimal pushdata since there's at least one OP_RETURN // in the transaction stream that has this property already. script.iter(false).collect() @@ -93,7 +93,7 @@ impl BitcoinTxInputStructured { segwit: bool, input_txid: (Txid, u32), ) -> Option<BitcoinTxInputStructured> { - if num_sigs < 1 || pubkey_pushbytes.len() < 1 || pubkey_pushbytes.len() < num_sigs { + if num_sigs < 1 || pubkey_pushbytes.is_empty() || pubkey_pushbytes.len() < num_sigs { test_debug!( "Not a multisig script: num_sigs = {}, num_pubkeys <= {}", num_sigs, @@ -136,7 +136,7 @@ impl BitcoinTxInputStructured { Some(BitcoinTxInputStructured { tx_ref: input_txid, - keys: keys, + keys, num_required: num_sigs, in_type: if segwit { BitcoinInputType::SegwitP2SH @@ -153,7 +153,7 @@ impl BitcoinTxInputStructured { pubkey_vecs: &[Vec<u8>], input_txid: (Txid, u32), ) -> Option<BitcoinTxInputStructured> { - if num_sigs < 1 || pubkey_vecs.len() < 1 || pubkey_vecs.len() < num_sigs { + if num_sigs < 1 || pubkey_vecs.is_empty() || pubkey_vecs.len() < num_sigs { test_debug!( "Not a multisig script: num_sigs = {}, num_pubkeys <= {}", num_sigs, @@ -184,7 +184,7 @@ impl BitcoinTxInputStructured { let tx_input = BitcoinTxInputStructured { tx_ref: input_txid, - keys: keys, + keys, num_required: num_sigs, in_type: BitcoinInputType::SegwitP2SH, }; @@ -498,7 +498,7 @@ impl BitcoinTxInputRaw { ) -> BitcoinTxInputRaw { BitcoinTxInputRaw { scriptSig: script_sig.clone().into_bytes(), - witness: witness, + witness, tx_ref: input_txid, } } diff --git a/stackslib/src/burnchains/bitcoin/blocks.rs b/stackslib/src/burnchains/bitcoin/blocks.rs index 0cee9e60e6..d261dd07c5 100644 --- a/stackslib/src/burnchains/bitcoin/blocks.rs +++ b/stackslib/src/burnchains/bitcoin/blocks.rs @@ -150,9 +150,7 @@ impl BitcoinMessageHandler for BitcoinBlockDownloader { None => panic!("No block header set"), Some(ref ipc_header) => { let block_hash = ipc_header.block_header.header.bitcoin_hash().clone(); - indexer - .send_getdata(&vec![block_hash]) - .and_then(|_r| Ok(true)) + indexer.send_getdata(&vec![block_hash]).map(|_r| true) } } } @@ -231,7 +229,7 @@ impl BitcoinBlockParser { /// New block parser pub fn new(network_id: BitcoinNetworkType, magic_bytes: MagicBytes) -> BitcoinBlockParser { BitcoinBlockParser { - network_id: network_id, + network_id, magic_bytes: magic_bytes.clone(), } } @@ -381,7 +379,7 @@ impl BitcoinBlockParser { tx: &Transaction, epoch_id: StacksEpochId, ) -> Option<Vec<BitcoinTxOutput>> { - if tx.output.len() == 0 {
+ if tx.output.is_empty() { return None; } @@ -478,7 +476,7 @@ impl BitcoinBlockParser { } BitcoinBlock { - block_height: block_height, + block_height, block_hash: BurnchainHeaderHash::from_bitcoin_hash(&block.bitcoin_hash()), parent_block_hash: BurnchainHeaderHash::from_bitcoin_hash(&block.header.prev_blockhash), txs: accepted_txs, @@ -596,7 +594,7 @@ mod tests { let header = deserialize(&header_bin.to_vec()).map_err(|_e| "failed to deserialize header")?; Ok(LoneBlockHeader { - header: header, + header, tx_count: VarInt(0), }) } @@ -655,8 +653,8 @@ mod tests { result: Some(BitcoinTransaction { data_amt: 0, txid: to_txid(&hex_bytes("185c112401590b11acdfea6bb26d2a8e37cb31f24a0c89dbb8cc14b3d6271fb1").unwrap()), - vtxindex: vtxindex, - opcode: '+' as u8, + vtxindex, + opcode: b'+', data: hex_bytes("fae543ff5672fb607fe15e16b1c3ef38737c631c7c5d911c6617993c21fba731363f1cfe").unwrap(), inputs: vec![ BitcoinTxInputStructured { @@ -702,8 +700,8 @@ mod tests { result: Some(BitcoinTransaction { data_amt: 0, txid: to_txid(&hex_bytes("eb2e84a45cf411e528185a98cd5fb45ed349843a83d39fd4dff2de47adad8c8f").unwrap()), - vtxindex: vtxindex, - opcode: '~' as u8, + vtxindex, + opcode: b'~', data: hex_bytes("7061747269636b7374616e6c6579322e6964").unwrap(), inputs: vec![ BitcoinTxInputStructured { @@ -745,8 +743,8 @@ mod tests { result: Some(BitcoinTransaction { data_amt: 0, txid: to_txid(&hex_bytes("b908952b30ccfdfa59985dc1ffdd2a22ef054d20fa253510d2af7797dddee459").unwrap()), - vtxindex: vtxindex, - opcode: ':' as u8, + vtxindex, + opcode: b':', data: hex_bytes("666f6f2e74657374").unwrap(), inputs: vec![ BitcoinTxInputStructured { @@ -776,8 +774,8 @@ mod tests { result: Some(BitcoinTransaction { data_amt: 0, txid: to_txid(&hex_bytes("16751ca54407b922e3072830cf4be58c5562a6dc350f6703192b673c4cc86182").unwrap()), - vtxindex: vtxindex, - opcode: '?' as u8, + vtxindex, + opcode: b'?', data: hex_bytes("9fab7f294936ddb6524a48feff691ecbd0ca9e8f107d845c417a5438d1cb441e827c5126").unwrap(), inputs: vec![ BitcoinTxInputStructured { @@ -826,8 +824,8 @@ mod tests { result: Some(BitcoinTransaction { data_amt: 0, txid: to_txid(&hex_bytes("185c112401590b11acdfea6bb26d2a8e37cb31f24a0c89dbb8cc14b3d6271fb1").unwrap()), - vtxindex: vtxindex, - opcode: '+' as u8, + vtxindex, + opcode: b'+', data: hex_bytes("fae543ff5672fb607fe15e16b1c3ef38737c631c7c5d911c6617993c21fba731363f1cfe").unwrap(), inputs: vec![ BitcoinTxInputRaw { @@ -864,8 +862,8 @@ mod tests { result: Some(BitcoinTransaction { data_amt: 0, txid: to_txid(&hex_bytes("eb2e84a45cf411e528185a98cd5fb45ed349843a83d39fd4dff2de47adad8c8f").unwrap()), - vtxindex: vtxindex, - opcode: '~' as u8, + vtxindex, + opcode: b'~', data: hex_bytes("7061747269636b7374616e6c6579322e6964").unwrap(), inputs: vec![ BitcoinTxInputRaw { @@ -897,8 +895,8 @@ mod tests { result: Some(BitcoinTransaction { data_amt: 0, txid: to_txid(&hex_bytes("b908952b30ccfdfa59985dc1ffdd2a22ef054d20fa253510d2af7797dddee459").unwrap()), - vtxindex: vtxindex, - opcode: ':' as u8, + vtxindex, + opcode: b':', data: hex_bytes("666f6f2e74657374").unwrap(), inputs: vec![ BitcoinTxInputRaw { @@ -928,8 +926,8 @@ mod tests { result: Some(BitcoinTransaction { data_amt: 0, txid: to_txid(&hex_bytes("16751ca54407b922e3072830cf4be58c5562a6dc350f6703192b673c4cc86182").unwrap()), - vtxindex: vtxindex, - opcode: '?' 
as u8, + vtxindex, + opcode: b'?', data: hex_bytes("9fab7f294936ddb6524a48feff691ecbd0ca9e8f107d845c417a5438d1cb441e827c5126").unwrap(), inputs: vec![ BitcoinTxInputRaw { @@ -961,8 +959,8 @@ mod tests { result: Some(BitcoinTransaction { data_amt: 0, txid: to_txid(&hex_bytes("8b8a12909d48fd86c06e92270133d320498fb36caa0fdcb3292a8bba99669ebd").unwrap()), - vtxindex: vtxindex, - opcode: '&' as u8, + vtxindex, + opcode: b'&', data: hex_bytes("0000cd73fa046543210000000000aa000174657374").unwrap(), inputs: vec![ BitcoinTxInputRaw { @@ -1039,7 +1037,7 @@ mod tests { // NAME_REGISTRATION with segwit p2wpkh-p2sh input txid: to_txid(&hex_bytes("b908952b30ccfdfa59985dc1ffdd2a22ef054d20fa253510d2af7797dddee459").unwrap()), vtxindex: 1, - opcode: ':' as u8, + opcode: b':', data: hex_bytes("666f6f2e74657374").unwrap(), inputs: vec![ BitcoinTxInputStructured { @@ -1082,7 +1080,7 @@ mod tests { // TOKEN_TRANSFER txid: to_txid(&hex_bytes("13f2c54dbbe3d4d6ed6c9fd1a68fe3c4238ec5de50316d102a106553b57b8728").unwrap()), vtxindex: 2, - opcode: '$' as u8, + opcode: b'$', data: hex_bytes("7c503a2e30a905cb515cfbc291766dfa00000000000000000000000000535441434b530000000000000064").unwrap(), inputs: vec![ BitcoinTxInputStructured { @@ -1110,7 +1108,7 @@ mod tests { // TOKEN_TRANSFER txid: to_txid(&hex_bytes("7c7c60ae8617daeb351da01d0f683633e6778eb39b69e6e652b24ca0ce230291").unwrap()), vtxindex: 4, - opcode: '$' as u8, + opcode: b'$', data: hex_bytes("7c503a2e30a905cb515cfbc291766dfa00000000000000000000000000535441434b530000000000000064").unwrap(), inputs: vec![ BitcoinTxInputStructured { @@ -1138,7 +1136,7 @@ mod tests { // TOKEN_TRANSFER txid: to_txid(&hex_bytes("ae1cf8b812cf28ea96c7343dc7ee9ff2d8dfb2f441ab11c886dfcd56a0a1a2b4").unwrap()), vtxindex: 7, - opcode: '$' as u8, + opcode: b'$', data: hex_bytes("7c503a2e30a905cb515cfbc291766dfa00000000000000000000000000535441434b530000000000000064").unwrap(), inputs: vec![ BitcoinTxInputStructured { @@ -1166,7 +1164,7 @@ mod tests { // TOKEN_TRANSFER txid: to_txid(&hex_bytes("12fed1db482a35dba87535a13089692cea35a71bfb159b21d0a04be41219b2bd").unwrap()), vtxindex: 10, - opcode: '$' as u8, + opcode: b'$', data: hex_bytes("7c503a2e30a905cb515cfbc291766dfa00000000000000000000000000535441434b530000000000000064").unwrap(), inputs: vec![ BitcoinTxInputStructured { @@ -1194,7 +1192,7 @@ mod tests { // TOKEN_TRANSFER txid: to_txid(&hex_bytes("78035609a8733f214555cfec29e3eee1d24014863dc9f9d98092f6fbc5df63e8").unwrap()), vtxindex: 13, - opcode: '$' as u8, + opcode: b'$', data: hex_bytes("7c503a2e30a905cb515cfbc291766dfa00000000000000000000000000535441434b530000000000000064").unwrap(), inputs: vec![ BitcoinTxInputStructured { diff --git a/stackslib/src/burnchains/bitcoin/indexer.rs b/stackslib/src/burnchains/bitcoin/indexer.rs index 83c8903d35..c99c5909ad 100644 --- a/stackslib/src/burnchains/bitcoin/indexer.rs +++ b/stackslib/src/burnchains/bitcoin/indexer.rs @@ -50,15 +50,15 @@ use crate::core::{ }; use crate::util_lib::db::Error as DBError; -pub const USER_AGENT: &'static str = "Stacks/2.1"; +pub const USER_AGENT: &str = "Stacks/2.1"; pub const BITCOIN_MAINNET: u32 = 0xD9B4BEF9; pub const BITCOIN_TESTNET: u32 = 0x0709110B; pub const BITCOIN_REGTEST: u32 = 0xDAB5BFFA; -pub const BITCOIN_MAINNET_NAME: &'static str = "mainnet"; -pub const BITCOIN_TESTNET_NAME: &'static str = "testnet"; -pub const BITCOIN_REGTEST_NAME: &'static str = "regtest"; +pub const BITCOIN_MAINNET_NAME: &str = "mainnet"; +pub const BITCOIN_TESTNET_NAME: &str = "testnet"; +pub const BITCOIN_REGTEST_NAME: &str = 
"regtest"; // batch size for searching for a reorg // kept small since sometimes bitcoin will just send us one header at a time @@ -160,7 +160,7 @@ impl BitcoinIndexerConfig { username: Some("blockstack".to_string()), password: Some("blockstacksystem".to_string()), timeout: 30, - spv_headers_path: spv_headers_path, + spv_headers_path, first_block: 0, magic_bytes: BLOCKSTACK_MAGIC_MAINNET.clone(), epochs: None, @@ -193,7 +193,7 @@ impl BitcoinIndexerRuntime { services: 0, user_agent: USER_AGENT.to_owned(), version_nonce: rng.gen(), - network_id: network_id, + network_id, block_height: 0, last_getdata_send_time: 0, last_getheaders_send_time: 0, @@ -458,7 +458,7 @@ impl BitcoinIndexer { } spv_client .run(self) - .and_then(|_r| Ok(spv_client.end_block_height.unwrap())) + .map(|_r| spv_client.end_block_height.unwrap()) } #[cfg(test)] @@ -705,7 +705,7 @@ impl BitcoinIndexer { e })?; - if reorg_headers.len() == 0 { + if reorg_headers.is_empty() { // chain shrank considerably info!( "Missing Bitcoin headers in block range {}-{} -- did the Bitcoin chain shrink?", @@ -736,7 +736,7 @@ impl BitcoinIndexer { })?; assert!( - canonical_headers.len() > 0, + !canonical_headers.is_empty(), "BUG: uninitialized canonical SPV headers DB" ); @@ -924,7 +924,7 @@ impl BitcoinIndexer { return Ok(()); } warn!( - "Header at height {} is not wihtin 2 hours of now (is at {})", + "Header at height {} is not within 2 hours of now (is at {})", highest_header_height, highest_header.block_header.header.time ); self.drop_headers(highest_header_height.saturating_sub(1))?; @@ -1379,7 +1379,7 @@ mod test { spv_client .insert_block_headers_before(start_block - 1, hdrs) .unwrap(); - } else if hdrs.len() > 0 { + } else if !hdrs.is_empty() { test_debug!("insert at {}: {:?}", 0, &hdrs); spv_client.test_write_block_headers(0, hdrs).unwrap(); } @@ -1552,7 +1552,7 @@ mod test { spv_client .insert_block_headers_before(start_block - 1, hdrs) .unwrap(); - } else if hdrs.len() > 0 { + } else if !hdrs.is_empty() { test_debug!("insert at {}: {:?}", 0, &hdrs); spv_client.test_write_block_headers(0, hdrs).unwrap(); } diff --git a/stackslib/src/burnchains/bitcoin/mod.rs b/stackslib/src/burnchains/bitcoin/mod.rs index d273b1f5f8..6ba66f524b 100644 --- a/stackslib/src/burnchains/bitcoin/mod.rs +++ b/stackslib/src/burnchains/bitcoin/mod.rs @@ -239,8 +239,8 @@ impl BitcoinBlock { block_height: height, block_hash: hash.clone(), parent_block_hash: parent.clone(), - txs: txs, - timestamp: timestamp, + txs, + timestamp, } } } diff --git a/stackslib/src/burnchains/bitcoin/network.rs b/stackslib/src/burnchains/bitcoin/network.rs index 3e8bf9340c..119c360713 100644 --- a/stackslib/src/burnchains/bitcoin/network.rs +++ b/stackslib/src/burnchains/bitcoin/network.rs @@ -45,7 +45,7 @@ impl BitcoinIndexer { pub fn send_message(&mut self, payload: btc_message::NetworkMessage) -> Result<(), btc_error> { let message = btc_message::RawNetworkMessage { magic: network_id_to_bytes(self.runtime.network_id), - payload: payload, + payload, }; self.with_socket(|ref mut sock| { @@ -127,16 +127,16 @@ impl BitcoinIndexer { // classify the message here, so we can pass it along to the handler explicitly match message { btc_message::NetworkMessage::Version(..) => { - return self.handle_version(message).and_then(|_r| Ok(true)); + return self.handle_version(message).map(|_r| true); } btc_message::NetworkMessage::Verack => { - return self.handle_verack(message).and_then(|_r| Ok(true)); + return self.handle_verack(message).map(|_r| true); } btc_message::NetworkMessage::Ping(..) 
=> { - return self.handle_ping(message).and_then(|_r| Ok(true)); + return self.handle_ping(message).map(|_r| true); } btc_message::NetworkMessage::Pong(..) => { - return self.handle_pong(message).and_then(|_r| Ok(true)); + return self.handle_pong(message).map(|_r| true); } _ => match handler { Some(custom_handler) => custom_handler.handle_message(self, message), @@ -245,7 +245,7 @@ impl BitcoinIndexer { let payload = btc_message_network::VersionMessage { version: btc_constants::PROTOCOL_VERSION, services: 0, - timestamp: timestamp, + timestamp, receiver: remote_address, sender: sender_address, nonce: self.runtime.version_nonce, @@ -355,7 +355,7 @@ impl BitcoinIndexer { /// Send a GetData message pub fn send_getdata(&mut self, block_hashes: &Vec) -> Result<(), btc_error> { - assert!(block_hashes.len() > 0); + assert!(!block_hashes.is_empty()); let getdata_invs = block_hashes .iter() .map(|h| btc_message_blockdata::Inventory { diff --git a/stackslib/src/burnchains/bitcoin/spv.rs b/stackslib/src/burnchains/bitcoin/spv.rs index 82cbb7b7f6..8e3ceac237 100644 --- a/stackslib/src/burnchains/bitcoin/spv.rs +++ b/stackslib/src/burnchains/bitcoin/spv.rs @@ -46,23 +46,23 @@ use crate::util_lib::db::{ const BLOCK_HEADER_SIZE: u64 = 81; -pub const BITCOIN_GENESIS_BLOCK_HASH_MAINNET: &'static str = +pub const BITCOIN_GENESIS_BLOCK_HASH_MAINNET: &str = "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"; -pub const BITCOIN_GENESIS_BLOCK_MERKLE_ROOT_MAINNET: &'static str = +pub const BITCOIN_GENESIS_BLOCK_MERKLE_ROOT_MAINNET: &str = "4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b"; -pub const BITCOIN_GENESIS_BLOCK_HASH_TESTNET: &'static str = +pub const BITCOIN_GENESIS_BLOCK_HASH_TESTNET: &str = "000000000933ea01ad0ee984209779baaec3ced90fa3f408719526f8d77f4943"; -pub const BITCOIN_GENESIS_BLOCK_HASH_REGTEST: &'static str = +pub const BITCOIN_GENESIS_BLOCK_HASH_REGTEST: &str = "0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206"; pub const BLOCK_DIFFICULTY_CHUNK_SIZE: u64 = 2016; const BLOCK_DIFFICULTY_INTERVAL: u32 = 14 * 24 * 60 * 60; // two weeks, in seconds -pub const SPV_DB_VERSION: &'static str = "3"; +pub const SPV_DB_VERSION: &str = "3"; -const SPV_INITIAL_SCHEMA: &[&'static str] = &[ +const SPV_INITIAL_SCHEMA: &[&str] = &[ r#" CREATE TABLE headers( version INTEGER NOT NULL, @@ -81,7 +81,7 @@ const SPV_INITIAL_SCHEMA: &[&'static str] = &[ // unlike the `headers` table, this table will never be deleted from, since we use it to determine // whether or not newly-arrived headers represent a better chain than the best-known chain. The // only way to _replace_ a row is to find a header difficulty interval with a _higher_ work score. 
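The spv.rs constants around this point drop the explicit `'static` lifetime: in a `const` or `static` item the lifetime of a reference is already `'static`, so Clippy's `redundant_static_lifetimes` lint asks for the shorter spelling. A minimal standalone sketch of the pattern (illustrative names, not from this codebase):

const GREETING_OLD: &'static str = "hello"; // lint: 'static is implied here
const GREETING_NEW: &str = "hello";         // same type, idiomatic spelling

fn main() {
    // Both constants have type &'static str.
    println!("{} {}", GREETING_OLD, GREETING_NEW);
}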
-const SPV_SCHEMA_2: &[&'static str] = &[r#" +const SPV_SCHEMA_2: &[&str] = &[r#" CREATE TABLE chain_work( interval INTEGER PRIMARY KEY, work TEXT NOT NULL -- 32-byte (256-bit) integer @@ -89,7 +89,7 @@ const SPV_SCHEMA_2: &[&'static str] = &[r#" "#]; // force the node to go and store the burnchain block header hash as well -const SPV_SCHEMA_3: &[&'static str] = &[ +const SPV_SCHEMA_3: &[&str] = &[ r#" DROP TABLE headers; "#, @@ -132,7 +132,7 @@ impl FromColumn for Sha256dHash { } impl FromRow for BlockHeader { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { let version: u32 = row.get_unwrap("version"); let prev_blockhash: Sha256dHash = Sha256dHash::from_column(row, "prev_blockhash")?; let merkle_root: Sha256dHash = Sha256dHash::from_column(row, "merkle_root")?; @@ -167,9 +167,9 @@ impl SpvClient { start_block_height: start_block, end_block_height: end_block, cur_block_height: start_block, - network_id: network_id, - readwrite: readwrite, - reverse_order: reverse_order, + network_id, + readwrite, + reverse_order, headers_db: conn, check_txcount: true, }; @@ -197,9 +197,9 @@ impl SpvClient { start_block_height: start_block, end_block_height: end_block, cur_block_height: start_block, - network_id: network_id, - readwrite: readwrite, - reverse_order: reverse_order, + network_id, + readwrite, + reverse_order, headers_db: conn, check_txcount: true, }; @@ -225,7 +225,7 @@ impl SpvClient { &mut self.headers_db } - pub fn tx_begin<'a>(&'a mut self) -> Result, btc_error> { + pub fn tx_begin(&mut self) -> Result, btc_error> { if !self.readwrite { return Err(db_error::ReadOnly.into()); } @@ -274,7 +274,7 @@ impl SpvClient { tx.execute("UPDATE db_config SET version = ?1", &[version]) .map_err(db_error::SqliteError) .map_err(|e| e.into()) - .and_then(|_| Ok(())) + .map(|_| ()) } #[cfg(test)] @@ -354,11 +354,11 @@ impl SpvClient { pub fn is_initialized(&self) -> Result<(), btc_error> { fs::metadata(&self.headers_path) .map_err(btc_error::FilesystemError) - .and_then(|_m| Ok(())) + .map(|_m| ()) } /// Get the block range to scan - pub fn set_scan_range(&mut self, start_block: u64, end_block: Option) -> () { + pub fn set_scan_range(&mut self, start_block: u64, end_block: Option) { self.start_block_height = start_block; self.end_block_height = end_block; self.cur_block_height = start_block; @@ -529,7 +529,7 @@ impl SpvClient { headers: &Vec, check_txcount: bool, ) -> Result<(), btc_error> { - if headers.len() == 0 { + if headers.is_empty() { return Ok(()); } @@ -741,8 +741,8 @@ impl SpvClient { } /// Insert a block header - fn insert_block_header<'a>( - tx: &mut DBTx<'a>, + fn insert_block_header( + tx: &mut DBTx<'_>, header: BlockHeader, height: u64, ) -> Result<(), btc_error> { @@ -762,7 +762,7 @@ impl SpvClient { tx.execute(sql, args) .map_err(|e| btc_error::DBError(db_error::SqliteError(e))) - .and_then(|_x| Ok(())) + .map(|_x| ()) } /// Initialize the block headers file with the genesis block hash. 
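The repeated `.and_then(|_r| Ok(...))` to `.map(...)` rewrites in spv.rs follow Clippy's `bind_instead_of_map` lint: when the closure always returns `Ok`, `and_then` re-wraps a value that `map` produces directly, and the `Err` arm passes through unchanged either way. A sketch with made-up names:

// Before: and_then must re-wrap the value in Ok itself.
fn commit_old(r: Result<u32, String>) -> Result<(), String> {
    r.and_then(|_rows| Ok(()))
}

// After: map transforms only the Ok value; errors flow through untouched.
fn commit_new(r: Result<u32, String>) -> Result<(), String> {
    r.map(|_rows| ())
}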
@@ -945,7 +945,7 @@ impl SpvClient { ) -> Result<(), btc_error> { assert!(self.readwrite, "SPV header DB is open read-only"); - if block_headers.len() == 0 { + if block_headers.is_empty() { // no-op return Ok(()); } @@ -996,7 +996,7 @@ impl SpvClient { block_headers: Vec, ) -> Result<(), btc_error> { assert!(self.readwrite, "SPV header DB is open read-only"); - if block_headers.len() == 0 { + if block_headers.is_empty() { // no-op return Ok(()); } @@ -1137,7 +1137,7 @@ impl SpvClient { ]); let max_target_bits = BlockHeader::compact_target_from_u256(&max_target); - let parent_header = if headers_in_range.len() > 0 { + let parent_header = if !headers_in_range.is_empty() { headers_in_range[0] } else { match self.read_block_header(current_header_height - 1)? { @@ -1231,7 +1231,7 @@ impl BitcoinMessageHandler for SpvClient { indexer.runtime.last_getheaders_send_time = get_epoch_time_secs(); self.send_next_getheaders(indexer, start_height) - .and_then(|_r| Ok(true)) + .map(|_r| true) } /// Trait message handler @@ -1298,7 +1298,7 @@ impl BitcoinMessageHandler for SpvClient { ); } self.send_next_getheaders(indexer, block_height) - .and_then(|_| Ok(true)) + .map(|_| true) } x => Err(btc_error::UnhandledMessage(x)), } diff --git a/stackslib/src/burnchains/burnchain.rs b/stackslib/src/burnchains/burnchain.rs index b688097d70..caeefe538c 100644 --- a/stackslib/src/burnchains/burnchain.rs +++ b/stackslib/src/burnchains/burnchain.rs @@ -130,7 +130,7 @@ impl BurnchainStateTransition { block_total_burns.sort(); - if block_total_burns.len() == 0 { + if block_total_burns.is_empty() { return Some(0); } else if block_total_burns.len() == 1 { return Some(block_total_burns[0]); diff --git a/stackslib/src/burnchains/db.rs b/stackslib/src/burnchains/db.rs index d5f1e18804..1f42881ac2 100644 --- a/stackslib/src/burnchains/db.rs +++ b/stackslib/src/burnchains/db.rs @@ -84,7 +84,7 @@ pub struct BlockCommitMetadata { } impl FromColumn for AffirmationMap { - fn from_column<'a>(row: &'a Row, col_name: &str) -> Result { + fn from_column(row: &Row, col_name: &str) -> Result { let txt: String = row.get_unwrap(col_name); let am = AffirmationMap::decode(&txt).ok_or(DBError::ParseError)?; Ok(am) @@ -92,13 +92,13 @@ impl FromColumn for AffirmationMap { } impl FromRow for AffirmationMap { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { AffirmationMap::from_column(row, "affirmation_map") } } impl FromRow for BlockCommitMetadata { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { let burn_block_hash = BurnchainHeaderHash::from_column(row, "burn_block_hash")?; let txid = Txid::from_column(row, "txid")?; let block_height = u64::from_column(row, "block_height")?; @@ -132,7 +132,7 @@ impl FromRow for BlockCommitMetadata { block_height, vtxindex, affirmation_id, - anchor_block: anchor_block, + anchor_block, anchor_block_descendant, }) } @@ -144,7 +144,7 @@ impl FromRow for BlockCommitMetadata { pub(crate) fn apply_blockstack_txs_safety_checks( block_height: u64, blockstack_txs: &mut Vec, -) -> () { +) { test_debug!( "Apply safety checks on {} txs at burnchain height {}", blockstack_txs.len(), @@ -207,9 +207,9 @@ impl FromRow for BlockstackOperationType { } } -pub const BURNCHAIN_DB_VERSION: &'static str = "2"; +pub const BURNCHAIN_DB_VERSION: &str = "2"; -const BURNCHAIN_DB_SCHEMA: &'static str = r#" +const BURNCHAIN_DB_SCHEMA: &str = r#" CREATE TABLE burnchain_db_block_headers ( -- height of the block (non-negative) block_height INTEGER NOT NULL, @@ -299,9 +299,8 @@ 
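The `len() == 0` and `len() > 0` checks being rewritten in these hunks fall under Clippy's `len_zero` lint: `is_empty` states the intent directly instead of comparing a length against a literal. A small sketch (hypothetical function names):

// Before: compares a length against zero.
fn have_headers_old(headers: &[u64]) -> bool {
    headers.len() > 0
}

// After: asks the collection directly.
fn have_headers_new(headers: &[u64]) -> bool {
    !headers.is_empty()
}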
CREATE TABLE db_config(version TEXT NOT NULL); INSERT INTO affirmation_maps(affirmation_id,weight,affirmation_map) VALUES (0,0,""); "#; -const LAST_BURNCHAIN_DB_INDEX: &'static str = - "index_block_commit_metadata_burn_block_hash_anchor_block"; -const BURNCHAIN_DB_INDEXES: &'static [&'static str] = &[ +const LAST_BURNCHAIN_DB_INDEX: &str = "index_block_commit_metadata_burn_block_hash_anchor_block"; +const BURNCHAIN_DB_INDEXES: &[&str] = &[ "CREATE INDEX IF NOT EXISTS index_burnchain_db_block_headers_height_hash ON burnchain_db_block_headers(block_height DESC, block_hash ASC);", "CREATE INDEX IF NOT EXISTS index_burnchain_db_block_hash ON burnchain_db_block_ops(block_hash);", "CREATE INDEX IF NOT EXISTS index_burnchain_db_txid ON burnchain_db_block_ops(txid);", @@ -312,7 +311,7 @@ const BURNCHAIN_DB_INDEXES: &'static [&'static str] = &[ "CREATE INDEX IF NOT EXISTS index_block_commit_metadata_burn_block_hash_anchor_block ON block_commit_metadata(burn_block_hash,anchor_block);", ]; -impl<'a> BurnchainDBTransaction<'a> { +impl BurnchainDBTransaction<'_> { /// Store a burnchain block header into the burnchain database. /// Returns the row ID on success. pub(crate) fn store_burnchain_db_entry( @@ -452,7 +451,7 @@ impl<'a> BurnchainDBTransaction<'a> { }) .collect() }; - if commits.len() == 0 { + if commits.is_empty() { test_debug!("No block-commits for block {}", hdr.block_height); return Ok(()); } @@ -1104,9 +1103,9 @@ impl BurnchainDB { &self.conn } - pub fn tx_begin<'a>(&'a mut self) -> Result, BurnchainError> { + pub fn tx_begin(&mut self) -> Result, BurnchainError> { let sql_tx = tx_begin_immediate(&mut self.conn)?; - Ok(BurnchainDBTransaction { sql_tx: sql_tx }) + Ok(BurnchainDBTransaction { sql_tx }) } fn inner_get_canonical_chain_tip( diff --git a/stackslib/src/burnchains/tests/affirmation.rs b/stackslib/src/burnchains/tests/affirmation.rs index 8876f3d1aa..ca40fb5724 100644 --- a/stackslib/src/burnchains/tests/affirmation.rs +++ b/stackslib/src/burnchains/tests/affirmation.rs @@ -246,11 +246,11 @@ pub fn make_simple_key_register( &hex_bytes("a366b51292bef4edd64063d9145c617fec373bceb0758e98cd72becd84d54c7a").unwrap(), ) .unwrap(), - memo: vec![01, 02, 03, 04, 05], + memo: vec![1, 2, 3, 4, 5], txid: next_txid(), - vtxindex: vtxindex, - block_height: block_height, + vtxindex, + block_height, burn_header_hash: burn_header_hash.clone(), } } @@ -389,16 +389,11 @@ pub fn make_reward_cycle_with_vote( ); if let Some(ref parent_commit) = parent_commits[i].as_ref() { + assert!(parent_commit.block_height != block_commit.block_height); assert!( - parent_commit.block_height as u64 != block_commit.block_height as u64 - ); - assert!( - parent_commit.block_height as u64 - == block_commit.parent_block_ptr as u64 - ); - assert!( - parent_commit.vtxindex as u64 == block_commit.parent_vtxindex as u64 + parent_commit.block_height == u64::from(block_commit.parent_block_ptr) ); + assert!(parent_commit.vtxindex == u32::from(block_commit.parent_vtxindex)); } parent_commits[i] = Some(block_commit.clone()); @@ -418,7 +413,7 @@ pub fn make_reward_cycle_with_vote( new_commits.push(commits.clone()); commits .into_iter() - .filter_map(|cmt| cmt) + .flatten() .map(|cmt| BlockstackOperationType::LeaderBlockCommit(cmt)) .collect() }; diff --git a/stackslib/src/burnchains/tests/burnchain.rs b/stackslib/src/burnchains/tests/burnchain.rs index 8d72d4efa9..7f6be5bcf8 100644 --- a/stackslib/src/burnchains/tests/burnchain.rs +++ b/stackslib/src/burnchains/tests/burnchain.rs @@ -99,7 +99,7 @@ fn test_process_block_ops() { 
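Hunks like `sql_tx: sql_tx` to `sql_tx` and `anchor_block: anchor_block` to `anchor_block` above use Rust's field init shorthand, enforced by Clippy's `redundant_field_names` lint: when a local binding and a struct field share a name, the name alone initializes the field. A sketch with invented types:

struct Commit {
    block_height: u64,
    vtxindex: u32,
}

fn make_commit(block_height: u64, vtxindex: u32) -> Commit {
    // `block_height` alone replaces `block_height: block_height`.
    Commit { block_height, vtxindex }
}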
&hex_bytes("a366b51292bef4edd64063d9145c617fec373bceb0758e98cd72becd84d54c7a").unwrap(), ) .unwrap(), - memo: vec![01, 02, 03, 04, 05], + memo: vec![1, 2, 3, 4, 5], txid: Txid::from_bytes( &hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562").unwrap(), @@ -119,7 +119,7 @@ fn test_process_block_ops() { &hex_bytes("bb519494643f79f1dea0350e6fb9a1da88dfdb6137117fc2523824a8aa44fe1c").unwrap(), ) .unwrap(), - memo: vec![01, 02, 03, 04, 05], + memo: vec![1, 2, 3, 4, 5], txid: Txid::from_bytes( &hex_bytes("9410df84e2b440055c33acb075a0687752df63fe8fe84aeec61abe469f0448c7").unwrap(), @@ -139,7 +139,7 @@ fn test_process_block_ops() { &hex_bytes("de8af7037e522e65d2fe2d63fb1b764bfea829df78b84444338379df13144a02").unwrap(), ) .unwrap(), - memo: vec![01, 02, 03, 04, 05], + memo: vec![1, 2, 3, 4, 5], txid: Txid::from_bytes( &hex_bytes("eb54704f71d4a2d1128d60ffccced547054b52250ada6f3e7356165714f44d4c").unwrap(), @@ -464,11 +464,8 @@ fn test_process_block_ops() { 123, )); - let initial_snapshot = BlockSnapshot::initial( - first_block_height, - &first_burn_hash, - first_block_height as u64, - ); + let initial_snapshot = + BlockSnapshot::initial(first_block_height, &first_burn_hash, first_block_height); // process up to 124 { @@ -574,7 +571,7 @@ fn test_process_block_ops() { acc }); - let next_sortition = block_ops_124.len() > 0 && burn_total > 0; + let next_sortition = !block_ops_124.is_empty() && burn_total > 0; let mut block_124_snapshot = BlockSnapshot { accumulated_coinbase_ustx: 400_000_000, @@ -733,11 +730,8 @@ fn test_burn_snapshot_sequence() { // insert all operations let mut db = SortitionDB::connect_test(first_block_height, &first_burn_hash).unwrap(); - let mut prev_snapshot = BlockSnapshot::initial( - first_block_height, - &first_burn_hash, - first_block_height as u64, - ); + let mut prev_snapshot = + BlockSnapshot::initial(first_block_height, &first_burn_hash, first_block_height); let mut all_stacks_block_hashes = vec![]; for i in 0..32 { diff --git a/stackslib/src/burnchains/tests/db.rs b/stackslib/src/burnchains/tests/db.rs index f14243d049..c8f568b5bf 100644 --- a/stackslib/src/burnchains/tests/db.rs +++ b/stackslib/src/burnchains/tests/db.rs @@ -545,7 +545,7 @@ pub fn make_simple_block_commit( txid: next_txid(), vtxindex: 0, - block_height: block_height, + block_height, burn_parent_modulus: ((block_height - 1) % BURN_BLOCK_MINED_AT_MODULUS) as u8, burn_header_hash: burn_header.block_hash.clone(), }; @@ -602,14 +602,14 @@ fn test_get_commit_at() { for i in 0..5 { let hdr = BurnchainHeaderHash([(i + 1) as u8; 32]); let block_header = BurnchainBlockHeader { - block_height: (first_height + i) as u64, + block_height: first_height + i, block_hash: hdr, parent_block_hash: parent_block_header .as_ref() .map(|blk| blk.block_hash.clone()) .unwrap_or(first_block_header.block_hash.clone()), num_txs: 1, - timestamp: i as u64, + timestamp: i, }; headers.push(block_header.clone()); @@ -656,13 +656,13 @@ fn test_get_commit_at() { assert_eq!(cmt, cmts[4]); // fork off the last stored commit block - let fork_hdr = BurnchainHeaderHash([90 as u8; 32]); + let fork_hdr = BurnchainHeaderHash([90; 32]); let fork_block_header = BurnchainBlockHeader { block_height: 5, block_hash: fork_hdr, - parent_block_hash: BurnchainHeaderHash([4 as u8; 32]), + parent_block_hash: BurnchainHeaderHash([4; 32]), num_txs: 1, - timestamp: 4 as u64, + timestamp: 4, }; let mut fork_cmt = cmts[4].clone(); @@ -716,14 +716,14 @@ fn test_get_set_check_anchor_block() { for i in 0..5 { let hdr = BurnchainHeaderHash([(i 
+ 1) as u8; 32]); let block_header = BurnchainBlockHeader { - block_height: (first_height + i) as u64, + block_height: first_height + i, block_hash: hdr, parent_block_hash: parent_block_header .as_ref() .map(|blk| blk.block_hash.clone()) .unwrap_or(first_block_header.block_hash.clone()), num_txs: 1, - timestamp: i as u64, + timestamp: i, }; headers.push(block_header.clone()); @@ -802,14 +802,14 @@ fn test_update_block_descendancy() { for i in 0..5 { let hdr = BurnchainHeaderHash([(i + 1) as u8; 32]); let block_header = BurnchainBlockHeader { - block_height: (first_height + i) as u64, + block_height: first_height + i, block_hash: hdr, parent_block_hash: parent_block_header .as_ref() .map(|blk| blk.block_hash.clone()) .unwrap_or(first_block_header.block_hash.clone()), num_txs: 3, - timestamp: i as u64, + timestamp: i, }; headers.push(block_header.clone()); @@ -926,14 +926,14 @@ fn test_update_block_descendancy_with_fork() { for i in 0..5 { let hdr = BurnchainHeaderHash([(i + 1) as u8; 32]); let block_header = BurnchainBlockHeader { - block_height: (first_height + i) as u64, + block_height: first_height + i, block_hash: hdr, parent_block_hash: parent_block_header .as_ref() .map(|blk| blk.block_hash.clone()) .unwrap_or(first_block_header.block_hash.clone()), num_txs: 3, - timestamp: i as u64, + timestamp: i, }; headers.push(block_header.clone()); @@ -943,14 +943,14 @@ fn test_update_block_descendancy_with_fork() { for i in 0..5 { let hdr = BurnchainHeaderHash([(i + 128 + 1) as u8; 32]); let block_header = BurnchainBlockHeader { - block_height: (first_height + i) as u64, + block_height: first_height + i, block_hash: hdr, parent_block_hash: parent_block_header .as_ref() .map(|blk| blk.block_hash.clone()) .unwrap_or(first_block_header.block_hash.clone()), num_txs: 3, - timestamp: i as u64, + timestamp: i, }; fork_headers.push(block_header.clone()); diff --git a/stackslib/src/burnchains/tests/mod.rs b/stackslib/src/burnchains/tests/mod.rs index c8543b1142..ab3763dac0 100644 --- a/stackslib/src/burnchains/tests/mod.rs +++ b/stackslib/src/burnchains/tests/mod.rs @@ -74,9 +74,9 @@ impl BurnchainBlockHeader { ) -> BurnchainBlockHeader { BurnchainBlockHeader { block_height: parent_sn.block_height + 1, - block_hash: block_hash, + block_hash, parent_block_hash: parent_sn.burn_header_hash.clone(), - num_txs: num_txs, + num_txs, timestamp: get_epoch_time_secs(), } } @@ -178,7 +178,7 @@ impl TestMiner { } pub fn next_VRF_key(&mut self) -> VRFPrivateKey { - let pk = if self.vrf_keys.len() == 0 { + let pk = if self.vrf_keys.is_empty() { // first key is simply the 32-byte hash of the secret state let mut buf: Vec = vec![]; for i in 0..self.privks.len() { @@ -204,7 +204,7 @@ impl TestMiner { } pub fn next_microblock_privkey(&mut self) -> StacksPrivateKey { - let pk = if self.microblock_privks.len() == 0 { + let pk = if self.microblock_privks.is_empty() { // first key is simply the 32-byte hash of the secret state let mut buf: Vec = vec![]; for i in 0..self.privks.len() { @@ -279,11 +279,11 @@ impl TestMiner { self.nonce } - pub fn set_nonce(&mut self, n: u64) -> () { + pub fn set_nonce(&mut self, n: u64) { self.nonce = n; } - pub fn sign_as_origin(&mut self, tx_signer: &mut StacksTransactionSigner) -> () { + pub fn sign_as_origin(&mut self, tx_signer: &mut StacksTransactionSigner) { let num_keys = if self.privks.len() < self.num_sigs as usize { self.privks.len() } else { @@ -297,7 +297,7 @@ impl TestMiner { self.nonce += 1 } - pub fn sign_as_sponsor(&mut self, tx_signer: &mut StacksTransactionSigner) -> () { + 
pub fn sign_as_sponsor(&mut self, tx_signer: &mut StacksTransactionSigner) { let num_keys = if self.privks.len() < self.num_sigs as usize { self.privks.len() } else { @@ -375,7 +375,7 @@ impl TestBurnchainBlock { burn_header_hash, }), ], - fork_id: fork_id, + fork_id, timestamp: get_epoch_time_secs(), } } @@ -576,7 +576,7 @@ impl TestBurnchainBlock { ) } - pub fn patch_from_chain_tip(&mut self, parent_snapshot: &BlockSnapshot) -> () { + pub fn patch_from_chain_tip(&mut self, parent_snapshot: &BlockSnapshot) { assert_eq!(parent_snapshot.block_height + 1, self.block_height); for i in 0..self.txs.len() { @@ -644,7 +644,6 @@ impl TestBurnchainBlock { } pub fn mine_pox< - 'a, T: BlockEventDispatcher, N: CoordinatorNotices, R: RewardSetProvider, @@ -655,7 +654,7 @@ impl TestBurnchainBlock { &self, db: &mut SortitionDB, burnchain: &Burnchain, - coord: &mut ChainsCoordinator<'a, T, N, R, CE, FE, B>, + coord: &mut ChainsCoordinator<'_, T, N, R, CE, FE, B>, ) -> BlockSnapshot { let mut indexer = BitcoinIndexer::new_unit_test(&burnchain.working_dir); let parent_hdr = indexer @@ -724,7 +723,7 @@ impl TestBurnchainFork { tip_index_root: start_index_root.clone(), blocks: vec![], pending_blocks: vec![], - fork_id: fork_id, + fork_id, } } @@ -734,7 +733,7 @@ impl TestBurnchainFork { new_fork } - pub fn append_block(&mut self, b: TestBurnchainBlock) -> () { + pub fn append_block(&mut self, b: TestBurnchainBlock) { self.pending_blocks.push(b); } @@ -783,7 +782,6 @@ impl TestBurnchainFork { } pub fn mine_pending_blocks_pox< - 'a, T: BlockEventDispatcher, N: CoordinatorNotices, R: RewardSetProvider, @@ -794,7 +792,7 @@ impl TestBurnchainFork { &mut self, db: &mut SortitionDB, burnchain: &Burnchain, - coord: &mut ChainsCoordinator<'a, T, N, R, CE, FE, B>, + coord: &mut ChainsCoordinator<'_, T, N, R, CE, FE, B>, ) -> BlockSnapshot { let mut snapshot = { let ic = db.index_conn(); @@ -858,7 +856,7 @@ fn process_next_sortition( let mut next_commits = vec![]; let mut next_prev_keys = vec![]; - if prev_keys.len() > 0 { + if !prev_keys.is_empty() { assert_eq!(miners.len(), prev_keys.len()); // make a Stacks block (hash) for each of the prior block's keys @@ -894,7 +892,7 @@ fn process_next_sortition( (tip_snapshot, next_prev_keys, next_commits) } -fn verify_keys_accepted(node: &mut TestBurnchainNode, prev_keys: &Vec) -> () { +fn verify_keys_accepted(node: &mut TestBurnchainNode, prev_keys: &Vec) { // all keys accepted for key in prev_keys.iter() { let tx_opt = SortitionDB::get_burnchain_transaction(node.sortdb.conn(), &key.txid).unwrap(); @@ -915,7 +913,7 @@ fn verify_keys_accepted(node: &mut TestBurnchainNode, prev_keys: &Vec, -) -> () { +) { // all commits accepted for commit in next_block_commits.iter() { let tx_opt = @@ -1035,13 +1033,13 @@ fn mine_10_stacks_blocks_2_forks_disjoint() { let mut miners_1 = vec![]; let mut miners_2 = vec![]; - let mut miners_drain = miners.drain(..); + let mut miners_iter = miners.into_iter(); for i in 0..5 { - let m = miners_drain.next().unwrap(); + let m = miners_iter.next().unwrap(); miners_1.push(m); } for i in 0..5 { - let m = miners_drain.next().unwrap(); + let m = miners_iter.next().unwrap(); miners_2.push(m); } @@ -1150,13 +1148,13 @@ fn mine_10_stacks_blocks_2_forks_disjoint_same_blocks() { let mut miners_1 = vec![]; let mut miners_2 = vec![]; - let mut miners_drain = miners.drain(..); + let mut miners_iter = miners.into_iter(); for i in 0..5 { - let m = miners_drain.next().unwrap(); + let m = miners_iter.next().unwrap(); miners_1.push(m); } for i in 0..5 { - let m = 
miners_drain.next().unwrap(); + let m = miners_iter.next().unwrap(); miners_2.push(m); } diff --git a/stackslib/src/chainstate/burn/db/mod.rs b/stackslib/src/chainstate/burn/db/mod.rs index cbee114603..9136a36f6d 100644 --- a/stackslib/src/chainstate/burn/db/mod.rs +++ b/stackslib/src/chainstate/burn/db/mod.rs @@ -53,7 +53,7 @@ impl_byte_array_from_column_only!(TrieHash); impl_byte_array_from_column_only!(MessageSignature); impl FromColumn for VRFPublicKey { - fn from_column<'a>(row: &'a Row, column_name: &str) -> Result { + fn from_column(row: &Row, column_name: &str) -> Result { let pubkey_hex: String = row.get_unwrap(column_name); match VRFPublicKey::from_hex(&pubkey_hex) { Some(pubk) => Ok(pubk), @@ -63,7 +63,7 @@ impl FromColumn for VRFPublicKey { } impl FromColumn for StacksAddress { - fn from_column<'a>(row: &'a Row, column_name: &str) -> Result { + fn from_column(row: &Row, column_name: &str) -> Result { let address_str: String = row.get_unwrap(column_name); match Self::from_string(&address_str) { Some(a) => Ok(a), @@ -73,14 +73,14 @@ impl FromColumn for StacksAddress { } impl FromColumn for PrincipalData { - fn from_column<'a>(row: &'a Row, column_name: &str) -> Result { + fn from_column(row: &Row, column_name: &str) -> Result { let address_str: String = row.get_unwrap(column_name); Self::parse(&address_str).map_err(|_| db_error::ParseError) } } impl FromColumn for PoxAddress { - fn from_column<'a>(row: &'a Row, column_name: &str) -> Result { + fn from_column(row: &Row, column_name: &str) -> Result { let address_str: String = row.get_unwrap(column_name); match Self::from_db_string(&address_str) { Some(a) => Ok(a), @@ -90,7 +90,7 @@ impl FromColumn for PoxAddress { } impl FromColumn for BitcoinAddress { - fn from_column<'a>(row: &'a Row, column_name: &str) -> Result { + fn from_column(row: &Row, column_name: &str) -> Result { let address_str: String = row.get_unwrap(column_name); match Self::from_string(&address_str) { Some(a) => Ok(a), diff --git a/stackslib/src/chainstate/burn/db/processing.rs b/stackslib/src/chainstate/burn/db/processing.rs index 0aacd2816a..17e2546389 100644 --- a/stackslib/src/chainstate/burn/db/processing.rs +++ b/stackslib/src/chainstate/burn/db/processing.rs @@ -35,7 +35,7 @@ use crate::chainstate::stacks::index::{Error as MARFError, MARFValue, MarfTrieId use crate::core::INITIAL_MINING_BONUS_WINDOW; use crate::util_lib::db::Error as DBError; -impl<'a> SortitionHandleTx<'a> { +impl SortitionHandleTx<'_> { /// Run a blockstack operation's "check()" method and return the result. 
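The `impl<'a> SortitionHandleTx<'a>` to `impl SortitionHandleTx<'_>` rewrite just above is the impl-level case of Clippy's `needless_lifetimes`: when the lifetime parameter is never referred to by name inside the impl body, the anonymous lifetime `'_` keeps the impl exactly as general with less noise. A compact sketch on a made-up type:

struct HandleTx<'a> {
    conn: &'a str,
}

// Instead of `impl<'a> HandleTx<'a> { ... }`:
impl HandleTx<'_> {
    fn conn_len(&self) -> usize {
        self.conn.len()
    }
}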
fn check_transaction( &mut self, @@ -379,7 +379,7 @@ mod tests { "a366b51292bef4edd64063d9145c617fec373bceb0758e98cd72becd84d54c7a", ) .unwrap(), - memo: vec![01, 02, 03, 04, 05], + memo: vec![1, 2, 3, 4, 5], txid: Txid::from_bytes_be( &hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562") diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index e399121e07..3d86f67e54 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -88,7 +88,7 @@ use crate::util_lib::db::{ u64_to_sql, DBConn, DBTx, Error as db_error, FromColumn, FromRow, IndexDBConn, IndexDBTx, }; -const BLOCK_HEIGHT_MAX: u64 = ((1 as u64) << 63) - 1; +const BLOCK_HEIGHT_MAX: u64 = (1 << 63) - 1; pub const REWARD_WINDOW_START: u64 = 144 * 15; pub const REWARD_WINDOW_END: u64 = 144 * 90 + REWARD_WINDOW_START; @@ -96,25 +96,25 @@ pub const REWARD_WINDOW_END: u64 = 144 * 90 + REWARD_WINDOW_START; pub type BlockHeaderCache = HashMap, ConsensusHash)>; impl FromRow for SortitionId { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { SortitionId::from_column(row, "sortition_id") } } impl FromRow for ConsensusHash { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { ConsensusHash::from_column(row, "consensus_hash") } } impl FromRow for BurnchainHeaderHash { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { BurnchainHeaderHash::from_column(row, "burn_header_hash") } } impl FromRow for MissedBlockCommit { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { let intended_sortition = SortitionId::from_column(row, "intended_sortition_id")?; let input_json: String = row.get_unwrap("input"); let input = @@ -130,7 +130,7 @@ impl FromRow for MissedBlockCommit { } impl FromRow for BlockSnapshot { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { let block_height = u64::from_column(row, "block_height")?; let burn_header_hash = BurnchainHeaderHash::from_column(row, "burn_header_hash")?; let burn_header_timestamp = u64::from_column(row, "burn_header_timestamp")?; @@ -211,7 +211,7 @@ impl FromRow for BlockSnapshot { } impl FromRow for LeaderKeyRegisterOp { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { let txid = Txid::from_column(row, "txid")?; let vtxindex: u32 = row.get_unwrap("vtxindex"); let block_height = u64::from_column(row, "block_height")?; @@ -225,14 +225,14 @@ impl FromRow for LeaderKeyRegisterOp { let memo = memo_bytes.to_vec(); let leader_key_row = LeaderKeyRegisterOp { - txid: txid, - vtxindex: vtxindex, - block_height: block_height, - burn_header_hash: burn_header_hash, - - consensus_hash: consensus_hash, - public_key: public_key, - memo: memo, + txid, + vtxindex, + block_height, + burn_header_hash, + + consensus_hash, + public_key, + memo, }; Ok(leader_key_row) @@ -240,7 +240,7 @@ impl FromRow for LeaderKeyRegisterOp { } impl FromRow for LeaderBlockCommitOp { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { let txid = Txid::from_column(row, "txid")?; let vtxindex: u32 = row.get_unwrap("vtxindex"); let block_height = u64::from_column(row, "block_height")?; @@ -314,7 +314,7 @@ impl FromRow for LeaderBlockCommitOp { } impl FromRow for StackStxOp { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { let txid = Txid::from_column(row, "txid")?; let vtxindex: u32 = 
row.get_unwrap("vtxindex"); let block_height = u64::from_column(row, "block_height")?; @@ -357,7 +357,7 @@ impl FromRow for StackStxOp { } impl FromRow for DelegateStxOp { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { let txid = Txid::from_column(row, "txid")?; let vtxindex: u32 = row.get_unwrap("vtxindex"); let block_height = u64::from_column(row, "block_height")?; @@ -389,7 +389,7 @@ impl FromRow for DelegateStxOp { } impl FromRow for TransferStxOp { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { let txid = Txid::from_column(row, "txid")?; let vtxindex: u32 = row.get_unwrap("vtxindex"); let block_height = u64::from_column(row, "block_height")?; @@ -417,7 +417,7 @@ impl FromRow for TransferStxOp { } impl FromRow for VoteForAggregateKeyOp { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { let txid = Txid::from_column(row, "txid")?; let vtxindex: u32 = row.get_unwrap("vtxindex"); let block_height = u64::from_column(row, "block_height")?; @@ -450,7 +450,7 @@ impl FromRow for VoteForAggregateKeyOp { } impl FromColumn for ASTRules { - fn from_column<'a>(row: &'a Row, column_name: &str) -> Result { + fn from_column(row: &Row, column_name: &str) -> Result { let x: u8 = row.get_unwrap(column_name); let ast_rules = ASTRules::from_u8(x).ok_or(db_error::ParseError)?; Ok(ast_rules) @@ -458,7 +458,7 @@ impl FromColumn for ASTRules { } impl FromRow<(ASTRules, u64)> for (ASTRules, u64) { - fn from_row<'a>(row: &'a Row) -> Result<(ASTRules, u64), db_error> { + fn from_row(row: &Row) -> Result<(ASTRules, u64), db_error> { let ast_rules = ASTRules::from_column(row, "ast_rule_id")?; let height = u64::from_column(row, "block_height")?; Ok((ast_rules, height)) @@ -479,7 +479,7 @@ pub struct InitialMiningBonus { } impl FromRow for AcceptedStacksBlockHeader { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { let tip_consensus_hash = ConsensusHash::from_column(row, "tip_consensus_hash")?; let consensus_hash = ConsensusHash::from_column(row, "consensus_hash")?; let block_hash = BlockHeaderHash::from_column(row, "stacks_block_hash")?; @@ -495,7 +495,7 @@ impl FromRow for AcceptedStacksBlockHeader { } impl FromRow for StacksEpoch { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { let epoch_id_u32: u32 = row.get_unwrap("epoch_id"); let epoch_id = StacksEpochId::try_from(epoch_id_u32).map_err(|_| db_error::ParseError)?; @@ -515,9 +515,9 @@ impl FromRow for StacksEpoch { } } -pub const SORTITION_DB_VERSION: &'static str = "9"; +pub const SORTITION_DB_VERSION: &str = "9"; -const SORTITION_DB_INITIAL_SCHEMA: &'static [&'static str] = &[ +const SORTITION_DB_INITIAL_SCHEMA: &[&str] = &[ r#" PRAGMA foreign_keys = ON; "#, @@ -652,7 +652,7 @@ const SORTITION_DB_INITIAL_SCHEMA: &'static [&'static str] = &[ "CREATE TABLE db_config(version TEXT PRIMARY KEY);", ]; -const SORTITION_DB_SCHEMA_2: &'static [&'static str] = &[r#" +const SORTITION_DB_SCHEMA_2: &[&str] = &[r#" CREATE TABLE epochs ( start_block_height INTEGER NOT NULL, end_block_height INTEGER NOT NULL, @@ -662,7 +662,7 @@ const SORTITION_DB_SCHEMA_2: &'static [&'static str] = &[r#" PRIMARY KEY(start_block_height,epoch_id) );"#]; -const SORTITION_DB_SCHEMA_3: &'static [&'static str] = &[r#" +const SORTITION_DB_SCHEMA_3: &[&str] = &[r#" CREATE TABLE block_commit_parents ( block_commit_txid TEXT NOT NULL, block_commit_sortition_id TEXT NOT NULL, @@ -673,7 +673,7 @@ const SORTITION_DB_SCHEMA_3: &'static 
[&'static str] = &[r#" FOREIGN KEY(block_commit_txid,block_commit_sortition_id) REFERENCES block_commits(txid,sortition_id) );"#]; -const SORTITION_DB_SCHEMA_4: &'static [&'static str] = &[ +const SORTITION_DB_SCHEMA_4: &[&str] = &[ r#" CREATE TABLE delegate_stx ( txid TEXT NOT NULL, @@ -698,16 +698,16 @@ const SORTITION_DB_SCHEMA_4: &'static [&'static str] = &[ /// The changes for version five *just* replace the existing epochs table /// by deleting all the current entries and inserting the new epochs definition. -const SORTITION_DB_SCHEMA_5: &'static [&'static str] = &[r#" +const SORTITION_DB_SCHEMA_5: &[&str] = &[r#" DELETE FROM epochs;"#]; -const SORTITION_DB_SCHEMA_6: &'static [&'static str] = &[r#" +const SORTITION_DB_SCHEMA_6: &[&str] = &[r#" DELETE FROM epochs;"#]; -const SORTITION_DB_SCHEMA_7: &'static [&'static str] = &[r#" +const SORTITION_DB_SCHEMA_7: &[&str] = &[r#" DELETE FROM epochs;"#]; -const SORTITION_DB_SCHEMA_8: &'static [&'static str] = &[ +const SORTITION_DB_SCHEMA_8: &[&str] = &[ r#"DELETE FROM epochs;"#, r#"DROP INDEX IF EXISTS index_user_burn_support_txid;"#, r#"DROP INDEX IF EXISTS index_user_burn_support_sortition_id_vtxindex;"#, @@ -751,11 +751,11 @@ const SORTITION_DB_SCHEMA_8: &'static [&'static str] = &[ );"#, ]; -static SORTITION_DB_SCHEMA_9: &[&'static str] = +static SORTITION_DB_SCHEMA_9: &[&str] = &[r#"ALTER TABLE block_commits ADD punished TEXT DEFAULT NULL;"#]; -const LAST_SORTITION_DB_INDEX: &'static str = "index_block_commits_by_sender"; -const SORTITION_DB_INDEXES: &'static [&'static str] = &[ +const LAST_SORTITION_DB_INDEX: &str = "index_block_commits_by_sender"; +const SORTITION_DB_INDEXES: &[&str] = &[ "CREATE INDEX IF NOT EXISTS snapshots_block_hashes ON snapshots(block_height,index_root,winning_stacks_block_hash);", "CREATE INDEX IF NOT EXISTS snapshots_block_stacks_hashes ON snapshots(num_sortitions,index_root,winning_stacks_block_hash);", "CREATE INDEX IF NOT EXISTS snapshots_block_heights ON snapshots(burn_header_hash,block_height);", @@ -1533,7 +1533,7 @@ impl SortitionHandle for SortitionHandleConn<'_> { } } -impl<'a> SortitionHandleTx<'a> { +impl SortitionHandleTx<'_> { pub fn set_stacks_block_accepted( &mut self, consensus_hash: &ConsensusHash, @@ -1604,7 +1604,7 @@ impl<'a> SortitionHandleTx<'a> { anchor_block, reward_set.rewarded_addresses.len() ); - if reward_set.rewarded_addresses.len() == 0 { + if reward_set.rewarded_addresses.is_empty() { return Ok(None); } @@ -2646,7 +2646,7 @@ impl<'a> SortitionHandleConn<'a> { // Connection methods impl SortitionDB { /// Begin a transaction. 
- pub fn tx_begin<'a>(&'a mut self) -> Result, db_error> { + pub fn tx_begin(&mut self) -> Result, db_error> { if !self.readwrite { return Err(db_error::ReadOnly); } @@ -2663,7 +2663,7 @@ impl SortitionDB { } /// Make an indexed connection - pub fn index_conn<'a>(&'a self) -> SortitionDBConn<'a> { + pub fn index_conn(&self) -> SortitionDBConn<'_> { SortitionDBConn::new( &self.marf, SortitionDBTxContext { @@ -2739,7 +2739,7 @@ impl SortitionDB { )) } - pub fn conn<'a>(&'a self) -> &'a Connection { + pub fn conn(&self) -> &Connection { self.marf.sqlite_conn() } @@ -3556,8 +3556,8 @@ impl SortitionDB { } #[cfg(any(test, feature = "testing"))] - pub fn override_ast_rule_height<'a>( - tx: &mut DBTx<'a>, + pub fn override_ast_rule_height( + tx: &mut DBTx<'_>, ast_rules: ASTRules, height: u64, ) -> Result<(), db_error> { @@ -3587,7 +3587,7 @@ impl SortitionDB { NO_PARAMS, )?; - assert!(ast_rule_sets.len() > 0); + assert!(!ast_rule_sets.is_empty()); let mut last_height = ast_rule_sets[0].1; let mut last_rules = ast_rule_sets[0].0; for (ast_rules, ast_rule_height) in ast_rule_sets.into_iter() { @@ -3699,7 +3699,7 @@ impl SortitionDB { } } -impl<'a> SortitionDBTx<'a> { +impl SortitionDBTx<'_> { pub fn find_sortition_tip_affirmation_map( &mut self, chain_tip: &SortitionId, @@ -3720,7 +3720,7 @@ impl<'a> SortitionDBTx<'a> { } } -impl<'a> SortitionDBConn<'a> { +impl SortitionDBConn<'_> { pub fn as_handle<'b>(&'b self, chain_tip: &SortitionId) -> SortitionHandleConn<'b> { SortitionHandleConn { index: self.index, @@ -3936,7 +3936,7 @@ impl<'a> SortitionDBConn<'a> { tip, reward_cycle_id, ) - .and_then(|(reward_cycle_info, _anchor_sortition_id)| Ok(reward_cycle_info)) + .map(|(reward_cycle_info, _anchor_sortition_id)| reward_cycle_info) } /// Get the prepare phase start sortition ID of a reward cycle. This is the first prepare @@ -4048,25 +4048,23 @@ impl SortitionDB { } fn parse_last_anchor_block_hash(s: Option) -> Option { - s.map(|s| { - if s == "" { + s.and_then(|s| { + if s.is_empty() { None } else { Some(BlockHeaderHash::from_hex(&s).expect("BUG: Bad BlockHeaderHash stored in DB")) } }) - .flatten() } fn parse_last_anchor_block_txid(s: Option) -> Option { - s.map(|s| { - if s == "" { + s.and_then(|s| { + if s.is_empty() { None } else { Some(Txid::from_hex(&s).expect("BUG: Bad Txid stored in DB")) } }) - .flatten() } /// Mark a Stacks block snapshot as valid again, but update its memoized canonical Stacks tip @@ -4120,8 +4118,8 @@ impl SortitionDB { mut after: G, ) -> Result<(), BurnchainError> where - F: FnMut(&mut SortitionDBTx, &BurnchainHeaderHash, &Vec) -> (), - G: FnMut(&mut SortitionDBTx) -> (), + F: FnMut(&mut SortitionDBTx, &BurnchainHeaderHash, &Vec), + G: FnMut(&mut SortitionDBTx), { let mut db_tx = self.tx_begin()?; let mut queue = vec![burn_block.clone()]; @@ -4287,7 +4285,7 @@ impl SortitionDB { /// * `next_pox_info` - iff this sortition is the first block in a reward cycle, this should be Some /// * `announce_to` - a function that will be invoked with the calculated reward set before this method /// commits its results. This is used to post the calculated reward set to an event observer. 
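`parse_last_anchor_block_hash` and `parse_last_anchor_block_txid` above swap `.map(...).flatten()` for `.and_then(...)`, matching Clippy's `map_flatten` lint: mapping an `Option` with a closure that itself returns an `Option` yields a nested `Option<Option<T>>`, and `and_then` collapses the two steps into one. A sketch with a hypothetical parser:

// Before: map produces Option<Option<u64>>, then flatten unnests it.
fn parse_old(s: Option<String>) -> Option<u64> {
    s.map(|s| if s.is_empty() { None } else { s.parse().ok() })
        .flatten()
}

// After: and_then goes straight from Option<String> to Option<u64>.
fn parse_new(s: Option<String>) -> Option<u64> {
    s.and_then(|s| if s.is_empty() { None } else { s.parse().ok() })
}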
- pub fn evaluate_sortition) -> ()>( + pub fn evaluate_sortition)>( &mut self, mainnet: bool, burn_header: &BurnchainBlockHeader, @@ -4536,8 +4534,8 @@ impl SortitionDB { burn_block_height: chain_tip.block_height, burn_block_hash: chain_tip.burn_header_hash, burn_stable_block_height: stable_block_height, - burn_stable_block_hash: burn_stable_block_hash, - last_burn_block_hashes: last_burn_block_hashes, + burn_stable_block_hash, + last_burn_block_hashes, rc_consensus_hash: chain_tip.canonical_stacks_tip_consensus_hash, }) } @@ -4593,10 +4591,10 @@ impl SortitionDB { // remove the first entry -- it's always `n` based on the way we construct it, while the // heaviest affirmation map just has nothing. - if am.len() > 0 { - Ok(AffirmationMap::new(am.as_slice()[1..].to_vec())) - } else { + if am.is_empty() { Ok(AffirmationMap::empty()) + } else { + Ok(AffirmationMap::new(am.as_slice()[1..].to_vec())) } } @@ -4719,7 +4717,7 @@ impl SortitionDB { } /// DO NOT CALL during Stacks block processing (including during Clarity VM evaluation). This function returns the latest data known to the node, which may not have been at the time of original block assembly. - pub fn index_handle_at_tip<'a>(&'a self) -> SortitionHandleConn<'a> { + pub fn index_handle_at_tip(&self) -> SortitionHandleConn<'_> { let sortition_id = SortitionDB::get_canonical_sortition_tip(self.conn()).unwrap(); self.index_handle(&sortition_id) } @@ -4737,7 +4735,7 @@ impl SortitionDB { /// Open a tx handle at the burn chain tip /// DO NOT CALL during Stacks block processing (including during Clarity VM evaluation). This function returns the latest data known to the node, which may not have been at the time of original block assembly. - pub fn tx_begin_at_tip<'a>(&'a mut self) -> SortitionHandleTx<'a> { + pub fn tx_begin_at_tip(&mut self) -> SortitionHandleTx<'_> { let sortition_id = SortitionDB::get_canonical_sortition_tip(self.conn()).unwrap(); self.tx_handle_begin(&sortition_id).unwrap() } @@ -5238,8 +5236,8 @@ impl SortitionDB { pub fn merge_block_header_cache( cache: &mut BlockHeaderCache, header_data: &Vec<(ConsensusHash, Option)>, - ) -> () { - if header_data.len() > 0 { + ) { + if !header_data.is_empty() { let mut i = header_data.len() - 1; while i > 0 { let cur_consensus_hash = &header_data[i].0; @@ -5394,7 +5392,7 @@ impl SortitionDB { } } -impl<'a> SortitionHandleTx<'a> { +impl SortitionHandleTx<'_> { /// Append a snapshot to a chain tip, and update various chain tip statistics. /// Returns the new state root of this fork. /// `initialize_bonus` - if Some(..), then this snapshot is the first mined snapshot, @@ -5881,7 +5879,7 @@ impl<'a> SortitionHandleTx<'a> { "SELECT 1 FROM snapshots WHERE burn_header_hash = ?1 AND pox_valid = 1 LIMIT 1", &[&snapshot.burn_header_hash], )?; - if all_valid_sortitions.len() > 0 { + if !all_valid_sortitions.is_empty() { error!("FATAL: Tried to insert snapshot {:?}, but already have pox-valid sortition for {:?}", &snapshot, &snapshot.burn_header_hash); panic!(); } @@ -6118,7 +6116,10 @@ impl<'a> SortitionHandleTx<'a> { if let Some(mut reward_set) = reward_info.known_selected_anchor_block_owned() { // record payouts separately from the remaining addresses, since some of them // could have just been consumed. 
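Dropping `-> ()` from `evaluate_sortition`'s `FnMut` bounds and from plain function signatures here follows Clippy's `unused_unit` lint: the unit return type is implied, in trait bounds as well as in signatures. A two-line sketch (invented names):

// Before: fn run_old<F: FnMut(u64) -> ()>(mut f: F) -> () { f(1); }
// After: the units are dropped with no change in meaning.
fn run_new<F: FnMut(u64)>(mut f: F) {
    f(1);
}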
- if reward_set.rewarded_addresses.len() > 0 { + if reward_set.rewarded_addresses.is_empty() { + // no payouts + pox_payout_addrs = vec![]; + } else { // if we have a reward set, then we must also have produced a recipient // info for this block let mut recipients_to_remove: Vec<_> = recipient_info @@ -6136,9 +6137,6 @@ impl<'a> SortitionHandleTx<'a> { "BUG: Attempted to remove used address from reward set, but failed to do so safely"); } pox_payout_addrs = addrs; - } else { - // no payouts - pox_payout_addrs = vec![]; } keys.push(db_keys::pox_reward_set_size().to_string()); @@ -6321,7 +6319,7 @@ impl<'a> SortitionHandleTx<'a> { } } - if tied.len() == 0 { + if tied.is_empty() { return None; } if tied.len() == 1 { @@ -6608,7 +6606,7 @@ pub mod tests { use crate::core::{StacksEpochExtension, *}; use crate::util_lib::db::Error as db_error; - impl<'a> SortitionHandleTx<'a> { + impl SortitionHandleTx<'_> { /// Update the canonical Stacks tip (testing only) pub fn test_update_canonical_stacks_tip( &mut self, @@ -7066,14 +7064,14 @@ pub mod tests { .unwrap(), ) .unwrap(), - memo: vec![01, 02, 03, 04, 05], + memo: vec![1, 2, 3, 4, 5], txid: Txid::from_bytes_be( &hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562") .unwrap(), ) .unwrap(), - vtxindex: vtxindex, + vtxindex, block_height: block_height + 1, burn_header_hash: BurnchainHeaderHash([0x01; 32]), }; @@ -7145,14 +7143,14 @@ pub mod tests { .unwrap(), ) .unwrap(), - memo: vec![01, 02, 03, 04, 05], + memo: vec![1, 2, 3, 4, 5], txid: Txid::from_bytes_be( &hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562") .unwrap(), ) .unwrap(), - vtxindex: vtxindex, + vtxindex, block_height: block_height + 1, burn_header_hash: BurnchainHeaderHash([0x01; 32]), }; @@ -7192,7 +7190,7 @@ pub mod tests { .unwrap(), ) .unwrap(), - vtxindex: vtxindex, + vtxindex, block_height: block_height + 2, burn_parent_modulus: ((block_height + 1) % BURN_BLOCK_MINED_AT_MODULUS) as u8, burn_header_hash: BurnchainHeaderHash([0x03; 32]), @@ -7369,14 +7367,14 @@ pub mod tests { ) .unwrap(), public_key: public_key.clone(), - memo: vec![01, 02, 03, 04, 05], + memo: vec![1, 2, 3, 4, 5], txid: Txid::from_bytes_be( &hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562") .unwrap(), ) .unwrap(), - vtxindex: vtxindex, + vtxindex, block_height: block_height + 2, burn_header_hash: BurnchainHeaderHash([0x03; 32]), }; @@ -7422,7 +7420,7 @@ pub mod tests { for i in 0..255 { let sortition_id = SortitionId([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, i as u8, + 0, 0, 0, 0, 0, i, ]); let parent_sortition_id = if i == 0 { last_snapshot.sortition_id.clone() @@ -7459,7 +7457,7 @@ pub mod tests { 0, 0, 0, - i - 1 as u8, + i - 1, ]) }; @@ -7471,7 +7469,7 @@ pub mod tests { burn_header_timestamp: get_epoch_time_secs(), burn_header_hash: BurnchainHeaderHash::from_bytes(&[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, i as u8, + 0, 0, 0, 0, 0, 0, i, ]) .unwrap(), sortition_id, @@ -7508,7 +7506,7 @@ pub mod tests { 0, 0, 0, - (if i == 0 { 0xff } else { i - 1 }) as u8, + (if i == 0 { 0xff } else { i - 1 }), ]) .unwrap(), consensus_hash: ConsensusHash::from_bytes(&[ @@ -7531,12 +7529,12 @@ pub mod tests { 0, 0, 0, - (i + 1) as u8, + i + 1, ]) .unwrap(), ops_hash: OpsHash::from_bytes(&[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, i as u8, + 0, 0, 0, 0, 0, 0, i, ]) .unwrap(), total_burn: i 
as u64, @@ -7717,7 +7715,7 @@ pub mod tests { let snapshot_row = BlockSnapshot { accumulated_coinbase_ustx: 0, pox_valid: true, - block_height: i as u64 + 1, + block_height: i + 1, burn_header_timestamp: get_epoch_time_secs(), burn_header_hash: BurnchainHeaderHash::from_bytes(&[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -7789,7 +7787,7 @@ pub mod tests { 0, 0, 0, 0, 0, 0, i as u8, ]) .unwrap(), - total_burn: i as u64, + total_burn: i, sortition: true, sortition_hash: SortitionHash::initial(), winning_block_txid: Txid::from_hex( @@ -7801,7 +7799,7 @@ pub mod tests { ) .unwrap(), index_root: TrieHash::from_empty_data(), - num_sortitions: i as u64 + 1, + num_sortitions: i + 1, stacks_block_accepted: false, stacks_block_height: 0, arrival_index: 0, @@ -7824,7 +7822,7 @@ pub mod tests { last_snapshot = snapshot_row; last_snapshot.index_root = index_root; // should succeed within the tx - let ch = tx.get_consensus_at(i as u64 + 1).unwrap().unwrap(); + let ch = tx.get_consensus_at(i + 1).unwrap().unwrap(); assert_eq!(ch, last_snapshot.consensus_hash); tx.commit().unwrap(); @@ -7864,14 +7862,14 @@ pub mod tests { .unwrap(), ) .unwrap(), - memo: vec![01, 02, 03, 04, 05], + memo: vec![1, 2, 3, 4, 5], txid: Txid::from_bytes_be( &hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562") .unwrap(), ) .unwrap(), - vtxindex: vtxindex, + vtxindex, block_height: block_height + 1, burn_header_hash: BurnchainHeaderHash([0x01; 32]), }; @@ -7911,7 +7909,7 @@ pub mod tests { .unwrap(), ) .unwrap(), - vtxindex: vtxindex, + vtxindex, block_height: block_height + 2, burn_parent_modulus: ((block_height + 1) % BURN_BLOCK_MINED_AT_MODULUS) as u8, burn_header_hash: BurnchainHeaderHash([0x03; 32]), @@ -7995,7 +7993,7 @@ pub mod tests { let mut snapshot_with_sortition = BlockSnapshot { accumulated_coinbase_ustx: 0, pox_valid: true, - block_height: block_height, + block_height, burn_header_timestamp: get_epoch_time_secs(), burn_header_hash: BurnchainHeaderHash::from_bytes(&[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -8719,7 +8717,7 @@ pub mod tests { 0, 0, 0, 0, 0, 0, 0, i as u8, ]) .unwrap(), - total_burn: total_burn, + total_burn, sortition: false, sortition_hash: SortitionHash([(i as u8); 32]), winning_block_txid: Txid([(i as u8); 32]), @@ -8796,7 +8794,7 @@ pub mod tests { 0, 0, 0, 0, 0, 0, 0, i as u8, ]) .unwrap(), - total_burn: total_burn, + total_burn, sortition: true, sortition_hash: SortitionHash([(i as u8); 32]), winning_block_txid: Txid([(i as u8); 32]), @@ -10080,14 +10078,14 @@ pub mod tests { .unwrap(), ) .unwrap(), - memo: vec![01, 02, 03, 04, 05], + memo: vec![1, 2, 3, 4, 5], txid: Txid::from_bytes_be( &hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562") .unwrap(), ) .unwrap(), - vtxindex: vtxindex, + vtxindex, block_height: block_height + 1, burn_header_hash: BurnchainHeaderHash([0x01; 32]), }; @@ -10635,10 +10633,10 @@ pub mod tests { .map(|op| BlockstackOperationType::LeaderBlockCommit(op.clone())) }) .collect(); - let winner = if commit_set.len() > 0 { - commit_set[0].clone() - } else { + let winner = if commit_set.is_empty() { None + } else { + commit_set[0].clone() }; let burn_header_hash = headers[i + 1].block_hash.clone(); let burn_block_height = headers[i + 1].block_height; diff --git a/stackslib/src/chainstate/burn/distribution.rs b/stackslib/src/chainstate/burn/distribution.rs index 59c335cd58..0d94c7e78d 100644 --- a/stackslib/src/chainstate/burn/distribution.rs +++ 
b/stackslib/src/chainstate/burn/distribution.rs @@ -365,8 +365,8 @@ impl BurnSamplePoint { /// Calculate the ranges between 0 and 2**256 - 1 over which each point in the burn sample /// applies, so we can later select which block to use. - fn make_sortition_ranges(burn_sample: &mut Vec) -> () { - if burn_sample.len() == 0 { + fn make_sortition_ranges(burn_sample: &mut Vec) { + if burn_sample.is_empty() { // empty sample return; } @@ -818,7 +818,7 @@ mod tests { .unwrap(), ) .unwrap(), - memo: vec![01, 02, 03, 04, 05], + memo: vec![1, 2, 3, 4, 5], txid: Txid::from_bytes_be( &hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562") @@ -843,7 +843,7 @@ mod tests { .unwrap(), ) .unwrap(), - memo: vec![01, 02, 03, 04, 05], + memo: vec![1, 2, 3, 4, 5], txid: Txid::from_bytes_be( &hex_bytes("9410df84e2b440055c33acb075a0687752df63fe8fe84aeec61abe469f0448c7") @@ -868,7 +868,7 @@ mod tests { .unwrap(), ) .unwrap(), - memo: vec![01, 02, 03, 04, 05], + memo: vec![1, 2, 3, 4, 5], txid: Txid::from_bytes_be( &hex_bytes("eb54704f71d4a2d1128d60ffccced547054b52250ada6f3e7356165714f44d4c") diff --git a/stackslib/src/chainstate/burn/mod.rs b/stackslib/src/chainstate/burn/mod.rs index 4552210f44..4156375a5a 100644 --- a/stackslib/src/chainstate/burn/mod.rs +++ b/stackslib/src/chainstate/burn/mod.rs @@ -62,13 +62,13 @@ impl_byte_array_newtype!(SortitionHash, u8, 32); #[derive(Debug, Clone, PartialEq)] #[repr(u8)] pub enum Opcodes { - LeaderBlockCommit = '[' as u8, - LeaderKeyRegister = '^' as u8, - StackStx = 'x' as u8, - PreStx = 'p' as u8, - TransferStx = '$' as u8, - DelegateStx = '#' as u8, - VoteForAggregateKey = 'v' as u8, + LeaderBlockCommit = b'[', + LeaderKeyRegister = b'^', + StackStx = b'x', + PreStx = b'p', + TransferStx = b'$', + DelegateStx = b'#', + VoteForAggregateKey = b'v', } // a burnchain block snapshot @@ -183,17 +183,17 @@ impl SortitionHash { } impl Opcodes { - const HTTP_BLOCK_COMMIT: &'static str = "block_commit"; - const HTTP_KEY_REGISTER: &'static str = "key_register"; - const HTTP_BURN_SUPPORT: &'static str = "burn_support"; - const HTTP_STACK_STX: &'static str = "stack_stx"; - const HTTP_PRE_STX: &'static str = "pre_stx"; - const HTTP_TRANSFER_STX: &'static str = "transfer_stx"; - const HTTP_DELEGATE_STX: &'static str = "delegate_stx"; - const HTTP_PEG_IN: &'static str = "peg_in"; - const HTTP_PEG_OUT_REQUEST: &'static str = "peg_out_request"; - const HTTP_PEG_OUT_FULFILL: &'static str = "peg_out_fulfill"; - const HTTP_VOTE_FOR_AGGREGATE_KEY: &'static str = "vote_for_aggregate_key"; + const HTTP_BLOCK_COMMIT: &str = "block_commit"; + const HTTP_KEY_REGISTER: &str = "key_register"; + const HTTP_BURN_SUPPORT: &str = "burn_support"; + const HTTP_STACK_STX: &str = "stack_stx"; + const HTTP_PRE_STX: &str = "pre_stx"; + const HTTP_TRANSFER_STX: &str = "transfer_stx"; + const HTTP_DELEGATE_STX: &str = "delegate_stx"; + const HTTP_PEG_IN: &str = "peg_in"; + const HTTP_PEG_OUT_REQUEST: &str = "peg_out_request"; + const HTTP_PEG_OUT_FULFILL: &str = "peg_out_fulfill"; + const HTTP_VOTE_FOR_AGGREGATE_KEY: &str = "vote_for_aggregate_key"; pub fn to_http_str(&self) -> &'static str { match self { @@ -350,8 +350,8 @@ impl ConsensusHashExtensions for ConsensusHash { ) -> Result, db_error> { let mut i = 0; let mut prev_chs = vec![]; - while i < 64 && block_height - (((1 as u64) << i) - 1) >= first_block_height { - let prev_block: u64 = block_height - (((1 as u64) << i) - 1); + while i < 64 && block_height - ((1 << i) - 1) >= first_block_height { + let prev_block: u64 = 
block_height - ((1 << i) - 1); let prev_ch = sort_tx .get_consensus_at(prev_block) .unwrap_or_else(|_| { @@ -366,7 +366,7 @@ impl ConsensusHashExtensions for ConsensusHash { prev_chs.push(prev_ch.clone()); i += 1; - if block_height < (((1 as u64) << i) - 1) { + if block_height < ((1 << i) - 1) { break; } } diff --git a/stackslib/src/chainstate/burn/operations/delegate_stx.rs b/stackslib/src/chainstate/burn/operations/delegate_stx.rs index 130a42784b..ad5c268878 100644 --- a/stackslib/src/chainstate/burn/operations/delegate_stx.rs +++ b/stackslib/src/chainstate/burn/operations/delegate_stx.rs @@ -136,7 +136,7 @@ impl DelegateStxOp { return Err(op_error::InvalidInput); } - if outputs.len() == 0 { + if outputs.is_empty() { warn!( "Invalid tx: inputs: {}, outputs: {}", tx.num_signers(), @@ -230,24 +230,24 @@ impl StacksMessageCodec for DelegateStxOp { .map_err(|e| codec_error::WriteError(e))?; if let Some((index, _)) = self.reward_addr { - fd.write_all(&(1 as u8).to_be_bytes()) + fd.write_all(&1_u8.to_be_bytes()) .map_err(|e| codec_error::WriteError(e))?; fd.write_all(&index.to_be_bytes()) .map_err(|e| codec_error::WriteError(e))?; } else { - fd.write_all(&(0 as u8).to_be_bytes()) + fd.write_all(&0_u8.to_be_bytes()) .map_err(|e| codec_error::WriteError(e))?; - fd.write_all(&(0 as u32).to_be_bytes()) + fd.write_all(&0_u32.to_be_bytes()) .map_err(|e| codec_error::WriteError(e))?; } if let Some(height) = self.until_burn_height { - fd.write_all(&(1 as u8).to_be_bytes()) + fd.write_all(&1_u8.to_be_bytes()) .map_err(|e| codec_error::WriteError(e))?; fd.write_all(&height.to_be_bytes()) .map_err(|e| codec_error::WriteError(e))?; } else { - fd.write_all(&(0 as u8).to_be_bytes()) + fd.write_all(&0_u8.to_be_bytes()) .map_err(|e| codec_error::WriteError(e))?; } Ok(()) diff --git a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs index a752131668..136e4d4a75 100644 --- a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs +++ b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs @@ -102,7 +102,7 @@ impl LeaderBlockCommitOp { ) -> LeaderBlockCommitOp { LeaderBlockCommitOp { sunset_burn: 0, - block_height: block_height, + block_height, burn_parent_modulus: if block_height > 0 { u8::try_from((block_height - 1) % BURN_BLOCK_MINED_AT_MODULUS) .expect("FATAL: unreachable: unable to form u8 from 3-bit number") @@ -117,7 +117,7 @@ impl LeaderBlockCommitOp { parent_block_ptr: 0, parent_vtxindex: 0, memo: vec![0x00], - burn_fee: burn_fee, + burn_fee, input: input.clone(), block_header_hash: block_header_hash.clone(), commit_outs: vec![], @@ -147,12 +147,12 @@ impl LeaderBlockCommitOp { LeaderBlockCommitOp { sunset_burn: 0, new_seed: new_seed.clone(), - key_block_ptr: key_block_ptr, - key_vtxindex: key_vtxindex, + key_block_ptr, + key_vtxindex, parent_block_ptr: parent_block_height, - parent_vtxindex: parent_vtxindex, + parent_vtxindex, memo: vec![], - burn_fee: burn_fee, + burn_fee, input: input.clone(), block_header_hash: block_header_hash.clone(), commit_outs: vec![], @@ -293,7 +293,7 @@ impl LeaderBlockCommitOp { return Err(op_error::InvalidInput); } - if outputs.len() == 0 { + if outputs.is_empty() { warn!( "Invalid tx: inputs: {}, outputs: {}", tx.num_signers(), @@ -458,7 +458,7 @@ impl LeaderBlockCommitOp { treatment: Vec::new(), txid: tx.txid(), vtxindex: tx.vtxindex(), - block_height: block_height, + block_height, burn_header_hash: block_hash.clone(), }) } @@ -832,7 +832,7 @@ impl LeaderBlockCommitOp { 
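The literal cleanups clustered here, `'$' as u8` to `b'$'` in the `Opcodes` enum, `(1 as u64) << i` to `1 << i`, `(1 as u8).to_be_bytes()` to `1_u8.to_be_bytes()`, and `vec![01, 02, ...]` to `vec![1, 2, ...]`, all come from Clippy's literal lints (`char_lit_as_u8`, `unnecessary_cast`, `zero_prefixed_literal`): byte literals, typed literal suffixes, and plain decimal digits say the same thing without a cast or a misleading leading zero. A combined sketch:

fn main() {
    // Byte literal instead of a char cast.
    let opcode: u8 = b'$';
    assert_eq!(opcode, '$' as u8);

    // Suffix types the literal; no cast needed.
    let shifted = 1_u64 << 10;
    assert_eq!(shifted, (1 as u64) << 10);

    // Leading zeros read like octal; plain digits are unambiguous.
    let memo: Vec<u8> = vec![1, 2, 3, 4, 5];
    assert_eq!(memo.len(), 5);
}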
/// Check the epoch marker in a block-commit to make sure it matches the right epoch. /// Valid in Stacks 2.05+ fn check_epoch_commit_marker(&self, marker: u8) -> Result<(), op_error> { - if self.memo.len() < 1 { + if self.memo.is_empty() { debug!( "Invalid block commit"; "reason" => "no epoch marker byte given", @@ -860,7 +860,7 @@ impl LeaderBlockCommitOp { } StacksEpochId::Epoch20 => { // no-op, but log for helping node operators watch for old nodes - if self.memo.len() < 1 { + if self.memo.is_empty() { debug!( "Soon-to-be-invalid block commit"; "reason" => "no epoch marker byte given", @@ -1777,10 +1777,10 @@ mod tests { apparent_sender: BurnchainSigner("mgbpit8FvkVJ9kuXY8QSM5P7eibnhcEMBk".to_string()), txid: Txid::from_hex("502f3e5756de7e1bdba8c713cd2daab44adb5337d14ff668fdc57cc27d67f0d4").unwrap(), - vtxindex: vtxindex, - block_height: block_height, + vtxindex, + block_height, burn_parent_modulus: ((block_height - 1) % BURN_BLOCK_MINED_AT_MODULUS) as u8, - burn_header_hash: burn_header_hash, + burn_header_hash, treatment: vec![], }) }, OpFixture { @@ -1960,7 +1960,7 @@ mod tests { .unwrap(), ) .unwrap(), - memo: vec![01, 02, 03, 04, 05], + memo: vec![1, 2, 3, 4, 5], txid: Txid::from_bytes_be( &hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562") @@ -1982,7 +1982,7 @@ mod tests { .unwrap(), ) .unwrap(), - memo: vec![01, 02, 03, 04, 05], + memo: vec![1, 2, 3, 4, 5], txid: Txid::from_bytes_be( &hex_bytes("9410df84e2b440055c33acb075a0687752df63fe8fe84aeec61abe469f0448c7") @@ -2070,7 +2070,7 @@ mod tests { block_height: (i + 1 + first_block_height as usize) as u64, burn_header_timestamp: get_epoch_time_secs(), burn_header_hash: block_header_hashes[i].clone(), - sortition_id: SortitionId(block_header_hashes[i as usize].0.clone()), + sortition_id: SortitionId(block_header_hashes[i].0.clone()), parent_sortition_id: prev_snapshot.sortition_id.clone(), parent_burn_header_hash: prev_snapshot.burn_header_hash.clone(), consensus_hash: ConsensusHash::from_bytes(&[ @@ -2500,7 +2500,7 @@ mod tests { .unwrap(), ) .unwrap(), - memo: vec![01, 02, 03, 04, 05], + memo: vec![1, 2, 3, 4, 5], txid: Txid::from_bytes_be( &hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562") @@ -2522,7 +2522,7 @@ mod tests { .unwrap(), ) .unwrap(), - memo: vec![01, 02, 03, 04, 05], + memo: vec![1, 2, 3, 4, 5], txid: Txid::from_bytes_be( &hex_bytes("9410df84e2b440055c33acb075a0687752df63fe8fe84aeec61abe469f0448c7") @@ -2605,7 +2605,7 @@ mod tests { block_height: (i + 1 + first_block_height as usize) as u64, burn_header_timestamp: get_epoch_time_secs(), burn_header_hash: block_header_hashes[i].clone(), - sortition_id: SortitionId(block_header_hashes[i as usize].0.clone()), + sortition_id: SortitionId(block_header_hashes[i].0.clone()), parent_sortition_id: prev_snapshot.sortition_id.clone(), parent_burn_header_hash: prev_snapshot.burn_header_hash.clone(), consensus_hash: ConsensusHash::from_bytes(&[ @@ -3558,7 +3558,7 @@ mod tests { .unwrap(), ) .unwrap(), - memo: vec![01, 02, 03, 04, 05], + memo: vec![1, 2, 3, 4, 5], txid: Txid([0x01; 32]), vtxindex: 456, block_height: first_block_height + 1, diff --git a/stackslib/src/chainstate/burn/operations/leader_key_register.rs b/stackslib/src/chainstate/burn/operations/leader_key_register.rs index 5608b6739d..883ae5209a 100644 --- a/stackslib/src/chainstate/burn/operations/leader_key_register.rs +++ b/stackslib/src/chainstate/burn/operations/leader_key_register.rs @@ -72,7 +72,7 @@ impl LeaderKeyRegisterOp { /// Interpret the first 20 
bytes of the key registration's memo field as the Hash160 of /// of the public key that will sign this miner's nakamoto blocks. pub fn interpret_nakamoto_signing_key(&self) -> Option { - self.memo.get(0..20).map(Hash160::from_bytes).flatten() + self.memo.get(0..20).and_then(Hash160::from_bytes) } /// Set the miner public key hash160 for block-signing @@ -163,7 +163,7 @@ impl LeaderKeyRegisterOp { txid: tx.txid(), vtxindex: tx.vtxindex(), - block_height: block_height, + block_height, burn_header_hash: block_hash.clone(), }) } @@ -284,11 +284,11 @@ pub mod tests { result: Some(LeaderKeyRegisterOp { consensus_hash: ConsensusHash::from_bytes(&hex_bytes("2222222222222222222222222222222222222222").unwrap()).unwrap(), public_key: VRFPublicKey::from_bytes(&hex_bytes("a366b51292bef4edd64063d9145c617fec373bceb0758e98cd72becd84d54c7a").unwrap()).unwrap(), - memo: vec![01, 02, 03, 04, 05], + memo: vec![1, 2, 3, 4, 5], txid: Txid::from_bytes_be(&hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562").unwrap()).unwrap(), - vtxindex: vtxindex, - block_height: block_height, + vtxindex, + block_height, burn_header_hash: burn_header_hash.clone(), }) }, @@ -301,9 +301,9 @@ pub mod tests { memo: vec![], txid: Txid::from_bytes_be(&hex_bytes("2fbf8d5be32dce49790d203ba59acbb0929d5243413174ff5d26a5c6f23dea65").unwrap()).unwrap(), - vtxindex: vtxindex, - block_height: block_height, - burn_header_hash: burn_header_hash, + vtxindex, + block_height, + burn_header_hash, }) }, OpFixture { @@ -491,7 +491,7 @@ pub mod tests { .unwrap(), ) .unwrap(), - memo: vec![01, 02, 03, 04, 05], + memo: vec![1, 2, 3, 4, 5], txid: Txid::from_bytes_be( &hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562") @@ -627,7 +627,7 @@ pub mod tests { .unwrap(), ) .unwrap(), - memo: vec![01, 02, 03, 04, 05], + memo: vec![1, 2, 3, 4, 5], txid: Txid::from_bytes_be( &hex_bytes( @@ -656,7 +656,7 @@ pub mod tests { .unwrap(), ) .unwrap(), - memo: vec![01, 02, 03, 04, 05], + memo: vec![1, 2, 3, 4, 5], txid: Txid::from_bytes_be( &hex_bytes( diff --git a/stackslib/src/chainstate/burn/operations/stack_stx.rs b/stackslib/src/chainstate/burn/operations/stack_stx.rs index c4c54b9737..5d12c5e67f 100644 --- a/stackslib/src/chainstate/burn/operations/stack_stx.rs +++ b/stackslib/src/chainstate/burn/operations/stack_stx.rs @@ -119,7 +119,7 @@ impl PreStxOp { }; let outputs = tx.get_recipients(); - assert!(outputs.len() > 0); + assert!(!outputs.is_empty()); let output = outputs[0] .as_ref() @@ -145,7 +145,7 @@ impl PreStxOp { } Ok(PreStxOp { - output: output, + output, txid: tx.txid(), vtxindex: tx.vtxindex(), block_height, @@ -317,7 +317,7 @@ impl StackStxOp { })?; let outputs = tx.get_recipients(); - assert!(outputs.len() > 0); + assert!(!outputs.is_empty()); let first_output = outputs[0].as_ref().ok_or_else(|| { warn!("Invalid tx: failed to decode first output"); @@ -689,7 +689,7 @@ mod tests { txid: Txid([0; 32]), vtxindex: 0, opcode: Opcodes::StackStx as u8, - data: data, + data, data_amt: 0, inputs: vec![BitcoinTxInputStructured { keys: vec![], @@ -869,7 +869,7 @@ mod tests { auth_id: Some(0u32), }; let op_bytes = { - let mut bytes = ['T' as u8, '3' as u8].to_vec(); + let mut bytes = [b'T', b'3'].to_vec(); op.consensus_serialize(&mut bytes) .expect("Expected to be able to serialize op into bytes"); bytes diff --git a/stackslib/src/chainstate/burn/operations/transfer_stx.rs b/stackslib/src/chainstate/burn/operations/transfer_stx.rs index 9d1d562d9c..a36849518e 100644 --- 
a/stackslib/src/chainstate/burn/operations/transfer_stx.rs +++ b/stackslib/src/chainstate/burn/operations/transfer_stx.rs @@ -177,7 +177,7 @@ impl TransferStxOp { })?; let outputs = tx.get_recipients(); - assert!(outputs.len() > 0); + assert!(!outputs.is_empty()); let output = outputs[0] .as_ref() diff --git a/stackslib/src/chainstate/burn/operations/vote_for_aggregate_key.rs b/stackslib/src/chainstate/burn/operations/vote_for_aggregate_key.rs index 648859abc6..3e547366cf 100644 --- a/stackslib/src/chainstate/burn/operations/vote_for_aggregate_key.rs +++ b/stackslib/src/chainstate/burn/operations/vote_for_aggregate_key.rs @@ -340,7 +340,7 @@ mod tests { assert_eq!(vote_op.signer_index, signer_index); assert_eq!(&vote_op.aggregate_key, &aggregate_key); - assert_eq!(vote_op.round, round as u32); + assert_eq!(vote_op.round, round); assert_eq!(vote_op.reward_cycle, reward_cycle); } diff --git a/stackslib/src/chainstate/burn/sortition.rs b/stackslib/src/chainstate/burn/sortition.rs index ff71b0cf10..ff253e38e4 100644 --- a/stackslib/src/chainstate/burn/sortition.rs +++ b/stackslib/src/chainstate/burn/sortition.rs @@ -132,7 +132,7 @@ impl BlockSnapshot { VRF_seed: &VRFSeed, sortition_hash: &SortitionHash, ) -> Option { - if dist.len() == 0 { + if dist.is_empty() { // no winners return None; } @@ -254,12 +254,12 @@ impl BlockSnapshot { debug!("SORTITION({}): NO BLOCK CHOSEN", block_height); Ok(BlockSnapshot { - block_height: block_height, + block_height, burn_header_hash: block_hash, burn_header_timestamp: block_header.timestamp, parent_burn_header_hash: parent_block_hash, consensus_hash: ch, - ops_hash: ops_hash, + ops_hash, total_burn: burn_total, sortition: false, sortition_hash: sortition_hash.clone(), @@ -592,7 +592,7 @@ impl BlockSnapshot { ) }; - if state_transition.burn_dist.len() == 0 { + if state_transition.burn_dist.is_empty() { // no burns happened debug!( "No burns happened in block"; @@ -731,8 +731,7 @@ impl BlockSnapshot { winning_block.key_vtxindex.into(), &parent_snapshot.sortition_id, )? 
- .map(|key_op| key_op.interpret_nakamoto_signing_key()) - .flatten(); + .and_then(|key_op| key_op.interpret_nakamoto_signing_key()); Ok(BlockSnapshot { block_height, @@ -1099,18 +1098,18 @@ mod test { for i in 0..100 { let header = BurnchainBlockHeader { block_height: prev_block_header.block_height + 1, - block_hash: BurnchainHeaderHash([i as u8; 32]), + block_hash: BurnchainHeaderHash([i; 32]), parent_block_hash: prev_block_header.block_hash.clone(), num_txs: 0, - timestamp: prev_block_header.timestamp + (i as u64) + 1, + timestamp: prev_block_header.timestamp + u64::from(i) + 1, }; - let sortition_hash = SortitionHash([i as u8; 32]); + let sortition_hash = SortitionHash([i; 32]); let commit_winner = LeaderBlockCommitOp { sunset_burn: 0, - block_header_hash: BlockHeaderHash([i as u8; 32]), - new_seed: VRFSeed([i as u8; 32]), + block_header_hash: BlockHeaderHash([i; 32]), + new_seed: VRFSeed([i; 32]), parent_block_ptr: 0, parent_vtxindex: 0, key_block_ptr: 0, @@ -1120,11 +1119,11 @@ mod test { burn_fee: 100, input: (Txid([0; 32]), 0), - apparent_sender: BurnchainSigner(format!("signer {}", i)), - txid: Txid([i as u8; 32]), + apparent_sender: BurnchainSigner(format!("signer {i}")), + txid: Txid([i; 32]), vtxindex: 0, block_height: header.block_height, - burn_parent_modulus: (i % BURN_BLOCK_MINED_AT_MODULUS) as u8, + burn_parent_modulus: i % BURN_BLOCK_MINED_AT_MODULUS as u8, burn_header_hash: header.block_hash.clone(), treatment: vec![], }; diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index 139a666098..45684a20af 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -198,6 +198,9 @@ pub trait BlockEventDispatcher { } pub struct ChainsCoordinatorConfig { + /// true: assume all anchor blocks are present, and block chain sync until they arrive + /// false: process sortitions in reward cycles without anchor blocks + pub assume_present_anchor_blocks: bool, /// true: use affirmation maps before 2.1 /// false: only use affirmation maps in 2.1 or later pub always_use_affirmation_maps: bool, @@ -209,8 +212,17 @@ pub struct ChainsCoordinatorConfig { impl ChainsCoordinatorConfig { pub fn new() -> ChainsCoordinatorConfig { ChainsCoordinatorConfig { - always_use_affirmation_maps: false, + always_use_affirmation_maps: true, require_affirmed_anchor_blocks: true, + assume_present_anchor_blocks: true, + } + } + + pub fn test_new() -> ChainsCoordinatorConfig { + ChainsCoordinatorConfig { + always_use_affirmation_maps: false, + require_affirmed_anchor_blocks: false, + assume_present_anchor_blocks: false, } } } @@ -314,7 +326,7 @@ impl OnChainRewardSetProvider<'static, DummyEventDispatcher> { } } -impl<'a, T: BlockEventDispatcher> RewardSetProvider for OnChainRewardSetProvider<'a, T> { +impl RewardSetProvider for OnChainRewardSetProvider<'_, T> { fn get_reward_set( &self, cycle_start_burn_height: u64, @@ -382,7 +394,7 @@ impl<'a, T: BlockEventDispatcher> RewardSetProvider for OnChainRewardSetProvider } } -impl<'a, T: BlockEventDispatcher> OnChainRewardSetProvider<'a, T> { +impl OnChainRewardSetProvider<'_, T> { fn get_reward_set_epoch2( &self, // Todo: `current_burn_height` is a misleading name: should be the `cycle_start_burn_height` @@ -622,12 +634,12 @@ impl< } } -impl<'a, T: BlockEventDispatcher, U: RewardSetProvider, B: BurnchainHeaderReader> - ChainsCoordinator<'a, T, (), U, (), (), B> +impl + ChainsCoordinator<'_, T, (), U, (), (), B> { /// Create a coordinator for testing, with some 
parameters defaulted to None #[cfg(test)] - pub fn test_new( + pub fn test_new<'a>( burnchain: &Burnchain, chain_id: u32, path: &str, @@ -647,7 +659,7 @@ impl<'a, T: BlockEventDispatcher, U: RewardSetProvider, B: BurnchainHeaderReader /// Create a coordinator for testing allowing for all configurable params #[cfg(test)] - pub fn test_new_full( + pub fn test_new_full<'a>( burnchain: &Burnchain, chain_id: u32, path: &str, @@ -700,7 +712,7 @@ impl<'a, T: BlockEventDispatcher, U: RewardSetProvider, B: BurnchainHeaderReader notifier: (), atlas_config, atlas_db: Some(atlas_db), - config: ChainsCoordinatorConfig::new(), + config: ChainsCoordinatorConfig::test_new(), burnchain_indexer, refresh_stacker_db: Arc::new(AtomicBool::new(false)), in_nakamoto_epoch: false, @@ -898,7 +910,7 @@ pub fn calculate_paid_rewards(ops: &[BlockstackOperationType]) -> PaidRewards { let mut burn_amt = 0; for op in ops.iter() { if let BlockstackOperationType::LeaderBlockCommit(commit) = op { - if commit.commit_outs.len() == 0 { + if commit.commit_outs.is_empty() { continue; } let amt_per_address = commit.burn_fee / (commit.commit_outs.len() as u64); @@ -1100,14 +1112,13 @@ pub fn static_get_stacks_tip_affirmation_map( } impl< - 'a, T: BlockEventDispatcher, N: CoordinatorNotices, U: RewardSetProvider, CE: CostEstimator + ?Sized, FE: FeeEstimator + ?Sized, B: BurnchainHeaderReader, - > ChainsCoordinator<'a, T, N, U, CE, FE, B> + > ChainsCoordinator<'_, T, N, U, CE, FE, B> { /// Process new Stacks blocks. If we get stuck for want of a missing PoX anchor block, return /// its hash. @@ -2336,6 +2347,20 @@ impl< panic!("BUG: no epoch defined at height {}", header.block_height) }); + if self.config.assume_present_anchor_blocks { + // anchor blocks are always assumed to be present in the chain history, + // so report its absence if we don't have it. + if let PoxAnchorBlockStatus::SelectedAndUnknown(missing_anchor_block, _) = + &rc_info.anchor_status + { + info!( + "Currently missing PoX anchor block {}, which is assumed to be present", + &missing_anchor_block + ); + return Ok(Some(missing_anchor_block.clone())); + } + } + if cur_epoch.epoch_id >= StacksEpochId::Epoch21 || self.config.always_use_affirmation_maps { // potentially have an anchor block, but only process the next reward cycle (and // subsequent reward cycles) with it if the prepare-phase block-commits affirm its @@ -2386,11 +2411,11 @@ impl< // burnchain has not yet advanced to epoch 3.0 return self .handle_new_epoch2_burnchain_block(&mut HashSet::new()) - .and_then(|block_hash_opt| { + .map(|block_hash_opt| { if let Some(block_hash) = block_hash_opt { - Ok(NewBurnchainBlockStatus::WaitForPox2x(block_hash)) + NewBurnchainBlockStatus::WaitForPox2x(block_hash) } else { - Ok(NewBurnchainBlockStatus::Ready) + NewBurnchainBlockStatus::Ready } }); } @@ -2419,12 +2444,12 @@ impl< // proceed to process sortitions in epoch 3.0 self.handle_new_nakamoto_burnchain_block() - .and_then(|can_proceed| { + .map(|can_proceed| { if can_proceed { - Ok(NewBurnchainBlockStatus::Ready) + NewBurnchainBlockStatus::Ready } else { // missing PoX anchor block, but unlike in 2.x, we don't know what it is! 
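Several coordinator hunks here rewrite `.and_then(|x| { ... Ok(y) ... })` into `.map(|x| { ... y ... })`: when every branch of the closure wraps its result in `Ok`, `Result::map` expresses the same thing without the re-wrapping. A standalone illustration of the equivalence, with assumed names rather than the project's types:

fn status(r: Result<Option<u32>, String>) -> Result<&'static str, String> {
    // Equivalent to:
    // r.and_then(|opt| if opt.is_some() { Ok("wait-for-pox") } else { Ok("ready") })
    r.map(|opt| if opt.is_some() { "wait-for-pox" } else { "ready" })
}

fn main() {
    assert_eq!(status(Ok(Some(7))), Ok("wait-for-pox"));
    assert_eq!(status(Ok(None)), Ok("ready"));
    // Errors pass through untouched in both spellings.
    assert_eq!(status(Err("db error".into())), Err("db error".to_string()));
}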
- Ok(NewBurnchainBlockStatus::WaitForPoxNakamoto) + NewBurnchainBlockStatus::WaitForPoxNakamoto } }) } @@ -2747,7 +2772,7 @@ impl< } sortition_db_handle.commit()?; - if unorphan_blocks.len() > 0 { + if !unorphan_blocks.is_empty() { revalidated_stacks_block = true; let ic = self.sortition_db.index_conn(); let mut chainstate_db_tx = self.chain_state_db.db_tx_begin()?; @@ -3078,7 +3103,7 @@ impl< } } - if !found && staging_block_chs.len() > 0 { + if !found && !staging_block_chs.is_empty() { // we have seen this block before, but in a different consensus fork. // queue it for re-processing -- it might still be valid if it's in a reward // cycle that exists on the new PoX fork. diff --git a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs index f203ea5e28..0863708122 100644 --- a/stackslib/src/chainstate/coordinator/tests.rs +++ b/stackslib/src/chainstate/coordinator/tests.rs @@ -991,7 +991,7 @@ fn make_stacks_block_with_input( parent_vtxindex, txid: next_txid(), - vtxindex: (1 + key_index) as u32, + vtxindex: 1 + key_index, block_height: 0, burn_parent_modulus: (BURN_BLOCK_MINED_AT_MODULUS - 1) as u8, burn_header_hash: BurnchainHeaderHash([0; 32]), diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index 1bb5e44192..c6dd44ac39 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -84,7 +84,7 @@ macro_rules! inf_or_debug { }) } -impl<'a, T: BlockEventDispatcher> OnChainRewardSetProvider<'a, T> { +impl OnChainRewardSetProvider<'_, T> { /// Read a reward_set written while updating .signers /// `debug_log` should be set to true if the reward set loading should /// log messages as `debug!` instead of `error!` or `info!`. 
This allows @@ -615,14 +615,13 @@ pub fn get_nakamoto_next_recipients( } impl< - 'a, T: BlockEventDispatcher, N: CoordinatorNotices, U: RewardSetProvider, CE: CostEstimator + ?Sized, FE: FeeEstimator + ?Sized, B: BurnchainHeaderReader, - > ChainsCoordinator<'a, T, N, U, CE, FE, B> + > ChainsCoordinator<'_, T, N, U, CE, FE, B> { /// Get the first nakamoto reward cycle fn get_first_nakamoto_reward_cycle(&self) -> u64 { diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 0525717981..e0b3375452 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -82,7 +82,7 @@ use crate::util_lib::db::{query_rows, u64_to_sql}; use crate::util_lib::signed_structured_data::pox4::Pox4SignatureTopic; use crate::util_lib::strings::StacksString; -impl<'a> NakamotoStagingBlocksConnRef<'a> { +impl NakamotoStagingBlocksConnRef<'_> { pub fn get_blocks_at_height(&self, height: u64) -> Vec { let sql = "SELECT data FROM nakamoto_staging_blocks WHERE height = ?1"; let args = rusqlite::params![&u64_to_sql(height).unwrap()]; @@ -568,7 +568,7 @@ fn test_simple_nakamoto_coordinator_1_tenure_10_blocks() { peer.check_nakamoto_migration(); } -impl<'a> TestPeer<'a> { +impl TestPeer<'_> { pub fn mine_single_block_tenure( &mut self, sender_key: &StacksPrivateKey, @@ -625,7 +625,7 @@ impl<'a> TestPeer<'a> { &mut test_signers, miner_setup, |_miner, chainstate, sortdb, blocks_so_far| { - if blocks_so_far.len() < 1 { + if blocks_so_far.is_empty() { let stx_transfer = make_token_transfer( chainstate, sortdb, @@ -1005,7 +1005,7 @@ fn block_info_tests(use_primary_testnet: bool) { let (last_2x_block_id, last_2x_block_ht) = get_tip_info(&mut peer); peer.mine_tenure(|miner, chainstate, sortdb, blocks_so_far| { - if blocks_so_far.len() > 0 { + if !blocks_so_far.is_empty() { return vec![]; } info!("Producing first nakamoto block, publishing our three contracts"); @@ -2318,9 +2318,9 @@ pub fn simple_nakamoto_coordinator_10_tenures_10_sortitions<'a>() -> TestPeer<'a debug!("{}: {:?}", i, &matured_reward); if i < 10 { - assert_eq!(matured_reward.parent_miner.coinbase, 3600_000_000); + assert_eq!(matured_reward.parent_miner.coinbase, 3_600_000_000); } else { - assert_eq!(matured_reward.parent_miner.coinbase, 1000_000_000); + assert_eq!(matured_reward.parent_miner.coinbase, 1_000_000_000); } if i < 11 { @@ -2353,9 +2353,9 @@ pub fn simple_nakamoto_coordinator_10_tenures_10_sortitions<'a>() -> TestPeer<'a let miner_reward = &matured_reward.latest_miners[0]; if i < 9 { - assert_eq!(miner_reward.coinbase, 3600_000_000); + assert_eq!(miner_reward.coinbase, 3_600_000_000); } else { - assert_eq!(miner_reward.coinbase, 1000_000_000); + assert_eq!(miner_reward.coinbase, 1_000_000_000); } if i < 10 { // epoch2 @@ -3243,7 +3243,7 @@ fn test_stacks_on_burnchain_ops() { until_burn_height: None, // mocked - txid: Txid([i as u8; 32]), + txid: Txid([i; 32]), vtxindex: 11, block_height: block_height + 1, burn_header_hash: BurnchainHeaderHash([0x00; 32]), @@ -3263,7 +3263,7 @@ fn test_stacks_on_burnchain_ops() { auth_id: Some(i as u32), // mocked - txid: Txid([(i as u8) | 0x80; 32]), + txid: Txid([i | 0x80; 32]), vtxindex: 12, block_height: block_height + 1, burn_header_hash: BurnchainHeaderHash([0x00; 32]), @@ -3275,7 +3275,7 @@ fn test_stacks_on_burnchain_ops() { memo: vec![0x2], // mocked - txid: Txid([(i as u8) | 0x40; 32]), + txid: Txid([i | 0x40; 32]), vtxindex: 13, block_height: block_height + 1, 
burn_header_hash: BurnchainHeaderHash([0x00; 32]), @@ -3294,7 +3294,7 @@ fn test_stacks_on_burnchain_ops() { )), // mocked - txid: Txid([(i as u8) | 0xc0; 32]), + txid: Txid([i | 0xc0; 32]), vtxindex: 14, block_height: block_height + 1, burn_header_hash: BurnchainHeaderHash([0x00; 32]), @@ -3307,7 +3307,7 @@ fn test_stacks_on_burnchain_ops() { let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); let vrf_proof = peer.make_nakamoto_vrf_proof(miner_key); - bitpatterns.insert(consensus_hash.clone(), i as u8); + bitpatterns.insert(consensus_hash.clone(), i); tenure_change.tenure_consensus_hash = consensus_hash.clone(); tenure_change.burn_view_consensus_hash = consensus_hash.clone(); @@ -3337,11 +3337,11 @@ fn test_stacks_on_burnchain_ops() { .unwrap(); let mut expected_burnchain_txids = HashSet::new(); - for j in (i as u64).saturating_sub(6)..i { - expected_burnchain_txids.insert(Txid([j as u8; 32])); - expected_burnchain_txids.insert(Txid([(j as u8) | 0x80; 32])); - expected_burnchain_txids.insert(Txid([(j as u8) | 0x40; 32])); - expected_burnchain_txids.insert(Txid([(j as u8) | 0xc0; 32])); + for j in i.saturating_sub(6)..i { + expected_burnchain_txids.insert(Txid([j; 32])); + expected_burnchain_txids.insert(Txid([j | 0x80; 32])); + expected_burnchain_txids.insert(Txid([j | 0x40; 32])); + expected_burnchain_txids.insert(Txid([j | 0xc0; 32])); } assert_eq!(processed_burnchain_txids, expected_burnchain_txids); @@ -3441,7 +3441,7 @@ fn test_stacks_on_burnchain_ops() { sort_tip.consensus_hash ); assert!(last_block.header.consensus_hash == sort_tip.consensus_hash); - assert_eq!(highest_tenure.coinbase_height, 12 + i); + assert_eq!(highest_tenure.coinbase_height, 12 + u64::from(i)); assert_eq!(highest_tenure.cause, TenureChangeCause::Extended); assert_eq!( highest_tenure.num_blocks_confirmed, diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index 68cdb2454a..ab8b53ddcc 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -86,7 +86,7 @@ use crate::util_lib::boot::boot_code_id; use crate::util_lib::db::Error as DBError; /// Nakamaoto tenure information -#[derive(Debug)] +#[derive(Debug, Default)] pub struct NakamotoTenureInfo { /// Coinbase tx, if this is a new tenure pub coinbase_tx: Option, @@ -98,8 +98,8 @@ impl NakamotoTenureInfo { pub fn cause(&self) -> Option { self.tenure_change_tx .as_ref() - .map(|tx| tx.try_as_tenure_change().map(|payload| payload.cause)) - .flatten() + .map(|tx| tx.try_as_tenure_change())? + .map(|payload| payload.cause) } pub fn tenure_change_tx(&self) -> Option<&StacksTransaction> { @@ -464,7 +464,7 @@ impl NakamotoBlockBuilder { /// Returns the unsigned Nakamoto block fn finalize_block(&mut self, clarity_tx: &mut ClarityTx) -> NakamotoBlock { // done! 
Calculate state root and tx merkle root - let txid_vecs = self + let txid_vecs: Vec<_> = self .txs .iter() .map(|tx| tx.txid().as_bytes().to_vec()) @@ -596,7 +596,7 @@ impl NakamotoBlockBuilder { tenure_info.coinbase_tx.clone(), ] .into_iter() - .filter_map(|x| x) + .flatten() .collect(); // TODO: update this mempool check to prioritize signer vote transactions over other transactions diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index adf9dddc0e..a08968beed 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -73,7 +73,8 @@ use super::stacks::db::{ use super::stacks::events::{StacksTransactionReceipt, TransactionOrigin}; use super::stacks::{ Error as ChainstateError, StacksBlock, StacksBlockHeader, StacksMicroblock, StacksTransaction, - TenureChangeError, TenureChangePayload, TransactionPayload, + TenureChangeError, TenureChangePayload, TokenTransferMemo, TransactionPayload, + TransactionVersion, }; use crate::burnchains::{Burnchain, PoxConstants, Txid}; use crate::chainstate::burn::db::sortdb::SortitionDB; @@ -108,8 +109,7 @@ use crate::core::{ }; use crate::net::stackerdb::{StackerDBConfig, MINER_SLOT_COUNT}; use crate::net::Error as net_error; -use crate::util_lib::boot; -use crate::util_lib::boot::boot_code_id; +use crate::util_lib::boot::{self, boot_code_addr, boot_code_id, boot_code_tx_auth}; use crate::util_lib::db::{ query_int, query_row, query_row_columns, query_row_panic, query_rows, sqlite_open, tx_begin_immediate, u64_to_sql, DBConn, Error as DBError, FromRow, @@ -347,8 +347,7 @@ pub trait StacksDBIndexed { tip, &nakamoto_keys::ongoing_tenure_coinbase_height(coinbase_height), )? - .map(|id_str| nakamoto_keys::parse_block_id(&id_str)) - .flatten()) + .and_then(|id_str| nakamoto_keys::parse_block_id(&id_str))) } /// Get the first block in the tenure for a given tenure ID consensus hash in the fork @@ -363,8 +362,7 @@ pub trait StacksDBIndexed { tip, &nakamoto_keys::tenure_start_block_id(tenure_id_consensus_hash), )? - .map(|id_str| nakamoto_keys::parse_block_id(&id_str)) - .flatten()) + .and_then(|id_str| nakamoto_keys::parse_block_id(&id_str))) } /// Get the coinbase height of a tenure (identified by its consensus hash) in a fork identified @@ -379,8 +377,7 @@ pub trait StacksDBIndexed { tip, &nakamoto_keys::coinbase_height(tenure_id_consensus_hash), )? - .map(|height_str| nakamoto_keys::parse_u64(&height_str)) - .flatten()) + .and_then(|height_str| nakamoto_keys::parse_u64(&height_str))) } /// Get the ongoing tenure ID in the fork identified by `tip` @@ -390,8 +387,7 @@ pub trait StacksDBIndexed { ) -> Result, DBError> { Ok(self .get(tip, nakamoto_keys::ongoing_tenure_id())? - .map(|id_str| nakamoto_keys::parse_tenure_id_value(&id_str)) - .flatten()) + .and_then(|id_str| nakamoto_keys::parse_tenure_id_value(&id_str))) } /// Get the highest block ID in a tenure identified by its consensus hash in the Stacks fork @@ -406,8 +402,7 @@ pub trait StacksDBIndexed { tip, &nakamoto_keys::highest_block_in_tenure(tenure_id_consensus_hash), )? - .map(|id_str| nakamoto_keys::parse_block_id(&id_str)) - .flatten()) + .and_then(|id_str| nakamoto_keys::parse_block_id(&id_str))) } /// Get the block-found tenure ID for a given tenure's consensus hash (if defined) in a given @@ -422,8 +417,7 @@ pub trait StacksDBIndexed { tip, &nakamoto_keys::block_found_tenure_id(tenure_id_consensus_hash), )? 
- .map(|id_str| nakamoto_keys::parse_tenure_id_value(&id_str)) - .flatten()) + .and_then(|id_str| nakamoto_keys::parse_tenure_id_value(&id_str))) } /// Determine if a tenure, identified by its consensus hash, has finished in a fork identified @@ -475,8 +469,7 @@ pub trait StacksDBIndexed { tip, &nakamoto_keys::parent_tenure_consensus_hash(tenure_id_consensus_hash), )? - .map(|ch_str| nakamoto_keys::parse_consensus_hash(&ch_str)) - .flatten()) + .and_then(|ch_str| nakamoto_keys::parse_consensus_hash(&ch_str))) } } @@ -1082,23 +1075,21 @@ impl NakamotoBlock { /// Get the VRF proof from this block. /// It's Some(..) only if there's a coinbase pub fn get_vrf_proof(&self) -> Option<&VRFProof> { - self.get_coinbase_tx() - .map(|coinbase_tx| { - if let TransactionPayload::Coinbase(_, _, vrf_proof) = &coinbase_tx.payload { - vrf_proof.as_ref() - } else { - // actually unreachable - None - } - }) - .flatten() + self.get_coinbase_tx().and_then(|coinbase_tx| { + if let TransactionPayload::Coinbase(_, _, vrf_proof) = &coinbase_tx.payload { + vrf_proof.as_ref() + } else { + // actually unreachable + None + } + }) } /// Try to get the first transaction in the block as a tenure-change /// Return Some(tenure-change-payload) if it's a tenure change /// Return None if not pub fn try_get_tenure_change_payload(&self) -> Option<&TenureChangePayload> { - if self.txs.len() == 0 { + if self.txs.is_empty() { return None; } if let TransactionPayload::TenureChange(ref tc) = &self.txs[0].payload { @@ -1145,7 +1136,7 @@ impl NakamotoBlock { }) .collect::>(); - if tenure_change_positions.len() == 0 { + if tenure_change_positions.is_empty() { return Ok(false); } @@ -1246,7 +1237,7 @@ impl NakamotoBlock { }) .collect::>(); - if coinbase_positions.len() == 0 && tenure_change_positions.len() == 0 { + if coinbase_positions.is_empty() && tenure_change_positions.is_empty() { // can't be a first block in a tenure return Ok(false); } @@ -1264,7 +1255,7 @@ impl NakamotoBlock { return Err(()); } - if coinbase_positions.len() == 1 && tenure_change_positions.len() == 0 { + if coinbase_positions.len() == 1 && tenure_change_positions.is_empty() { // coinbase unaccompanied by a tenure change warn!("Invalid block -- have coinbase without tenure change"; "consensus_hash" => %self.header.consensus_hash, @@ -1274,7 +1265,7 @@ impl NakamotoBlock { return Err(()); } - if coinbase_positions.len() == 0 && tenure_change_positions.len() == 1 { + if coinbase_positions.is_empty() && tenure_change_positions.len() == 1 { // this is possibly a block with a tenure-extend transaction. // It must be the first tx if tenure_change_positions[0] != 0 { @@ -1864,11 +1855,11 @@ impl NakamotoChainState { /// /// It returns Err(..) on DB error, or if the child block does not connect to the parent. 
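The `StacksDBIndexed` and `get_vrf_proof` hunks above repeatedly collapse `.map(f).flatten()` into `.and_then(f)`. For `Option`, the two are the same operation whenever `f` itself returns an `Option`; a standalone equivalence check with illustrative names only:

fn parse_u64(s: &str) -> Option<u64> {
    s.parse().ok()
}

fn main() {
    let stored = Some("42".to_string());
    // `map` produces Option<Option<u64>>, which `flatten` collapses...
    let via_map_flatten = stored.clone().map(|s| parse_u64(&s)).flatten();
    // ...while `and_then` does both steps at once.
    let via_and_then = stored.and_then(|s| parse_u64(&s));
    assert_eq!(via_map_flatten, via_and_then);
    assert_eq!(via_and_then, Some(42));
}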
/// The caller should keep calling this until it gets Ok(None) - pub fn process_next_nakamoto_block<'a, T: BlockEventDispatcher>( + pub fn process_next_nakamoto_block( stacks_chain_state: &mut StacksChainState, sort_db: &mut SortitionDB, canonical_sortition_tip: &SortitionId, - dispatcher_opt: Option<&'a T>, + dispatcher_opt: Option<&T>, ) -> Result, ChainstateError> { #[cfg(test)] fault_injection::stall_block_processing(); @@ -2093,7 +2084,8 @@ impl NakamotoChainState { return Err(e); }; - let (receipt, clarity_commit, reward_set_data) = ok_opt.expect("FATAL: unreachable"); + let (mut receipt, clarity_commit, reward_set_data, phantom_unlock_events) = + ok_opt.expect("FATAL: unreachable"); assert_eq!( receipt.header.anchored_header.block_hash(), @@ -2147,6 +2139,20 @@ impl NakamotoChainState { &receipt.header.anchored_header.block_hash() ); + let tx_receipts = &mut receipt.tx_receipts; + if let Some(unlock_receipt) = + // For the event dispatcher, attach any STXMintEvents that + // could not be included in the block (e.g. because the + // block didn't have a Coinbase transaction). + Self::generate_phantom_unlock_tx( + phantom_unlock_events, + &stacks_chain_state.config(), + next_ready_block.header.chain_length, + ) + { + tx_receipts.push(unlock_receipt); + } + // announce the block, if we're connected to an event dispatcher if let Some(dispatcher) = dispatcher_opt { let block_event = ( @@ -2157,7 +2163,7 @@ impl NakamotoChainState { dispatcher.announce_block( &block_event, &receipt.header.clone(), - &receipt.tx_receipts, + &tx_receipts, &parent_block_id, next_ready_block_snapshot.winning_block_txid, &receipt.matured_rewards, @@ -3017,7 +3023,7 @@ impl NakamotoChainState { let args = params![tenure_start_block_id]; let proof_bytes: Option = query_row(chainstate_conn, sql, args)?; if let Some(bytes) = proof_bytes { - if bytes.len() == 0 { + if bytes.is_empty() { // no VRF proof return Ok(None); } @@ -4197,11 +4203,13 @@ impl NakamotoChainState { applied_epoch_transition: bool, signers_updated: bool, coinbase_height: u64, + phantom_lockup_events: Vec, ) -> Result< ( StacksEpochReceipt, PreCommitClarityBlock<'a>, Option, + Vec, ), ChainstateError, > { @@ -4238,7 +4246,7 @@ impl NakamotoChainState { coinbase_height, }; - return Ok((epoch_receipt, clarity_commit, None)); + return Ok((epoch_receipt, clarity_commit, None, phantom_lockup_events)); } /// Append a Nakamoto Stacks block to the Stacks chain state. @@ -4264,6 +4272,7 @@ impl NakamotoChainState { StacksEpochReceipt, PreCommitClarityBlock<'a>, Option, + Vec, ), ChainstateError, > { @@ -4531,20 +4540,22 @@ impl NakamotoChainState { Ok(lockup_events) => lockup_events, }; - // if any, append lockups events to the coinbase receipt - if lockup_events.len() > 0 { + // If any, append lockups events to the coinbase receipt + if let Some(receipt) = tx_receipts.get_mut(0) { // Receipts are appended in order, so the first receipt should be // the one of the coinbase transaction - if let Some(receipt) = tx_receipts.get_mut(0) { - if receipt.is_coinbase_tx() { - receipt.events.append(&mut lockup_events); - } - } else { - warn!("Unable to attach lockups events, block's first transaction is not a coinbase transaction") + if receipt.is_coinbase_tx() { + receipt.events.append(&mut lockup_events); } } + + // If lockup_events still contains items, it means they weren't attached + if !lockup_events.is_empty() { + info!("Unable to attach lockup events, block's first transaction is not a coinbase transaction. 
Will attach as a phantom tx."); + } + // if any, append auto unlock events to the coinbase receipt - if auto_unlock_events.len() > 0 { + if !auto_unlock_events.is_empty() { // Receipts are appended in order, so the first receipt should be // the one of the coinbase transaction if let Some(receipt) = tx_receipts.get_mut(0) { @@ -4615,6 +4626,7 @@ impl NakamotoChainState { applied_epoch_transition, signer_set_calc.is_some(), coinbase_height, + lockup_events, ); } @@ -4728,7 +4740,12 @@ impl NakamotoChainState { coinbase_height, }; - Ok((epoch_receipt, clarity_commit, reward_set_data)) + Ok(( + epoch_receipt, + clarity_commit, + reward_set_data, + lockup_events, + )) } /// Create a StackerDB config for the .miners contract. @@ -4889,6 +4906,53 @@ impl NakamotoChainState { clarity.save_analysis(&contract_id, &analysis).unwrap(); }) } + + /// Generate a "phantom" transaction to include STXMintEvents for + /// lockups that could not be attached to a Coinbase transaction + /// (because the block doesn't have a Coinbase transaction). + fn generate_phantom_unlock_tx( + events: Vec, + config: &ChainstateConfig, + stacks_block_height: u64, + ) -> Option { + if events.is_empty() { + return None; + } + info!("Generating phantom unlock tx"); + let version = if config.mainnet { + TransactionVersion::Mainnet + } else { + TransactionVersion::Testnet + }; + + // Make the txid unique -- the phantom tx payload should include something block-specific otherwise + // they will always have the same txid. In this case we use the block height in the memo. This also + // happens to give some indication of the purpose of this phantom tx, for anyone looking. + let memo = TokenTransferMemo({ + let str = format!("Block {} token unlocks", stacks_block_height); + let mut buf = [0u8; 34]; + buf[..str.len().min(34)].copy_from_slice(&str.as_bytes()[..]); + buf + }); + let boot_code_address = boot_code_addr(config.mainnet); + let boot_code_auth = boot_code_tx_auth(boot_code_address.clone()); + let unlock_tx = StacksTransaction::new( + version, + boot_code_auth, + TransactionPayload::TokenTransfer( + PrincipalData::Standard(boot_code_address.into()), + 0, + memo, + ), + ); + let unlock_receipt = StacksTransactionReceipt::from_stx_transfer( + unlock_tx, + events, + Value::okay_true(), + ExecutionCost::ZERO, + ); + Some(unlock_receipt) + } } impl StacksMessageCodec for NakamotoBlock { @@ -4918,7 +4982,7 @@ impl StacksMessageCodec for NakamotoBlock { } // header and transactions must be consistent - let txid_vecs = txs.iter().map(|tx| tx.txid().as_bytes().to_vec()).collect(); + let txid_vecs: Vec<_> = txs.iter().map(|tx| tx.txid().as_bytes().to_vec()).collect(); let merkle_tree = MerkleTree::new(&txid_vecs); let tx_merkle_root: Sha512Trunc256Sum = merkle_tree.root(); diff --git a/stackslib/src/chainstate/nakamoto/shadow.rs b/stackslib/src/chainstate/nakamoto/shadow.rs index cdc099e120..dad10f62e0 100644 --- a/stackslib/src/chainstate/nakamoto/shadow.rs +++ b/stackslib/src/chainstate/nakamoto/shadow.rs @@ -506,7 +506,7 @@ impl NakamotoBlockBuilder { chainstate_handle: &StacksChainState, burn_dbconn: &SortitionHandleConn, tenure_id_consensus_hash: &ConsensusHash, - mut txs: Vec, + txs: Vec, ) -> Result<(NakamotoBlock, u64, ExecutionCost), Error> { use clarity::vm::ast::ASTRules; @@ -532,7 +532,7 @@ impl NakamotoBlockBuilder { &mut miner_tenure_info, tenure_id_consensus_hash, )?; - for tx in txs.drain(..) 
{ + for tx in txs.into_iter() { let tx_len = tx.tx_len(); match builder.try_mine_tx_with_len( &mut tenure_tx, @@ -750,7 +750,7 @@ impl NakamotoBlockBuilder { } } -impl<'a> NakamotoStagingBlocksConnRef<'a> { +impl NakamotoStagingBlocksConnRef<'_> { /// Determine if we have a particular block with the given index hash. /// Returns Ok(true) if so /// Returns Ok(false) if not @@ -812,7 +812,7 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { } } -impl<'a> NakamotoStagingBlocksTx<'a> { +impl NakamotoStagingBlocksTx<'_> { /// Add a shadow block. /// Fails if there are any non-shadow blocks present in the tenure. pub fn add_shadow_block(&self, shadow_block: &NakamotoBlock) -> Result<(), ChainstateError> { diff --git a/stackslib/src/chainstate/nakamoto/staging_blocks.rs b/stackslib/src/chainstate/nakamoto/staging_blocks.rs index c3e8432878..9190bf99af 100644 --- a/stackslib/src/chainstate/nakamoto/staging_blocks.rs +++ b/stackslib/src/chainstate/nakamoto/staging_blocks.rs @@ -59,7 +59,7 @@ impl fmt::Display for NakamotoBlockObtainMethod { } } -pub const NAKAMOTO_STAGING_DB_SCHEMA_1: &'static [&'static str] = &[ +pub const NAKAMOTO_STAGING_DB_SCHEMA_1: &[&str] = &[ r#" -- Table for staging nakamoto blocks CREATE TABLE nakamoto_staging_blocks ( @@ -102,7 +102,7 @@ pub const NAKAMOTO_STAGING_DB_SCHEMA_1: &'static [&'static str] = &[ r#"CREATE INDEX nakamoto_staging_blocks_by_tenure_start_block ON nakamoto_staging_blocks(is_tenure_start,consensus_hash);"#, ]; -pub const NAKAMOTO_STAGING_DB_SCHEMA_2: &'static [&'static str] = &[ +pub const NAKAMOTO_STAGING_DB_SCHEMA_2: &[&str] = &[ r#" DROP TABLE nakamoto_staging_blocks; "#, @@ -155,7 +155,7 @@ pub const NAKAMOTO_STAGING_DB_SCHEMA_2: &'static [&'static str] = &[ r#"INSERT INTO db_version (version) VALUES (2)"#, ]; -pub const NAKAMOTO_STAGING_DB_SCHEMA_3: &'static [&'static str] = &[ +pub const NAKAMOTO_STAGING_DB_SCHEMA_3: &[&str] = &[ r#"CREATE INDEX nakamoto_staging_blocks_by_obtain_method ON nakamoto_staging_blocks(consensus_hash,obtain_method);"#, r#"UPDATE db_version SET version = 3"#, ]; @@ -185,8 +185,8 @@ impl NakamotoStagingBlocksConn { pub struct NakamotoStagingBlocksConnRef<'a>(&'a rusqlite::Connection); -impl<'a> NakamotoStagingBlocksConnRef<'a> { - pub fn conn(&self) -> NakamotoStagingBlocksConnRef<'a> { +impl NakamotoStagingBlocksConnRef<'_> { + pub fn conn(&self) -> NakamotoStagingBlocksConnRef<'_> { NakamotoStagingBlocksConnRef(self.0) } } @@ -200,7 +200,7 @@ impl Deref for NakamotoStagingBlocksConnRef<'_> { pub struct NakamotoStagingBlocksTx<'a>(rusqlite::Transaction<'a>); -impl<'a> NakamotoStagingBlocksTx<'a> { +impl NakamotoStagingBlocksTx<'_> { pub fn commit(self) -> Result<(), rusqlite::Error> { self.0.commit() } @@ -217,17 +217,17 @@ impl<'a> Deref for NakamotoStagingBlocksTx<'a> { } } -impl<'a> DerefMut for NakamotoStagingBlocksTx<'a> { +impl DerefMut for NakamotoStagingBlocksTx<'_> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } /// Open a Blob handle to a Nakamoto block -fn inner_open_nakamoto_block<'a>( - conn: &'a Connection, +fn inner_open_nakamoto_block( + conn: &Connection, rowid: i64, readwrite: bool, -) -> Result, ChainstateError> { +) -> Result, ChainstateError> { let blob = conn.blob_open( rusqlite::DatabaseName::Main, "nakamoto_staging_blocks", @@ -240,11 +240,11 @@ fn inner_open_nakamoto_block<'a>( impl NakamotoStagingBlocksConn { /// Open a Blob handle to a Nakamoto block - pub fn open_nakamoto_block<'a>( - &'a self, + pub fn open_nakamoto_block( + &self, rowid: i64, readwrite: bool, - ) -> Result, 
ChainstateError> { + ) -> Result, ChainstateError> { inner_open_nakamoto_block(self.deref(), rowid, readwrite) } } @@ -511,7 +511,7 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { } } -impl<'a> NakamotoStagingBlocksTx<'a> { +impl NakamotoStagingBlocksTx<'_> { /// Notify the staging database that a given stacks block has been processed. /// This will update the attachable status for children blocks, as well as marking the stacks /// block itself as processed. @@ -689,17 +689,15 @@ impl<'a> NakamotoStagingBlocksTx<'a> { impl StacksChainState { /// Begin a transaction against the staging blocks DB. /// Note that this DB is (or will eventually be) in a separate database from the headers. - pub fn staging_db_tx_begin<'a>( - &'a mut self, - ) -> Result, ChainstateError> { + pub fn staging_db_tx_begin(&mut self) -> Result, ChainstateError> { let tx = tx_begin_immediate(&mut self.nakamoto_staging_blocks_conn)?; Ok(NakamotoStagingBlocksTx(tx)) } /// Begin a tx to both the headers DB and the staging DB - pub fn headers_and_staging_tx_begin<'a>( - &'a mut self, - ) -> Result<(rusqlite::Transaction<'a>, NakamotoStagingBlocksTx<'a>), ChainstateError> { + pub fn headers_and_staging_tx_begin( + &mut self, + ) -> Result<(rusqlite::Transaction<'_>, NakamotoStagingBlocksTx<'_>), ChainstateError> { let header_tx = self .state_index .storage_tx() @@ -709,9 +707,9 @@ impl StacksChainState { } /// Open a connection to the headers DB, and open a tx to the staging DB - pub fn headers_conn_and_staging_tx_begin<'a>( - &'a mut self, - ) -> Result<(&'a rusqlite::Connection, NakamotoStagingBlocksTx<'a>), ChainstateError> { + pub fn headers_conn_and_staging_tx_begin( + &mut self, + ) -> Result<(&rusqlite::Connection, NakamotoStagingBlocksTx<'_>), ChainstateError> { let header_conn = self.state_index.sqlite_conn(); let staging_tx = tx_begin_immediate(&mut self.nakamoto_staging_blocks_conn)?; Ok((header_conn, NakamotoStagingBlocksTx(staging_tx))) diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index b72bbdda14..a0e516f283 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -120,7 +120,7 @@ use crate::util_lib::db::{ FromRow, }; -pub static NAKAMOTO_TENURES_SCHEMA_1: &'static str = r#" +pub static NAKAMOTO_TENURES_SCHEMA_1: &str = r#" CREATE TABLE nakamoto_tenures ( -- consensus hash of start-tenure block (i.e. 
the consensus hash of the sortition in which the miner's block-commit -- was mined) @@ -157,7 +157,7 @@ pub static NAKAMOTO_TENURES_SCHEMA_1: &'static str = r#" CREATE INDEX nakamoto_tenures_by_parent ON nakamoto_tenures(tenure_id_consensus_hash,prev_tenure_id_consensus_hash); "#; -pub static NAKAMOTO_TENURES_SCHEMA_2: &'static str = r#" +pub static NAKAMOTO_TENURES_SCHEMA_2: &str = r#" -- Drop the nakamoto_tenures table if it exists DROP TABLE IF EXISTS nakamoto_tenures; @@ -197,7 +197,7 @@ pub static NAKAMOTO_TENURES_SCHEMA_2: &'static str = r#" CREATE INDEX nakamoto_tenures_by_parent ON nakamoto_tenures(tenure_id_consensus_hash,prev_tenure_id_consensus_hash); "#; -pub static NAKAMOTO_TENURES_SCHEMA_3: &'static str = r#" +pub static NAKAMOTO_TENURES_SCHEMA_3: &str = r#" -- Drop the nakamoto_tenures table if it exists DROP TABLE IF EXISTS nakamoto_tenures; diff --git a/stackslib/src/chainstate/nakamoto/test_signers.rs b/stackslib/src/chainstate/nakamoto/test_signers.rs index 6fd559da69..7b5e35a0fd 100644 --- a/stackslib/src/chainstate/nakamoto/test_signers.rs +++ b/stackslib/src/chainstate/nakamoto/test_signers.rs @@ -264,7 +264,7 @@ impl TestSigners { let aggregate_public_key: Vec = rand::thread_rng().sample_iter(Standard).take(33).collect(); - self.aggregate_public_key = aggregate_public_key.clone(); + self.aggregate_public_key.clone_from(&aggregate_public_key); aggregate_public_key } } diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index 94ef81c077..bd415b68b0 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -94,7 +94,7 @@ use crate::util_lib::boot::boot_code_id; use crate::util_lib::db::Error as db_error; use crate::util_lib::strings::StacksString; -impl<'a> NakamotoStagingBlocksConnRef<'a> { +impl NakamotoStagingBlocksConnRef<'_> { pub fn get_all_blocks_in_tenure( &self, tenure_id_consensus_hash: &ConsensusHash, @@ -616,7 +616,7 @@ pub fn test_load_store_update_nakamoto_blocks() { let epoch2_txs = vec![coinbase_tx.clone()]; let epoch2_tx_merkle_root = { - let txid_vecs = epoch2_txs + let txid_vecs: Vec<_> = epoch2_txs .iter() .map(|tx| tx.txid().as_bytes().to_vec()) .collect(); @@ -710,7 +710,7 @@ pub fn test_load_store_update_nakamoto_blocks() { let nakamoto_txs = vec![tenure_change_tx.clone(), coinbase_tx.clone()]; let nakamoto_tx_merkle_root = { - let txid_vecs = nakamoto_txs + let txid_vecs: Vec<_> = nakamoto_txs .iter() .map(|tx| tx.txid().as_bytes().to_vec()) .collect(); @@ -720,7 +720,7 @@ pub fn test_load_store_update_nakamoto_blocks() { let nakamoto_txs_2 = vec![stx_transfer_tx.clone()]; let nakamoto_tx_merkle_root_2 = { - let txid_vecs = nakamoto_txs_2 + let txid_vecs: Vec<_> = nakamoto_txs_2 .iter() .map(|tx| tx.txid().as_bytes().to_vec()) .collect(); @@ -730,7 +730,7 @@ pub fn test_load_store_update_nakamoto_blocks() { let nakamoto_txs_3 = vec![stx_transfer_tx_3.clone()]; let nakamoto_tx_merkle_root_3 = { - let txid_vecs = nakamoto_txs_3 + let txid_vecs: Vec<_> = nakamoto_txs_3 .iter() .map(|tx| tx.txid().as_bytes().to_vec()) .collect(); @@ -740,7 +740,7 @@ pub fn test_load_store_update_nakamoto_blocks() { let nakamoto_txs_4 = vec![stx_transfer_tx_4.clone()]; let nakamoto_tx_merkle_root_4 = { - let txid_vecs = nakamoto_txs_4 + let txid_vecs: Vec<_> = nakamoto_txs_4 .iter() .map(|tx| tx.txid().as_bytes().to_vec()) .collect(); @@ -1780,7 +1780,7 @@ fn test_nakamoto_block_static_verification() { let nakamoto_txs = vec![tenure_change_tx.clone(), 
coinbase_tx.clone()]; let nakamoto_tx_merkle_root = { - let txid_vecs = nakamoto_txs + let txid_vecs: Vec<_> = nakamoto_txs .iter() .map(|tx| tx.txid().as_bytes().to_vec()) .collect(); @@ -1790,7 +1790,7 @@ fn test_nakamoto_block_static_verification() { let nakamoto_recipient_txs = vec![tenure_change_tx.clone(), coinbase_recipient_tx.clone()]; let nakamoto_recipient_tx_merkle_root = { - let txid_vecs = nakamoto_recipient_txs + let txid_vecs: Vec<_> = nakamoto_recipient_txs .iter() .map(|tx| tx.txid().as_bytes().to_vec()) .collect(); @@ -1803,7 +1803,7 @@ fn test_nakamoto_block_static_verification() { coinbase_shadow_recipient_tx.clone(), ]; let nakamoto_shadow_recipient_tx_merkle_root = { - let txid_vecs = nakamoto_shadow_recipient_txs + let txid_vecs: Vec<_> = nakamoto_shadow_recipient_txs .iter() .map(|tx| tx.txid().as_bytes().to_vec()) .collect(); @@ -1813,7 +1813,7 @@ fn test_nakamoto_block_static_verification() { let nakamoto_txs_bad_ch = vec![tenure_change_tx_bad_ch.clone(), coinbase_tx.clone()]; let nakamoto_tx_merkle_root_bad_ch = { - let txid_vecs = nakamoto_txs_bad_ch + let txid_vecs: Vec<_> = nakamoto_txs_bad_ch .iter() .map(|tx| tx.txid().as_bytes().to_vec()) .collect(); @@ -1824,7 +1824,7 @@ fn test_nakamoto_block_static_verification() { let nakamoto_txs_bad_miner_sig = vec![tenure_change_tx_bad_miner_sig.clone(), coinbase_tx.clone()]; let nakamoto_tx_merkle_root_bad_miner_sig = { - let txid_vecs = nakamoto_txs_bad_miner_sig + let txid_vecs: Vec<_> = nakamoto_txs_bad_miner_sig .iter() .map(|tx| tx.txid().as_bytes().to_vec()) .collect(); @@ -2586,7 +2586,7 @@ fn valid_vote_transaction() { post_conditions: vec![], payload: TransactionPayload::ContractCall(TransactionContractCall { address: contract_addr, - contract_name: contract_name, + contract_name, function_name: SIGNERS_VOTING_FUNCTION_NAME.into(), function_args: valid_function_args, }), diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index 9a488d6a09..e4c315dca2 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -291,7 +291,7 @@ impl TestMiner { } } -impl<'a> NakamotoStagingBlocksConnRef<'a> { +impl NakamotoStagingBlocksConnRef<'_> { pub fn get_any_normal_tenure(&self) -> Result, ChainstateError> { let qry = "SELECT consensus_hash FROM nakamoto_staging_blocks WHERE obtain_method != ?1 ORDER BY RANDOM() LIMIT 1"; let args = params![&NakamotoBlockObtainMethod::Shadow.to_string()]; @@ -444,7 +444,7 @@ impl TestStacksNode { /// Record the nakamoto blocks as a new tenure pub fn add_nakamoto_tenure_blocks(&mut self, tenure_blocks: Vec) { if let Some(last_tenure) = self.nakamoto_blocks.last_mut() { - if tenure_blocks.len() > 0 { + if !tenure_blocks.is_empty() { // this tenure is overwriting the last tenure if last_tenure.first().unwrap().header.consensus_hash == tenure_blocks.first().unwrap().header.consensus_hash @@ -743,7 +743,7 @@ impl TestStacksNode { let mut next_block_txs = block_builder(miner, chainstate, sortdb, &blocks); txs.append(&mut next_block_txs); - if txs.len() == 0 { + if txs.is_empty() { break; } @@ -1016,7 +1016,7 @@ impl TestStacksNode { mut builder: NakamotoBlockBuilder, chainstate_handle: &StacksChainState, burn_dbconn: &SortitionHandleConn, - mut txs: Vec, + txs: Vec, ) -> Result<(NakamotoBlock, u64, ExecutionCost), ChainstateError> { use clarity::vm::ast::ASTRules; @@ -1035,7 +1035,7 @@ impl TestStacksNode { let mut miner_tenure_info = builder.load_tenure_info(&mut chainstate, 
burn_dbconn, tenure_cause)?; let mut tenure_tx = builder.tenure_begin(burn_dbconn, &mut miner_tenure_info)?; - for tx in txs.drain(..) { + for tx in txs.into_iter() { let tx_len = tx.tx_len(); match builder.try_mine_tx_with_len( &mut tenure_tx, @@ -1088,7 +1088,7 @@ impl TestStacksNode { } } -impl<'a> TestPeer<'a> { +impl TestPeer<'_> { /// Get the Nakamoto parent linkage data for building atop the last-produced tenure or /// Stacks 2.x block. /// Returns (last-tenure-id, epoch2-parent, nakamoto-parent-tenure, parent-sortition) @@ -1509,8 +1509,7 @@ impl<'a> TestPeer<'a> { let mut malleablized_blocks: Vec = blocks .clone() .into_iter() - .map(|(_, _, _, malleablized)| malleablized) - .flatten() + .flat_map(|(_, _, _, malleablized)| malleablized) .collect(); peer.malleablized_blocks.append(&mut malleablized_blocks); @@ -1600,8 +1599,7 @@ impl<'a> TestPeer<'a> { let mut malleablized_blocks: Vec = blocks .clone() .into_iter() - .map(|(_, _, _, malleablized)| malleablized) - .flatten() + .flat_map(|(_, _, _, malleablized)| malleablized) .collect(); self.malleablized_blocks.append(&mut malleablized_blocks); @@ -2129,7 +2127,7 @@ impl<'a> TestPeer<'a> { // get_nakamoto_tenure_length // compare the DB to the block's ancestors let ancestors = Self::load_nakamoto_tenure(chainstate, &block.block_id()); - assert!(ancestors.len() > 0); + assert!(!ancestors.is_empty()); assert_eq!( ancestors.len(), NakamotoChainState::get_nakamoto_tenure_length(chainstate.db(), &block.block_id()) diff --git a/stackslib/src/chainstate/stacks/address.rs b/stackslib/src/chainstate/stacks/address.rs index c3706a2565..438dd17b9b 100644 --- a/stackslib/src/chainstate/stacks/address.rs +++ b/stackslib/src/chainstate/stacks/address.rs @@ -525,7 +525,7 @@ impl StacksAddressExtensions for StacksAddress { let version = to_c32_version_byte(btc_version) .expect("Failed to decode Bitcoin version byte to Stacks version byte"); StacksAddress { - version: version, + version, bytes: addr.bytes.clone(), } } diff --git a/stackslib/src/chainstate/stacks/auth.rs b/stackslib/src/chainstate/stacks/auth.rs index 06cf64d037..a6212d9bdb 100644 --- a/stackslib/src/chainstate/stacks/auth.rs +++ b/stackslib/src/chainstate/stacks/auth.rs @@ -207,12 +207,12 @@ impl MultisigSpendingCondition { &mut self, key_encoding: TransactionPublicKeyEncoding, signature: MessageSignature, - ) -> () { + ) { self.fields .push(TransactionAuthField::Signature(key_encoding, signature)); } - pub fn push_public_key(&mut self, public_key: StacksPublicKey) -> () { + pub fn push_public_key(&mut self, public_key: StacksPublicKey) { self.fields .push(TransactionAuthField::PublicKey(public_key)); } @@ -406,12 +406,12 @@ impl OrderIndependentMultisigSpendingCondition { &mut self, key_encoding: TransactionPublicKeyEncoding, signature: MessageSignature, - ) -> () { + ) { self.fields .push(TransactionAuthField::Signature(key_encoding, signature)); } - pub fn push_public_key(&mut self, public_key: StacksPublicKey) -> () { + pub fn push_public_key(&mut self, public_key: StacksPublicKey) { self.fields .push(TransactionAuthField::PublicKey(public_key)); } @@ -558,18 +558,18 @@ impl StacksMessageCodec for SinglesigSpendingCondition { } Ok(SinglesigSpendingCondition { - signer: signer, - nonce: nonce, - tx_fee: tx_fee, - hash_mode: hash_mode, - key_encoding: key_encoding, - signature: signature, + signer, + nonce, + tx_fee, + hash_mode, + key_encoding, + signature, }) } } impl SinglesigSpendingCondition { - pub fn set_signature(&mut self, signature: MessageSignature) -> () { + pub fn 
set_signature(&mut self, signature: MessageSignature) { self.signature = signature; } @@ -593,7 +593,7 @@ impl SinglesigSpendingCondition { SinglesigHashMode::P2WPKH => C32_ADDRESS_VERSION_MAINNET_MULTISIG, }; StacksAddress { - version: version, + version, bytes: self.signer.clone(), } } @@ -604,7 +604,7 @@ impl SinglesigSpendingCondition { SinglesigHashMode::P2WPKH => C32_ADDRESS_VERSION_TESTNET_MULTISIG, }; StacksAddress { - version: version, + version, bytes: self.signer.clone(), } } @@ -908,7 +908,7 @@ impl TransactionSpendingCondition { } } - pub fn set_nonce(&mut self, n: u64) -> () { + pub fn set_nonce(&mut self, n: u64) { match *self { TransactionSpendingCondition::Singlesig(ref mut singlesig_data) => { singlesig_data.nonce = n; @@ -922,7 +922,7 @@ impl TransactionSpendingCondition { } } - pub fn set_tx_fee(&mut self, tx_fee: u64) -> () { + pub fn set_tx_fee(&mut self, tx_fee: u64) { match *self { TransactionSpendingCondition::Singlesig(ref mut singlesig_data) => { singlesig_data.tx_fee = tx_fee; @@ -978,7 +978,7 @@ impl TransactionSpendingCondition { } /// Clear fee rate, nonces, signatures, and public keys - pub fn clear(&mut self) -> () { + pub fn clear(&mut self) { match *self { TransactionSpendingCondition::Singlesig(ref mut singlesig_data) => { singlesig_data.tx_fee = 0; @@ -1309,7 +1309,7 @@ impl TransactionAuth { self.origin().nonce() } - pub fn set_origin_nonce(&mut self, n: u64) -> () { + pub fn set_origin_nonce(&mut self, n: u64) { match *self { TransactionAuth::Standard(ref mut s) => s.set_nonce(n), TransactionAuth::Sponsored(ref mut s, _) => s.set_nonce(n), @@ -1340,7 +1340,7 @@ impl TransactionAuth { } } - pub fn set_tx_fee(&mut self, tx_fee: u64) -> () { + pub fn set_tx_fee(&mut self, tx_fee: u64) { match *self { TransactionAuth::Standard(ref mut s) => s.set_tx_fee(tx_fee), TransactionAuth::Sponsored(_, ref mut s) => s.set_tx_fee(tx_fee), @@ -1371,12 +1371,12 @@ impl TransactionAuth { TransactionAuth::Standard(_) => Ok(()), TransactionAuth::Sponsored(_, ref sponsor_condition) => sponsor_condition .verify(&origin_sighash, &TransactionAuthFlags::AuthSponsored) - .and_then(|_sigh| Ok(())), + .map(|_sigh| ()), } } /// Clear out all transaction auth fields, nonces, and fee rates from the spending condition(s). 
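These `auth.rs` hunks drop the explicit `-> ()` return annotations. The unit return type is implicit, so the signatures are unchanged in meaning; clippy's `unused_unit` lint flags the explicit form. A minimal before/after sketch, not project code:

struct Fee {
    tx_fee: u64,
}

impl Fee {
    // Before: the `-> ()` adds nothing the compiler doesn't already assume.
    fn set_explicit(&mut self, tx_fee: u64) -> () {
        self.tx_fee = tx_fee;
    }
    // After: identical signature as far as the compiler is concerned.
    fn set_implicit(&mut self, tx_fee: u64) {
        self.tx_fee = tx_fee;
    }
}

fn main() {
    let mut f = Fee { tx_fee: 0 };
    f.set_explicit(100);
    f.set_implicit(100);
    assert_eq!(f.tx_fee, 100);
}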
- pub fn clear(&mut self) -> () { + pub fn clear(&mut self) { match *self { TransactionAuth::Standard(ref mut origin_condition) => { origin_condition.clear(); diff --git a/stackslib/src/chainstate/stacks/block.rs b/stackslib/src/chainstate/stacks/block.rs index 85bfcc5576..d1255d8549 100644 --- a/stackslib/src/chainstate/stacks/block.rs +++ b/stackslib/src/chainstate/stacks/block.rs @@ -156,8 +156,8 @@ impl StacksBlockHeader { total_work: total_work.clone(), proof: proof.clone(), parent_block: parent_header_hash, - parent_microblock: parent_microblock, - parent_microblock_sequence: parent_microblock_sequence, + parent_microblock, + parent_microblock_sequence, tx_merkle_root: tx_merkle_root.clone(), state_index_root: state_index_root.clone(), microblock_pubkey_hash: microblock_pubkey_hash.clone(), @@ -313,7 +313,7 @@ impl StacksMessageCodec for StacksBlock { }?; // there must be at least one transaction (the coinbase) - if txs.len() == 0 { + if txs.is_empty() { warn!("Invalid block: Zero-transaction block"); return Err(codec_error::DeserializeError( "Invalid block: zero transactions".to_string(), @@ -338,7 +338,7 @@ impl StacksMessageCodec for StacksBlock { } // header and transactions must be consistent - let txid_vecs = txs.iter().map(|tx| tx.txid().as_bytes().to_vec()).collect(); + let txid_vecs: Vec<_> = txs.iter().map(|tx| tx.txid().as_bytes().to_vec()).collect(); let merkle_tree = MerkleTree::::new(&txid_vecs); let tx_merkle_root = merkle_tree.root(); @@ -388,7 +388,7 @@ impl StacksBlock { state_index_root: &TrieHash, microblock_pubkey_hash: &Hash160, ) -> StacksBlock { - let txids = txs + let txids: Vec<_> = txs .iter() .map(|ref tx| tx.txid().as_bytes().to_vec()) .collect(); @@ -429,7 +429,7 @@ impl StacksBlock { /// Find and return the coinbase transaction. It's always the first transaction. 
/// If there are 0 coinbase txs, or more than 1, then return None pub fn get_coinbase_tx(&self) -> Option { - if self.txs.len() == 0 { + if self.txs.is_empty() { return None; } match self.txs[0].payload { @@ -444,14 +444,14 @@ impl StacksBlock { let mut txids = HashMap::new(); for (i, tx) in txs.iter().enumerate() { let txid = tx.txid(); - if txids.get(&txid).is_some() { + if txids.contains_key(&txid) { warn!( "Duplicate tx {}: at index {} and {}", txid, txids.get(&txid).unwrap(), i ); - test_debug!("{:?}", &tx); + test_debug!("{tx:?}"); return false; } txids.insert(txid, i); @@ -831,7 +831,7 @@ impl StacksMessageCodec for StacksMicroblock { read_next(&mut bound_read) }?; - if txs.len() == 0 { + if txs.is_empty() { warn!("Invalid microblock: zero transactions"); return Err(codec_error::DeserializeError( "Invalid microblock: zero transactions".to_string(), @@ -853,7 +853,7 @@ impl StacksMessageCodec for StacksMicroblock { } // header and transactions must be consistent - let txid_vecs = txs.iter().map(|tx| tx.txid().as_bytes().to_vec()).collect(); + let txid_vecs: Vec<_> = txs.iter().map(|tx| tx.txid().as_bytes().to_vec()).collect(); let merkle_tree = MerkleTree::::new(&txid_vecs); let tx_merkle_root = merkle_tree.root(); @@ -880,24 +880,21 @@ impl StacksMicroblock { parent_block_hash: &BlockHeaderHash, txs: Vec, ) -> StacksMicroblock { - let txids = txs + let txids: Vec<_> = txs .iter() .map(|ref tx| tx.txid().as_bytes().to_vec()) .collect(); let merkle_tree = MerkleTree::::new(&txids); let tx_merkle_root = merkle_tree.root(); let header = StacksMicroblockHeader::first_unsigned(parent_block_hash, &tx_merkle_root); - StacksMicroblock { - header: header, - txs: txs, - } + StacksMicroblock { header, txs } } pub fn from_parent_unsigned( parent_header: &StacksMicroblockHeader, txs: Vec, ) -> Option { - let txids = txs + let txids: Vec<_> = txs .iter() .map(|ref tx| tx.txid().as_bytes().to_vec()) .collect(); @@ -911,10 +908,7 @@ impl StacksMicroblock { } }; - Some(StacksMicroblock { - header: header, - txs: txs, - }) + Some(StacksMicroblock { header, txs }) } pub fn sign(&mut self, privk: &StacksPrivateKey) -> Result<(), net_error> { @@ -1005,7 +999,7 @@ mod test { burn: 123, work: 456, }, - proof: proof, + proof, parent_block: FIRST_STACKS_BLOCK_HASH.clone(), parent_microblock: BlockHeaderHash([1u8; 32]), parent_microblock_sequence: 3, @@ -1173,7 +1167,7 @@ mod test { all_txs[3 * i + 2].clone(), ]; - let txid_vecs = txs.iter().map(|tx| tx.txid().as_bytes().to_vec()).collect(); + let txid_vecs: Vec<_> = txs.iter().map(|tx| tx.txid().as_bytes().to_vec()).collect(); let merkle_tree = MerkleTree::::new(&txid_vecs); let tx_merkle_root = merkle_tree.root(); @@ -1183,7 +1177,7 @@ mod test { version: 0x12, sequence: 0x34, prev_block: EMPTY_MICROBLOCK_PARENT_HASH.clone(), - tx_merkle_root: tx_merkle_root, + tx_merkle_root, signature: MessageSignature([ 0x00, 0x35, 0x44, 0x45, 0xa1, 0xdc, 0x98, 0xa1, 0xbd, 0x27, 0x98, 0x4d, 0xbe, 0x69, 0x97, 0x9a, 0x5c, 0xd7, 0x78, 0x86, 0xb4, 0xd9, 0x13, 0x4a, 0xf5, 0xc4, @@ -1216,10 +1210,7 @@ mod test { txs.consensus_serialize(&mut tx_bytes).unwrap(); block_bytes.append(&mut tx_bytes); - let mblock = StacksMicroblock { - header: header, - txs: txs, - }; + let mblock = StacksMicroblock { header, txs }; check_codec_and_corruption::(&mblock, &block_bytes); } @@ -1305,7 +1296,7 @@ mod test { .unwrap(), ) .unwrap(), - memo: vec![01, 02, 03, 04, 05], + memo: vec![1, 2, 3, 4, 5], txid: Txid::from_bytes_be( 
&hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562") @@ -1501,7 +1492,7 @@ mod test { let txs_dup = vec![tx_coinbase.clone(), tx_dup.clone(), tx_dup.clone()]; let get_tx_root = |txs: &Vec<StacksTransaction>| { - let txid_vecs = txs.iter().map(|tx| tx.txid().as_bytes().to_vec()).collect(); + let txid_vecs: Vec<_> = txs.iter().map(|tx| tx.txid().as_bytes().to_vec()).collect(); let merkle_tree = MerkleTree::<Sha512Trunc256Sum>::new(&txid_vecs); let tx_merkle_root = merkle_tree.root(); @@ -1628,7 +1619,7 @@ mod test { let txs_dup = vec![tx_dup.clone(), tx_dup.clone()]; let get_tx_root = |txs: &Vec<StacksTransaction>| { - let txid_vecs = txs.iter().map(|tx| tx.txid().as_bytes().to_vec()).collect(); + let txid_vecs: Vec<_> = txs.iter().map(|tx| tx.txid().as_bytes().to_vec()).collect(); let merkle_tree = MerkleTree::<Sha512Trunc256Sum>::new(&txid_vecs); let tx_merkle_root = merkle_tree.root(); @@ -1718,7 +1709,7 @@ mod test { StacksEpochId::Epoch30, ]; let get_tx_root = |txs: &Vec<StacksTransaction>| { - let txid_vecs = txs.iter().map(|tx| tx.txid().as_bytes().to_vec()).collect(); + let txid_vecs: Vec<_> = txs.iter().map(|tx| tx.txid().as_bytes().to_vec()).collect(); let merkle_tree = MerkleTree::<Sha512Trunc256Sum>::new(&txid_vecs); let tx_merkle_root = merkle_tree.root(); diff --git a/stackslib/src/chainstate/stacks/boot/contract_tests.rs b/stackslib/src/chainstate/stacks/boot/contract_tests.rs index 4d4e875ba3..58701a2861 100644 --- a/stackslib/src/chainstate/stacks/boot/contract_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/contract_tests.rs @@ -354,7 +354,7 @@ fn cost_2_contract_is_arithmetic_only() { impl BurnStateDB for TestSimBurnStateDB { fn get_tip_burn_block_height(&self) -> Option<u32> { - Some(self.height as u32) + Some(self.height) } fn get_tip_sortition_id(&self) -> Option<SortitionId> { @@ -587,7 +587,7 @@ impl HeadersDB for TestSimHeadersDB { let burn_block_height = self.get_burn_block_height_for_block(id_bhh)?
as u64; Some( BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP as u64 + burn_block_height - - BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT as u64, + - BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT, ) } } @@ -607,7 +607,7 @@ impl HeadersDB for TestSimHeadersDB { None } else { Some( - (BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT as u32 + input_height as u32) + (BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT + input_height) .try_into() .unwrap(), ) @@ -3047,7 +3047,7 @@ fn test_vote_too_many_confirms() { .0, Value::Response(ResponseData { committed: true, - data: Value::UInt(i as u128).into() + data: Value::UInt(i).into() }) ); } @@ -3061,10 +3061,7 @@ fn test_vote_too_many_confirms() { None, COST_VOTING_CONTRACT_TESTNET.clone(), "vote-proposal", - &symbols_from_values(vec![ - Value::UInt(i as u128), - Value::UInt(USTX_PER_HOLDER) - ]), + &symbols_from_values(vec![Value::UInt(i), Value::UInt(USTX_PER_HOLDER)]), ) .unwrap() .0, @@ -3079,7 +3076,7 @@ fn test_vote_too_many_confirms() { None, COST_VOTING_CONTRACT_TESTNET.clone(), "confirm-votes", - &symbols_from_values(vec![Value::UInt(i as u128)]) + &symbols_from_values(vec![Value::UInt(i)]) ) .unwrap() .0, @@ -3093,10 +3090,7 @@ fn test_vote_too_many_confirms() { None, COST_VOTING_CONTRACT_TESTNET.clone(), "withdraw-votes", - &symbols_from_values(vec![ - Value::UInt(i as u128), - Value::UInt(USTX_PER_HOLDER), - ]), + &symbols_from_values(vec![Value::UInt(i), Value::UInt(USTX_PER_HOLDER)]), ) .unwrap() .0; @@ -3122,7 +3116,7 @@ fn test_vote_too_many_confirms() { None, COST_VOTING_CONTRACT_TESTNET.clone(), "confirm-miners", - &symbols_from_values(vec![Value::UInt(i as u128)]) + &symbols_from_values(vec![Value::UInt(i)]) ) .unwrap() .0, diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs index f6f167d75b..86263904f5 100644 --- a/stackslib/src/chainstate/stacks/boot/mod.rs +++ b/stackslib/src/chainstate/stacks/boot/mod.rs @@ -61,48 +61,48 @@ use crate::core::{ use crate::util_lib::boot; use crate::util_lib::strings::VecDisplay; -const BOOT_CODE_POX_BODY: &'static str = std::include_str!("pox.clar"); -const BOOT_CODE_POX_TESTNET_CONSTS: &'static str = std::include_str!("pox-testnet.clar"); -const BOOT_CODE_POX_MAINNET_CONSTS: &'static str = std::include_str!("pox-mainnet.clar"); -pub const BOOT_CODE_LOCKUP: &'static str = std::include_str!("lockup.clar"); -pub const BOOT_CODE_COSTS: &'static str = std::include_str!("costs.clar"); -pub const BOOT_CODE_COSTS_2: &'static str = std::include_str!("costs-2.clar"); -pub const BOOT_CODE_COSTS_3: &'static str = std::include_str!("costs-3.clar"); -pub const BOOT_CODE_COSTS_2_TESTNET: &'static str = std::include_str!("costs-2-testnet.clar"); -pub const BOOT_CODE_COST_VOTING_MAINNET: &'static str = std::include_str!("cost-voting.clar"); -pub const BOOT_CODE_BNS: &'static str = std::include_str!("bns.clar"); -pub const BOOT_CODE_GENESIS: &'static str = std::include_str!("genesis.clar"); -pub const POX_1_NAME: &'static str = "pox"; -pub const POX_2_NAME: &'static str = "pox-2"; -pub const POX_3_NAME: &'static str = "pox-3"; -pub const POX_4_NAME: &'static str = "pox-4"; -pub const SIGNERS_NAME: &'static str = "signers"; -pub const SIGNERS_VOTING_NAME: &'static str = "signers-voting"; +const BOOT_CODE_POX_BODY: &str = std::include_str!("pox.clar"); +const BOOT_CODE_POX_TESTNET_CONSTS: &str = std::include_str!("pox-testnet.clar"); +const BOOT_CODE_POX_MAINNET_CONSTS: &str = std::include_str!("pox-mainnet.clar"); +pub const BOOT_CODE_LOCKUP: &str = std::include_str!("lockup.clar"); +pub const BOOT_CODE_COSTS: &str = 
std::include_str!("costs.clar"); +pub const BOOT_CODE_COSTS_2: &str = std::include_str!("costs-2.clar"); +pub const BOOT_CODE_COSTS_3: &str = std::include_str!("costs-3.clar"); +pub const BOOT_CODE_COSTS_2_TESTNET: &str = std::include_str!("costs-2-testnet.clar"); +pub const BOOT_CODE_COST_VOTING_MAINNET: &str = std::include_str!("cost-voting.clar"); +pub const BOOT_CODE_BNS: &str = std::include_str!("bns.clar"); +pub const BOOT_CODE_GENESIS: &str = std::include_str!("genesis.clar"); +pub const POX_1_NAME: &str = "pox"; +pub const POX_2_NAME: &str = "pox-2"; +pub const POX_3_NAME: &str = "pox-3"; +pub const POX_4_NAME: &str = "pox-4"; +pub const SIGNERS_NAME: &str = "signers"; +pub const SIGNERS_VOTING_NAME: &str = "signers-voting"; pub const SIGNERS_VOTING_FUNCTION_NAME: &str = "vote-for-aggregate-public-key"; /// This is the name of a variable in the `.signers` contract which tracks the most recently updated /// reward cycle number. -pub const SIGNERS_UPDATE_STATE: &'static str = "last-set-cycle"; +pub const SIGNERS_UPDATE_STATE: &str = "last-set-cycle"; pub const SIGNERS_MAX_LIST_SIZE: usize = 4000; pub const SIGNERS_PK_LEN: usize = 33; -const POX_2_BODY: &'static str = std::include_str!("pox-2.clar"); -const POX_3_BODY: &'static str = std::include_str!("pox-3.clar"); -const POX_4_BODY: &'static str = std::include_str!("pox-4.clar"); -pub const SIGNERS_BODY: &'static str = std::include_str!("signers.clar"); -pub const SIGNERS_DB_0_BODY: &'static str = std::include_str!("signers-0-xxx.clar"); -pub const SIGNERS_DB_1_BODY: &'static str = std::include_str!("signers-1-xxx.clar"); -pub const SIGNERS_VOTING_BODY: &'static str = std::include_str!("signers-voting.clar"); - -pub const COSTS_1_NAME: &'static str = "costs"; -pub const COSTS_2_NAME: &'static str = "costs-2"; -pub const COSTS_3_NAME: &'static str = "costs-3"; +const POX_2_BODY: &str = std::include_str!("pox-2.clar"); +const POX_3_BODY: &str = std::include_str!("pox-3.clar"); +const POX_4_BODY: &str = std::include_str!("pox-4.clar"); +pub const SIGNERS_BODY: &str = std::include_str!("signers.clar"); +pub const SIGNERS_DB_0_BODY: &str = std::include_str!("signers-0-xxx.clar"); +pub const SIGNERS_DB_1_BODY: &str = std::include_str!("signers-1-xxx.clar"); +pub const SIGNERS_VOTING_BODY: &str = std::include_str!("signers-voting.clar"); + +pub const COSTS_1_NAME: &str = "costs"; +pub const COSTS_2_NAME: &str = "costs-2"; +pub const COSTS_3_NAME: &str = "costs-3"; /// This contract name is used in testnet **only** to lookup an initial /// setting for the pox-4 aggregate key. 
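Everything in this hunk is one mechanical lint fix, clippy's `redundant_static_lifetimes`: a `const` (or `static`) item already has the `'static` lifetime, so spelling it out adds nothing. A two-line demonstration:

```rust
// Identical types; clippy flags the first spelling as redundant.
const OLD_STYLE: &'static str = "pox";
const NEW_STYLE: &str = "pox";

fn main() {
    // NEW_STYLE still satisfies a &'static str requirement.
    let s: &'static str = NEW_STYLE;
    assert_eq!(OLD_STYLE, s);
}
```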
This contract should contain a `define-read-only` /// function called `aggregate-key` with zero arguments which returns a (buff 33) -pub const BOOT_TEST_POX_4_AGG_KEY_CONTRACT: &'static str = "pox-4-agg-test-booter"; -pub const BOOT_TEST_POX_4_AGG_KEY_FNAME: &'static str = "aggregate-key"; +pub const BOOT_TEST_POX_4_AGG_KEY_CONTRACT: &str = "pox-4-agg-test-booter"; +pub const BOOT_TEST_POX_4_AGG_KEY_FNAME: &str = "aggregate-key"; -pub const MINERS_NAME: &'static str = "miners"; +pub const MINERS_NAME: &str = "miners"; pub mod docs; @@ -237,7 +237,7 @@ pub struct RewardSetData { pub reward_set: RewardSet, pub cycle_number: u64, } -const POX_CYCLE_START_HANDLED_VALUE: &'static str = "1"; +const POX_CYCLE_START_HANDLED_VALUE: &str = "1"; impl PoxStartCycleInfo { pub fn serialize(&self) -> String { @@ -2780,9 +2780,9 @@ pub mod test { ) -> Result, Error> { state .get_reward_addresses(burnchain, sortdb, burn_block_height, block_id) - .and_then(|mut addrs| { + .map(|mut addrs| { addrs.sort_by_key(|k| k.reward_address.bytes()); - Ok(addrs) + addrs }) } diff --git a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs index 64782c67d6..47b57cdd2c 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs @@ -294,7 +294,7 @@ pub fn check_pox_print_event( } // assert_eq!(inner_tuple.data_map.get(inner_key), Some(&inner_val)); } - if missing.len() > 0 || wrong.len() > 0 { + if !missing.is_empty() || !wrong.is_empty() { eprintln!("missing:\n{:#?}", &missing); eprintln!("wrong:\n{:#?}", &wrong); assert!(false); @@ -382,7 +382,7 @@ pub fn check_stacking_state_invariants( let mut cycle_indexes = HashMap::new(); - if reward_indexes.len() > 0 || expect_indexes { + if !reward_indexes.is_empty() || expect_indexes { assert_eq!( reward_indexes.len() as u128, lock_period, @@ -3631,7 +3631,7 @@ fn test_pox_2_getters() { .expect_optional() .unwrap(); assert_eq!(bob_delegation_addr, charlie_address.to_account_principal()); - assert_eq!(bob_delegation_amt, LOCKUP_AMT as u128); + assert_eq!(bob_delegation_amt, LOCKUP_AMT); assert!(bob_pox_addr_opt.is_none()); let allowance = data @@ -3679,7 +3679,7 @@ fn test_pox_2_getters() { .unwrap() .expect_u128() .unwrap(); - assert_eq!(partial_stacked, LOCKUP_AMT as u128); + assert_eq!(partial_stacked, LOCKUP_AMT); let rejected = data .get("get-total-pox-rejection-now") @@ -3695,7 +3695,7 @@ fn test_pox_2_getters() { .unwrap() .expect_u128() .unwrap(); - assert_eq!(rejected, LOCKUP_AMT as u128); + assert_eq!(rejected, LOCKUP_AMT); let rejected = data .get("get-total-pox-rejection-future") @@ -3848,10 +3848,9 @@ fn test_get_pox_addrs() { if tenure_id <= 1 { // record the first reward cycle when tokens get stacked - lockup_reward_cycle = 1 - + (burnchain - .block_height_to_reward_cycle(tip_burn_block_height) - .unwrap()) as u64; + lockup_reward_cycle = 1 + burnchain + .block_height_to_reward_cycle(tip_burn_block_height) + .unwrap(); eprintln!( "\nlockup reward cycle: {}\ncur reward cycle: {}\n", lockup_reward_cycle, cur_reward_cycle @@ -4145,10 +4144,9 @@ fn test_stack_with_segwit() { if tenure_id <= 1 { // record the first reward cycle when tokens get stacked - lockup_reward_cycle = 1 - + (burnchain - .block_height_to_reward_cycle(tip_burn_block_height) - .unwrap()) as u64; + lockup_reward_cycle = 1 + burnchain + .block_height_to_reward_cycle(tip_burn_block_height) + .unwrap(); eprintln!( "\nlockup reward cycle: {}\ncur reward cycle: {}\n", lockup_reward_cycle, 
cur_reward_cycle @@ -4460,7 +4458,7 @@ fn test_pox_2_delegate_stx_addr_validation() { alice_delegation_addr, charlie_address.to_account_principal() ); - assert_eq!(alice_delegation_amt, LOCKUP_AMT as u128); + assert_eq!(alice_delegation_amt, LOCKUP_AMT); assert!(alice_pox_addr_opt.is_some()); let alice_pox_addr = alice_pox_addr_opt.unwrap(); diff --git a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs index 8a173c6adc..5c52297969 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs @@ -3278,7 +3278,7 @@ fn pox_3_getters() { .expect_optional() .unwrap(); assert_eq!(bob_delegation_addr, charlie_address.to_account_principal()); - assert_eq!(bob_delegation_amt, LOCKUP_AMT as u128); + assert_eq!(bob_delegation_amt, LOCKUP_AMT); assert!(bob_pox_addr_opt.is_none()); let allowance = data @@ -3326,7 +3326,7 @@ fn pox_3_getters() { .unwrap() .expect_u128() .unwrap(); - assert_eq!(partial_stacked, LOCKUP_AMT as u128); + assert_eq!(partial_stacked, LOCKUP_AMT); let rejected = data .get("get-total-pox-rejection-now") @@ -3334,7 +3334,7 @@ fn pox_3_getters() { .unwrap() .expect_u128() .unwrap(); - assert_eq!(rejected, LOCKUP_AMT as u128); + assert_eq!(rejected, LOCKUP_AMT); let rejected = data .get("get-total-pox-rejection-next") @@ -4430,7 +4430,7 @@ fn pox_3_delegate_stx_addr_validation() { alice_delegation_addr, charlie_address.to_account_principal() ); - assert_eq!(alice_delegation_amt, LOCKUP_AMT as u128); + assert_eq!(alice_delegation_amt, LOCKUP_AMT); assert!(alice_pox_addr_opt.is_some()); let alice_pox_addr = alice_pox_addr_opt.unwrap(); diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index 14dc9e75ab..072f1d33ef 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -6378,7 +6378,7 @@ fn stack_increase(use_nakamoto: bool) { &pox_addr, lock_period, &signing_pk, - block_height as u64, + block_height, Some(signature), u128::MAX, 1, @@ -8930,7 +8930,7 @@ pub fn tenure_with_txs( test_signers, |_miner, _chainstate, _sort_dbconn, _blocks| { info!("Building nakamoto block. 
Blocks len {}", _blocks.len()); - if _blocks.len() == 0 { + if _blocks.is_empty() { txs.to_vec() } else { vec![] diff --git a/stackslib/src/chainstate/stacks/db/accounts.rs b/stackslib/src/chainstate/stacks/db/accounts.rs index bf84cc1362..0ad5687f12 100644 --- a/stackslib/src/chainstate/stacks/db/accounts.rs +++ b/stackslib/src/chainstate/stacks/db/accounts.rs @@ -123,7 +123,7 @@ impl FromRow for MinerPaymentSchedule { } impl FromRow for MinerReward { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { let address = StacksAddress::from_column(row, "address")?; let recipient_str: Option = row.get_unwrap("recipient"); let recipient = recipient_str @@ -503,8 +503,8 @@ impl StacksChainState { } /// Store a matured miner reward for subsequent query in Clarity, without doing any validation - fn inner_insert_matured_miner_reward<'a>( - tx: &mut DBTx<'a>, + fn inner_insert_matured_miner_reward( + tx: &mut DBTx<'_>, parent_block_id: &StacksBlockId, child_block_id: &StacksBlockId, reward: &MinerReward, @@ -516,7 +516,7 @@ impl StacksChainState { &(*parent_block_id).into(), &(*child_block_id).into(), )?; - if cur_rewards.len() > 0 { + if !cur_rewards.is_empty() { let mut present = false; for rw in cur_rewards.iter() { if (rw.is_parent() && reward.is_parent()) || (rw.is_child() && reward.is_child()) { @@ -564,8 +564,8 @@ impl StacksChainState { /// Store a parent block's matured reward. This is the share of the streamed tx fees produced /// by the miner who mined this block, and nothing else. - pub fn insert_matured_parent_miner_reward<'a>( - tx: &mut DBTx<'a>, + pub fn insert_matured_parent_miner_reward( + tx: &mut DBTx<'_>, parent_block_id: &StacksBlockId, child_block_id: &StacksBlockId, parent_reward: &MinerReward, @@ -594,8 +594,8 @@ impl StacksChainState { /// Store a child block's matured miner reward. This is the block's coinbase, anchored tx fees, and /// share of the confirmed streamed tx fees - pub fn insert_matured_child_miner_reward<'a>( - tx: &mut DBTx<'a>, + pub fn insert_matured_child_miner_reward( + tx: &mut DBTx<'_>, parent_block_id: &StacksBlockId, child_block_id: &StacksBlockId, child_reward: &MinerReward, @@ -625,8 +625,8 @@ impl StacksChainState { /// Store a child block's matured user burn-support reward. This is the share of the /// block's coinbase, anchored tx fees, and share of the confirmed streamed tx fees that go to /// the user burn-support sender - pub fn insert_matured_child_user_reward<'a>( - tx: &mut DBTx<'a>, + pub fn insert_matured_child_user_reward( + tx: &mut DBTx<'_>, parent_block_id: &StacksBlockId, child_block_id: &StacksBlockId, child_reward: &MinerReward, @@ -724,8 +724,8 @@ impl StacksChainState { } /// Get the scheduled miner rewards in a particular Stacks fork at a particular height. - pub fn get_scheduled_block_rewards_in_fork_at_height<'a>( - tx: &mut StacksDBTx<'a>, + pub fn get_scheduled_block_rewards_in_fork_at_height( + tx: &mut StacksDBTx<'_>, tip: &StacksHeaderInfo, block_height: u64, ) -> Result, Error> { @@ -868,9 +868,9 @@ impl StacksChainState { // of all participants' burns. let coinbase_reward = participant .coinbase - .checked_mul(this_burn_total as u128) + .checked_mul(this_burn_total) .expect("FATAL: STX coinbase reward overflow") - / (burn_total as u128); + / burn_total; // process poison -- someone can steal a fraction of the total coinbase if they can present // evidence that the miner forked the microblock stream. 
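The cast removals around `checked_mul` sit inside the proportional coinbase split: each participant earns `coinbase * its_burn / total_burn`, with the multiplication overflow-checked before dividing. A worked toy example with made-up numbers (not consensus values):

```rust
fn participant_reward(coinbase: u128, this_burn_total: u128, burn_total: u128) -> u128 {
    // Multiply first (checked, since u128 * u128 can overflow), then divide.
    coinbase
        .checked_mul(this_burn_total)
        .expect("FATAL: STX coinbase reward overflow")
        / burn_total
}

fn main() {
    // A participant contributing 600 of 1_000 burned units gets 60%.
    assert_eq!(participant_reward(1_000_000, 600, 1_000), 600_000);
}
```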
The remainder of the coinbase is @@ -976,9 +976,9 @@ impl StacksChainState { address: child_address, recipient: child_recipient, coinbase: coinbase_reward, - tx_fees_anchored: tx_fees_anchored, + tx_fees_anchored, tx_fees_streamed_produced: 0, - tx_fees_streamed_confirmed: tx_fees_streamed_confirmed, + tx_fees_streamed_confirmed, vtxindex: participant.vtxindex, }; @@ -1003,7 +1003,7 @@ impl StacksChainState { let reward_height = tip_stacks_height - MINER_REWARD_MATURITY; - assert!(latest_matured_miners.len() > 0); + assert!(!latest_matured_miners.is_empty()); assert!(latest_matured_miners[0].vtxindex == 0); assert!(latest_matured_miners[0].miner); diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 5f6b236973..d530b8af34 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -344,7 +344,7 @@ impl StagingBlock { } impl FromRow for StagingMicroblock { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { let anchored_block_hash: BlockHeaderHash = BlockHeaderHash::from_column(row, "anchored_block_hash")?; let consensus_hash: ConsensusHash = ConsensusHash::from_column(row, "consensus_hash")?; @@ -373,7 +373,7 @@ impl FromRow for StagingMicroblock { } impl FromRow for StagingBlock { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { let anchored_block_hash: BlockHeaderHash = BlockHeaderHash::from_column(row, "anchored_block_hash")?; let parent_anchored_block_hash: BlockHeaderHash = @@ -678,7 +678,7 @@ impl StacksChainState { blocks_dir: &str, consensus_hash: &ConsensusHash, block_header_hash: &BlockHeaderHash, - ) -> () { + ) { let block_path = StacksChainState::make_block_dir(blocks_dir, consensus_hash, &block_header_hash) .expect("FATAL: failed to create block directory"); @@ -737,7 +737,7 @@ impl StacksChainState { blocks_path: &str, consensus_hash: &ConsensusHash, block_header: &StacksBlockHeader, - ) -> () { + ) { StacksChainState::free_block(blocks_path, consensus_hash, &block_header.block_hash()) } @@ -746,11 +746,11 @@ impl StacksChainState { blocks_conn: &DBConn, ) -> Result, Error> { let list_block_sql = "SELECT * FROM staging_blocks ORDER BY height".to_string(); - let mut blocks = query_rows::(blocks_conn, &list_block_sql, NO_PARAMS) + let blocks = query_rows::(blocks_conn, &list_block_sql, NO_PARAMS) .map_err(Error::DBError)?; Ok(blocks - .drain(..) + .into_iter() .map(|b| (b.consensus_hash, b.anchored_block_hash)) .collect()) } @@ -767,20 +767,23 @@ impl StacksChainState { blocks_conn: &DBConn, blocks_dir: &str, ) -> Result)>, Error> { - let mut blocks = StacksChainState::list_blocks(blocks_conn)?; + let blocks = StacksChainState::list_blocks(blocks_conn)?; let mut ret = vec![]; - for (consensus_hash, block_hash) in blocks.drain(..) 
{ + for (consensus_hash, block_hash) in blocks.into_iter() { let list_microblock_sql = "SELECT * FROM staging_microblocks WHERE anchored_block_hash = ?1 AND consensus_hash = ?2 ORDER BY sequence".to_string(); let list_microblock_args = params![block_hash, consensus_hash]; - let mut microblocks = query_rows::( + let microblocks = query_rows::( blocks_conn, &list_microblock_sql, list_microblock_args, ) .map_err(Error::DBError)?; - let microblock_hashes = microblocks.drain(..).map(|mb| mb.microblock_hash).collect(); + let microblock_hashes = microblocks + .into_iter() + .map(|mb| mb.microblock_hash) + .collect(); ret.push((consensus_hash, block_hash, microblock_hashes)); } @@ -936,7 +939,7 @@ impl StacksChainState { 0 => Ok(None), 1 => { let blob = blobs.pop().unwrap(); - if blob.len() == 0 { + if blob.is_empty() { // cleared Ok(None) } else { @@ -1041,7 +1044,7 @@ impl StacksChainState { block_hash, )? { Some(staging_block) => { - if staging_block.block_data.len() == 0 { + if staging_block.block_data.is_empty() { return Ok(None); } @@ -1255,7 +1258,7 @@ impl StacksChainState { } ret.reverse(); - if ret.len() > 0 { + if !ret.is_empty() { // should start with 0 if ret[0].header.sequence != 0 { warn!("Invalid microblock stream from {}/{} to {}: sequence does not start with 0, but with {}", @@ -1338,7 +1341,7 @@ impl StacksChainState { let staging_microblocks = query_rows::(blocks_conn, &sql, args).map_err(Error::DBError)?; - if staging_microblocks.len() == 0 { + if staging_microblocks.is_empty() { // haven't seen any microblocks that descend from this block yet test_debug!( "No microblocks built on {} up to {}", @@ -1433,7 +1436,7 @@ impl StacksChainState { ret.push(mblock); } - if fork_poison.is_none() && ret.len() == 0 { + if fork_poison.is_none() && ret.is_empty() { // just as if there were no blocks loaded Ok(None) } else { @@ -1456,9 +1459,9 @@ impl StacksChainState { seq, seq, ) - .and_then(|list_opt| match list_opt { - Some(mut list) => Ok(list.pop()), - None => Ok(None), + .map(|list_opt| match list_opt { + Some(mut list) => list.pop(), + None => None, }) } @@ -1540,8 +1543,8 @@ impl StacksChainState { /// Store a preprocessed block, queuing it up for subsequent processing. /// The caller should at least verify that the block is attached to some fork in the burn /// chain. - fn store_staging_block<'a>( - tx: &mut DBTx<'a>, + fn store_staging_block( + tx: &mut DBTx<'_>, blocks_path: &str, consensus_hash: &ConsensusHash, block: &StacksBlock, @@ -1583,8 +1586,8 @@ impl StacksChainState { ) .map_err(Error::DBError)?; let parent_not_in_staging_blocks = - has_parent_rows.len() == 0 && block.header.parent_block != FIRST_STACKS_BLOCK_HASH; - if has_unprocessed_parent_rows.len() > 0 || parent_not_in_staging_blocks { + has_parent_rows.is_empty() && block.header.parent_block != FIRST_STACKS_BLOCK_HASH; + if !has_unprocessed_parent_rows.is_empty() || parent_not_in_staging_blocks { // still have unprocessed parent OR its parent is not in staging_blocks at all -- this block is not attachable debug!( "Store non-attachable anchored block {}/{}", @@ -1662,8 +1665,8 @@ impl StacksChainState { /// order, this method does not check that. /// The consensus_hash and anchored_block_hash correspond to the _parent_ Stacks block. /// Microblocks ought to only be stored if they are first confirmed to have been signed. 
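A recurring rewrite in `list_blocks` and `list_microblocks` above: `drain(..)` on a `mut` binding becomes `into_iter()`. When the whole `Vec` is consumed anyway, taking it by value expresses that directly and lets the binding drop its `mut`. Sketch:

```rust
fn main() {
    // Old shape: `let mut blocks = ...; blocks.drain(..).map(..).collect()`.
    // New shape: no `mut`, consume by value.
    let blocks = vec![("ch1", "bh1"), ("ch2", "bh2")];
    let hashes: Vec<&str> = blocks.into_iter().map(|(_ch, bh)| bh).collect();
    assert_eq!(hashes, vec!["bh1", "bh2"]);
}
```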
- pub fn store_staging_microblock<'a>( - tx: &mut DBTx<'a>, + pub fn store_staging_microblock( + tx: &mut DBTx<'_>, parent_consensus_hash: &ConsensusHash, parent_anchored_block_hash: &BlockHeaderHash, microblock: &StacksMicroblock, @@ -1750,7 +1753,7 @@ impl StacksChainState { ) -> Result, Error> { StacksChainState::read_i64s(blocks_conn, "SELECT processed FROM staging_blocks WHERE anchored_block_hash = ?1 AND consensus_hash = ?2", &[block_hash, consensus_hash]) .and_then(|processed| { - if processed.len() == 0 { + if processed.is_empty() { Ok(None) } else if processed.len() == 1 { @@ -1783,7 +1786,7 @@ impl StacksChainState { ) -> Result { StacksChainState::read_i64s(blocks_conn, "SELECT orphaned FROM staging_blocks WHERE anchored_block_hash = ?1 AND consensus_hash = ?2", &[block_hash, consensus_hash]) .and_then(|orphaned| { - if orphaned.len() == 0 { + if orphaned.is_empty() { Ok(false) } else if orphaned.len() == 1 { @@ -1807,7 +1810,7 @@ impl StacksChainState { ) -> Result, Error> { StacksChainState::read_i64s(&self.db(), "SELECT processed FROM staging_microblocks WHERE anchored_block_hash = ?1 AND microblock_hash = ?2 AND consensus_hash = ?3", &[&parent_block_hash, microblock_hash, &parent_consensus_hash]) .and_then(|processed| { - if processed.len() == 0 { + if processed.is_empty() { Ok(None) } else if processed.len() == 1 { @@ -1881,7 +1884,7 @@ impl StacksChainState { FROM staging_blocks JOIN staging_microblocks ON staging_blocks.parent_anchored_block_hash = staging_microblocks.anchored_block_hash AND staging_blocks.parent_consensus_hash = staging_microblocks.consensus_hash WHERE staging_blocks.index_block_hash = ?1 AND staging_microblocks.microblock_hash = ?2 AND staging_microblocks.orphaned = 0", &[child_index_block_hash, &parent_microblock_hash]) .and_then(|processed| { - if processed.len() == 0 { + if processed.is_empty() { Ok(false) } else if processed.len() == 1 { @@ -2007,8 +2010,8 @@ impl StacksChainState { Ok(BlocksInvData { bitlen: u16::try_from(block_bits.len()) .expect("FATAL: unreachable: more than 2^16 block bits"), - block_bitvec: block_bitvec, - microblocks_bitvec: microblocks_bitvec, + block_bitvec, + microblocks_bitvec, }) } @@ -2130,8 +2133,8 @@ impl StacksChainState { Ok(BlocksInvData { bitlen: u16::try_from(block_bits.len()) .expect("FATAL: block bits has more than 2^16 members"), - block_bitvec: block_bitvec, - microblocks_bitvec: microblocks_bitvec, + block_bitvec, + microblocks_bitvec, }) } @@ -2162,13 +2165,12 @@ impl StacksChainState { /// Used to see if we have the block data for an unaffirmed PoX anchor block /// (hence the test_debug! macros referring to PoX anchor blocks) fn has_stacks_block_for(chainstate_conn: &DBConn, block_commit: LeaderBlockCommitOp) -> bool { - StacksChainState::get_known_consensus_hashes_for_block( + !StacksChainState::get_known_consensus_hashes_for_block( chainstate_conn, &block_commit.block_header_hash, ) .expect("FATAL: failed to query staging blocks DB") - .len() - > 0 + .is_empty() } /// Find the canonical affirmation map. Handle unaffirmed anchor blocks by simply seeing if we @@ -2292,8 +2294,8 @@ impl StacksChainState { /// Mark an anchored block as orphaned and both orphan and delete its descendant microblock data. /// The blocks database will eventually delete all orphaned data. 
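The signature changes here and below all follow one rule: a lifetime parameter that is introduced but never named again (`fn f<'a>(tx: &mut DBTx<'a>)`) can become the anonymous `'_`. A self-contained sketch with a hypothetical `DBTxLike`, not the real `DBTx`:

```rust
struct DBTxLike<'a> {
    log: &'a mut Vec<String>,
}

// Before: fn store_staging<'a>(tx: &mut DBTxLike<'a>, msg: &str)
// After: `'_` says "some borrow lives here" without a named parameter.
fn store_staging(tx: &mut DBTxLike<'_>, msg: &str) {
    tx.log.push(msg.to_string());
}

fn main() {
    let mut log = Vec::new();
    let mut tx = DBTxLike { log: &mut log };
    store_staging(&mut tx, "staging microblock");
    drop(tx);
    assert_eq!(log.len(), 1);
}
```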
- fn delete_orphaned_epoch_data<'a>( - tx: &mut DBTx<'a>, + fn delete_orphaned_epoch_data( + tx: &mut DBTx<'_>, blocks_path: &str, consensus_hash: &ConsensusHash, anchored_block_hash: &BlockHeaderHash, @@ -2354,8 +2356,8 @@ impl StacksChainState { /// fork but processable on another (i.e. the same block can show up in two different PoX /// forks, but will only be valid in at most one of them). /// This does not restore any block data; it merely makes it possible to go re-process them. - pub fn forget_orphaned_epoch_data<'a>( - tx: &mut DBTx<'a>, + pub fn forget_orphaned_epoch_data( + tx: &mut DBTx<'_>, consensus_hash: &ConsensusHash, anchored_block_hash: &BlockHeaderHash, ) -> Result<(), Error> { @@ -2381,9 +2383,9 @@ impl StacksChainState { /// Mark its children as attachable. /// Idempotent. /// sort_tx_opt is required if accept is true - fn set_block_processed<'a, 'b>( - tx: &mut DBTx<'a>, - mut sort_tx_opt: Option<&mut SortitionHandleTx<'b>>, + fn set_block_processed( + tx: &mut DBTx<'_>, + mut sort_tx_opt: Option<&mut SortitionHandleTx<'_>>, blocks_path: &str, consensus_hash: &ConsensusHash, anchored_block_hash: &BlockHeaderHash, @@ -2503,8 +2505,8 @@ impl StacksChainState { } #[cfg(test)] - fn set_block_orphaned<'a>( - tx: &mut DBTx<'a>, + fn set_block_orphaned( + tx: &mut DBTx<'_>, blocks_path: &str, consensus_hash: &ConsensusHash, anchored_block_hash: &BlockHeaderHash, @@ -2568,8 +2570,8 @@ impl StacksChainState { /// Drop a trail of staging microblocks. Mark them as orphaned and delete their data. /// Also, orphan any anchored children blocks that build off of the now-orphaned microblocks. - fn drop_staging_microblocks<'a>( - tx: &mut DBTx<'a>, + fn drop_staging_microblocks( + tx: &mut DBTx<'_>, consensus_hash: &ConsensusHash, anchored_block_hash: &BlockHeaderHash, invalid_block_hash: &BlockHeaderHash, @@ -2635,8 +2637,8 @@ impl StacksChainState { /// Mark a range of a stream of microblocks as confirmed. /// All the corresponding blocks must have been validated and proven contiguous. - fn set_microblocks_processed<'a>( - tx: &mut DBTx<'a>, + fn set_microblocks_processed( + tx: &mut DBTx<'_>, child_consensus_hash: &ConsensusHash, child_anchored_block_hash: &BlockHeaderHash, last_microblock_hash: &BlockHeaderHash, @@ -2714,7 +2716,7 @@ impl StacksChainState { StacksBlockHeader::make_index_block_hash(&parent_consensus_hash, &parent_block_hash); StacksChainState::read_i64s(&self.db(), "SELECT processed FROM staging_microblocks WHERE index_block_hash = ?1 AND sequence = ?2", &[&parent_index_block_hash, &seq]) .and_then(|processed| { - if processed.len() == 0 { + if processed.is_empty() { Ok(false) } else if processed.len() == 1 { @@ -2785,7 +2787,7 @@ impl StacksChainState { min_seq: u16, ) -> Result { StacksChainState::read_i64s(&self.db(), "SELECT processed FROM staging_microblocks WHERE index_block_hash = ?1 AND sequence >= ?2 LIMIT 1", &[&parent_index_block_hash, &min_seq]) - .and_then(|processed| Ok(processed.len() > 0)) + .map(|processed| !processed.is_empty()) } /// Do we have a given microblock as a descendant of a given anchored block? 
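The `read_i64s(..)` call sites in the hunks above and just below fold two lints into one rewrite: `bind_instead_of_map` (`.and_then(|v| Ok(...))` to `.map(...)`) and `len_zero` (`v.len() > 0` to `!v.is_empty()`), turning row probes into direct boolean existence checks. A toy version with a stubbed query:

```rust
// Stub standing in for the real query helper; pretends one row matched.
fn read_i64s(_sql: &str) -> Result<Vec<i64>, String> {
    Ok(vec![1])
}

// Old: read_i64s(..).and_then(|processed| Ok(processed.len() > 0))
// New: map straight to the boolean.
fn has_processed_microblocks() -> Result<bool, String> {
    read_i64s("SELECT processed FROM staging_microblocks LIMIT 1")
        .map(|processed| !processed.is_empty())
}

fn main() {
    assert_eq!(has_processed_microblocks(), Ok(true));
}
```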
@@ -2798,7 +2800,7 @@ impl StacksChainState { microblock_hash: &BlockHeaderHash, ) -> Result { StacksChainState::read_i64s(&self.db(), "SELECT processed FROM staging_microblocks WHERE index_block_hash = ?1 AND microblock_hash = ?2 LIMIT 1", &[parent_index_block_hash, microblock_hash]) - .and_then(|processed| Ok(processed.len() > 0)) + .map(|processed| !processed.is_empty()) } /// Do we have any microblock available to serve in any capacity, given its parent anchored block's @@ -2813,7 +2815,7 @@ impl StacksChainState { "SELECT processed FROM staging_microblocks WHERE index_block_hash = ?1 LIMIT 1", &[&parent_index_block_hash], ) - .and_then(|processed| Ok(processed.len() > 0)) + .map(|processed| !processed.is_empty()) } /// Given an index block hash, get the consensus hash and block hash @@ -2917,7 +2919,7 @@ impl StacksChainState { let extended_header = ExtendedStacksHeader { consensus_hash: header_info.consensus_hash, - header: header, + header, parent_block_id: parent_index_block_hash, }; Ok(extended_header) @@ -3014,7 +3016,7 @@ impl StacksChainState { microblocks.to_owned() }; - if signed_microblocks.len() == 0 { + if signed_microblocks.is_empty() { if anchored_block_header.parent_microblock == EMPTY_MICROBLOCK_PARENT_HASH && anchored_block_header.parent_microblock_sequence == 0 { @@ -3739,8 +3741,8 @@ impl StacksChainState { /// Call this method repeatedly to remove long chains of orphaned blocks and microblocks from /// staging. /// Returns true if an orphan block was processed - fn process_next_orphaned_staging_block<'a>( - blocks_tx: &mut DBTx<'a>, + fn process_next_orphaned_staging_block( + blocks_tx: &mut DBTx<'_>, blocks_path: &str, ) -> Result { test_debug!("Find next orphaned block"); @@ -3750,7 +3752,7 @@ impl StacksChainState { let sql = "SELECT * FROM staging_blocks WHERE processed = 0 AND orphaned = 1 ORDER BY RANDOM() LIMIT 1"; let mut rows = query_rows::(blocks_tx, sql, NO_PARAMS).map_err(Error::DBError)?; - if rows.len() == 0 { + if rows.is_empty() { test_debug!("No orphans to remove"); return Ok(false); } @@ -3836,8 +3838,8 @@ impl StacksChainState { /// can process, as well as its parent microblocks that it confirms /// Returns Some(microblocks, staging block) if we found a sequence of blocks to process. /// Returns None if not. - fn find_next_staging_block<'a>( - blocks_tx: &mut StacksDBTx<'a>, + fn find_next_staging_block( + blocks_tx: &mut StacksDBTx<'_>, blocks_path: &str, sort_tx: &mut SortitionHandleTx, ) -> Result, StagingBlock)>, Error> { @@ -3955,7 +3957,7 @@ impl StacksChainState { &candidate.anchored_block_hash, )? { Some(bytes) => { - if bytes.len() == 0 { + if bytes.is_empty() { error!( "CORRUPTION: No block data for {}/{}", &candidate.consensus_hash, &candidate.anchored_block_hash @@ -4643,8 +4645,8 @@ impl StacksChainState { /// Process matured miner rewards for this block. /// Returns the number of liquid uSTX created -- i.e. the coinbase - pub fn process_matured_miner_rewards<'a, 'b>( - clarity_tx: &mut ClarityTx<'a, 'b>, + pub fn process_matured_miner_rewards( + clarity_tx: &mut ClarityTx<'_, '_>, miner_share: &MinerReward, users_share: &[MinerReward], parent_share: &MinerReward, @@ -4664,8 +4666,8 @@ impl StacksChainState { /// Process all STX that unlock at this block height. 
/// Return the total number of uSTX unlocked in this block - pub fn process_stx_unlocks<'a, 'b>( - clarity_tx: &mut ClarityTx<'a, 'b>, + pub fn process_stx_unlocks( + clarity_tx: &mut ClarityTx<'_, '_>, ) -> Result<(u128, Vec), Error> { let mainnet = clarity_tx.config.mainnet; let lockup_contract_id = boot_code_id("lockup", mainnet); @@ -5483,7 +5485,9 @@ impl StacksChainState { ) }; - let (last_microblock_hash, last_microblock_seq) = if microblocks.len() > 0 { + let (last_microblock_hash, last_microblock_seq) = if microblocks.is_empty() { + (EMPTY_MICROBLOCK_PARENT_HASH.clone(), 0) + } else { let _first_mblock_hash = microblocks[0].block_hash(); let num_mblocks = microblocks.len(); let last_microblock_hash = microblocks[num_mblocks - 1].block_hash(); @@ -5499,8 +5503,6 @@ impl StacksChainState { parent_block_hash ); (last_microblock_hash, last_microblock_seq) - } else { - (EMPTY_MICROBLOCK_PARENT_HASH.clone(), 0) }; if last_microblock_hash != block.header.parent_microblock @@ -5698,7 +5700,7 @@ impl StacksChainState { }; // if any, append lockups events to the coinbase receipt - if lockup_events.len() > 0 { + if !lockup_events.is_empty() { // Receipts are appended in order, so the first receipt should be // the one of the coinbase transaction if let Some(receipt) = tx_receipts.get_mut(0) { @@ -5710,7 +5712,7 @@ impl StacksChainState { } } // if any, append auto unlock events to the coinbase receipt - if auto_unlock_events.len() > 0 { + if !auto_unlock_events.is_empty() { // Receipts are appended in order, so the first receipt should be // the one of the coinbase transaction if let Some(receipt) = tx_receipts.get_mut(0) { @@ -6062,11 +6064,11 @@ impl StacksChainState { /// Return a poison microblock transaction payload if the microblock stream contains a /// deliberate miner fork (this is NOT consensus-critical information, but is instead meant for /// consumption by future miners). - pub fn process_next_staging_block<'a, T: BlockEventDispatcher>( + pub fn process_next_staging_block( &mut self, burnchain_dbconn: &DBConn, sort_tx: &mut SortitionHandleTx, - dispatcher_opt: Option<&'a T>, + dispatcher_opt: Option<&T>, ) -> Result<(Option, Option), Error> { let blocks_path = self.blocks_path.clone(); let (mut chainstate_tx, clarity_instance) = self.chainstate_tx_begin()?; @@ -6464,12 +6466,12 @@ impl StacksChainState { /// found. For each chain tip produced, return the header info, receipts, parent microblock /// stream execution cost, and block execution cost. A value of None will be returned for the /// epoch receipt if the block was invalid. 
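For context on the `lockup_events` and `auto_unlock_events` hunks just above: because receipts are appended in block order, index 0 is the coinbase receipt, and unlock events get folded into it. A minimal sketch with a hypothetical `Receipt` type:

```rust
#[derive(Default)]
struct Receipt {
    events: Vec<String>,
}

fn append_lockup_events(tx_receipts: &mut [Receipt], mut lockup_events: Vec<String>) {
    if !lockup_events.is_empty() {
        // Receipts are appended in order, so the first receipt should be
        // the coinbase transaction's.
        if let Some(receipt) = tx_receipts.get_mut(0) {
            receipt.events.append(&mut lockup_events);
        }
    }
}

fn main() {
    let mut receipts = vec![Receipt::default(), Receipt::default()];
    append_lockup_events(&mut receipts, vec!["stx-unlock".into()]);
    assert_eq!(receipts[0].events, vec!["stx-unlock".to_string()]);
}
```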
- pub fn process_blocks<'a, T: BlockEventDispatcher>( + pub fn process_blocks( &mut self, burnchain_db_conn: &DBConn, mut sort_tx: SortitionHandleTx, max_blocks: usize, - dispatcher_opt: Option<&'a T>, + dispatcher_opt: Option<&T>, ) -> Result, Option)>, Error> { // first, clear out orphans let blocks_path = self.blocks_path.clone(); @@ -7185,7 +7187,7 @@ pub mod test { all_txs[3 * i + 2].clone(), ]; - let txid_vecs = txs.iter().map(|tx| tx.txid().as_bytes().to_vec()).collect(); + let txid_vecs: Vec<_> = txs.iter().map(|tx| tx.txid().as_bytes().to_vec()).collect(); let merkle_tree = MerkleTree::::new(&txid_vecs); let tx_merkle_root = merkle_tree.root(); @@ -7200,15 +7202,12 @@ pub mod test { let header = StacksMicroblockHeader { version: 0x12, sequence: initial_seq + (i as u16), - prev_block: prev_block, - tx_merkle_root: tx_merkle_root, + prev_block, + tx_merkle_root, signature: MessageSignature([0u8; 65]), }; - let mut mblock = StacksMicroblock { - header: header, - txs: txs, - }; + let mut mblock = StacksMicroblock { header, txs }; mblock.sign(privk).unwrap(); microblocks.push(mblock); @@ -7243,7 +7242,7 @@ pub mod test { chainstate: &mut StacksChainState, consensus_hash: &ConsensusHash, block: &StacksBlock, - ) -> () { + ) { assert!(StacksChainState::load_staging_block_data( &chainstate.db(), &chainstate.blocks_path, @@ -7286,7 +7285,7 @@ pub mod test { chainstate: &mut StacksChainState, consensus_hash: &ConsensusHash, block: &StacksBlock, - ) -> () { + ) { assert!(!StacksChainState::has_stored_block( &chainstate.db(), &chainstate.blocks_path, @@ -7310,7 +7309,7 @@ pub mod test { chainstate: &mut StacksChainState, consensus_hash: &ConsensusHash, block: &StacksBlock, - ) -> () { + ) { assert!(StacksChainState::has_stored_block( &chainstate.db(), &chainstate.blocks_path, @@ -7371,7 +7370,7 @@ pub mod test { chainstate: &mut StacksChainState, consensus_hash: &ConsensusHash, block: &StacksBlock, - ) -> () { + ) { assert!(StacksChainState::has_stored_block( &chainstate.db(), &chainstate.blocks_path, @@ -8866,7 +8865,7 @@ pub mod test { conflicting_microblock.txs.push(extra_tx); - let txid_vecs = conflicting_microblock + let txid_vecs: Vec<_> = conflicting_microblock .txs .iter() .map(|tx| tx.txid().as_bytes().to_vec()) @@ -9630,17 +9629,14 @@ pub mod test { ) .unwrap() .is_none()); - assert!( - StacksChainState::load_block_bytes( - &chainstate.blocks_path, - &consensus_hashes[i + 1], - &blocks[i + 1].block_hash() - ) - .unwrap() - .unwrap() - .len() - > 0 - ); + assert!(!StacksChainState::load_block_bytes( + &chainstate.blocks_path, + &consensus_hashes[i + 1], + &blocks[i + 1].block_hash() + ) + .unwrap() + .unwrap() + .is_empty()); for mblock in microblocks[i + 1].iter() { let staging_mblock = StacksChainState::load_staging_microblock( @@ -9653,7 +9649,7 @@ pub mod test { .unwrap(); assert!(!staging_mblock.processed); assert!(!staging_mblock.orphaned); - assert!(staging_mblock.block_data.len() > 0); + assert!(!staging_mblock.block_data.is_empty()); } } @@ -9976,7 +9972,7 @@ pub mod test { .unwrap(); mblocks.push(next_mblock); } - if mblock_ptr.len() == 0 { + if mblock_ptr.is_empty() { break; } } @@ -10266,10 +10262,7 @@ pub mod test { SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) .unwrap(); - assert_eq!( - tip.block_height, - first_stacks_block_height + (tenure_id as u64) - ); + assert_eq!(tip.block_height, first_stacks_block_height + tenure_id); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, diff --git 
a/stackslib/src/chainstate/stacks/db/headers.rs b/stackslib/src/chainstate/stacks/db/headers.rs index 98f41bf9c7..92584e362a 100644 --- a/stackslib/src/chainstate/stacks/db/headers.rs +++ b/stackslib/src/chainstate/stacks/db/headers.rs @@ -35,7 +35,7 @@ use crate::util_lib::db::{ }; impl FromRow for StacksBlockHeader { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { let version: u8 = row.get_unwrap("version"); let total_burn_str: String = row.get_unwrap("total_burn"); let total_work_str: String = row.get_unwrap("total_work"); @@ -80,7 +80,7 @@ impl FromRow for StacksBlockHeader { } impl FromRow for StacksMicroblockHeader { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { let version: u8 = row.get_unwrap("version"); let sequence: u16 = row.get_unwrap("sequence"); let prev_block = BlockHeaderHash::from_column(row, "prev_block")?; diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 42f72d5165..31159137ac 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -413,7 +413,7 @@ impl StacksHeaderInfo { } impl FromRow for DBConfig { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { let version: String = row.get_unwrap("version"); let mainnet_i64: i64 = row.get_unwrap("mainnet"); let chain_id_i64: i64 = row.get_unwrap("chain_id"); @@ -430,7 +430,7 @@ impl FromRow for DBConfig { } impl FromRow for StacksHeaderInfo { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { let block_height: u64 = u64::from_column(row, "block_height")?; let index_root = TrieHash::from_column(row, "index_root")?; let consensus_hash = ConsensusHash::from_column(row, "consensus_hash")?; @@ -485,7 +485,7 @@ pub struct ClarityTx<'a, 'b> { pub config: DBConfig, } -impl<'a, 'b> ClarityConnection for ClarityTx<'a, 'b> { +impl ClarityConnection for ClarityTx<'_, '_> { fn with_clarity_db_readonly_owned(&mut self, to_do: F) -> R where F: FnOnce(ClarityDatabase) -> (R, ClarityDatabase), @@ -546,7 +546,7 @@ impl<'a, 'b> ClarityTx<'a, 'b> { } #[cfg(test)] - pub fn commit_block(self) -> () { + pub fn commit_block(self) { self.block.commit_block(); } @@ -557,11 +557,7 @@ impl<'a, 'b> ClarityTx<'a, 'b> { Ok(self.block.commit_mined_block(block_hash)?.get_total()) } - pub fn commit_to_block( - self, - consensus_hash: &ConsensusHash, - block_hash: &BlockHeaderHash, - ) -> () { + pub fn commit_to_block(self, consensus_hash: &ConsensusHash, block_hash: &BlockHeaderHash) { let index_block_hash = StacksBlockHeader::make_index_block_hash(consensus_hash, block_hash); self.block.commit_to_block(&index_block_hash); } @@ -575,19 +571,19 @@ impl<'a, 'b> ClarityTx<'a, 'b> { self.block.precommit_to_block(index_block_hash) } - pub fn commit_unconfirmed(self) -> () { + pub fn commit_unconfirmed(self) { self.block.commit_unconfirmed(); } - pub fn rollback_block(self) -> () { + pub fn rollback_block(self) { self.block.rollback_block() } - pub fn rollback_unconfirmed(self) -> () { + pub fn rollback_unconfirmed(self) { self.block.rollback_unconfirmed() } - pub fn reset_cost(&mut self, cost: ExecutionCost) -> () { + pub fn reset_cost(&mut self, cost: ExecutionCost) { self.block.reset_block_cost(cost); } @@ -681,9 +677,9 @@ impl<'a> DerefMut for ChainstateTx<'a> { } } -pub const CHAINSTATE_VERSION: &'static str = "8"; +pub const CHAINSTATE_VERSION: &str = "8"; -const CHAINSTATE_INITIAL_SCHEMA: &'static [&'static str] = &[ +const 
CHAINSTATE_INITIAL_SCHEMA: &[&str] = &[ "PRAGMA foreign_keys = ON;", r#" -- Anchored stacks block headers @@ -815,7 +811,7 @@ const CHAINSTATE_INITIAL_SCHEMA: &'static [&'static str] = &[ );"#, ]; -const CHAINSTATE_SCHEMA_2: &'static [&'static str] = &[ +const CHAINSTATE_SCHEMA_2: &[&str] = &[ // new in epoch 2.05 (schema version 2) // table of blocks that applied an epoch transition r#" @@ -827,7 +823,7 @@ const CHAINSTATE_SCHEMA_2: &'static [&'static str] = &[ "#, ]; -const CHAINSTATE_SCHEMA_3: &'static [&'static str] = &[ +const CHAINSTATE_SCHEMA_3: &[&str] = &[ // new in epoch 2.1 (schema version 3) // track mature miner rewards paid out, so we can report them in Clarity. r#" @@ -880,7 +876,7 @@ const CHAINSTATE_SCHEMA_3: &'static [&'static str] = &[ "#, ]; -const CHAINSTATE_INDEXES: &'static [&'static str] = &[ +const CHAINSTATE_INDEXES: &[&str] = &[ "CREATE INDEX IF NOT EXISTS index_block_hash_to_primary_key ON block_headers(index_block_hash,consensus_hash,block_hash);", "CREATE INDEX IF NOT EXISTS block_headers_hash_index ON block_headers(block_hash,block_height);", "CREATE INDEX IF NOT EXISTS block_index_hash_index ON block_headers(index_block_hash,consensus_hash,block_hash);", @@ -959,7 +955,7 @@ pub struct ChainStateBootData { pub first_burnchain_block_timestamp: u32, pub initial_balances: Vec<(PrincipalData, u64)>, pub pox_constants: PoxConstants, - pub post_flight_callback: Option ()>>, + pub post_flight_callback: Option>, pub get_bulk_initial_lockups: Option Box>>>, pub get_bulk_initial_balances: @@ -974,7 +970,7 @@ impl ChainStateBootData { pub fn new( burnchain: &Burnchain, initial_balances: Vec<(PrincipalData, u64)>, - post_flight_callback: Option ()>>, + post_flight_callback: Option>, ) -> ChainStateBootData { ChainStateBootData { first_burnchain_block_hash: burnchain.first_block_hash.clone(), @@ -1073,11 +1069,7 @@ impl StacksChainState { Ok(db_config.version != CHAINSTATE_VERSION) } - fn apply_schema_migrations<'a>( - tx: &DBTx<'a>, - mainnet: bool, - chain_id: u32, - ) -> Result<(), Error> { + fn apply_schema_migrations(tx: &DBTx<'_>, mainnet: bool, chain_id: u32) -> Result<(), Error> { if !Self::need_schema_migrations(tx, mainnet, chain_id)? 
{ return Ok(()); } @@ -1149,7 +1141,7 @@ impl StacksChainState { Ok(()) } - fn add_indexes<'a>(tx: &DBTx<'a>) -> Result<(), Error> { + fn add_indexes(tx: &DBTx<'_>) -> Result<(), Error> { for cmd in CHAINSTATE_INDEXES { tx.execute_batch(cmd)?; } @@ -1335,7 +1327,7 @@ impl StacksChainState { } let mut allocation_events: Vec = vec![]; - if boot_data.initial_balances.len() > 0 { + if !boot_data.initial_balances.is_empty() { warn!( "Seeding {} balances coming from the config", boot_data.initial_balances.len() @@ -1554,7 +1546,7 @@ impl StacksChainState { StacksChainState::parse_genesis_address(&entry.owner, mainnet); let zonefile_hash = { - if entry.zonefile_hash.len() == 0 { + if entry.zonefile_hash.is_empty() { Value::buff_from(vec![]).unwrap() } else { let buffer = Hash160::from_hex(&entry.zonefile_hash) @@ -1869,18 +1861,18 @@ impl StacksChainState { let clarity_state = ClarityInstance::new(mainnet, chain_id, vm_state); let mut chainstate = StacksChainState { - mainnet: mainnet, - chain_id: chain_id, - clarity_state: clarity_state, + mainnet, + chain_id, + clarity_state, nakamoto_staging_blocks_conn, - state_index: state_index, + state_index, blocks_path: blocks_path_root, clarity_state_index_path: clarity_state_index_marf, - clarity_state_index_root: clarity_state_index_root, + clarity_state_index_root, root_path: path_str.to_string(), unconfirmed_state: None, fault_injection: StacksChainStateFaults::new(), - marf_opts: marf_opts, + marf_opts, }; let mut receipts = vec![]; @@ -1911,25 +1903,25 @@ impl StacksChainState { /// Begin a transaction against the (indexed) stacks chainstate DB. /// Does not create a Clarity instance. - pub fn index_tx_begin<'a>(&'a mut self) -> StacksDBTx<'a> { + pub fn index_tx_begin(&mut self) -> StacksDBTx<'_> { StacksDBTx::new(&mut self.state_index, ()) } - pub fn index_conn<'a>(&'a self) -> StacksDBConn<'a> { + pub fn index_conn(&self) -> StacksDBConn<'_> { StacksDBConn::new(&self.state_index, ()) } /// Begin a transaction against the underlying DB /// Does not create a Clarity instance, and does not affect the MARF. - pub fn db_tx_begin<'a>(&'a mut self) -> Result, Error> { + pub fn db_tx_begin(&mut self) -> Result, Error> { self.state_index.storage_tx().map_err(Error::DBError) } /// Simultaneously begin a transaction against both the headers and blocks. /// Used when considering a new block to append the chain state. - pub fn chainstate_tx_begin<'a>( - &'a mut self, - ) -> Result<(ChainstateTx<'a>, &'a mut ClarityInstance), Error> { + pub fn chainstate_tx_begin( + &mut self, + ) -> Result<(ChainstateTx<'_>, &mut ClarityInstance), Error> { let config = self.config(); let blocks_path = self.blocks_path.clone(); let clarity_instance = &mut self.clarity_state; @@ -2596,8 +2588,8 @@ impl StacksChainState { /// Append a Stacks block to an existing Stacks block, and grant the miner the block reward. /// Return the new Stacks header info. 
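The accessor rewrites in this hunk (`index_tx_begin`, `index_conn`, `db_tx_begin`, `chainstate_tx_begin`) all use the same elision: the `&mut self` borrow supplies the lifetime of the returned handle, so `<'a>` plus `-> StacksDBTx<'a>` collapses to `-> StacksDBTx<'_>`. A sketch with stand-in types:

```rust
struct DbTxHandle<'a> {
    conn: &'a mut Vec<String>, // stand-in for the real index storage
}

struct ChainStateLike {
    conn: Vec<String>,
}

impl ChainStateLike {
    // Before: pub fn index_tx_begin<'a>(&'a mut self) -> DbTxHandle<'a>
    pub fn index_tx_begin(&mut self) -> DbTxHandle<'_> {
        DbTxHandle {
            conn: &mut self.conn,
        }
    }
}

fn main() {
    let mut cs = ChainStateLike { conn: vec![] };
    let tx = cs.index_tx_begin();
    tx.conn.push("begin".into());
    assert_eq!(cs.conn.len(), 1);
}
```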
- pub fn advance_tip<'a>( - headers_tx: &mut StacksDBTx<'a>, + pub fn advance_tip( + headers_tx: &mut StacksDBTx<'_>, parent_tip: &StacksBlockHeader, parent_consensus_hash: &ConsensusHash, new_tip: &StacksBlockHeader, @@ -2903,7 +2895,7 @@ pub mod test { // Just update the expected value assert_eq!( genesis_root_hash.to_string(), - "c771616ff6acb710051238c9f4a3c48020a6d70cda637d34b89f2311a7e27886" + "0eb3076f0635ccdfcdc048afb8dea9048c5180a2e2b2952874af1d18f06321e8" ); } diff --git a/stackslib/src/chainstate/stacks/db/transactions.rs b/stackslib/src/chainstate/stacks/db/transactions.rs index 3df99ea886..e56624b84f 100644 --- a/stackslib/src/chainstate/stacks/db/transactions.rs +++ b/stackslib/src/chainstate/stacks/db/transactions.rs @@ -250,7 +250,7 @@ impl StacksTransactionReceipt { transaction: tx.into(), events: vec![], post_condition_aborted: false, - result: result, + result, stx_burned: 0, contract_analysis: None, execution_cost: cost, @@ -449,10 +449,10 @@ impl StacksChainState { txid: tx.txid(), principal: payer_account.principal.clone(), is_origin: false, - quiet: quiet, + quiet, }; if !quiet { - warn!("{}", &e); + warn!("{e}"); } return Err((e, (origin_account, payer_account))); } @@ -470,10 +470,10 @@ impl StacksChainState { txid: tx.txid(), principal: origin_account.principal.clone(), is_origin: true, - quiet: quiet, + quiet, }; if !quiet { - warn!("{}", &e); + warn!("{e}"); } return Err((e, (origin_account, payer_account))); } @@ -979,7 +979,7 @@ impl StacksChainState { TransactionPayload::TokenTransfer(ref addr, ref amount, ref memo) => { // post-conditions are not allowed for this variant, since they're non-sensical. // Their presence in this variant makes the transaction invalid. - if tx.post_conditions.len() > 0 { + if !tx.post_conditions.is_empty() { let msg = format!("Invalid Stacks transaction: TokenTransfer transactions do not support post-conditions"); info!("{}", &msg; "txid" => %tx.txid()); @@ -1391,7 +1391,7 @@ impl StacksChainState { TransactionPayload::PoisonMicroblock(ref mblock_header_1, ref mblock_header_2) => { // post-conditions are not allowed for this variant, since they're non-sensical. // Their presence in this variant makes the transaction invalid. - if tx.post_conditions.len() > 0 { + if !tx.post_conditions.is_empty() { let msg = format!("Invalid Stacks transaction: PoisonMicroblock transactions do not support post-conditions"); info!("{}", &msg); @@ -1423,7 +1423,7 @@ impl StacksChainState { TransactionPayload::TenureChange(ref payload) => { // post-conditions are not allowed for this variant, since they're non-sensical. // Their presence in this variant makes the transaction invalid. 
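The three parallel hunks here and just below enforce the invariant the comments spell out: TokenTransfer, PoisonMicroblock, and TenureChange payloads reject any post-conditions as invalid. A condensed sketch of that rule, with a hypothetical `Payload` enum rather than the real `TransactionPayload`:

```rust
enum Payload {
    TokenTransfer,
    PoisonMicroblock,
    TenureChange,
    ContractCall,
}

fn check_post_conditions(payload: &Payload, post_conditions: &[&str]) -> Result<(), String> {
    match payload {
        // Post-conditions are non-sensical for these variants; their
        // presence makes the transaction invalid.
        Payload::TokenTransfer | Payload::PoisonMicroblock | Payload::TenureChange
            if !post_conditions.is_empty() =>
        {
            Err("this payload variant does not support post-conditions".into())
        }
        _ => Ok(()),
    }
}

fn main() {
    assert!(check_post_conditions(&Payload::TokenTransfer, &["pc"]).is_err());
    assert!(check_post_conditions(&Payload::ContractCall, &["pc"]).is_ok());
}
```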
- if tx.post_conditions.len() > 0 { + if !tx.post_conditions.is_empty() { let msg = format!("Invalid Stacks transaction: TenureChange transactions do not support post-conditions"); info!("{msg}"); @@ -8238,7 +8238,7 @@ pub mod test { // make block let txs = vec![signed_contract_tx]; - let txid_vecs = txs.iter().map(|tx| tx.txid().as_bytes().to_vec()).collect(); + let txid_vecs: Vec<_> = txs.iter().map(|tx| tx.txid().as_bytes().to_vec()).collect(); let merkle_tree = MerkleTree::::new(&txid_vecs); let tx_merkle_root = merkle_tree.root(); @@ -8247,10 +8247,10 @@ pub mod test { version: 0x12, sequence: seq, prev_block: parent_block, - tx_merkle_root: tx_merkle_root, + tx_merkle_root, signature: MessageSignature([0u8; 65]), }, - txs: txs, + txs, }; mblock.sign(block_privk).unwrap(); mblock diff --git a/stackslib/src/chainstate/stacks/db/unconfirmed.rs b/stackslib/src/chainstate/stacks/db/unconfirmed.rs index 6f7a9fe9ea..b39de26c18 100644 --- a/stackslib/src/chainstate/stacks/db/unconfirmed.rs +++ b/stackslib/src/chainstate/stacks/db/unconfirmed.rs @@ -189,7 +189,7 @@ impl UnconfirmedState { unconfirmed_chain_tip: unconfirmed_tip, clarity_inst: clarity_instance, mined_txs: UnconfirmedTxMap::new(), - cost_so_far: cost_so_far, + cost_so_far, bytes_so_far: 0, last_mblock: None, @@ -259,7 +259,7 @@ impl UnconfirmedState { let mut num_new_mblocks = 0; let mut have_state = self.have_state; - if mblocks.len() > 0 { + if !mblocks.is_empty() { let cur_cost = self.cost_so_far.clone(); // NOTE: we *must* commit the clarity_tx now that it's begun. diff --git a/stackslib/src/chainstate/stacks/index/marf.rs b/stackslib/src/chainstate/stacks/index/marf.rs index a4082627fd..b917dffe41 100644 --- a/stackslib/src/chainstate/stacks/index/marf.rs +++ b/stackslib/src/chainstate/stacks/index/marf.rs @@ -239,7 +239,7 @@ pub trait MarfConnection { } } -impl<'a, T: MarfTrieId> MarfConnection for MarfTransaction<'a, T> { +impl MarfConnection for MarfTransaction<'_, T> { fn with_conn(&mut self, exec: F) -> R where F: FnOnce(&mut TrieStorageConnection) -> R, @@ -529,7 +529,7 @@ impl<'a, T: MarfTrieId> MarfTransaction<'a, T> { Some(WriteChainTip { ref block_hash, .. }) => Ok(block_hash.clone()), }?; - if keys.len() == 0 { + if keys.is_empty() { return Ok(()); } @@ -683,7 +683,7 @@ impl MARF { } } - fn node_copy_update_ptrs(ptrs: &mut [TriePtr], child_block_id: u32) -> () { + fn node_copy_update_ptrs(ptrs: &mut [TriePtr], child_block_id: u32) { for pointer in ptrs.iter_mut() { // if the node is empty, do nothing, if it's a back pointer, if pointer.id() == TrieNodeID::Empty as u8 || is_backptr(pointer.id()) { @@ -1145,7 +1145,7 @@ impl MARF { /// Instantiate the MARF from a TrieFileStorage instance pub fn from_storage(storage: TrieFileStorage) -> MARF { MARF { - storage: storage, + storage, open_chain_tip: None, } } @@ -1348,7 +1348,7 @@ impl MARF { ) -> Result<(), Error> { assert_eq!(keys.len(), values.len()); - if keys.len() == 0 { + if keys.is_empty() { return Ok(()); } @@ -1394,7 +1394,7 @@ impl MARF { // instance methods impl MARF { - pub fn begin_tx<'a>(&'a mut self) -> Result, Error> { + pub fn begin_tx(&mut self) -> Result, Error> { let storage = self.storage.transaction()?; Ok(MarfTransaction { storage, @@ -1456,7 +1456,7 @@ impl MARF { Some(WriteChainTip { ref block_hash, .. 
}) => Ok(block_hash.clone()), }?; - if keys.len() == 0 { + if keys.is_empty() { return Ok(()); } @@ -1620,7 +1620,7 @@ impl MARF { } /// Make a raw transaction to the underlying storage - pub fn storage_tx<'a>(&'a mut self) -> Result, db_error> { + pub fn storage_tx(&mut self) -> Result, db_error> { self.storage.sqlite_tx() } diff --git a/stackslib/src/chainstate/stacks/index/node.rs b/stackslib/src/chainstate/stacks/index/node.rs index da9fc8bbd2..b689035675 100644 --- a/stackslib/src/chainstate/stacks/index/node.rs +++ b/stackslib/src/chainstate/stacks/index/node.rs @@ -231,9 +231,9 @@ impl TriePtr { #[inline] pub fn new(id: u8, chr: u8, ptr: u32) -> TriePtr { TriePtr { - id: id, - chr: chr, - ptr: ptr, + id, + chr, + ptr, back_block: 0, } } @@ -308,10 +308,10 @@ impl TriePtr { let back_block = u32::from_be_bytes([bytes[6], bytes[7], bytes[8], bytes[9]]); TriePtr { - id: id, - chr: chr, - ptr: ptr, - back_block: back_block, + id, + chr, + ptr, + back_block, } } } @@ -372,7 +372,7 @@ impl TrieCursor { /// last ptr visited pub fn ptr(&self) -> TriePtr { // should always be true by construction - assert!(self.node_ptrs.len() > 0); + assert!(!self.node_ptrs.is_empty()); self.node_ptrs[self.node_ptrs.len() - 1].clone() } @@ -495,7 +495,7 @@ impl TrieCursor { /// Replace the last-visited node and ptr within this trie. Used when doing a copy-on-write or /// promoting a node, so the cursor state accurately reflects the nodes and tries visited. #[inline] - pub fn repair_retarget(&mut self, node: &TrieNodeType, ptr: &TriePtr, hash: &T) -> () { + pub fn repair_retarget(&mut self, node: &TrieNodeType, ptr: &TriePtr, hash: &T) { // this can only be called if we failed to walk to a node (this method _should not_ be // called if we walked to a backptr). if Some(CursorError::ChrNotFound) != self.last_error @@ -526,7 +526,7 @@ impl TrieCursor { next_node: &TrieNodeType, ptr: &TriePtr, block_hash: T, - ) -> () { + ) { // this can only be called if we walked to a backptr. // If it's anything else, we're in trouble. if Some(CursorError::ChrNotFound) == self.last_error @@ -553,7 +553,7 @@ impl TrieCursor { /// Record that we landed on a non-backptr from a backptr. /// ptr is a non-backptr that refers to the node we landed on. #[inline] - pub fn repair_backptr_finish(&mut self, ptr: &TriePtr, block_hash: T) -> () { + pub fn repair_backptr_finish(&mut self, ptr: &TriePtr, block_hash: T) { // this can only be called if we walked to a backptr. // If it's anything else, we're in trouble. 
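The `txid_vecs: Vec<_>` annotations added in the hunks above pin down the target of `collect()`, whose result type is chosen by inference; when the collected value only flows into a generic consumer such as `MerkleTree::new`, the compiler may have no unique choice. A sketch of the failure mode, where `new_tree` is an illustrative stand-in, not the stackslib API:

    // Generic consumer: accepts anything viewable as a slice of byte vectors.
    fn new_tree<S: AsRef<[Vec<u8>]>>(leaves: S) -> usize {
        leaves.as_ref().len()
    }

    fn main() {
        let txids = [[1u8; 32], [2u8; 32]];
        // Without `: Vec<_>`, any `FromIterator<Vec<u8>>` type that also
        // satisfies the consumer's bound is a candidate; the annotation
        // resolves the ambiguity explicitly.
        let txid_vecs: Vec<_> = txids.iter().map(|t| t.to_vec()).collect();
        assert_eq!(new_tree(&txid_vecs), 2);
    }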
if Some(CursorError::ChrNotFound) == self.last_error @@ -781,7 +781,7 @@ impl TrieNode256 { } TrieNode256 { path: node4.path.clone(), - ptrs: ptrs, + ptrs, } } @@ -794,7 +794,7 @@ impl TrieNode256 { } TrieNode256 { path: node48.path.clone(), - ptrs: ptrs, + ptrs, } } } @@ -1191,7 +1191,7 @@ impl TrieNode for TrieLeaf { } Ok(TrieLeaf { - path: path, + path, data: MARFValue(leaf_data), }) } @@ -1334,7 +1334,7 @@ impl TrieNodeType { with_node!(self, ref data, &data.path) } - pub fn set_path(&mut self, new_path: Vec) -> () { + pub fn set_path(&mut self, new_path: Vec) { with_node!(self, ref mut data, data.path = new_path) } } diff --git a/stackslib/src/chainstate/stacks/index/proofs.rs b/stackslib/src/chainstate/stacks/index/proofs.rs index 85e91ebefb..4d399c9f70 100644 --- a/stackslib/src/chainstate/stacks/index/proofs.rs +++ b/stackslib/src/chainstate/stacks/index/proofs.rs @@ -610,7 +610,7 @@ impl TrieMerkleProof { // need the target node's root trie ptr, unless this is the first proof (in which case // it's a junction proof) - if proof.len() > 0 { + if !proof.is_empty() { let root_ptr = storage.root_trieptr(); let (root_node, _) = storage.read_nodetype(&root_ptr)?; @@ -706,7 +706,7 @@ impl TrieMerkleProof { return None; } - if hashes.len() == 0 { + if hashes.is_empty() { // special case -- if this shunt proof has no hashes (i.e. this is a leaf from the first // block), then we can safely skip this step trace!( @@ -839,7 +839,7 @@ impl TrieMerkleProof { ) -> Result>, Error> { trace!("make_segment_proof: ptrs = {:?}", &ptrs); - assert!(ptrs.len() > 0); + assert!(!ptrs.is_empty()); assert_eq!(ptrs[0], storage.root_trieptr()); for i in 1..ptrs.len() { assert!(!is_backptr(ptrs[i].id())); @@ -1004,7 +1004,7 @@ impl TrieMerkleProof { /// * segment proof 0 must end in a leaf /// * all segment proofs must end in a Node256 (a root) fn is_proof_well_formed(proof: &Vec>, expected_path: &TrieHash) -> bool { - if proof.len() == 0 { + if proof.is_empty() { trace!("Proof is empty"); return false; } diff --git a/stackslib/src/chainstate/stacks/index/storage.rs b/stackslib/src/chainstate/stacks/index/storage.rs index 6e7ca815c9..d8d1b9133a 100644 --- a/stackslib/src/chainstate/stacks/index/storage.rs +++ b/stackslib/src/chainstate/stacks/index/storage.rs @@ -103,7 +103,7 @@ impl BlockMap for TrieFileStorage { } } -impl<'a, T: MarfTrieId> BlockMap for TrieStorageConnection<'a, T> { +impl BlockMap for TrieStorageConnection<'_, T> { type TrieId = T; fn get_block_hash(&self, id: u32) -> Result { @@ -142,7 +142,7 @@ impl<'a, T: MarfTrieId> BlockMap for TrieStorageConnection<'a, T> { } } -impl<'a, T: MarfTrieId> BlockMap for TrieStorageTransaction<'a, T> { +impl BlockMap for TrieStorageTransaction<'_, T> { type TrieId = T; fn get_block_hash(&self, id: u32) -> Result { @@ -422,8 +422,8 @@ impl TrieRAM { /// Inner method to instantiate a TrieRAM from existing Trie data. 
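The `'_` rewrites and dropped `<'a>` parameters above lean on lifetime elision: when a named lifetime only ties `&'a mut self` to the return type, it can be elided, and the anonymous `'_` in the return position still records that the result borrows from `self`. A compact sketch with illustrative types:

    struct Storage {
        buf: Vec<u8>,
    }

    struct Tx<'conn> {
        data: &'conn mut Vec<u8>,
    }

    impl Storage {
        // Before: pub fn begin<'a>(&'a mut self) -> Tx<'a>
        // After: `'_` says "borrows from self" without naming the lifetime.
        pub fn begin(&mut self) -> Tx<'_> {
            Tx { data: &mut self.buf }
        }
    }

    fn main() {
        let mut s = Storage { buf: vec![] };
        let tx = s.begin();
        tx.data.push(1);
        assert_eq!(s.buf, vec![1]);
    }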
fn from_data(block_header: T, data: Vec<(TrieNodeType, TrieHash)>, parent: T) -> TrieRAM { TrieRAM { - data: data, - block_header: block_header, + data, + block_header, readonly: false, read_count: 0, @@ -439,7 +439,7 @@ impl TrieRAM { is_moved: false, - parent: parent, + parent, } } @@ -925,7 +925,7 @@ impl TrieRAM { data.push((root_node, root_hash)); - while frontier.len() > 0 { + while !frontier.is_empty() { let next_ptr = frontier .pop_front() .expect("BUG: no ptr in non-empty frontier"); @@ -1162,7 +1162,7 @@ enum SqliteConnection<'a> { Tx(Transaction<'a>), } -impl<'a> Deref for SqliteConnection<'a> { +impl Deref for SqliteConnection<'_> { type Target = Connection; fn deref(&self) -> &Connection { match self { @@ -1322,7 +1322,7 @@ impl TrieStorageTransientData { } impl TrieFileStorage { - pub fn connection<'a>(&'a mut self) -> TrieStorageConnection<'a, T> { + pub fn connection(&mut self) -> TrieStorageConnection<'_, T> { TrieStorageConnection { db: SqliteConnection::ConnRef(&self.db), db_path: &self.db_path, @@ -1338,7 +1338,7 @@ impl TrieFileStorage { } } - pub fn transaction<'a>(&'a mut self) -> Result, Error> { + pub fn transaction(&mut self) -> Result, Error> { if self.readonly() { return Err(Error::ReadOnlyError); } @@ -1363,7 +1363,7 @@ impl TrieFileStorage { &self.db } - pub fn sqlite_tx<'a>(&'a mut self) -> Result, db_error> { + pub fn sqlite_tx(&mut self) -> Result, db_error> { tx_begin_immediate(&mut self.db) } @@ -1469,8 +1469,8 @@ impl TrieFileStorage { trie_ancestor_hash_bytes_cache: None, - readonly: readonly, - unconfirmed: unconfirmed, + readonly, + unconfirmed, }, // used in testing in order to short-circuit block-height lookups @@ -1536,9 +1536,9 @@ impl TrieFileStorage { // TODO: borrow self.uncommitted_writes; don't copy them let ret = TrieFileStorage { db_path: self.db_path.clone(), - db: db, + db, blobs, - cache: cache, + cache, bench: TrieBenchmark::new(), hash_calculation_mode: self.hash_calculation_mode, @@ -1605,9 +1605,9 @@ impl<'a, T: MarfTrieId> TrieStorageTransaction<'a, T> { // TODO: borrow self.uncommitted_writes; don't copy them let ret = TrieFileStorage { db_path: self.db_path.to_string(), - db: db, - blobs: blobs, - cache: cache, + db, + blobs, + cache, bench: TrieBenchmark::new(), hash_calculation_mode: self.hash_calculation_mode, @@ -1956,7 +1956,7 @@ impl<'a, T: MarfTrieId> TrieStorageTransaction<'a, T> { } } -impl<'a, T: MarfTrieId> TrieStorageConnection<'a, T> { +impl TrieStorageConnection<'_, T> { pub fn readonly(&self) -> bool { self.data.readonly } diff --git a/stackslib/src/chainstate/stacks/index/test/marf.rs b/stackslib/src/chainstate/stacks/index/test/marf.rs index e7535e9553..7f92bb678d 100644 --- a/stackslib/src/chainstate/stacks/index/test/marf.rs +++ b/stackslib/src/chainstate/stacks/index/test/marf.rs @@ -107,16 +107,16 @@ fn marf_insert_different_leaf_different_path_different_block_100() { for i in 0..100 { debug!("insert {}", i); - let block_header = BlockHeaderHash::from_bytes(&[i + 1 as u8; 32]).unwrap(); + let block_header = BlockHeaderHash::from_bytes(&[i + 1; 32]).unwrap(); let path_bytes = [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, - 23, 24, 25, 26, 27, 28, 29, 30, i as u8, + 23, 24, 25, 26, 27, 28, 29, 30, i, ]; marf.commit().unwrap(); marf.begin(&BlockHeaderHash::sentinel(), &block_header) .unwrap(); let path = TrieHash::from_bytes(&path_bytes).unwrap(); - let value = TrieLeaf::new(&vec![], &[i as u8; 40].to_vec()); + let value = TrieLeaf::new(&vec![], &[i; 40].to_vec()); 
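The `[i + 1 as u8; 32]` fixes in the test hunks above are about precedence: `as` binds tighter than `+`, so the expression always parsed as `i + (1 as u8)`, never `(i + 1) as u8`. Since `i` is already a `u8` in these tests, the cast was a no-op and can simply go:

    fn main() {
        let i: u8 = 41;
        let a = i + 1 as u8;   // parses as i + (1u8), NOT (i + 1) as u8
        let b = (i + 1) as u8; // what the cast could be mistaken for
        let c = i + 1;         // the cleaned-up form
        assert_eq!(a, 42);
        assert_eq!(b, 42);
        assert_eq!(c, 42);
    }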
marf.insert_raw(path, value).unwrap(); } @@ -133,26 +133,26 @@ fn marf_insert_different_leaf_different_path_different_block_100() { debug!("---------"); for i in 0..100 { - let block_header = BlockHeaderHash::from_bytes(&[i + 1 as u8; 32]).unwrap(); + let block_header = BlockHeaderHash::from_bytes(&[i + 1; 32]).unwrap(); let path_bytes = [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, - 23, 24, 25, 26, 27, 28, 29, 30, i as u8, + 23, 24, 25, 26, 27, 28, 29, 30, i, ]; let path = TrieHash::from_bytes(&path_bytes).unwrap(); - let value = TrieLeaf::new(&vec![], &[i as u8; 40].to_vec()); + let value = TrieLeaf::new(&vec![], &[i; 40].to_vec()); let leaf = MARF::get_path(&mut marf.borrow_storage_backend(), &block_header, &path) .unwrap() .unwrap(); - assert_eq!(leaf.data.to_vec(), [i as u8; 40].to_vec()); + assert_eq!(leaf.data.to_vec(), [i; 40].to_vec()); assert_eq!(marf.borrow_storage_backend().get_cur_block(), block_header); merkle_test_marf( &mut marf.borrow_storage_backend(), &block_header, &path_bytes.to_vec(), - &[i as u8; 40].to_vec(), + &[i; 40].to_vec(), None, ); } @@ -190,13 +190,13 @@ fn marf_insert_same_leaf_different_block_100() { let path = TrieHash::from_bytes(&path_bytes).unwrap(); for i in 0..100 { - let next_block_header = BlockHeaderHash::from_bytes(&[i + 1 as u8; 32]).unwrap(); - let value = TrieLeaf::new(&vec![], &[i as u8; 40].to_vec()); + let next_block_header = BlockHeaderHash::from_bytes(&[i + 1; 32]).unwrap(); + let value = TrieLeaf::new(&vec![], &[i; 40].to_vec()); marf.commit().unwrap(); marf.begin(&BlockHeaderHash::sentinel(), &next_block_header) .unwrap(); let path = TrieHash::from_bytes(&path_bytes).unwrap(); - let value = TrieLeaf::new(&vec![], &[i as u8; 40].to_vec()); + let value = TrieLeaf::new(&vec![], &[i; 40].to_vec()); marf.insert_raw(path, value).unwrap(); } @@ -213,8 +213,8 @@ fn marf_insert_same_leaf_different_block_100() { debug!("---------"); for i in 0..100 { - let next_block_header = BlockHeaderHash::from_bytes(&[i + 1 as u8; 32]).unwrap(); - let value = TrieLeaf::new(&vec![], &[i as u8; 40].to_vec()); + let next_block_header = BlockHeaderHash::from_bytes(&[i + 1; 32]).unwrap(); + let value = TrieLeaf::new(&vec![], &[i; 40].to_vec()); let leaf = MARF::get_path( &mut marf.borrow_storage_backend(), &next_block_header, @@ -223,7 +223,7 @@ fn marf_insert_same_leaf_different_block_100() { .unwrap() .unwrap(); - assert_eq!(leaf.data.to_vec(), [i as u8; 40].to_vec()); + assert_eq!(leaf.data.to_vec(), [i; 40].to_vec()); assert_eq!( marf.borrow_storage_backend().get_cur_block(), next_block_header @@ -233,7 +233,7 @@ fn marf_insert_same_leaf_different_block_100() { &mut marf.borrow_storage_backend(), &next_block_header, &path_bytes.to_vec(), - &[i as u8; 40].to_vec(), + &[i; 40].to_vec(), None, ); } @@ -266,16 +266,16 @@ fn marf_insert_leaf_sequence_2() { for i in 0..2 { let path_bytes = [ - i as u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, - 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + i, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, + 23, 24, 25, 26, 27, 28, 29, 30, 31, ]; let path = TrieHash::from_bytes(&path_bytes).unwrap(); - let prior_block_header = BlockHeaderHash::from_bytes(&[i as u8; 32]).unwrap(); - let next_block_header = BlockHeaderHash::from_bytes(&[i + 1 as u8; 32]).unwrap(); + let prior_block_header = BlockHeaderHash::from_bytes(&[i; 32]).unwrap(); + let next_block_header = BlockHeaderHash::from_bytes(&[i + 1; 32]).unwrap(); 
marf.commit().unwrap(); marf.begin(&prior_block_header, &next_block_header).unwrap(); - let value = TrieLeaf::new(&vec![], &[i as u8; 40].to_vec()); + let value = TrieLeaf::new(&vec![], &[i; 40].to_vec()); marf.insert_raw(path, value).unwrap(); } @@ -287,14 +287,14 @@ fn marf_insert_leaf_sequence_2() { debug!("---------"); for i in 0..2 { - let next_block_header = BlockHeaderHash::from_bytes(&[i + 1 as u8; 32]).unwrap(); + let next_block_header = BlockHeaderHash::from_bytes(&[i + 1; 32]).unwrap(); let path_bytes = [ - i as u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, - 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + i, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, + 23, 24, 25, 26, 27, 28, 29, 30, 31, ]; let path = TrieHash::from_bytes(&path_bytes).unwrap(); - let value = TrieLeaf::new(&vec![], &[i as u8; 40].to_vec()); + let value = TrieLeaf::new(&vec![], &[i; 40].to_vec()); let leaf = MARF::get_path( &mut marf.borrow_storage_backend(), &last_block_header, @@ -303,7 +303,7 @@ fn marf_insert_leaf_sequence_2() { .unwrap() .unwrap(); - assert_eq!(leaf.data.to_vec(), [i as u8; 40].to_vec()); + assert_eq!(leaf.data.to_vec(), [i; 40].to_vec()); assert_eq!( marf.borrow_storage_backend().get_cur_block(), next_block_header @@ -313,7 +313,7 @@ fn marf_insert_leaf_sequence_2() { &mut marf.borrow_storage_backend(), &last_block_header, &path_bytes.to_vec(), - &[i as u8; 40].to_vec(), + &[i; 40].to_vec(), None, ); } @@ -343,17 +343,17 @@ fn marf_insert_leaf_sequence_100() { for i in 1..101 { let path_bytes = [ - i as u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, - 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + i, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, + 23, 24, 25, 26, 27, 28, 29, 30, 31, ]; let path = TrieHash::from_bytes(&path_bytes).unwrap(); marf.commit().unwrap(); - let next_block_header = BlockHeaderHash::from_bytes(&[i as u8; 32]).unwrap(); + let next_block_header = BlockHeaderHash::from_bytes(&[i; 32]).unwrap(); marf.begin(&last_block_header, &next_block_header).unwrap(); last_block_header = next_block_header; - let value = TrieLeaf::new(&vec![], &[i as u8; 40].to_vec()); + let value = TrieLeaf::new(&vec![], &[i; 40].to_vec()); marf.insert_raw(path, value).unwrap(); } marf.commit().unwrap(); @@ -365,26 +365,26 @@ fn marf_insert_leaf_sequence_100() { let mut f = marf.borrow_storage_backend(); for i in 1..101 { - let next_block_header = BlockHeaderHash::from_bytes(&[i as u8; 32]).unwrap(); + let next_block_header = BlockHeaderHash::from_bytes(&[i; 32]).unwrap(); let path_bytes = [ - i as u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, - 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + i, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, + 23, 24, 25, 26, 27, 28, 29, 30, 31, ]; let path = TrieHash::from_bytes(&path_bytes).unwrap(); - let value = TrieLeaf::new(&vec![], &[i as u8; 40].to_vec()); + let value = TrieLeaf::new(&vec![], &[i; 40].to_vec()); eprintln!("Finding value inserted at {}", &next_block_header); let leaf = MARF::get_path(&mut f, &last_block_header, &path) .unwrap() .unwrap(); - assert_eq!(leaf.data.to_vec(), [i as u8; 40].to_vec()); + assert_eq!(leaf.data.to_vec(), [i; 40].to_vec()); merkle_test_marf( &mut f, &last_block_header, &path_bytes.to_vec(), - &[i as u8; 40].to_vec(), + &[i; 40].to_vec(), None, ); } @@ -846,7 +846,7 @@ fn marf_merkle_verify_backptrs() { marf.begin(&block_header_1, 
&block_header_2).unwrap(); marf.insert_raw( TrieHash::from_bytes(&path_2[..]).unwrap(), - TrieLeaf::new(&vec![], &[20 as u8; 40].to_vec()), + TrieLeaf::new(&vec![], &[20; 40].to_vec()), ) .unwrap(); @@ -864,14 +864,14 @@ fn marf_merkle_verify_backptrs() { marf.begin(&block_header_2, &block_header_3).unwrap(); marf.insert_raw( TrieHash::from_bytes(&path_3[..]).unwrap(), - TrieLeaf::new(&vec![], &[21 as u8; 40].to_vec()), + TrieLeaf::new(&vec![], &[21; 40].to_vec()), ) .unwrap(); debug!("----------------"); debug!( "Merkle verify {:?} from {:?}", - &to_hex(&[21 as u8; 40]), + &to_hex(&[21; 40]), block_header_3 ); debug!("----------------"); @@ -882,7 +882,7 @@ fn marf_merkle_verify_backptrs() { &mut marf.borrow_storage_backend(), &block_header_3, &path_3, - &[21 as u8; 40].to_vec(), + &[21; 40].to_vec(), None, ); if let Some(root_hashes) = last_root_hashes.take() { @@ -1351,7 +1351,7 @@ fn marf_insert_random_10485760_4096_file_storage() { start_time = get_epoch_time_ms(); let values = values - .drain(..) + .into_iter() .map(|x| MARFValue::from_value(&x)) .collect(); @@ -1772,7 +1772,7 @@ fn marf_insert_get_128_fork_256() { } let values = values - .drain(..) + .into_iter() .map(|x| MARFValue::from_value(&x)) .collect(); diff --git a/stackslib/src/chainstate/stacks/index/test/mod.rs b/stackslib/src/chainstate/stacks/index/test/mod.rs index 0ccdffa78b..f563d507a7 100644 --- a/stackslib/src/chainstate/stacks/index/test/mod.rs +++ b/stackslib/src/chainstate/stacks/index/test/mod.rs @@ -62,7 +62,7 @@ where let (root, root_hash) = Trie::read_root(s).unwrap(); frontier.push((root, root_hash, 0)); - while frontier.len() > 0 { + while !frontier.is_empty() { let (next, next_hash, depth) = frontier.pop().unwrap(); let (ptrs, path_len) = match next { TrieNodeType::Leaf(ref leaf_data) => { @@ -104,7 +104,7 @@ pub fn merkle_test( s: &mut TrieStorageConnection, path: &Vec, value: &Vec, -) -> () { +) { let (_, root_hash) = Trie::read_root(s).unwrap(); let triepath = TrieHash::from_bytes(&path[..]).unwrap(); @@ -248,16 +248,16 @@ pub fn make_node_path( // update parent match parent { TrieNodeType::Node256(ref mut data) => { - assert!(data.insert(&TriePtr::new(node_id, chr, node_ptr as u32))) + assert!(data.insert(&TriePtr::new(node_id, chr, node_ptr))) } TrieNodeType::Node48(ref mut data) => { - assert!(data.insert(&TriePtr::new(node_id, chr, node_ptr as u32))) + assert!(data.insert(&TriePtr::new(node_id, chr, node_ptr))) } TrieNodeType::Node16(ref mut data) => { - assert!(data.insert(&TriePtr::new(node_id, chr, node_ptr as u32))) + assert!(data.insert(&TriePtr::new(node_id, chr, node_ptr))) } TrieNodeType::Node4(ref mut data) => { - assert!(data.insert(&TriePtr::new(node_id, chr, node_ptr as u32))) + assert!(data.insert(&TriePtr::new(node_id, chr, node_ptr))) } TrieNodeType::Leaf(_) => panic!("can't insert into leaf"), }; @@ -270,7 +270,7 @@ pub fn make_node_path( .unwrap(); nodes.push(parent.clone()); - node_ptrs.push(TriePtr::new(node_id, chr, node_ptr as u32)); + node_ptrs.push(TriePtr::new(node_id, chr, node_ptr)); hashes.push(TrieHash::from_data(&[(seg_id + 1) as u8; 32])); parent = node; @@ -292,26 +292,18 @@ pub fn make_node_path( // update parent match parent { - TrieNodeType::Node256(ref mut data) => assert!(data.insert(&TriePtr::new( - TrieNodeID::Leaf as u8, - child_chr, - child_ptr as u32 - ))), - TrieNodeType::Node48(ref mut data) => assert!(data.insert(&TriePtr::new( - TrieNodeID::Leaf as u8, - child_chr, - child_ptr as u32 - ))), - TrieNodeType::Node16(ref mut data) => 
assert!(data.insert(&TriePtr::new( - TrieNodeID::Leaf as u8, - child_chr, - child_ptr as u32 - ))), - TrieNodeType::Node4(ref mut data) => assert!(data.insert(&TriePtr::new( - TrieNodeID::Leaf as u8, - child_chr, - child_ptr as u32 - ))), + TrieNodeType::Node256(ref mut data) => { + assert!(data.insert(&TriePtr::new(TrieNodeID::Leaf as u8, child_chr, child_ptr))) + } + TrieNodeType::Node48(ref mut data) => { + assert!(data.insert(&TriePtr::new(TrieNodeID::Leaf as u8, child_chr, child_ptr))) + } + TrieNodeType::Node16(ref mut data) => { + assert!(data.insert(&TriePtr::new(TrieNodeID::Leaf as u8, child_chr, child_ptr))) + } + TrieNodeType::Node4(ref mut data) => { + assert!(data.insert(&TriePtr::new(TrieNodeID::Leaf as u8, child_chr, child_ptr))) + } TrieNodeType::Leaf(_) => panic!("can't insert into leaf"), }; @@ -323,11 +315,7 @@ pub fn make_node_path( .unwrap(); nodes.push(parent.clone()); - node_ptrs.push(TriePtr::new( - TrieNodeID::Leaf as u8, - child_chr, - child_ptr as u32, - )); + node_ptrs.push(TriePtr::new(TrieNodeID::Leaf as u8, child_chr, child_ptr)); hashes.push(TrieHash::from_data(&[(seg_id + 1) as u8; 32])); (nodes, node_ptrs, hashes) diff --git a/stackslib/src/chainstate/stacks/index/test/node.rs b/stackslib/src/chainstate/stacks/index/test/node.rs index 227adda439..45e07014a3 100644 --- a/stackslib/src/chainstate/stacks/index/test/node.rs +++ b/stackslib/src/chainstate/stacks/index/test/node.rs @@ -3938,7 +3938,7 @@ fn read_write_node256() { assert!(wres.is_ok()); let root_ptr = trie_io.root_ptr(); - let rres = trie_io.read_nodetype(&TriePtr::new(TrieNodeID::Node256 as u8, 0, root_ptr as u32)); + let rres = trie_io.read_nodetype(&TriePtr::new(TrieNodeID::Node256 as u8, 0, root_ptr)); assert!(rres.is_ok()); assert_eq!(rres.unwrap(), (node256.as_trie_node_type(), hash)); diff --git a/stackslib/src/chainstate/stacks/index/test/storage.rs b/stackslib/src/chainstate/stacks/index/test/storage.rs index fdd3e30191..ebd97fd5c7 100644 --- a/stackslib/src/chainstate/stacks/index/test/storage.rs +++ b/stackslib/src/chainstate/stacks/index/test/storage.rs @@ -81,8 +81,8 @@ fn trie_cmp( let mut frontier_1 = VecDeque::new(); let mut frontier_2 = VecDeque::new(); - assert!(t1.data().len() > 0); - assert!(t2.data().len() > 0); + assert!(!t1.data().is_empty()); + assert!(!t2.data().is_empty()); let (n1_data, n1_hash) = t1.data()[0].clone(); let (n2_data, n2_hash) = t2.data()[0].clone(); @@ -99,7 +99,7 @@ fn trie_cmp( frontier_1.push_back((n1_data, n1_hash)); frontier_2.push_back((n2_data, n2_hash)); - while frontier_1.len() > 0 && frontier_2.len() > 0 { + while !frontier_1.is_empty() && !frontier_2.is_empty() { if frontier_1.len() != frontier_2.len() { debug!("frontier len mismatch"); return false; diff --git a/stackslib/src/chainstate/stacks/index/test/trie.rs b/stackslib/src/chainstate/stacks/index/test/trie.rs index 9bac45508c..8625527a16 100644 --- a/stackslib/src/chainstate/stacks/index/test/trie.rs +++ b/stackslib/src/chainstate/stacks/index/test/trie.rs @@ -474,7 +474,7 @@ fn trie_cursor_promote_node4_to_node16() { Trie::test_try_attach_leaf( &mut f, &mut c, - &mut TrieLeaf::new(&vec![], &[128 + j as u8; 40].to_vec()), + &mut TrieLeaf::new(&vec![], &[128 + j; 40].to_vec()), &mut node, ) .unwrap() @@ -490,12 +490,12 @@ fn trie_cursor_promote_node4_to_node16() { ) .unwrap() .unwrap(), - TrieLeaf::new(&path[k + 1..].to_vec(), &[128 + j as u8; 40].to_vec()) + TrieLeaf::new(&path[k + 1..].to_vec(), &[128 + j; 40].to_vec()) ); // without a MARF commit, merkle tests will fail in deferred mode 
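The `values.drain(..)` → `values.into_iter()` changes a few hunks above make ownership explicit: `drain(..)` removes items through a mutable borrow and leaves an empty vector behind, while `into_iter()` consumes the vector outright when it is never used again, and the `mut` on the binding disappears too. Sketch:

    fn main() {
        let values = vec!["a".to_string(), "bc".to_string()];
        // Before: `let mut values = ...; values.drain(..).map(...).collect()`
        // After: consume the vector; no mutable binding needed.
        let lengths: Vec<usize> = values.into_iter().map(|v| v.len()).collect();
        assert_eq!(lengths, vec![1, 2]);
    }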
if f.hash_calculation_mode != TrieHashCalculationMode::Deferred { - merkle_test(&mut f, &path.to_vec(), &[(j + 128) as u8; 40].to_vec()); + merkle_test(&mut f, &path.to_vec(), &[j + 128; 40].to_vec()); } } } @@ -635,7 +635,7 @@ fn trie_cursor_promote_node16_to_node48() { Trie::test_try_attach_leaf( &mut f, &mut c, - &mut TrieLeaf::new(&vec![], &[128 + j as u8; 40].to_vec()), + &mut TrieLeaf::new(&vec![], &[128 + j; 40].to_vec()), &mut node, ) .unwrap() @@ -652,12 +652,12 @@ fn trie_cursor_promote_node16_to_node48() { ) .unwrap() .unwrap(), - TrieLeaf::new(&path[k + 1..].to_vec(), &[128 + j as u8; 40].to_vec()) + TrieLeaf::new(&path[k + 1..].to_vec(), &[128 + j; 40].to_vec()) ); // without a MARF commit, merkle tests will fail in deferred mode if f.hash_calculation_mode != TrieHashCalculationMode::Deferred { - merkle_test(&mut f, &path.to_vec(), &[(j + 128) as u8; 40].to_vec()); + merkle_test(&mut f, &path.to_vec(), &[j + 128; 40].to_vec()); } } } @@ -742,7 +742,7 @@ fn trie_cursor_promote_node16_to_node48() { Trie::test_try_attach_leaf( &mut f, &mut c, - &mut TrieLeaf::new(&vec![], &[128 + j as u8; 40].to_vec()), + &mut TrieLeaf::new(&vec![], &[128 + j; 40].to_vec()), &mut node, ) .unwrap() @@ -759,12 +759,12 @@ fn trie_cursor_promote_node16_to_node48() { ) .unwrap() .unwrap(), - TrieLeaf::new(&path[k + 1..].to_vec(), &[128 + j as u8; 40].to_vec()) + TrieLeaf::new(&path[k + 1..].to_vec(), &[128 + j; 40].to_vec()) ); // without a MARF commit, merkle tests will fail in deferred mode if f.hash_calculation_mode != TrieHashCalculationMode::Deferred { - merkle_test(&mut f, &path.to_vec(), &[(j + 128) as u8; 40].to_vec()); + merkle_test(&mut f, &path.to_vec(), &[j + 128; 40].to_vec()); } } } @@ -905,7 +905,7 @@ fn trie_cursor_promote_node48_to_node256() { Trie::test_try_attach_leaf( &mut f, &mut c, - &mut TrieLeaf::new(&vec![], &[128 + j as u8; 40].to_vec()), + &mut TrieLeaf::new(&vec![], &[128 + j; 40].to_vec()), &mut node, ) .unwrap() @@ -922,12 +922,12 @@ fn trie_cursor_promote_node48_to_node256() { ) .unwrap() .unwrap(), - TrieLeaf::new(&path[k + 1..].to_vec(), &[128 + j as u8; 40].to_vec()) + TrieLeaf::new(&path[k + 1..].to_vec(), &[128 + j; 40].to_vec()) ); // without a MARF commit, merkle tests will fail in deferred mode if f.hash_calculation_mode != TrieHashCalculationMode::Deferred { - merkle_test(&mut f, &path.to_vec(), &[(j + 128) as u8; 40].to_vec()); + merkle_test(&mut f, &path.to_vec(), &[j + 128; 40].to_vec()); } } } @@ -1012,7 +1012,7 @@ fn trie_cursor_promote_node48_to_node256() { Trie::test_try_attach_leaf( &mut f, &mut c, - &mut TrieLeaf::new(&vec![], &[128 + j as u8; 40].to_vec()), + &mut TrieLeaf::new(&vec![], &[128 + j; 40].to_vec()), &mut node, ) .unwrap() @@ -1028,12 +1028,12 @@ fn trie_cursor_promote_node48_to_node256() { ) .unwrap() .unwrap(), - TrieLeaf::new(&path[k + 1..].to_vec(), &[128 + j as u8; 40].to_vec()) + TrieLeaf::new(&path[k + 1..].to_vec(), &[128 + j; 40].to_vec()) ); // without a MARF commit, merkle tests will fail in deferred mode if f.hash_calculation_mode != TrieHashCalculationMode::Deferred { - merkle_test(&mut f, &path.to_vec(), &[(j + 128) as u8; 40].to_vec()); + merkle_test(&mut f, &path.to_vec(), &[j + 128; 40].to_vec()); } } } @@ -1118,7 +1118,7 @@ fn trie_cursor_promote_node48_to_node256() { Trie::test_try_attach_leaf( &mut f, &mut c, - &mut TrieLeaf::new(&vec![], &[128 + j as u8; 40].to_vec()), + &mut TrieLeaf::new(&vec![], &[128 + j; 40].to_vec()), &mut node, ) .unwrap() @@ -1135,12 +1135,12 @@ fn trie_cursor_promote_node48_to_node256() { ) 
.unwrap() .unwrap(), - TrieLeaf::new(&path[k + 1..].to_vec(), &[128 + j as u8; 40].to_vec()) + TrieLeaf::new(&path[k + 1..].to_vec(), &[128 + j; 40].to_vec()) ); // without a MARF commit, merkle tests will fail in deferred mode if f.hash_calculation_mode != TrieHashCalculationMode::Deferred { - merkle_test(&mut f, &path.to_vec(), &[(j + 128) as u8; 40].to_vec()); + merkle_test(&mut f, &path.to_vec(), &[j + 128; 40].to_vec()); } } } diff --git a/stackslib/src/chainstate/stacks/index/trie.rs b/stackslib/src/chainstate/stacks/index/trie.rs index 65e41cf3ed..251c363561 100644 --- a/stackslib/src/chainstate/stacks/index/trie.rs +++ b/stackslib/src/chainstate/stacks/index/trie.rs @@ -281,7 +281,7 @@ impl Trie { ))); } - value.path = cur_leaf.path_bytes().clone(); + value.path.clone_from(cur_leaf.path_bytes()); let leaf_hash = get_leaf_hash(value); @@ -341,7 +341,7 @@ impl Trie { ) -> Result { // can only work if we're not at the end of the path, and the current node has a path assert!(!cursor.eop()); - assert!(cur_leaf_data.path.len() > 0); + assert!(!cur_leaf_data.path.is_empty()); // switch from lazy expansion to path compression -- // * the current and new leaves will have unique suffixes @@ -361,11 +361,8 @@ impl Trie { // update current leaf (path changed) and save it let cur_leaf_disk_ptr = cur_leaf_ptr.ptr(); - let cur_leaf_new_ptr = TriePtr::new( - TrieNodeID::Leaf as u8, - cur_leaf_chr, - cur_leaf_disk_ptr as u32, - ); + let cur_leaf_new_ptr = + TriePtr::new(TrieNodeID::Leaf as u8, cur_leaf_chr, cur_leaf_disk_ptr); assert!(cur_leaf_path.len() <= cur_leaf_data.path.len()); let _sav_cur_leaf_data = cur_leaf_data.clone(); @@ -563,7 +560,7 @@ impl Trie { // append this leaf to the Trie let new_node_disk_ptr = storage.last_ptr()?; - let ret = TriePtr::new(new_node.id(), node_ptr.chr(), new_node_disk_ptr as u32); + let ret = TriePtr::new(new_node.id(), node_ptr.chr(), new_node_disk_ptr); storage.write_nodetype(new_node_disk_ptr, &new_node, new_node_hash)?; // update the cursor so its path of nodes and ptrs accurately reflects that we would have @@ -639,7 +636,7 @@ impl Trie { let new_cur_node_ptr = TriePtr::new( cur_node_cur_ptr.id(), new_cur_node_chr, - new_cur_node_disk_ptr as u32, + new_cur_node_disk_ptr, ); node.set_path(new_cur_node_path); @@ -873,13 +870,13 @@ impl Trie { cursor: &TrieCursor, update_skiplist: bool, ) -> Result<(), Error> { - assert!(cursor.node_ptrs.len() > 0); + assert!(!cursor.node_ptrs.is_empty()); let mut ptrs = cursor.node_ptrs.clone(); trace!("update_root_hash: ptrs = {:?}", &ptrs); let mut child_ptr = ptrs.pop().unwrap(); - if ptrs.len() == 0 { + if ptrs.is_empty() { // root node was already updated by trie operations, but it will have the wrong hash. // we need to "fix" the root node so it mixes in its ancestor hashes. 
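The `value.path.clone_from(cur_leaf.path_bytes())` rewrite above is clippy's assigning_clones: `a = b.clone()` always builds a fresh value and drops the old one, while `a.clone_from(&b)` lets the destination reuse its existing buffer when capacity allows. Sketch:

    fn main() {
        let mut path: Vec<u8> = Vec::with_capacity(64);
        path.extend_from_slice(&[0u8; 32]);

        let leaf_path: Vec<u8> = vec![7u8; 20];

        // Reuses `path`'s existing allocation instead of allocating anew.
        path.clone_from(&leaf_path);
        assert_eq!(path, leaf_path);
    }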
trace!("Fix up root node so it mixes in its ancestor hashes"); @@ -910,10 +907,9 @@ impl Trie { if cfg!(test) && is_trace() { let node_hash = my_hash.clone(); let _ = Trie::get_trie_root_ancestor_hashes_bytes(storage, &node_hash) - .and_then(|_hs| { + .map(|_hs| { storage.clear_cached_ancestor_hashes_bytes(); trace!("update_root_hash: Updated {:?} with {:?} from {} to {} + {:?} = {} (fixed root)", &node, &child_ptr, &_cur_hash, &node_hash, &_hs[1..].to_vec(), &h); - Ok(()) }); } @@ -974,10 +970,9 @@ impl Trie { if cfg!(test) && is_trace() { let _ = Trie::get_trie_root_ancestor_hashes_bytes(storage, &content_hash) - .and_then(|_hs| { + .map(|_hs| { storage.clear_cached_ancestor_hashes_bytes(); trace!("update_root_hash: Updated {:?} with {:?} from {:?} to {:?} + {:?} = {:?}", &node, &child_ptr, &_cur_hash, &content_hash, &_hs[1..].to_vec(), &h); - Ok(()) }); } diff --git a/stackslib/src/chainstate/stacks/index/trie_sql.rs b/stackslib/src/chainstate/stacks/index/trie_sql.rs index 8134db9d44..28c3da23aa 100644 --- a/stackslib/src/chainstate/stacks/index/trie_sql.rs +++ b/stackslib/src/chainstate/stacks/index/trie_sql.rs @@ -399,7 +399,7 @@ pub fn write_trie_blob_to_unconfirmed( } /// Open a trie blob. Returns a Blob<'a> readable/writeable handle to it. -pub fn open_trie_blob<'a>(conn: &'a Connection, block_id: u32) -> Result, Error> { +pub fn open_trie_blob(conn: &Connection, block_id: u32) -> Result, Error> { let blob = conn.blob_open( DatabaseName::Main, "marf_data", @@ -411,7 +411,7 @@ pub fn open_trie_blob<'a>(conn: &'a Connection, block_id: u32) -> Result readable handle to it. -pub fn open_trie_blob_readonly<'a>(conn: &'a Connection, block_id: u32) -> Result, Error> { +pub fn open_trie_blob_readonly(conn: &Connection, block_id: u32) -> Result, Error> { let blob = conn.blob_open( DatabaseName::Main, "marf_data", @@ -569,7 +569,7 @@ pub fn set_migrated(conn: &Connection) -> Result<(), Error> { &[&u64_to_sql(SQL_MARF_SCHEMA_VERSION)?], ) .map_err(|e| e.into()) - .and_then(|_| Ok(())) + .map(|_| ()) } pub fn get_node_hash_bytes( diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index 082e9c374c..eae3e1f14d 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -120,7 +120,7 @@ impl MinerStatus { } pub fn is_blocked(&self) -> bool { - if self.blockers.len() > 0 { + if !self.blockers.is_empty() { debug!("Miner: blocked by {:?}", &self.blockers); true } else { @@ -876,11 +876,11 @@ impl<'a> StacksMicroblockBuilder<'a> { ) -> Result { let miner_pubkey_hash = Hash160::from_node_public_key(&StacksPublicKey::from_private(miner_key)); - if txs.len() == 0 { + if txs.is_empty() { return Err(Error::NoTransactionsToMine); } - let txid_vecs = txs.iter().map(|tx| tx.txid().as_bytes().to_vec()).collect(); + let txid_vecs: Vec<_> = txs.iter().map(|tx| tx.txid().as_bytes().to_vec()).collect(); let merkle_tree = MerkleTree::::new(&txid_vecs); let tx_merkle_root = merkle_tree.root(); @@ -903,7 +903,7 @@ impl<'a> StacksMicroblockBuilder<'a> { next_microblock_header.verify(&miner_pubkey_hash).unwrap(); Ok(StacksMicroblock { header: next_microblock_header, - txs: txs, + txs, }) } @@ -1387,7 +1387,7 @@ impl<'a> StacksMicroblockBuilder<'a> { }, ); - if to_drop_and_blacklist.len() > 0 { + if !to_drop_and_blacklist.is_empty() { debug!( "Dropping and blacklisting {} problematic transaction(s)", &to_drop_and_blacklist.len() @@ -1427,8 +1427,16 @@ impl<'a> StacksMicroblockBuilder<'a> { self.runtime.num_mined = num_txs; 
mem_pool.drop_txs(&invalidated_txs)?; - event_dispatcher.mempool_txs_dropped(invalidated_txs, MemPoolDropReason::TOO_EXPENSIVE); - event_dispatcher.mempool_txs_dropped(to_drop_and_blacklist, MemPoolDropReason::PROBLEMATIC); + event_dispatcher.mempool_txs_dropped( + invalidated_txs, + None, + MemPoolDropReason::TOO_EXPENSIVE, + ); + event_dispatcher.mempool_txs_dropped( + to_drop_and_blacklist, + None, + MemPoolDropReason::PROBLEMATIC, + ); if blocked { debug!( @@ -1463,7 +1471,7 @@ impl<'a> StacksMicroblockBuilder<'a> { } } -impl<'a> Drop for StacksMicroblockBuilder<'a> { +impl Drop for StacksMicroblockBuilder<'_> { fn drop(&mut self) { debug!( "Drop StacksMicroblockBuilder"; @@ -1509,11 +1517,11 @@ impl StacksBlockBuilder { total_anchored_fees: 0, total_confirmed_streamed_fees: 0, total_streamed_fees: 0, - bytes_so_far: bytes_so_far, + bytes_so_far, anchored_done: false, parent_consensus_hash: parent_chain_tip.consensus_hash.clone(), parent_header_hash: header.parent_block.clone(), - header: header, + header, parent_microblock_hash: parent_chain_tip .microblock_tail .as_ref() @@ -1524,7 +1532,7 @@ impl StacksBlockBuilder { ), // will be updated miner_privkey: StacksPrivateKey::new(), // caller should overwrite this, or refrain from mining microblocks miner_payouts: None, - miner_id: miner_id, + miner_id, } } @@ -1610,7 +1618,7 @@ impl StacksBlockBuilder { } /// Assign the block parent - pub fn set_parent_block(&mut self, parent_block_hash: &BlockHeaderHash) -> () { + pub fn set_parent_block(&mut self, parent_block_hash: &BlockHeaderHash) { self.header.parent_block = parent_block_hash.clone(); } @@ -1619,7 +1627,7 @@ impl StacksBlockBuilder { &mut self, parent_mblock_hash: &BlockHeaderHash, parent_mblock_seq: u16, - ) -> () { + ) { self.header.parent_microblock = parent_mblock_hash.clone(); self.header.parent_microblock_sequence = parent_mblock_seq; } @@ -1641,7 +1649,7 @@ impl StacksBlockBuilder { } /// Reset measured costs and fees - pub fn reset_costs(&mut self) -> () { + pub fn reset_costs(&mut self) { self.total_anchored_fees = 0; self.total_confirmed_streamed_fees = 0; self.total_streamed_fees = 0; @@ -1704,7 +1712,7 @@ impl StacksBlockBuilder { pub fn finalize_block(&mut self, clarity_tx: &mut ClarityTx) -> StacksBlock { // done! Calculate state root and tx merkle root - let txid_vecs = self + let txid_vecs: Vec<_> = self .txs .iter() .map(|tx| tx.txid().as_bytes().to_vec()) @@ -1770,7 +1778,7 @@ impl StacksBlockBuilder { /// Cut the next microblock. 
pub fn mine_next_microblock<'a>(&mut self) -> Result<StacksMicroblock, Error> { - let txid_vecs = self + let txid_vecs: Vec<_> = self .micro_txs .iter() .map(|tx| tx.txid().as_bytes().to_vec()) .collect(); @@ -1953,7 +1961,7 @@ impl StacksBlockBuilder { parent_microblocks.len() ); - if parent_microblocks.len() == 0 { + if parent_microblocks.is_empty() { self.set_parent_microblock(&EMPTY_MICROBLOCK_PARENT_HASH, 0); } else { let num_mblocks = parent_microblocks.len(); @@ -2073,7 +2081,7 @@ impl StacksBlockBuilder { mut builder: StacksBlockBuilder, chainstate_handle: &StacksChainState, burn_dbconn: &SortitionHandleConn, - mut txs: Vec<StacksTransaction>, + txs: Vec<StacksTransaction>, mut mblock_txs: Vec<StacksTransaction>, ) -> Result<(StacksBlock, u64, ExecutionCost, Option<StacksMicroblock>), Error> { debug!("Build anchored block from {} transactions", txs.len()); @@ -2081,7 +2089,7 @@ impl StacksBlockBuilder { let mut miner_epoch_info = builder.pre_epoch_begin(&mut chainstate, burn_dbconn, true)?; let ast_rules = miner_epoch_info.ast_rules; let (mut epoch_tx, _) = builder.epoch_begin(burn_dbconn, &mut miner_epoch_info)?; - for tx in txs.drain(..) { + for tx in txs.into_iter() { match builder.try_mine_tx(&mut epoch_tx, &tx, ast_rules.clone()) { Ok(_) => { debug!("Included {}", &tx.txid()); @@ -2114,12 +2122,12 @@ impl StacksBlockBuilder { let block = builder.mine_anchored_block(&mut epoch_tx); let size = builder.bytes_so_far; - let mblock_opt = if mblock_txs.len() > 0 { + let mblock_opt = if mblock_txs.is_empty() { + None + } else { builder.micro_txs.append(&mut mblock_txs); let mblock = builder.mine_next_microblock()?; Some(mblock) - } else { - None }; let cost = builder.epoch_finish(epoch_tx)?; @@ -2513,7 +2521,7 @@ impl StacksBlockBuilder { } } - if to_drop_and_blacklist.len() > 0 { + if !to_drop_and_blacklist.is_empty() { let _ = mempool.drop_and_blacklist_txs(&to_drop_and_blacklist); } @@ -2543,8 +2551,12 @@ impl StacksBlockBuilder { mempool.drop_txs(&invalidated_txs)?; if let Some(observer) = event_observer { - observer.mempool_txs_dropped(invalidated_txs, MemPoolDropReason::TOO_EXPENSIVE); - observer.mempool_txs_dropped(to_drop_and_blacklist, MemPoolDropReason::PROBLEMATIC); + observer.mempool_txs_dropped(invalidated_txs, None, MemPoolDropReason::TOO_EXPENSIVE); + observer.mempool_txs_dropped( + to_drop_and_blacklist, + None, + MemPoolDropReason::PROBLEMATIC, + ); } if let Err(e) = result { diff --git a/stackslib/src/chainstate/stacks/mod.rs b/stackslib/src/chainstate/stacks/mod.rs index fd370a8b12..dcb9348a21 100644 --- a/stackslib/src/chainstate/stacks/mod.rs +++ b/stackslib/src/chainstate/stacks/mod.rs @@ -1572,7 +1572,7 @@ pub mod test { tx_coinbase.anchor_mode = TransactionAnchorMode::OnChainOnly; - let mut all_txs = codec_all_transactions( + let all_txs = codec_all_transactions( &TransactionVersion::Testnet, 0x80000000, &TransactionAnchorMode::OnChainOnly, @@ -1589,7 +1589,7 @@ pub mod test { txs_anchored.push(tx_coinbase); } - for tx in all_txs.drain(..) { + for tx in all_txs.into_iter() { match tx.payload { TransactionPayload::Coinbase(..)
=> { continue; @@ -1602,7 +1602,7 @@ pub mod test { } } - let txid_vecs = txs_anchored + let txid_vecs: Vec<_> = txs_anchored .iter() .map(|tx| tx.txid().as_bytes().to_vec()) .collect(); @@ -1626,7 +1626,7 @@ pub mod test { parent_block: BlockHeaderHash([5u8; 32]), parent_microblock: BlockHeaderHash([6u8; 32]), parent_microblock_sequence: 4, - tx_merkle_root: tx_merkle_root, + tx_merkle_root, state_index_root: TrieHash([8u8; 32]), microblock_pubkey_hash: Hash160([9u8; 20]), }; @@ -1718,7 +1718,7 @@ pub mod test { ); let txs_mblock: Vec<_> = all_txs.into_iter().take(num_txs).collect(); - let txid_vecs = txs_mblock + let txid_vecs: Vec<_> = txs_mblock .iter() .map(|tx| tx.txid().as_bytes().to_vec()) .collect(); @@ -1736,7 +1736,7 @@ pub mod test { header.sign(&privk).unwrap(); StacksMicroblock { - header: header, + header, txs: txs_mblock, } } diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index 90fc7f1705..bcf7611695 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -801,7 +801,7 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch() { ) .unwrap(); - if parent_mblock_stream.len() > 0 { + if !parent_mblock_stream.is_empty() { if tenure_id != 5 { assert_eq!( anchored_block.0.header.parent_microblock, @@ -1058,7 +1058,9 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch_invalid() { ) .unwrap(); - if parent_mblock_stream.len() > 0 { + if parent_mblock_stream.is_empty() { + assert_eq!(tenure_id, 0); + } else { // force the block to confirm a microblock stream, even if it would result in // an invalid block. test_debug!( @@ -1074,8 +1076,6 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch_invalid() { parent_mblock_stream.last().unwrap().block_hash() ); test_debug!("New block hash is {}", &anchored_block.0.block_hash()); - } else { - assert_eq!(tenure_id, 0); } (anchored_block.0, parent_mblock_stream) @@ -1256,7 +1256,7 @@ fn test_build_anchored_blocks_incrementing_nonces() { &parent_tip, tip.total_burn, vrf_proof, - Hash160([0 as u8; 20]), + Hash160([0; 20]), &coinbase_tx, BlockBuilderSettings::limited(), None, @@ -1637,7 +1637,7 @@ fn test_build_anchored_blocks_mempool_fee_transaction_too_low() { &parent_tip, tip.total_burn, vrf_proof, - Hash160([0 as u8; 20]), + Hash160([0; 20]), &coinbase_tx, BlockBuilderSettings::max_value(), None, @@ -1728,7 +1728,7 @@ fn test_build_anchored_blocks_zero_fee_transaction() { &parent_tip, vrf_proof, tip.total_burn, - Hash160([0 as u8; 20]), + Hash160([0; 20]), ) .unwrap(); @@ -2794,7 +2794,7 @@ fn test_build_microblock_stream_forks() { forked_parent_microblock_stream[i].txs[0] = forked_mblock_tx; // re-calculate merkle root - let txid_vecs = forked_parent_microblock_stream[i].txs + let txid_vecs: Vec<_> = forked_parent_microblock_stream[i].txs .iter() .map(|tx| tx.txid().as_bytes().to_vec()) .collect(); @@ -3121,7 +3121,7 @@ fn test_build_microblock_stream_forks_with_descendants() { forked_parent_microblock_stream[i].txs[0] = forked_mblock_tx; // re-calculate merkle root - let txid_vecs = forked_parent_microblock_stream[i].txs + let txid_vecs: Vec<_> = forked_parent_microblock_stream[i].txs .iter() .map(|tx| tx.txid().as_bytes().to_vec()) .collect(); @@ -3355,10 +3355,10 @@ fn test_build_microblock_stream_forks_with_descendants() { for burn_op in burn_ops.iter_mut() { if let BlockstackOperationType::LeaderBlockCommit(ref mut 
op) = burn_op { // patch it up - op.parent_block_ptr = (*parent_block_ptrs + op.parent_block_ptr = *parent_block_ptrs .borrow() .get(&stacks_block.header.parent_block) - .unwrap()) as u32; + .unwrap(); } } @@ -4841,7 +4841,7 @@ fn test_fee_order_mismatch_nonce_order() { &parent_tip, tip.total_burn, vrf_proof, - Hash160([0 as u8; 20]), + Hash160([0; 20]), &coinbase_tx, BlockBuilderSettings::max_value(), None, diff --git a/stackslib/src/chainstate/stacks/tests/chain_histories.rs b/stackslib/src/chainstate/stacks/tests/chain_histories.rs index 4e1b774ba7..4859451cb1 100644 --- a/stackslib/src/chainstate/stacks/tests/chain_histories.rs +++ b/stackslib/src/chainstate/stacks/tests/chain_histories.rs @@ -2513,7 +2513,7 @@ fn assert_chainstate_blocks_eq(test_name_1: &str, test_name_2: &str) { } for i in 0..all_microblocks_1.len() { - if all_microblocks_1[i].2.len() == 0 { + if all_microblocks_1[i].2.is_empty() { continue; } @@ -2650,7 +2650,25 @@ fn miner_trace_replay_randomized(miner_trace: &mut TestMinerTrace) { &block_commit_op, ); - if microblocks.len() > 0 { + if microblocks.is_empty() { + // process all the blocks we can + test_debug!( + "Process Stacks block {} and {} microblocks in {}", + &stacks_block.block_hash(), + microblocks.len(), + &node_name + ); + let tip_info_list = node + .chainstate + .process_blocks_at_tip( + connect_burnchain_db(&miner_trace.burn_node.burnchain).conn(), + &mut miner_trace.burn_node.sortdb, + expected_num_blocks, + ) + .unwrap(); + + num_processed += tip_info_list.len(); + } else { for mblock in microblocks.iter() { preprocess_stacks_block_data( &mut node, @@ -2680,24 +2698,6 @@ fn miner_trace_replay_randomized(miner_trace: &mut TestMinerTrace) { num_processed += tip_info_list.len(); } - } else { - // process all the blocks we can - test_debug!( - "Process Stacks block {} and {} microblocks in {}", - &stacks_block.block_hash(), - microblocks.len(), - &node_name - ); - let tip_info_list = node - .chainstate - .process_blocks_at_tip( - connect_burnchain_db(&miner_trace.burn_node.burnchain).conn(), - &mut miner_trace.burn_node.sortdb, - expected_num_blocks, - ) - .unwrap(); - - num_processed += tip_info_list.len(); } } } @@ -2857,7 +2857,7 @@ pub fn mine_invalid_token_transfers_block( ); builder.force_mine_tx(clarity_tx, &tx1).unwrap(); - if miner.spent_at_nonce.get(&1).is_none() { + if !miner.spent_at_nonce.contains_key(&1) { miner.spent_at_nonce.insert(1, 11111); } @@ -2871,7 +2871,7 @@ pub fn mine_invalid_token_transfers_block( ); builder.force_mine_tx(clarity_tx, &tx2).unwrap(); - if miner.spent_at_nonce.get(&2).is_none() { + if !miner.spent_at_nonce.contains_key(&2) { miner.spent_at_nonce.insert(2, 22222); } diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index 9a6a84507e..714800b1a9 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -86,7 +86,7 @@ pub fn copy_dir(src_dir: &str, dest_dir: &str) -> Result<(), io::Error> { let mut dir_queue = VecDeque::new(); dir_queue.push_back("/".to_string()); - while dir_queue.len() > 0 { + while !dir_queue.is_empty() { let next_dir = dir_queue.pop_front().unwrap(); let next_src_dir = path_join(&src_dir, &next_dir); let next_dest_dir = path_join(&dest_dir, &next_dir); @@ -140,7 +140,7 @@ impl TestMinerTracePoint { stacks_block: StacksBlock, microblocks: Vec, block_commit: LeaderBlockCommitOp, - ) -> () { + ) { self.fork_snapshots.insert(miner_id, fork_snapshot); self.stacks_blocks.insert(miner_id, 
stacks_block); self.microblocks.insert(miner_id, microblocks); @@ -203,9 +203,9 @@ impl TestMinerTrace { points: Vec<TestMinerTracePoint>, ) -> TestMinerTrace { TestMinerTrace { - points: points, - burn_node: burn_node, - miners: miners, + points, + burn_node, + miners, } } @@ -214,7 +214,7 @@ impl TestMinerTrace { let mut num_blocks = 0; for p in self.points.iter() { for miner_id in p.stacks_blocks.keys() { - if p.stacks_blocks.get(miner_id).is_some() { + if p.stacks_blocks.contains_key(miner_id) { num_blocks += 1; } } @@ -227,7 +227,7 @@ impl TestMinerTrace { let mut num_sortitions = 0; for p in self.points.iter() { for miner_id in p.fork_snapshots.keys() { - if p.fork_snapshots.get(miner_id).is_some() { + if p.fork_snapshots.contains_key(miner_id) { num_sortitions += 1; } } @@ -288,7 +288,7 @@ impl TestStacksNode { let chainstate = instantiate_chainstate_with_balances(mainnet, chain_id, test_name, initial_balances); TestStacksNode { - chainstate: chainstate, + chainstate, prev_keys: vec![], key_ops: HashMap::new(), anchored_blocks: vec![], @@ -304,7 +304,7 @@ impl TestStacksNode { pub fn open(mainnet: bool, chain_id: u32, test_name: &str) -> TestStacksNode { let chainstate = open_chainstate(mainnet, chain_id, test_name); TestStacksNode { - chainstate: chainstate, + chainstate, prev_keys: vec![], key_ops: HashMap::new(), anchored_blocks: vec![], @@ -319,7 +319,7 @@ impl TestStacksNode { pub fn from_chainstate(chainstate: StacksChainState) -> TestStacksNode { TestStacksNode { - chainstate: chainstate, + chainstate, prev_keys: vec![], key_ops: HashMap::new(), anchored_blocks: vec![], @@ -356,7 +356,7 @@ impl TestStacksNode { new_test_name, ); TestStacksNode { - chainstate: chainstate, + chainstate, prev_keys: self.prev_keys.clone(), key_ops: self.key_ops.clone(), anchored_blocks: self.anchored_blocks.clone(), @@ -392,7 +392,7 @@ impl TestStacksNode { key_register_op } - pub fn add_key_register_op(&mut self, op: &LeaderKeyRegisterOp) -> () { + pub fn add_key_register_op(&mut self, op: &LeaderKeyRegisterOp) { self.prev_keys.push(op.clone()); self.key_ops .insert(op.public_key.clone(), self.prev_keys.len() - 1); @@ -974,7 +974,7 @@ pub fn get_last_microblock_header( let last_microblock_header_opt = match last_microblocks_opt { Some(last_microblocks) => { - if last_microblocks.len() == 0 { + if last_microblocks.is_empty() { None } else { let l = last_microblocks.len() - 1; @@ -1421,7 +1421,7 @@ pub fn instantiate_and_exec( chain_id: u32, test_name: &str, balances: Vec<(StacksAddress, u64)>, - post_flight_callback: Option<Box<dyn FnOnce(&mut ClarityTx) -> ()>>, + post_flight_callback: Option<Box<dyn FnOnce(&mut ClarityTx)>>, ) -> StacksChainState { let path = chainstate_path(test_name); match fs::metadata(&path) { diff --git a/stackslib/src/chainstate/stacks/transaction.rs b/stackslib/src/chainstate/stacks/transaction.rs index c45b212b68..d813dbcf01 100644 --- a/stackslib/src/chainstate/stacks/transaction.rs +++ b/stackslib/src/chainstate/stacks/transaction.rs @@ -34,6 +34,7 @@ use crate::chainstate::stacks::{TransactionPayloadID, *}; use crate::codec::Error as CodecError; use crate::core::*; use crate::net::Error as net_error; +use crate::util_lib::boot::boot_code_addr; impl StacksMessageCodec for TransactionContractCall { fn consensus_serialize<W: Write>(&self, fd: &mut W) -> Result<(), codec_error> { @@ -345,7 +346,7 @@ where H: MerkleHashFunc + Clone + PartialEq + fmt::Debug, { fn from_iter<T: IntoIterator<Item = StacksTransaction>>(iter: T) -> Self { - let txid_vec = iter + let txid_vec: Vec<_> = iter .into_iter() .map(|x| x.txid().as_bytes().to_vec()) .collect(); @@ -722,13 +723,13 @@ impl StacksTransaction { };
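The `stacks_blocks`, `fork_snapshots`, and (later) `spent_at_nonce` hunks swap `map.get(&k).is_some()` / `.is_none()` for `contains_key`: the former builds an `Option<&V>` only to throw it away, while the latter asks the membership question directly:

    use std::collections::HashMap;

    fn main() {
        let mut spent_at_nonce: HashMap<u64, u64> = HashMap::new();
        // Before: if spent_at_nonce.get(&1).is_none() { ... }
        if !spent_at_nonce.contains_key(&1) {
            spent_at_nonce.insert(1, 11111);
        }
        assert_eq!(spent_at_nonce.get(&1), Some(&11111));
    }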
StacksTransaction { - version: version, + version, chain_id: 0, - auth: auth, - anchor_mode: anchor_mode, + auth, + anchor_mode, post_condition_mode: TransactionPostConditionMode::Deny, post_conditions: vec![], - payload: payload, + payload, } } @@ -738,7 +739,7 @@ impl StacksTransaction { } /// Set fee rate - pub fn set_tx_fee(&mut self, tx_fee: u64) -> () { + pub fn set_tx_fee(&mut self, tx_fee: u64) { self.auth.set_tx_fee(tx_fee); } @@ -753,7 +754,7 @@ impl StacksTransaction { } /// set origin nonce - pub fn set_origin_nonce(&mut self, n: u64) -> () { + pub fn set_origin_nonce(&mut self, n: u64) { self.auth.set_origin_nonce(n); } @@ -763,17 +764,17 @@ impl StacksTransaction { } /// Set anchor mode - pub fn set_anchor_mode(&mut self, anchor_mode: TransactionAnchorMode) -> () { + pub fn set_anchor_mode(&mut self, anchor_mode: TransactionAnchorMode) { self.anchor_mode = anchor_mode; } /// Set post-condition mode - pub fn set_post_condition_mode(&mut self, postcond_mode: TransactionPostConditionMode) -> () { + pub fn set_post_condition_mode(&mut self, postcond_mode: TransactionPostConditionMode) { self.post_condition_mode = postcond_mode; } /// Add a post-condition - pub fn add_post_condition(&mut self, post_condition: TransactionPostCondition) -> () { + pub fn add_post_condition(&mut self, post_condition: TransactionPostCondition) { self.post_conditions.push(post_condition); } @@ -1031,6 +1032,16 @@ impl StacksTransaction { _ => false, } } + + /// Is this a phantom transaction? + pub fn is_phantom(&self) -> bool { + let boot_address = boot_code_addr(self.is_mainnet()).into(); + if let TransactionPayload::TokenTransfer(address, amount, _) = &self.payload { + *address == boot_address && *amount == 0 + } else { + false + } + } } impl StacksTransactionSigner { @@ -1064,11 +1075,11 @@ impl StacksTransactionSigner { }) } - pub fn resume(&mut self, tx: &StacksTransaction) -> () { + pub fn resume(&mut self, tx: &StacksTransaction) { self.tx = tx.clone() } - pub fn disable_checks(&mut self) -> () { + pub fn disable_checks(&mut self) { self.check_oversign = false; self.check_overlap = false; } @@ -1553,7 +1564,7 @@ mod test { signed_tx: &StacksTransaction, corrupt_origin: bool, corrupt_sponsor: bool, - ) -> () { + ) { // signature is well-formed otherwise signed_tx.verify().unwrap(); @@ -3384,7 +3395,7 @@ mod test { .consensus_serialize(&mut contract_call_bytes) .unwrap(); - let mut transaction_contract_call = vec![0xff as u8]; + let mut transaction_contract_call = vec![0xff]; transaction_contract_call.append(&mut contract_call_bytes.clone()); assert!( @@ -3489,14 +3500,14 @@ mod test { let asset_name = ClarityName::try_from("hello-asset").unwrap(); let mut asset_name_bytes = vec![ // length - asset_name.len() as u8, + asset_name.len(), ]; asset_name_bytes.extend_from_slice(&asset_name.to_string().as_str().as_bytes()); let contract_name = ContractName::try_from("hello-world").unwrap(); let mut contract_name_bytes = vec![ // length - contract_name.len() as u8, + contract_name.len(), ]; contract_name_bytes.extend_from_slice(&contract_name.to_string().as_str().as_bytes()); @@ -3969,7 +3980,7 @@ mod test { txs } - fn check_oversign_origin_singlesig(signed_tx: &mut StacksTransaction) -> () { + fn check_oversign_origin_singlesig(signed_tx: &mut StacksTransaction) { let txid_before = signed_tx.txid(); match signed_tx.append_next_origin( &StacksPublicKey::from_hex( @@ -3990,7 +4001,7 @@ mod test { assert_eq!(txid_before, signed_tx.txid()); } - fn check_sign_no_sponsor(signed_tx: &mut StacksTransaction) -> 
() { + fn check_sign_no_sponsor(signed_tx: &mut StacksTransaction) { let txid_before = signed_tx.txid(); match signed_tx.append_next_sponsor( &StacksPublicKey::from_hex( @@ -4010,7 +4021,7 @@ mod test { assert_eq!(txid_before, signed_tx.txid()); } - fn check_oversign_sponsor_singlesig(signed_tx: &mut StacksTransaction) -> () { + fn check_oversign_sponsor_singlesig(signed_tx: &mut StacksTransaction) { let txid_before = signed_tx.txid(); match signed_tx.append_next_sponsor( &StacksPublicKey::from_hex( @@ -4038,7 +4049,7 @@ mod test { } } - fn check_oversign_origin_multisig(signed_tx: &StacksTransaction) -> () { + fn check_oversign_origin_multisig(signed_tx: &StacksTransaction) { let tx = signed_tx.clone(); let privk = StacksPrivateKey::from_hex( "c6ebf45dabca8cac9a25ae39ab690743b96eb2b0960066e98ba6df50d6f9293b01", @@ -4068,7 +4079,7 @@ mod test { } } - fn check_oversign_origin_multisig_uncompressed(signed_tx: &StacksTransaction) -> () { + fn check_oversign_origin_multisig_uncompressed(signed_tx: &StacksTransaction) { let tx = signed_tx.clone(); let privk = StacksPrivateKey::from_hex( "c6ebf45dabca8cac9a25ae39ab690743b96eb2b0960066e98ba6df50d6f9293b", @@ -4102,7 +4113,7 @@ mod test { } } - fn check_oversign_sponsor_multisig(signed_tx: &StacksTransaction) -> () { + fn check_oversign_sponsor_multisig(signed_tx: &StacksTransaction) { let tx = signed_tx.clone(); let privk = StacksPrivateKey::from_hex( "c6ebf45dabca8cac9a25ae39ab690743b96eb2b0960066e98ba6df50d6f9293b01", @@ -4132,7 +4143,7 @@ mod test { } } - fn check_oversign_sponsor_multisig_uncompressed(signed_tx: &StacksTransaction) -> () { + fn check_oversign_sponsor_multisig_uncompressed(signed_tx: &StacksTransaction) { let tx = signed_tx.clone(); let privk = StacksPrivateKey::from_hex( "c6ebf45dabca8cac9a25ae39ab690743b96eb2b0960066e98ba6df50d6f9293b", diff --git a/stackslib/src/clarity_cli.rs b/stackslib/src/clarity_cli.rs index f23be191ff..f67ab22eaa 100644 --- a/stackslib/src/clarity_cli.rs +++ b/stackslib/src/clarity_cli.rs @@ -173,7 +173,7 @@ trait ClarityStorage { headers_db: &'a dyn HeadersDB, burn_db: &'a dyn BurnStateDB, ) -> ClarityDatabase<'a>; - fn get_analysis_db<'a>(&'a mut self) -> AnalysisDatabase<'a>; + fn get_analysis_db(&mut self) -> AnalysisDatabase<'_>; } impl ClarityStorage for WritableMarfStore<'_> { @@ -185,7 +185,7 @@ impl ClarityStorage for WritableMarfStore<'_> { self.as_clarity_db(headers_db, burn_db) } - fn get_analysis_db<'a>(&'a mut self) -> AnalysisDatabase<'a> { + fn get_analysis_db(&mut self) -> AnalysisDatabase<'_> { self.as_analysis_db() } } @@ -199,7 +199,7 @@ impl ClarityStorage for MemoryBackingStore { self.as_clarity_db() } - fn get_analysis_db<'a>(&'a mut self) -> AnalysisDatabase<'a> { + fn get_analysis_db(&mut self) -> AnalysisDatabase<'_> { self.as_analysis_db() } } @@ -547,7 +547,7 @@ impl CLIHeadersDB { let conn = create_or_open_db(&cli_db_path); let mut db = CLIHeadersDB { db_path: db_path.to_string(), - conn: conn, + conn, }; if instantiate { @@ -567,7 +567,7 @@ impl CLIHeadersDB { let conn = create_or_open_db(&cli_db_path); let db = CLIHeadersDB { db_path: db_path.to_string(), - conn: conn, + conn, }; Ok(db) @@ -708,7 +708,7 @@ impl HeadersDB for CLIHeadersDB { ) -> Option { let conn = self.conn(); if let Some(height) = get_cli_block_height(&conn, id_bhh) { - Some((height * 600 + 1231006505) as u64) + Some(height * 600 + 1231006505) } else { None } @@ -717,7 +717,7 @@ impl HeadersDB for CLIHeadersDB { fn get_stacks_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { let conn = 
self.conn(); if let Some(height) = get_cli_block_height(&conn, id_bhh) { - Some((height * 10 + 1713799973) as u64) + Some(height * 10 + 1713799973) } else { None } @@ -995,7 +995,7 @@ pub fn add_serialized_output(result: &mut serde_json::Value, value: Value) { /// Returns (process-exit-code, Option<serde_json::Value>) pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option<serde_json::Value>) { - if args.len() < 1 { + if args.is_empty() { print_usage(invoked_by); return (1, None); } @@ -1948,6 +1948,10 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option<serde_json::Value>) { + fn cargo_workspace_as_string<P>(relative_path: P) -> String + where + P: AsRef<Path>, + { + cargo_workspace(relative_path).display().to_string() + } + #[test] fn test_samples() { let db_name = format!("/tmp/db_{}", rand::thread_rng().gen::<u64>()); @@ -2046,7 +2057,7 @@ mod test { "test", &[ "check".to_string(), - "../sample-contracts/tokens.clar".to_string(), + cargo_workspace_as_string("sample/contracts/tokens.clar"), ], ); @@ -2054,14 +2065,14 @@ mod test { let result = invoked.1.unwrap(); assert_eq!(exit, 0); - assert!(result["message"].as_str().unwrap().len() > 0); + assert!(!result["message"].as_str().unwrap().is_empty()); eprintln!("check tokens (idempotency)"); let invoked = invoke_command( "test", &[ "check".to_string(), - "../sample-contracts/tokens.clar".to_string(), + cargo_workspace_as_string("sample/contracts/tokens.clar"), db_name.clone(), ], ); @@ -2070,7 +2081,7 @@ mod test { let result = invoked.1.unwrap(); assert_eq!(exit, 0); - assert!(result["message"].as_str().unwrap().len() > 0); + assert!(!result["message"].as_str().unwrap().is_empty()); eprintln!("launch tokens"); let invoked = invoke_command( @@ -2078,7 +2089,7 @@ mod test { &[ "launch".to_string(), "S1G2081040G2081040G2081040G208105NK8PE5.tokens".to_string(), - "../sample-contracts/tokens.clar".to_string(), + cargo_workspace_as_string("sample/contracts/tokens.clar"), db_name.clone(), ], ); @@ -2087,14 +2098,14 @@ mod test { let result = invoked.1.unwrap(); assert_eq!(exit, 0); - assert!(result["message"].as_str().unwrap().len() > 0); + assert!(!result["message"].as_str().unwrap().is_empty()); eprintln!("check names"); let invoked = invoke_command( "test", &[ "check".to_string(), - "../sample-contracts/names.clar".to_string(), + cargo_workspace_as_string("sample/contracts/names.clar"), db_name.clone(), ], ); @@ -2103,14 +2114,14 @@ mod test { let result = invoked.1.unwrap(); assert_eq!(exit, 0); - assert!(result["message"].as_str().unwrap().len() > 0); + assert!(!result["message"].as_str().unwrap().is_empty()); eprintln!("check names with different contract ID"); let invoked = invoke_command( "test", &[ "check".to_string(), - "../sample-contracts/names.clar".to_string(), + cargo_workspace_as_string("sample/contracts/names.clar"), db_name.clone(), "--contract_id".to_string(), "S1G2081040G2081040G2081040G208105NK8PE5.tokens".to_string(), ], ); @@ -2121,7 +2132,7 @@ mod test { let result = invoked.1.unwrap(); assert_eq!(exit, 0); - assert!(result["message"].as_str().unwrap().len() > 0); + assert!(!result["message"].as_str().unwrap().is_empty()); eprintln!("check names with analysis"); let invoked = invoke_command( @@ -2129,7 +2140,7 @@ mod test { &[ "check".to_string(), "--output_analysis".to_string(), - "../sample-contracts/names.clar".to_string(), + cargo_workspace_as_string("sample/contracts/names.clar"), db_name.clone(), ], ); @@ -2138,7 +2149,7 @@ mod test { let result = invoked.1.unwrap(); assert_eq!(exit, 0); - assert!(result["message"].as_str().unwrap().len() > 0); +
assert!(!result["message"].as_str().unwrap().is_empty()); assert!(result["analysis"] != json!(null)); eprintln!("check names with cost"); @@ -2147,7 +2158,7 @@ mod test { &[ "check".to_string(), "--costs".to_string(), - "../sample-contracts/names.clar".to_string(), + cargo_workspace_as_string("sample/contracts/names.clar"), db_name.clone(), ], ); @@ -2156,7 +2167,7 @@ mod test { let result = invoked.1.unwrap(); assert_eq!(exit, 0); - assert!(result["message"].as_str().unwrap().len() > 0); + assert!(!result["message"].as_str().unwrap().is_empty()); assert!(result["costs"] != json!(null)); assert!(result["assets"] == json!(null)); @@ -2166,7 +2177,7 @@ mod test { &[ "launch".to_string(), "S1G2081040G2081040G2081040G208105NK8PE5.names".to_string(), - "../sample-contracts/names.clar".to_string(), + cargo_workspace_as_string("sample/contracts/names.clar"), "--costs".to_string(), "--assets".to_string(), db_name.clone(), @@ -2177,7 +2188,7 @@ mod test { let result = invoked.1.unwrap(); assert_eq!(exit, 0); - assert!(result["message"].as_str().unwrap().len() > 0); + assert!(!result["message"].as_str().unwrap().is_empty()); assert!(result["costs"] != json!(null)); assert!(result["assets"] != json!(null)); @@ -2198,8 +2209,8 @@ mod test { let result = invoked.1.unwrap(); assert_eq!(exit, 0); - assert!(result["message"].as_str().unwrap().len() > 0); - assert!(result["events"].as_array().unwrap().len() == 0); + assert!(!result["message"].as_str().unwrap().is_empty()); + assert!(result["events"].as_array().unwrap().is_empty()); assert_eq!(result["output"], json!({"UInt": 1000})); eprintln!("eval tokens"); @@ -2208,7 +2219,7 @@ mod test { &[ "eval".to_string(), "S1G2081040G2081040G2081040G208105NK8PE5.tokens".to_string(), - "../sample-contracts/tokens-mint.clar".to_string(), + cargo_workspace_as_string("sample/contracts/tokens-mint.clar"), db_name.clone(), ], ); @@ -2236,7 +2247,7 @@ mod test { "eval".to_string(), "--costs".to_string(), "S1G2081040G2081040G2081040G208105NK8PE5.tokens".to_string(), - "../sample-contracts/tokens-mint.clar".to_string(), + cargo_workspace_as_string("sample/contracts/tokens-mint.clar"), db_name.clone(), ], ); @@ -2264,7 +2275,7 @@ mod test { &[ "eval_at_chaintip".to_string(), "S1G2081040G2081040G2081040G208105NK8PE5.tokens".to_string(), - "../sample-contracts/tokens-mint.clar".to_string(), + cargo_workspace_as_string("sample/contracts/tokens-mint.clar"), db_name.clone(), ], ); @@ -2291,7 +2302,7 @@ mod test { &[ "eval_at_chaintip".to_string(), "S1G2081040G2081040G2081040G208105NK8PE5.tokens".to_string(), - "../sample-contracts/tokens-mint.clar".to_string(), + cargo_workspace_as_string("sample/contracts/tokens-mint.clar"), db_name.clone(), "--costs".to_string(), ], @@ -2327,7 +2338,7 @@ mod test { "test", &[ "check".to_string(), - "../sample-contracts/tokens-ft.clar".to_string(), + cargo_workspace_as_string("sample/contracts/tokens-ft.clar"), ], ); @@ -2335,7 +2346,7 @@ mod test { let result = invoked.1.unwrap(); assert_eq!(exit, 0); - assert!(result["message"].as_str().unwrap().len() > 0); + assert!(!result["message"].as_str().unwrap().is_empty()); eprintln!("launch tokens"); let invoked = invoke_command( @@ -2343,7 +2354,7 @@ mod test { &[ "launch".to_string(), "S1G2081040G2081040G2081040G208105NK8PE5.tokens-ft".to_string(), - "../sample-contracts/tokens-ft.clar".to_string(), + cargo_workspace_as_string("sample/contracts/tokens-ft.clar"), db_name.clone(), "--assets".to_string(), ], @@ -2355,7 +2366,7 @@ mod test { eprintln!("{}", serde_json::to_string(&result).unwrap()); 
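(Aside on the path changes running through these test hunks: every `../sample-contracts/...` literal becomes `cargo_workspace_as_string("sample/contracts/...")`, so the tests resolve contracts from the workspace root instead of assuming a particular working directory. The `cargo_workspace` helper is referenced but not shown here; the sketch below is only an illustration of the idea, assuming the workspace root is the parent of the crate's `CARGO_MANIFEST_DIR`, which is not necessarily how the repo implements it.)

```rust
use std::path::{Path, PathBuf};

/// Hypothetical stand-in for the `cargo_workspace` helper the tests call:
/// resolve `relative_path` against the workspace root so the sample
/// contracts are found regardless of the current working directory.
fn cargo_workspace<P: AsRef<Path>>(relative_path: P) -> PathBuf {
    // Assumption for this sketch: the crate sits one level below the
    // workspace root (e.g. `<workspace>/stackslib`), so the parent of
    // CARGO_MANIFEST_DIR is the workspace root.
    let manifest_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
    let root = manifest_dir.parent().unwrap_or(&manifest_dir).to_path_buf();
    root.join(relative_path)
}

fn cargo_workspace_as_string<P: AsRef<Path>>(relative_path: P) -> String {
    cargo_workspace(relative_path).display().to_string()
}

fn main() {
    // e.g. `<workspace>/sample/contracts/tokens.clar`
    println!("{}", cargo_workspace_as_string("sample/contracts/tokens.clar"));
}
```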
assert_eq!(exit, 0); - assert!(result["message"].as_str().unwrap().len() > 0); + assert!(!result["message"].as_str().unwrap().is_empty()); assert!( result["assets"]["tokens"]["S1G2081040G2081040G2081040G208105NK8PE5"] ["S1G2081040G2081040G2081040G208105NK8PE5.tokens-ft::tokens"] diff --git a/stackslib/src/clarity_vm/clarity.rs b/stackslib/src/clarity_vm/clarity.rs index ee8795cadd..c2553d57d5 100644 --- a/stackslib/src/clarity_vm/clarity.rs +++ b/stackslib/src/clarity_vm/clarity.rs @@ -175,9 +175,9 @@ macro_rules! using { }}; } -impl<'a, 'b> ClarityBlockConnection<'a, 'b> { +impl ClarityBlockConnection<'_, '_> { #[cfg(test)] - pub fn new_test_conn( + pub fn new_test_conn<'a, 'b>( datastore: WritableMarfStore<'a>, header_db: &'b dyn HeadersDB, burn_state_db: &'b dyn BurnStateDB, @@ -190,13 +190,13 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { cost_track: Some(LimitedCostTracker::new_free()), mainnet: false, chain_id: CHAIN_ID_TESTNET, - epoch: epoch, + epoch, } } /// Reset the block's total execution to the given cost, if there is a cost tracker at all. /// Used by the miner to "undo" applying a transaction that exceeded the budget. - pub fn reset_block_cost(&mut self, cost: ExecutionCost) -> () { + pub fn reset_block_cost(&mut self, cost: ExecutionCost) { if let Some(ref mut cost_tracker) = self.cost_track { cost_tracker.set_total(cost); } @@ -647,7 +647,7 @@ impl ClarityInstance { } } -impl<'a, 'b> ClarityConnection for ClarityBlockConnection<'a, 'b> { +impl ClarityConnection for ClarityBlockConnection<'_, '_> { /// Do something with ownership of the underlying DB that involves only reading. fn with_clarity_db_readonly_owned(&mut self, to_do: F) -> R where @@ -711,7 +711,7 @@ impl ClarityConnection for ClarityReadOnlyConnection<'_> { } } -impl<'a> PreCommitClarityBlock<'a> { +impl PreCommitClarityBlock<'_> { pub fn commit(self) { debug!("Committing Clarity block connection"; "index_block" => %self.commit_to); self.datastore @@ -720,7 +720,7 @@ impl<'a> PreCommitClarityBlock<'a> { } } -impl<'a, 'b> ClarityBlockConnection<'a, 'b> { +impl<'a> ClarityBlockConnection<'a, '_> { /// Rolls back all changes in the current block by /// (1) dropping all writes from the current MARF tip, /// (2) rolling back side-storage @@ -837,9 +837,9 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { // instantiate costs 2 contract... let cost_2_code = if mainnet { - &*BOOT_CODE_COSTS_2 + BOOT_CODE_COSTS_2 } else { - &*BOOT_CODE_COSTS_2_TESTNET + BOOT_CODE_COSTS_2_TESTNET }; let payload = TransactionPayload::SmartContract( @@ -1028,7 +1028,7 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { } /////////////////// .costs-3 //////////////////////// - let cost_3_code = &*BOOT_CODE_COSTS_3; + let cost_3_code = BOOT_CODE_COSTS_3; let payload = TransactionPayload::SmartContract( TransactionSmartContract { @@ -1638,7 +1638,7 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { } } -impl<'a, 'b> ClarityConnection for ClarityTransactionConnection<'a, 'b> { +impl ClarityConnection for ClarityTransactionConnection<'_, '_> { /// Do something with ownership of the underlying DB that involves only reading. 
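(The impl-header rewrites above and in the `marf.rs` and `database/mod.rs` hunks below are one mechanical cleanup: a lifetime parameter that is declared but never referred to by name can be elided to the anonymous lifetime `'_`. A minimal illustration with a made-up type, not from the codebase:)

```rust
// Illustrative type only; not from the codebase.
struct Wrapper<'a> {
    inner: &'a str,
}

// Before: a named lifetime the impl never refers to again.
//   impl<'a> Wrapper<'a> {
//       fn get(&self) -> &str { self.inner }
//   }

// After: the anonymous lifetime `'_`, identical meaning, less noise.
impl Wrapper<'_> {
    fn get(&self) -> &str {
        self.inner
    }
}

fn main() {
    let w = Wrapper { inner: "hello" };
    println!("{}", w.get());
}
```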
fn with_clarity_db_readonly_owned(&mut self, to_do: F) -> R where @@ -1677,7 +1677,7 @@ impl<'a, 'b> ClarityConnection for ClarityTransactionConnection<'a, 'b> { } } -impl<'a, 'b> Drop for ClarityTransactionConnection<'a, 'b> { +impl Drop for ClarityTransactionConnection<'_, '_> { fn drop(&mut self) { if thread::panicking() { // if the thread is panicking, we've likely lost our cost_tracker handle, @@ -1697,7 +1697,7 @@ impl<'a, 'b> Drop for ClarityTransactionConnection<'a, 'b> { } } -impl<'a, 'b> TransactionConnection for ClarityTransactionConnection<'a, 'b> { +impl TransactionConnection for ClarityTransactionConnection<'_, '_> { fn with_abort_callback( &mut self, to_do: F, @@ -1771,7 +1771,7 @@ impl<'a, 'b> TransactionConnection for ClarityTransactionConnection<'a, 'b> { } } -impl<'a, 'b> ClarityTransactionConnection<'a, 'b> { +impl ClarityTransactionConnection<'_, '_> { /// Do something to the underlying DB that involves writing. pub fn with_clarity_db(&mut self, to_do: F) -> Result where @@ -1833,7 +1833,7 @@ impl<'a, 'b> ClarityTransactionConnection<'a, 'b> { }, |_, _| false, ) - .and_then(|(value, ..)| Ok(value)) + .map(|(value, ..)| value) } pub fn is_mainnet(&self) -> bool { @@ -1947,7 +1947,7 @@ mod tests { clarity_instance .begin_test_genesis_block( &StacksBlockId::sentinel(), - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ) @@ -1955,8 +1955,8 @@ mod tests { { let mut conn = clarity_instance.begin_block( - &StacksBlockId([0 as u8; 32]), - &StacksBlockId([1 as u8; 32]), + &StacksBlockId([0; 32]), + &StacksBlockId([1; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ); @@ -2000,7 +2000,7 @@ mod tests { clarity_instance .begin_test_genesis_block( &StacksBlockId::sentinel(), - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ) @@ -2008,8 +2008,8 @@ mod tests { { let mut conn = clarity_instance.begin_block( - &StacksBlockId([0 as u8; 32]), - &StacksBlockId([1 as u8; 32]), + &StacksBlockId([0; 32]), + &StacksBlockId([1; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ); @@ -2061,7 +2061,7 @@ mod tests { clarity_instance .begin_test_genesis_block( &StacksBlockId::sentinel(), - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ) @@ -2069,8 +2069,8 @@ mod tests { { let mut conn = clarity_instance.begin_block( - &StacksBlockId([0 as u8; 32]), - &StacksBlockId([1 as u8; 32]), + &StacksBlockId([0; 32]), + &StacksBlockId([1; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ); @@ -2173,7 +2173,7 @@ mod tests { clarity_instance .begin_test_genesis_block( &StacksBlockId::sentinel(), - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ) @@ -2181,8 +2181,8 @@ mod tests { { let mut conn = clarity_instance.begin_block( - &StacksBlockId([0 as u8; 32]), - &StacksBlockId([1 as u8; 32]), + &StacksBlockId([0; 32]), + &StacksBlockId([1; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ); @@ -2229,7 +2229,7 @@ mod tests { } let mut marf = clarity_instance.destroy(); - let mut conn = marf.begin_read_only(Some(&StacksBlockId([1 as u8; 32]))); + let mut conn = marf.begin_read_only(Some(&StacksBlockId([1; 32]))); assert!(conn.get_contract_hash(&contract_identifier).is_ok()); } @@ -2242,7 +2242,7 @@ mod tests { { let mut conn = clarity_instance.begin_test_genesis_block( &StacksBlockId::sentinel(), - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ); @@ -2276,7 +2276,7 @@ mod tests { let mut marf 
= clarity_instance.destroy(); - let mut conn = marf.begin(&StacksBlockId::sentinel(), &StacksBlockId([0 as u8; 32])); + let mut conn = marf.begin(&StacksBlockId::sentinel(), &StacksBlockId([0; 32])); // should not be in the marf. assert_eq!( conn.get_contract_hash(&contract_identifier).unwrap_err(), @@ -2314,7 +2314,7 @@ mod tests { confirmed_clarity_instance .begin_test_genesis_block( &StacksBlockId::sentinel(), - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ) @@ -2336,7 +2336,7 @@ mod tests { // make an unconfirmed block off of the confirmed block { let mut conn = clarity_instance.begin_unconfirmed( - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ); @@ -2369,7 +2369,7 @@ mod tests { // contract is still there, in unconfirmed status { let mut conn = clarity_instance.begin_unconfirmed( - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ); @@ -2388,7 +2388,7 @@ mod tests { // rolled back (but that should only drop the current TrieRAM) { let mut conn = clarity_instance.begin_unconfirmed( - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ); @@ -2406,7 +2406,7 @@ mod tests { // contract is now absent, now that we did a rollback of unconfirmed state { let mut conn = clarity_instance.begin_unconfirmed( - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ); @@ -2421,7 +2421,7 @@ mod tests { } let mut marf = clarity_instance.destroy(); - let mut conn = marf.begin_unconfirmed(&StacksBlockId([0 as u8; 32])); + let mut conn = marf.begin_unconfirmed(&StacksBlockId([0; 32])); // should not be in the marf. assert_eq!( @@ -2452,7 +2452,7 @@ mod tests { clarity_instance .begin_test_genesis_block( &StacksBlockId::sentinel(), - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ) @@ -2460,8 +2460,8 @@ mod tests { { let mut conn = clarity_instance.begin_block( - &StacksBlockId([0 as u8; 32]), - &StacksBlockId([1 as u8; 32]), + &StacksBlockId([0; 32]), + &StacksBlockId([1; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ); @@ -2672,7 +2672,7 @@ mod tests { clarity_instance .begin_test_genesis_block( &StacksBlockId::sentinel(), - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ) @@ -2680,8 +2680,8 @@ mod tests { { let mut conn = clarity_instance.begin_block( - &StacksBlockId([0 as u8; 32]), - &StacksBlockId([1 as u8; 32]), + &StacksBlockId([0; 32]), + &StacksBlockId([1; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ); @@ -2833,7 +2833,7 @@ mod tests { clarity_instance .begin_test_genesis_block( &StacksBlockId::sentinel(), - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ) @@ -2841,8 +2841,8 @@ mod tests { { let mut conn = clarity_instance.begin_block( - &StacksBlockId([0 as u8; 32]), - &StacksBlockId([1 as u8; 32]), + &StacksBlockId([0; 32]), + &StacksBlockId([1; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ); @@ -2883,8 +2883,8 @@ mod tests { { let mut conn = clarity_instance.begin_block( - &StacksBlockId([1 as u8; 32]), - &StacksBlockId([2 as u8; 32]), + &StacksBlockId([1; 32]), + &StacksBlockId([2; 32]), &TEST_HEADER_DB, &burn_state_db, ); diff --git a/stackslib/src/clarity_vm/database/marf.rs b/stackslib/src/clarity_vm/database/marf.rs index 3a8636b3b5..56a1fde107 100644 --- a/stackslib/src/clarity_vm/database/marf.rs +++ 
b/stackslib/src/clarity_vm/database/marf.rs @@ -262,7 +262,7 @@ impl MarfedKV { self.marf.sqlite_conn() } - pub fn index_conn<'a, C>(&'a self, context: C) -> IndexDBConn<'a, C, StacksBlockId> { + pub fn index_conn(&self, context: C) -> IndexDBConn<'_, C, StacksBlockId> { IndexDBConn { index: &self.marf, context, @@ -280,7 +280,7 @@ pub struct ReadOnlyMarfStore<'a> { marf: &'a mut MARF, } -impl<'a> ReadOnlyMarfStore<'a> { +impl ReadOnlyMarfStore<'_> { pub fn as_clarity_db<'b>( &'b mut self, headers_db: &'b dyn HeadersDB, @@ -289,7 +289,7 @@ impl<'a> ReadOnlyMarfStore<'a> { ClarityDatabase::new(self, headers_db, burn_state_db) } - pub fn as_analysis_db<'b>(&'b mut self) -> AnalysisDatabase<'b> { + pub fn as_analysis_db(&mut self) -> AnalysisDatabase<'_> { AnalysisDatabase::new(self) } @@ -301,7 +301,7 @@ impl<'a> ReadOnlyMarfStore<'a> { } } -impl<'a> ClarityBackingStore for ReadOnlyMarfStore<'a> { +impl ClarityBackingStore for ReadOnlyMarfStore<'_> { fn get_side_store(&mut self) -> &Connection { self.marf.sqlite_conn() } @@ -546,7 +546,7 @@ impl<'a> ClarityBackingStore for ReadOnlyMarfStore<'a> { } } -impl<'a> WritableMarfStore<'a> { +impl WritableMarfStore<'_> { pub fn as_clarity_db<'b>( &'b mut self, headers_db: &'b dyn HeadersDB, @@ -555,7 +555,7 @@ impl<'a> WritableMarfStore<'a> { ClarityDatabase::new(self, headers_db, burn_state_db) } - pub fn as_analysis_db<'b>(&'b mut self) -> AnalysisDatabase<'b> { + pub fn as_analysis_db(&mut self) -> AnalysisDatabase<'_> { AnalysisDatabase::new(self) } @@ -625,7 +625,7 @@ impl<'a> WritableMarfStore<'a> { } } -impl<'a> ClarityBackingStore for WritableMarfStore<'a> { +impl ClarityBackingStore for WritableMarfStore<'_> { fn set_block_hash(&mut self, bhh: StacksBlockId) -> InterpreterResult { self.marf .check_ancestor_block_hash(&bhh) diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index 0bce54dcfb..f92fbceb76 100644 --- a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -70,8 +70,7 @@ impl GetTenureStartId for StacksDBConn<'_> { tip, &nakamoto_keys::tenure_start_block_id(tenure_id_consensus_hash), )? - .map(|id_str| nakamoto_keys::parse_block_id(&id_str)) - .flatten() + .and_then(|id_str| nakamoto_keys::parse_block_id(&id_str)) .map(|block_id| TenureBlockId::from(block_id))) } @@ -85,8 +84,7 @@ impl GetTenureStartId for StacksDBConn<'_> { tip, &nakamoto_keys::ongoing_tenure_coinbase_height(coinbase_height), )? - .map(|hex_inp| nakamoto_keys::parse_block_id(&hex_inp)) - .flatten(); + .and_then(|hex_inp| nakamoto_keys::parse_block_id(&hex_inp)); Ok(opt_out) } @@ -106,8 +104,7 @@ impl GetTenureStartId for StacksDBTx<'_> { tip, &nakamoto_keys::tenure_start_block_id(tenure_id_consensus_hash), )? - .map(|id_str| nakamoto_keys::parse_block_id(&id_str)) - .flatten() + .and_then(|id_str| nakamoto_keys::parse_block_id(&id_str)) .map(|block_id| TenureBlockId::from(block_id))) } @@ -121,8 +118,7 @@ impl GetTenureStartId for StacksDBTx<'_> { tip, &nakamoto_keys::ongoing_tenure_coinbase_height(coinbase_height), )? 
- .map(|hex_inp| nakamoto_keys::parse_block_id(&hex_inp)) - .flatten(); + .and_then(|hex_inp| nakamoto_keys::parse_block_id(&hex_inp)); Ok(opt_out) } @@ -157,7 +153,7 @@ impl GetTenureStartId for MARF { pub struct HeadersDBConn<'a>(pub StacksDBConn<'a>); -impl<'a> HeadersDB for HeadersDBConn<'a> { +impl HeadersDB for HeadersDBConn<'_> { fn get_stacks_block_header_hash_for_block( &self, id_bhh: &StacksBlockId, @@ -328,7 +324,7 @@ impl<'a> HeadersDB for HeadersDBConn<'a> { } } -impl<'a> HeadersDB for ChainstateTx<'a> { +impl HeadersDB for ChainstateTx<'_> { fn get_stacks_block_header_hash_for_block( &self, id_bhh: &StacksBlockId, @@ -1205,7 +1201,7 @@ impl MemoryBackingStore { memory_marf } - pub fn as_clarity_db<'a>(&'a mut self) -> ClarityDatabase<'a> { + pub fn as_clarity_db(&mut self) -> ClarityDatabase<'_> { ClarityDatabase::new(self, &NULL_HEADER_DB, &NULL_BURN_STATE_DB) } @@ -1219,7 +1215,7 @@ impl MemoryBackingStore { ClarityDatabase::new(self, headers_db, burn_state_db) } - pub fn as_analysis_db<'a>(&'a mut self) -> AnalysisDatabase<'a> { + pub fn as_analysis_db(&mut self) -> AnalysisDatabase<'_> { AnalysisDatabase::new(self) } } diff --git a/stackslib/src/clarity_vm/tests/analysis_costs.rs b/stackslib/src/clarity_vm/tests/analysis_costs.rs index 4fe887f2c3..dc5b33fd31 100644 --- a/stackslib/src/clarity_vm/tests/analysis_costs.rs +++ b/stackslib/src/clarity_vm/tests/analysis_costs.rs @@ -81,7 +81,7 @@ fn setup_tracked_cost_test( clarity_instance .begin_test_genesis_block( &StacksBlockId::sentinel(), - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0; 32]), &TEST_HEADER_DB, &burn_state_db, ) @@ -89,8 +89,8 @@ fn setup_tracked_cost_test( { let mut conn = clarity_instance.begin_block( - &StacksBlockId([0 as u8; 32]), - &StacksBlockId([1 as u8; 32]), + &StacksBlockId([0; 32]), + &StacksBlockId([1; 32]), &TEST_HEADER_DB, &burn_state_db, ); @@ -107,8 +107,8 @@ fn setup_tracked_cost_test( { let mut conn = clarity_instance.begin_block( - &StacksBlockId([1 as u8; 32]), - &StacksBlockId([2 as u8; 32]), + &StacksBlockId([1; 32]), + &StacksBlockId([2; 32]), &TEST_HEADER_DB, &burn_state_db, ); @@ -145,8 +145,8 @@ fn setup_tracked_cost_test( { let mut conn = clarity_instance.begin_block( - &StacksBlockId([2 as u8; 32]), - &StacksBlockId([3 as u8; 32]), + &StacksBlockId([2; 32]), + &StacksBlockId([3; 32]), &TEST_HEADER_DB, &burn_state_db, ); @@ -221,7 +221,7 @@ fn test_tracked_costs( { let mut conn = clarity_instance.begin_block( - &StacksBlockId([3 as u8; 32]), + &StacksBlockId([3; 32]), &StacksBlockId([4 + prog_id as u8; 32]), &TEST_HEADER_DB, &burn_state_db, diff --git a/stackslib/src/clarity_vm/tests/ast.rs b/stackslib/src/clarity_vm/tests/ast.rs index 2074fa7636..edeaf9d553 100644 --- a/stackslib/src/clarity_vm/tests/ast.rs +++ b/stackslib/src/clarity_vm/tests/ast.rs @@ -35,7 +35,7 @@ fn dependency_edge_counting_runtime( clarity_instance .begin_test_genesis_block( &StacksBlockId::sentinel(), - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ) @@ -43,8 +43,8 @@ fn dependency_edge_counting_runtime( let mut cost_track = clarity_instance .begin_block( - &StacksBlockId([0 as u8; 32]), - &StacksBlockId([1 as u8; 32]), + &StacksBlockId([0; 32]), + &StacksBlockId([1; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ) diff --git a/stackslib/src/clarity_vm/tests/costs.rs b/stackslib/src/clarity_vm/tests/costs.rs index 29c57b2e92..030b62af93 100644 --- a/stackslib/src/clarity_vm/tests/costs.rs +++ b/stackslib/src/clarity_vm/tests/costs.rs @@ -204,7 +204,7 @@ 
where let mut tip = first_block.clone(); if epoch >= StacksEpochId::Epoch2_05 { - let next_block = StacksBlockId([1 as u8; 32]); + let next_block = StacksBlockId([1; 32]); let mut clarity_conn = clarity_instance.begin_block(&tip, &next_block, &TEST_HEADER_DB, &TEST_BURN_STATE_DB); clarity_conn.initialize_epoch_2_05().unwrap(); @@ -213,7 +213,7 @@ where } if epoch >= StacksEpochId::Epoch21 { - let next_block = StacksBlockId([2 as u8; 32]); + let next_block = StacksBlockId([2; 32]); let mut clarity_conn = clarity_instance.begin_block(&tip, &next_block, &TEST_HEADER_DB, &TEST_BURN_STATE_DB); clarity_conn.initialize_epoch_2_1().unwrap(); @@ -223,7 +223,7 @@ where let mut marf_kv = clarity_instance.destroy(); - let mut store = marf_kv.begin(&tip, &StacksBlockId([3 as u8; 32])); + let mut store = marf_kv.begin(&tip, &StacksBlockId([3; 32])); to_do(OwnedEnvironment::new_max_limit( store.as_clarity_db(&TEST_HEADER_DB, &TEST_BURN_STATE_DB), @@ -1052,7 +1052,7 @@ fn test_cost_contract_short_circuits(use_mainnet: bool, clarity_version: Clarity let mut clarity_inst = ClarityInstance::new(use_mainnet, chain_id, marf_kv); let mut block_conn = clarity_inst.begin_block( &StacksBlockId::new(&FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH), - &StacksBlockId([1 as u8; 32]), + &StacksBlockId([1; 32]), &TEST_HEADER_DB, burn_db, ); @@ -1111,7 +1111,7 @@ fn test_cost_contract_short_circuits(use_mainnet: bool, clarity_version: Clarity }; let without_interposing_5 = { - let mut store = marf_kv.begin(&StacksBlockId([1 as u8; 32]), &StacksBlockId([2 as u8; 32])); + let mut store = marf_kv.begin(&StacksBlockId([1; 32]), &StacksBlockId([2; 32])); let mut owned_env = OwnedEnvironment::new_max_limit( store.as_clarity_db(&TEST_HEADER_DB, burn_db), StacksEpochId::Epoch20, @@ -1134,7 +1134,7 @@ fn test_cost_contract_short_circuits(use_mainnet: bool, clarity_version: Clarity }; let without_interposing_10 = { - let mut store = marf_kv.begin(&StacksBlockId([2 as u8; 32]), &StacksBlockId([3 as u8; 32])); + let mut store = marf_kv.begin(&StacksBlockId([2; 32]), &StacksBlockId([3; 32])); let mut owned_env = OwnedEnvironment::new_max_limit( store.as_clarity_db(&TEST_HEADER_DB, burn_db), StacksEpochId::Epoch20, @@ -1163,7 +1163,7 @@ fn test_cost_contract_short_circuits(use_mainnet: bool, clarity_version: Clarity }; { - let mut store = marf_kv.begin(&StacksBlockId([3 as u8; 32]), &StacksBlockId([4 as u8; 32])); + let mut store = marf_kv.begin(&StacksBlockId([3; 32]), &StacksBlockId([4; 32])); let mut db = store.as_clarity_db(&TEST_HEADER_DB, burn_db); db.begin(); db.set_variable_unknown_descriptor( @@ -1194,7 +1194,7 @@ fn test_cost_contract_short_circuits(use_mainnet: bool, clarity_version: Clarity } let with_interposing_5 = { - let mut store = marf_kv.begin(&StacksBlockId([4 as u8; 32]), &StacksBlockId([5 as u8; 32])); + let mut store = marf_kv.begin(&StacksBlockId([4; 32]), &StacksBlockId([5; 32])); let mut owned_env = OwnedEnvironment::new_max_limit( store.as_clarity_db(&TEST_HEADER_DB, burn_db), @@ -1218,7 +1218,7 @@ fn test_cost_contract_short_circuits(use_mainnet: bool, clarity_version: Clarity }; let with_interposing_10 = { - let mut store = marf_kv.begin(&StacksBlockId([5 as u8; 32]), &StacksBlockId([6 as u8; 32])); + let mut store = marf_kv.begin(&StacksBlockId([5; 32]), &StacksBlockId([6; 32])); let mut owned_env = OwnedEnvironment::new_max_limit( store.as_clarity_db(&TEST_HEADER_DB, burn_db), StacksEpochId::Epoch20, @@ -1304,7 +1304,7 @@ fn test_cost_voting_integration(use_mainnet: bool, clarity_version: 
ClarityVersi let mut clarity_inst = ClarityInstance::new(use_mainnet, chain_id, marf_kv); let mut block_conn = clarity_inst.begin_block( &StacksBlockId::new(&FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH), - &StacksBlockId([1 as u8; 32]), + &StacksBlockId([1; 32]), &TEST_HEADER_DB, burn_db, ); @@ -1479,7 +1479,7 @@ fn test_cost_voting_integration(use_mainnet: bool, clarity_version: ClarityVersi }; { - let mut store = marf_kv.begin(&StacksBlockId([1 as u8; 32]), &StacksBlockId([2 as u8; 32])); + let mut store = marf_kv.begin(&StacksBlockId([1; 32]), &StacksBlockId([2; 32])); let mut db = store.as_clarity_db(&TEST_HEADER_DB, burn_db); db.begin(); @@ -1517,7 +1517,7 @@ fn test_cost_voting_integration(use_mainnet: bool, clarity_version: ClarityVersi } let le_cost_without_interception = { - let mut store = marf_kv.begin(&StacksBlockId([2 as u8; 32]), &StacksBlockId([3 as u8; 32])); + let mut store = marf_kv.begin(&StacksBlockId([2; 32]), &StacksBlockId([3; 32])); let mut owned_env = OwnedEnvironment::new_max_limit( store.as_clarity_db(&TEST_HEADER_DB, burn_db), StacksEpochId::Epoch20, @@ -1578,7 +1578,7 @@ fn test_cost_voting_integration(use_mainnet: bool, clarity_version: ClarityVersi ]; { - let mut store = marf_kv.begin(&StacksBlockId([3 as u8; 32]), &StacksBlockId([4 as u8; 32])); + let mut store = marf_kv.begin(&StacksBlockId([3; 32]), &StacksBlockId([4; 32])); let mut db = store.as_clarity_db(&TEST_HEADER_DB, burn_db); db.begin(); @@ -1618,7 +1618,7 @@ fn test_cost_voting_integration(use_mainnet: bool, clarity_version: ClarityVersi } { - let mut store = marf_kv.begin(&StacksBlockId([4 as u8; 32]), &StacksBlockId([5 as u8; 32])); + let mut store = marf_kv.begin(&StacksBlockId([4; 32]), &StacksBlockId([5; 32])); let mut owned_env = OwnedEnvironment::new_max_limit( store.as_clarity_db(&TEST_HEADER_DB, burn_db), StacksEpochId::Epoch20, diff --git a/stackslib/src/clarity_vm/tests/events.rs b/stackslib/src/clarity_vm/tests/events.rs index 7037e8dcf3..3e09b6b924 100644 --- a/stackslib/src/clarity_vm/tests/events.rs +++ b/stackslib/src/clarity_vm/tests/events.rs @@ -88,7 +88,7 @@ fn helper_execute_epoch( &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, ), - &StacksBlockId([1 as u8; 32]), + &StacksBlockId([1; 32]), ); let mut owned_env = OwnedEnvironment::new_max_limit( diff --git a/stackslib/src/clarity_vm/tests/forking.rs b/stackslib/src/clarity_vm/tests/forking.rs index c74cb0c8b0..22a3f07321 100644 --- a/stackslib/src/clarity_vm/tests/forking.rs +++ b/stackslib/src/clarity_vm/tests/forking.rs @@ -195,7 +195,7 @@ fn test_at_block_good(#[case] version: ClarityVersion, #[case] epoch: StacksEpoc Error::Runtime(x, _) => assert_eq!( x, RuntimeErrorType::UnknownBlockHeaderHash(BlockHeaderHash::from( - vec![2 as u8; 32].as_slice() + vec![2; 32].as_slice() )) ), _ => panic!("Unexpected error"), @@ -287,7 +287,7 @@ fn with_separate_forks_environment( let mut marf_kv = MarfedKV::temporary(); { - let mut store = marf_kv.begin(&StacksBlockId::sentinel(), &StacksBlockId([0 as u8; 32])); + let mut store = marf_kv.begin(&StacksBlockId::sentinel(), &StacksBlockId([0; 32])); store .as_clarity_db(&TEST_HEADER_DB, &TEST_BURN_STATE_DB) .initialize(); @@ -295,7 +295,7 @@ fn with_separate_forks_environment( } { - let mut store = marf_kv.begin(&StacksBlockId([0 as u8; 32]), &StacksBlockId([1 as u8; 32])); + let mut store = marf_kv.begin(&StacksBlockId([0; 32]), &StacksBlockId([1; 32])); let mut owned_env = OwnedEnvironment::new( store.as_clarity_db(&TEST_HEADER_DB, &TEST_BURN_STATE_DB), epoch, @@ 
-307,7 +307,7 @@ fn with_separate_forks_environment( // Now, we can do our forking. { - let mut store = marf_kv.begin(&StacksBlockId([1 as u8; 32]), &StacksBlockId([2 as u8; 32])); + let mut store = marf_kv.begin(&StacksBlockId([1; 32]), &StacksBlockId([2; 32])); let mut owned_env = OwnedEnvironment::new( store.as_clarity_db(&TEST_HEADER_DB, &TEST_BURN_STATE_DB), epoch, @@ -317,7 +317,7 @@ fn with_separate_forks_environment( } { - let mut store = marf_kv.begin(&StacksBlockId([1 as u8; 32]), &StacksBlockId([3 as u8; 32])); + let mut store = marf_kv.begin(&StacksBlockId([1; 32]), &StacksBlockId([3; 32])); let mut owned_env = OwnedEnvironment::new( store.as_clarity_db(&TEST_HEADER_DB, &TEST_BURN_STATE_DB), epoch, @@ -327,7 +327,7 @@ fn with_separate_forks_environment( } { - let mut store = marf_kv.begin(&StacksBlockId([2 as u8; 32]), &StacksBlockId([4 as u8; 32])); + let mut store = marf_kv.begin(&StacksBlockId([2; 32]), &StacksBlockId([4; 32])); let mut owned_env = OwnedEnvironment::new( store.as_clarity_db(&TEST_HEADER_DB, &TEST_BURN_STATE_DB), epoch, diff --git a/stackslib/src/clarity_vm/tests/large_contract.rs b/stackslib/src/clarity_vm/tests/large_contract.rs index e7d8faff0c..6e2255446a 100644 --- a/stackslib/src/clarity_vm/tests/large_contract.rs +++ b/stackslib/src/clarity_vm/tests/large_contract.rs @@ -48,7 +48,7 @@ use crate::clarity_vm::database::MemoryBackingStore; use crate::util_lib::boot::boot_code_id; fn test_block_headers(n: u8) -> StacksBlockId { - StacksBlockId([n as u8; 32]) + StacksBlockId([n; 32]) } pub const TEST_BURN_STATE_DB_AST_PRECHECK: UnitTestBurnStateDB = UnitTestBurnStateDB { @@ -131,7 +131,7 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac let mut gb = clarity.begin_test_genesis_block( &StacksBlockId::sentinel(), - &StacksBlockId([0xfe as u8; 32]), + &StacksBlockId([0xfe; 32]), &TEST_HEADER_DB, burn_db, ); @@ -197,8 +197,8 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac { let mut block = new_block( &mut clarity, - &StacksBlockId([0xfe as u8; 32]), - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0xfe; 32]), + &StacksBlockId([0; 32]), &TEST_HEADER_DB, burn_db, ); @@ -436,7 +436,7 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac pub fn with_versioned_memory_environment(f: F, version: ClarityVersion, top_level: bool) where - F: FnOnce(&mut OwnedEnvironment, ClarityVersion) -> (), + F: FnOnce(&mut OwnedEnvironment, ClarityVersion), { let mut marf_kv = MemoryBackingStore::new(); @@ -697,7 +697,7 @@ pub fn rollback_log_memory_test( clarity_instance .begin_test_genesis_block( &StacksBlockId::sentinel(), - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0; 32]), &TEST_HEADER_DB, burn_db, ) @@ -706,8 +706,8 @@ pub fn rollback_log_memory_test( { let mut conn = new_block( &mut clarity_instance, - &StacksBlockId([0 as u8; 32]), - &StacksBlockId([1 as u8; 32]), + &StacksBlockId([0; 32]), + &StacksBlockId([1; 32]), &TEST_HEADER_DB, burn_db, ); @@ -768,7 +768,7 @@ pub fn let_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_id clarity_instance .begin_test_genesis_block( &StacksBlockId::sentinel(), - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0; 32]), &TEST_HEADER_DB, burn_db, ) @@ -777,8 +777,8 @@ pub fn let_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_id { let mut conn = new_block( &mut clarity_instance, - &StacksBlockId([0 as u8; 32]), - &StacksBlockId([1 as u8; 32]), + &StacksBlockId([0; 32]), + &StacksBlockId([1; 32]), 
&TEST_HEADER_DB, burn_db, ); @@ -847,7 +847,7 @@ pub fn argument_memory_test( clarity_instance .begin_test_genesis_block( &StacksBlockId::sentinel(), - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0; 32]), &TEST_HEADER_DB, burn_db, ) @@ -856,8 +856,8 @@ pub fn argument_memory_test( { let mut conn = new_block( &mut clarity_instance, - &StacksBlockId([0 as u8; 32]), - &StacksBlockId([1 as u8; 32]), + &StacksBlockId([0; 32]), + &StacksBlockId([1; 32]), &TEST_HEADER_DB, burn_db, ); @@ -924,7 +924,7 @@ pub fn fcall_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_ clarity_instance .begin_test_genesis_block( &StacksBlockId::sentinel(), - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0; 32]), &TEST_HEADER_DB, burn_db, ) @@ -933,8 +933,8 @@ pub fn fcall_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_ { let mut conn = new_block( &mut clarity_instance, - &StacksBlockId([0 as u8; 32]), - &StacksBlockId([1 as u8; 32]), + &StacksBlockId([0; 32]), + &StacksBlockId([1; 32]), &TEST_HEADER_DB, burn_db, ); @@ -1043,7 +1043,7 @@ pub fn ccall_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_ clarity_instance .begin_test_genesis_block( &StacksBlockId::sentinel(), - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0; 32]), &TEST_HEADER_DB, burn_db, ) @@ -1052,8 +1052,8 @@ pub fn ccall_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_ { let mut conn = new_block( &mut clarity_instance, - &StacksBlockId([0 as u8; 32]), - &StacksBlockId([1 as u8; 32]), + &StacksBlockId([0; 32]), + &StacksBlockId([1; 32]), &TEST_HEADER_DB, burn_db, ); diff --git a/stackslib/src/clarity_vm/tests/simple_tests.rs b/stackslib/src/clarity_vm/tests/simple_tests.rs index 0367bd8448..a73489bb95 100644 --- a/stackslib/src/clarity_vm/tests/simple_tests.rs +++ b/stackslib/src/clarity_vm/tests/simple_tests.rs @@ -11,7 +11,7 @@ use crate::clarity_vm::database::marf::MarfedKV; pub fn with_marfed_environment(f: F, top_level: bool) where - F: FnOnce(&mut OwnedEnvironment) -> (), + F: FnOnce(&mut OwnedEnvironment), { let mut marf_kv = MarfedKV::temporary(); @@ -30,7 +30,7 @@ where { let mut store = marf_kv.begin( &StacksBlockId::new(&FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH), - &StacksBlockId([1 as u8; 32]), + &StacksBlockId([1; 32]), ); let mut owned_env = OwnedEnvironment::new( @@ -65,7 +65,7 @@ fn test_at_unknown_block() { Error::Runtime(x, _) => assert_eq!( x, RuntimeErrorType::UnknownBlockHeaderHash(BlockHeaderHash::from( - vec![2 as u8; 32].as_slice() + vec![2; 32].as_slice() )) ), _ => panic!("Unexpected error"), diff --git a/stackslib/src/cli.rs b/stackslib/src/cli.rs index f703f8a367..66e14d4b5d 100644 --- a/stackslib/src/cli.rs +++ b/stackslib/src/cli.rs @@ -16,8 +16,9 @@ //! 
Subcommands used by `stacks-inspect` binary +use std::any::type_name; use std::cell::LazyCell; -use std::path::PathBuf; +use std::path::{Path, PathBuf}; use std::time::Instant; use std::{env, fs, io, process, thread}; @@ -28,96 +29,101 @@ use regex::Regex; use rusqlite::{Connection, OpenFlags}; use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, StacksBlockId}; use stacks_common::types::sqlite::NO_PARAMS; +use stacks_common::util::get_epoch_time_ms; +use stacks_common::util::hash::Hash160; +use stacks_common::util::vrf::VRFProof; use crate::burnchains::db::BurnchainDB; -use crate::burnchains::PoxConstants; +use crate::burnchains::{Burnchain, PoxConstants}; use crate::chainstate::burn::db::sortdb::{ get_ancestor_sort_id, SortitionDB, SortitionHandle, SortitionHandleContext, }; use crate::chainstate::burn::{BlockSnapshot, ConsensusHash}; use crate::chainstate::coordinator::OnChainRewardSetProvider; +use crate::chainstate::nakamoto::miner::{NakamotoBlockBuilder, NakamotoTenureInfo}; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; use crate::chainstate::stacks::db::blocks::StagingBlock; use crate::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState, StacksHeaderInfo}; use crate::chainstate::stacks::miner::*; use crate::chainstate::stacks::{Error as ChainstateError, *}; use crate::clarity_vm::clarity::ClarityInstance; +use crate::config::{Config, ConfigFile, DEFAULT_MAINNET_CONFIG}; use crate::core::*; +use crate::cost_estimates::metrics::UnitMetric; +use crate::cost_estimates::UnitEstimator; use crate::util_lib::db::IndexDBTx; -/// Can be used with CLI commands to support non-mainnet chainstate -/// Allows integration testing of these functions -#[derive(Deserialize)] -pub struct StacksChainConfig { - pub chain_id: u32, - pub first_block_height: u64, - pub first_burn_header_hash: BurnchainHeaderHash, - pub first_burn_header_timestamp: u64, - pub pox_constants: PoxConstants, - pub epochs: EpochList, +/// Options common to many `stacks-inspect` subcommands +/// Returned by `drain_common_opts()` +#[derive(Debug, Default)] +pub struct CommonOpts { + pub config: Option<Config>, } -impl StacksChainConfig { - pub fn default_mainnet() -> Self { - Self { - chain_id: CHAIN_ID_MAINNET, - first_block_height: BITCOIN_MAINNET_FIRST_BLOCK_HEIGHT, - first_burn_header_hash: BurnchainHeaderHash::from_hex(BITCOIN_MAINNET_FIRST_BLOCK_HASH) - .unwrap(), - first_burn_header_timestamp: BITCOIN_MAINNET_FIRST_BLOCK_TIMESTAMP.into(), - pox_constants: PoxConstants::mainnet_default(), - epochs: (*STACKS_EPOCHS_MAINNET).clone(), +/// Process arguments common to many `stacks-inspect` subcommands and drain them from `argv` +/// +/// Args: +/// - `argv`: Full CLI args `Vec<String>` +/// - `start_at`: Position in args vec where to look for common options.
+/// For example, if `start_at` is `1`, then look for these options **before** the subcommand: +/// ```console +/// stacks-inspect --config testnet.toml replay-block path/to/chainstate +/// ``` +pub fn drain_common_opts(argv: &mut Vec<String>, start_at: usize) -> CommonOpts { + let mut i = start_at; + let mut opts = CommonOpts::default(); + while let Some(arg) = argv.get(i) { + let (prefix, opt) = arg.split_at(2); + if prefix != "--" { + // No args left to take + break; } - } - - pub fn default_testnet() -> Self { - let mut pox_constants = PoxConstants::regtest_default(); - pox_constants.prepare_length = 100; - pox_constants.reward_cycle_length = 900; - pox_constants.v1_unlock_height = 3; - pox_constants.v2_unlock_height = 5; - pox_constants.pox_3_activation_height = 5; - pox_constants.pox_4_activation_height = 6; - pox_constants.v3_unlock_height = 7; - let mut epochs = EpochList::new(&*STACKS_EPOCHS_REGTEST); - epochs[StacksEpochId::Epoch10].start_height = 0; - epochs[StacksEpochId::Epoch10].end_height = 0; - epochs[StacksEpochId::Epoch20].start_height = 0; - epochs[StacksEpochId::Epoch20].end_height = 1; - epochs[StacksEpochId::Epoch2_05].start_height = 1; - epochs[StacksEpochId::Epoch2_05].end_height = 2; - epochs[StacksEpochId::Epoch21].start_height = 2; - epochs[StacksEpochId::Epoch21].end_height = 3; - epochs[StacksEpochId::Epoch22].start_height = 3; - epochs[StacksEpochId::Epoch22].end_height = 4; - epochs[StacksEpochId::Epoch23].start_height = 4; - epochs[StacksEpochId::Epoch23].end_height = 5; - epochs[StacksEpochId::Epoch24].start_height = 5; - epochs[StacksEpochId::Epoch24].end_height = 6; - epochs[StacksEpochId::Epoch25].start_height = 6; - epochs[StacksEpochId::Epoch25].end_height = 56_457; - epochs[StacksEpochId::Epoch30].start_height = 56_457; - Self { - chain_id: CHAIN_ID_TESTNET, - first_block_height: 0, - first_burn_header_hash: BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH) - .unwrap(), - first_burn_header_timestamp: BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP.into(), - pox_constants, - epochs, + // "Take" arg + i += 1; + match opt { + "config" => { + let path = &argv[i]; + i += 1; + let config_file = ConfigFile::from_path(&path).unwrap_or_else(|e| { + panic!("Failed to read '{path}' as stacks-node config: {e}") + }); + let config = Config::from_config_file(config_file, false).unwrap_or_else(|e| { + panic!("Failed to convert config file into node config: {e}") + }); + opts.config.replace(config); + } + "network" => { + let network = &argv[i]; + i += 1; + let config_file = match network.to_lowercase().as_str() { + "helium" => ConfigFile::helium(), + "mainnet" => ConfigFile::mainnet(), + "mocknet" => ConfigFile::mocknet(), + "xenon" => ConfigFile::xenon(), + other => { + eprintln!("Unknown network choice `{other}`"); + process::exit(1); + } + }; + let config = Config::from_config_file(config_file, false).unwrap_or_else(|e| { + panic!("Failed to convert config file into node config: {e}") + }); + opts.config.replace(config); + } + _ => panic!("Unrecognized option: {opt}"), } } + // Remove options processed + argv.drain(start_at..i); + opts } -const STACKS_CHAIN_CONFIG_DEFAULT_MAINNET: LazyCell<StacksChainConfig> = - LazyCell::new(StacksChainConfig::default_mainnet); - /// Replay blocks from chainstate database /// Terminates on error using `process::exit()` /// /// Arguments: /// - `argv`: Args in CLI format: `<command-name> [args...]` -pub fn command_replay_block(argv: &[String], conf: Option<&StacksChainConfig>) { +pub fn command_replay_block(argv: &[String], conf: Option<&Config>) { let
print_help_and_exit = || -> ! { let n = &argv[0]; eprintln!("Usage:"); @@ -195,7 +201,7 @@ pub fn command_replay_block(argv: &[String], conf: Option<&StacksChainConfig>) { /// /// Arguments: /// - `argv`: Args in CLI format: `<command-name> [args...]` -pub fn command_replay_block_nakamoto(argv: &[String], conf: Option<&StacksChainConfig>) { +pub fn command_replay_block_nakamoto(argv: &[String], conf: Option<&Config>) { let print_help_and_exit = || -> ! { let n = &argv[0]; eprintln!("Usage:"); @@ -212,12 +218,15 @@ pub fn command_replay_block_nakamoto(argv: &[String], conf: Option<&StacksChainC let chain_state_path = format!("{db_path}/chainstate/"); - let default_conf = STACKS_CHAIN_CONFIG_DEFAULT_MAINNET; - let conf = conf.unwrap_or(&default_conf); + let conf = conf.unwrap_or(&DEFAULT_MAINNET_CONFIG); - let mainnet = conf.chain_id == CHAIN_ID_MAINNET; - let (chainstate, _) = - StacksChainState::open(mainnet, conf.chain_id, &chain_state_path, None).unwrap(); + let (chainstate, _) = StacksChainState::open( + conf.is_mainnet(), + conf.burnchain.chain_id, + &chain_state_path, + None, + ) + .unwrap(); let conn = chainstate.nakamoto_blocks_db(); @@ -281,7 +290,7 @@ pub fn command_replay_block_nakamoto(argv: &[String], conf: Option<&StacksChainC /// Arguments: /// - `argv`: Args in CLI format: `<command-name> [args...]` /// - `conf`: Optional config for running on non-mainnet chainstate -pub fn command_replay_mock_mining(argv: &[String], conf: Option<&StacksChainConfig>) { +pub fn command_replay_mock_mining(argv: &[String], conf: Option<&Config>) { let print_help_and_exit = || -> ! { let n = &argv[0]; eprintln!("Usage:"); @@ -369,32 +378,191 @@ pub fn command_replay_mock_mining(argv: &[String], conf: Option<&StacksChainConf } } +/// Try to mine an anchored block on top of the current chain tip +/// Terminates on error using `process::exit()` +/// +/// Arguments: +/// - `argv`: Args in CLI format: `<command-name> [args...]` +/// - `conf`: Optional config for running on non-mainnet chainstate +pub fn command_try_mine(argv: &[String], conf: Option<&Config>) { + let print_help_and_exit = || { + let n = &argv[0]; + eprintln!("Usage: {n} <db-path> [min-fee [max-time]]"); + eprintln!(); + eprintln!("Given a <db-path>, try to 'mine' an anchored block. This invokes the miner block"); + eprintln!("assembly, but does not attempt to broadcast a block commit.
This is useful for determining"); + eprintln!("what transactions a given chain state would include in an anchor block,"); + eprintln!("or otherwise simulating a miner."); + process::exit(1); + }; + + // Parse subcommand-specific args + let db_path = argv.get(1).unwrap_or_else(print_help_and_exit); + let min_fee = argv + .get(2) + .map(|arg| arg.parse().expect("Could not parse min_fee")) + .unwrap_or(u64::MAX); + let max_time = argv + .get(3) + .map(|arg| arg.parse().expect("Could not parse max_time")) + .unwrap_or(u64::MAX); + + let start = Instant::now(); + + let conf = conf.unwrap_or(&DEFAULT_MAINNET_CONFIG); + + let burnchain_path = format!("{db_path}/burnchain"); + let sort_db_path = format!("{db_path}/burnchain/sortition"); + let chain_state_path = format!("{db_path}/chainstate/"); + + let burnchain = conf.get_burnchain(); + let sort_db = SortitionDB::open(&sort_db_path, false, burnchain.pox_constants.clone()) + .unwrap_or_else(|e| panic!("Failed to open {sort_db_path}: {e}")); + let (chainstate, _) = StacksChainState::open( + conf.is_mainnet(), + conf.burnchain.chain_id, + &chain_state_path, + None, + ) + .unwrap_or_else(|e| panic!("Failed to open stacks chain state: {e}")); + let chain_tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()) + .unwrap_or_else(|e| panic!("Failed to get sortition chain tip: {e}")); + + let estimator = Box::new(UnitEstimator); + let metric = Box::new(UnitMetric); + + let mut mempool_db = MemPoolDB::open( + conf.is_mainnet(), + conf.burnchain.chain_id, + &chain_state_path, + estimator, + metric, + ) + .unwrap_or_else(|e| panic!("Failed to open mempool db: {e}")); + + // Parent Stacks header for block we are going to mine + let parent_stacks_header = + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sort_db) + .unwrap_or_else(|e| panic!("Error looking up chain tip: {e}")) + .expect("No chain tip found"); + + let burn_dbconn = sort_db.index_handle(&chain_tip.sortition_id); + + let mut settings = BlockBuilderSettings::limited(); + settings.max_miner_time_ms = max_time; + + let result = match &parent_stacks_header.anchored_header { + StacksBlockHeaderTypes::Epoch2(..) => { + let sk = StacksPrivateKey::new(); + let mut tx_auth = TransactionAuth::from_p2pkh(&sk).unwrap(); + tx_auth.set_origin_nonce(0); + + let mut coinbase_tx = StacksTransaction::new( + TransactionVersion::Mainnet, + tx_auth, + TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None, None), + ); + + coinbase_tx.chain_id = conf.burnchain.chain_id; + coinbase_tx.anchor_mode = TransactionAnchorMode::OnChainOnly; + let mut tx_signer = StacksTransactionSigner::new(&coinbase_tx); + tx_signer.sign_origin(&sk).unwrap(); + let coinbase_tx = tx_signer.get_tx().unwrap(); + + StacksBlockBuilder::build_anchored_block( + &chainstate, + &burn_dbconn, + &mut mempool_db, + &parent_stacks_header, + chain_tip.total_burn, + VRFProof::empty(), + Hash160([0; 20]), + &coinbase_tx, + settings, + None, + &Burnchain::new( + &burnchain_path, + &burnchain.chain_name, + &burnchain.network_name, + ) + .unwrap_or_else(|e| panic!("Failed to instantiate burnchain: {e}")), + ) + .map(|(block, cost, size)| (block.block_hash(), block.txs, cost, size)) + } + StacksBlockHeaderTypes::Nakamoto(..) => { + NakamotoBlockBuilder::build_nakamoto_block( + &chainstate, + &burn_dbconn, + &mut mempool_db, + &parent_stacks_header, + // tenure ID consensus hash of this block + &parent_stacks_header.consensus_hash, + // the burn so far on the burnchain (i.e. 
from the last burnchain block) + chain_tip.total_burn, + NakamotoTenureInfo::default(), + settings, + None, + 0, + ) + .map(|(block, cost, size, _)| (block.header.block_hash(), block.txs, cost, size)) + } + }; + + let elapsed = start.elapsed(); + let summary = format!( + "block @ height = {h} off of {pid} ({pch}/{pbh}) in {t}ms. Min-fee: {min_fee}, Max-time: {max_time}", + h=parent_stacks_header.stacks_block_height + 1, + pid=&parent_stacks_header.index_block_hash(), + pch=&parent_stacks_header.consensus_hash, + pbh=&parent_stacks_header.anchored_header.block_hash(), + t=elapsed.as_millis(), + ); + + let code = match result { + Ok((block_hash, txs, cost, size)) => { + let total_fees: u64 = txs.iter().map(|tx| tx.get_tx_fee()).sum(); + + println!("Successfully mined {summary}"); + println!("Block {block_hash}: {total_fees} uSTX, {size} bytes, cost {cost:?}"); + 0 + } + Err(e) => { + println!("Failed to mine {summary}"); + println!("Error: {e}"); + 1 + } + }; + + process::exit(code); +} + /// Fetch and process a `StagingBlock` from database and call `replay_block()` to validate -fn replay_staging_block( - db_path: &str, - index_block_hash_hex: &str, - conf: Option<&StacksChainConfig>, -) { +fn replay_staging_block(db_path: &str, index_block_hash_hex: &str, conf: Option<&Config>) { let block_id = StacksBlockId::from_hex(index_block_hash_hex).unwrap(); let chain_state_path = format!("{db_path}/chainstate/"); let sort_db_path = format!("{db_path}/burnchain/sortition"); let burn_db_path = format!("{db_path}/burnchain/burnchain.sqlite"); let burnchain_blocks_db = BurnchainDB::open(&burn_db_path, false).unwrap(); - let default_conf = STACKS_CHAIN_CONFIG_DEFAULT_MAINNET; - let conf = conf.unwrap_or(&default_conf); + let conf = conf.unwrap_or(&DEFAULT_MAINNET_CONFIG); - let mainnet = conf.chain_id == CHAIN_ID_MAINNET; - let (mut chainstate, _) = - StacksChainState::open(mainnet, conf.chain_id, &chain_state_path, None).unwrap(); + let (mut chainstate, _) = StacksChainState::open( + conf.is_mainnet(), + conf.burnchain.chain_id, + &chain_state_path, + None, + ) + .unwrap(); + let burnchain = conf.get_burnchain(); + let epochs = conf.burnchain.get_epoch_list(); let mut sortdb = SortitionDB::connect( &sort_db_path, - conf.first_block_height, - &conf.first_burn_header_hash, - conf.first_burn_header_timestamp, - &conf.epochs, - conf.pox_constants.clone(), + burnchain.first_block_height, + &burnchain.first_block_hash, + u64::from(burnchain.first_block_timestamp), + &epochs, + burnchain.pox_constants.clone(), None, true, ) @@ -448,30 +616,31 @@ fn replay_staging_block( } /// Process a mock mined block and call `replay_block()` to validate -fn replay_mock_mined_block( - db_path: &str, - block: AssembledAnchorBlock, - conf: Option<&StacksChainConfig>, -) { +fn replay_mock_mined_block(db_path: &str, block: AssembledAnchorBlock, conf: Option<&Config>) { let chain_state_path = format!("{db_path}/chainstate/"); let sort_db_path = format!("{db_path}/burnchain/sortition"); let burn_db_path = format!("{db_path}/burnchain/burnchain.sqlite"); let burnchain_blocks_db = BurnchainDB::open(&burn_db_path, false).unwrap(); - let default_conf = STACKS_CHAIN_CONFIG_DEFAULT_MAINNET; - let conf = conf.unwrap_or(&default_conf); + let conf = conf.unwrap_or(&DEFAULT_MAINNET_CONFIG); - let mainnet = conf.chain_id == CHAIN_ID_MAINNET; - let (mut chainstate, _) = - StacksChainState::open(mainnet, conf.chain_id, &chain_state_path, None).unwrap(); + let (mut chainstate, _) = StacksChainState::open( + conf.is_mainnet(), + 
conf.burnchain.chain_id, + &chain_state_path, + None, + ) + .unwrap(); + let burnchain = conf.get_burnchain(); + let epochs = conf.burnchain.get_epoch_list(); let mut sortdb = SortitionDB::connect( &sort_db_path, - conf.first_block_height, - &conf.first_burn_header_hash, - conf.first_burn_header_timestamp, - &conf.epochs, - conf.pox_constants.clone(), + burnchain.first_block_height, + &burnchain.first_block_hash, + u64::from(burnchain.first_block_timestamp), + &epochs, + burnchain.pox_constants.clone(), None, true, ) @@ -650,22 +819,28 @@ fn replay_block( } /// Fetch and process a NakamotoBlock from database and call `replay_block_nakamoto()` to validate -fn replay_naka_staging_block(db_path: &str, index_block_hash_hex: &str, conf: &StacksChainConfig) { +fn replay_naka_staging_block(db_path: &str, index_block_hash_hex: &str, conf: &Config) { let block_id = StacksBlockId::from_hex(index_block_hash_hex).unwrap(); let chain_state_path = format!("{db_path}/chainstate/"); let sort_db_path = format!("{db_path}/burnchain/sortition"); - let mainnet = conf.chain_id == CHAIN_ID_MAINNET; - let (mut chainstate, _) = - StacksChainState::open(mainnet, conf.chain_id, &chain_state_path, None).unwrap(); + let (mut chainstate, _) = StacksChainState::open( + conf.is_mainnet(), + conf.burnchain.chain_id, + &chain_state_path, + None, + ) + .unwrap(); + let burnchain = conf.get_burnchain(); + let epochs = conf.burnchain.get_epoch_list(); let mut sortdb = SortitionDB::connect( &sort_db_path, - conf.first_block_height, - &conf.first_burn_header_hash, - conf.first_burn_header_timestamp, - &conf.epochs, - conf.pox_constants.clone(), + burnchain.first_block_height, + &burnchain.first_block_hash, + u64::from(burnchain.first_block_timestamp), + &epochs, + burnchain.pox_constants.clone(), None, true, ) @@ -696,7 +871,7 @@ fn replay_block_nakamoto( ) }); - debug!("Process staging Nakamoto block"; + info!("Process staging Nakamoto block"; "consensus_hash" => %block.header.consensus_hash, "stacks_block_hash" => %block.header.block_hash(), "stacks_block_id" => %block.header.block_id(), @@ -940,3 +1115,36 @@ fn replay_block_nakamoto( Ok(()) } + +#[cfg(test)] +pub mod test { + use super::*; + + fn parse_cli_command(s: &str) -> Vec { + s.split(' ').map(String::from).collect() + } + + #[test] + pub fn test_drain_common_opts() { + // Should find/remove no options + let mut argv = parse_cli_command( + "stacks-inspect try-mine --config my_config.toml /tmp/chainstate/mainnet", + ); + let argv_init = argv.clone(); + let opts = drain_common_opts(&mut argv, 0); + let opts = drain_common_opts(&mut argv, 1); + + assert_eq!(argv, argv_init); + assert!(opts.config.is_none()); + + // Should find config opts and remove from vec + let mut argv = parse_cli_command( + "stacks-inspect --network mocknet --network mainnet try-mine /tmp/chainstate/mainnet", + ); + let opts = drain_common_opts(&mut argv, 1); + let argv_expected = parse_cli_command("stacks-inspect try-mine /tmp/chainstate/mainnet"); + + assert_eq!(argv, argv_expected); + assert!(opts.config.is_some()); + } +} diff --git a/testnet/stacks-node/src/chain_data.rs b/stackslib/src/config/chain_data.rs similarity index 97% rename from testnet/stacks-node/src/chain_data.rs rename to stackslib/src/config/chain_data.rs index cc60f964a3..e4c3899511 100644 --- a/testnet/stacks-node/src/chain_data.rs +++ b/stackslib/src/config/chain_data.rs @@ -17,21 +17,22 @@ use std::collections::HashMap; use std::process::{Command, Stdio}; -use stacks::burnchains::bitcoin::address::BitcoinAddress; -use 
stacks::burnchains::bitcoin::{BitcoinNetworkType, BitcoinTxOutput}; -use stacks::burnchains::{Burnchain, BurnchainSigner, Error as BurnchainError, Txid}; -use stacks::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; -use stacks::chainstate::burn::distribution::BurnSamplePoint; -use stacks::chainstate::burn::operations::leader_block_commit::{ - MissedBlockCommit, BURN_BLOCK_MINED_AT_MODULUS, -}; -use stacks::chainstate::burn::operations::LeaderBlockCommitOp; -use stacks::chainstate::stacks::address::PoxAddress; -use stacks::core::MINING_COMMITMENT_WINDOW; -use stacks::util_lib::db::Error as DBError; use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, VRFSeed}; use stacks_common::util::hash::hex_bytes; +use crate::burnchains::bitcoin::address::BitcoinAddress; +use crate::burnchains::bitcoin::{BitcoinNetworkType, BitcoinTxOutput}; +use crate::burnchains::{Burnchain, BurnchainSigner, Error as BurnchainError, Txid}; +use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; +use crate::chainstate::burn::distribution::BurnSamplePoint; +use crate::chainstate::burn::operations::leader_block_commit::{ + MissedBlockCommit, BURN_BLOCK_MINED_AT_MODULUS, +}; +use crate::chainstate::burn::operations::LeaderBlockCommitOp; +use crate::chainstate::stacks::address::PoxAddress; +use crate::core::MINING_COMMITMENT_WINDOW; +use crate::util_lib::db::Error as DBError; + pub struct MinerStats { pub unconfirmed_commits_helper: String, } @@ -526,11 +527,6 @@ pub mod tests { use std::fs; use std::io::Write; - use stacks::burnchains::{BurnchainSigner, Txid}; - use stacks::chainstate::burn::distribution::BurnSamplePoint; - use stacks::chainstate::burn::operations::leader_block_commit::BURN_BLOCK_MINED_AT_MODULUS; - use stacks::chainstate::burn::operations::LeaderBlockCommitOp; - use stacks::chainstate::stacks::address::{PoxAddress, PoxAddressType20}; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksPublicKey, VRFSeed, }; @@ -538,6 +534,11 @@ pub mod tests { use stacks_common::util::uint::{BitArray, Uint256}; use super::MinerStats; + use crate::burnchains::{BurnchainSigner, Txid}; + use crate::chainstate::burn::distribution::BurnSamplePoint; + use crate::chainstate::burn::operations::leader_block_commit::BURN_BLOCK_MINED_AT_MODULUS; + use crate::chainstate::burn::operations::LeaderBlockCommitOp; + use crate::chainstate::stacks::address::{PoxAddress, PoxAddressType20}; #[test] fn test_burn_dist_to_prob_dist() { diff --git a/testnet/stacks-node/src/config.rs b/stackslib/src/config/mod.rs similarity index 95% rename from testnet/stacks-node/src/config.rs rename to stackslib/src/config/mod.rs index 4ad793a4c3..80874d1c48 100644 --- a/testnet/stacks-node/src/config.rs +++ b/stackslib/src/config/mod.rs @@ -14,48 +14,20 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
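(Both file renames in this stretch, `chain_data.rs` and `config.rs`, move the node configuration types out of `testnet/stacks-node` and into `stackslib/src/config/`, which is why every `use stacks::...` inside them flips to `use crate::...`. For downstream code the direction reverses: the node crate can now reach these types through its `stacks` alias for stackslib. A hedged sketch of what a caller might look like after the move, reusing only constructors that appear in this diff; the function name `load_node_config` is illustrative:)

```rust
// Illustrative only: how a consumer such as stacks-node loads a Config once
// the module lives in stackslib (imported here under the `stacks` alias).
use stacks::config::{Config, ConfigFile};

fn load_node_config(path: &str) -> Config {
    // Both constructors below are the same ones used by `drain_common_opts` above.
    let file = ConfigFile::from_path(path)
        .unwrap_or_else(|e| panic!("Failed to read '{path}' as stacks-node config: {e}"));
    Config::from_config_file(file, false)
        .unwrap_or_else(|e| panic!("Failed to convert config file into node config: {e}"))
}
```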
+pub mod chain_data; + use std::collections::{HashMap, HashSet}; use std::net::{Ipv4Addr, SocketAddr, ToSocketAddrs}; use std::path::PathBuf; use std::str::FromStr; -use std::sync::{Arc, Mutex}; +use std::sync::{Arc, LazyLock, Mutex}; use std::time::Duration; use std::{cmp, fs, thread}; use clarity::vm::costs::ExecutionCost; use clarity::vm::types::{AssetIdentifier, PrincipalData, QualifiedContractIdentifier}; -use lazy_static::lazy_static; use rand::RngCore; use serde::Deserialize; -use stacks::burnchains::affirmation::AffirmationMap; -use stacks::burnchains::bitcoin::BitcoinNetworkType; -use stacks::burnchains::{Burnchain, MagicBytes, PoxConstants, BLOCKSTACK_MAGIC_MAINNET}; -use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; -use stacks::chainstate::stacks::boot::MINERS_NAME; -use stacks::chainstate::stacks::index::marf::MARFOpenOpts; -use stacks::chainstate::stacks::index::storage::TrieHashCalculationMode; -use stacks::chainstate::stacks::miner::{BlockBuilderSettings, MinerStatus}; -use stacks::chainstate::stacks::MAX_BLOCK_LEN; -use stacks::core::mempool::{MemPoolWalkSettings, MemPoolWalkTxTypes}; -use stacks::core::{ - MemPoolDB, StacksEpoch, StacksEpochExtension, StacksEpochId, - BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT, BITCOIN_TESTNET_STACKS_25_BURN_HEIGHT, - BITCOIN_TESTNET_STACKS_25_REORGED_HEIGHT, CHAIN_ID_MAINNET, CHAIN_ID_TESTNET, - PEER_VERSION_MAINNET, PEER_VERSION_TESTNET, -}; -use stacks::cost_estimates::fee_medians::WeightedMedianFeeRateEstimator; -use stacks::cost_estimates::fee_rate_fuzzer::FeeRateFuzzer; -use stacks::cost_estimates::fee_scalar::ScalarFeeRateEstimator; -use stacks::cost_estimates::metrics::{CostMetric, ProportionalDotProduct, UnitMetric}; -use stacks::cost_estimates::{CostEstimator, FeeEstimator, PessimisticEstimator, UnitEstimator}; -use stacks::net::atlas::AtlasConfig; -use stacks::net::connection::ConnectionOptions; -use stacks::net::{Neighbor, NeighborAddress, NeighborKey}; -use stacks::types::chainstate::BurnchainHeaderHash; -use stacks::types::EpochList; -use stacks::util::hash::to_hex; -use stacks::util_lib::boot::boot_code_id; -use stacks::util_lib::db::Error as DBError; use stacks_common::consts::SIGNER_SLOTS_PER_USER; use stacks_common::types::chainstate::StacksAddress; use stacks_common::types::net::PeerAddress; @@ -64,7 +36,36 @@ use stacks_common::util::get_epoch_time_ms; use stacks_common::util::hash::hex_bytes; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; -use crate::chain_data::MinerStats; +use crate::burnchains::affirmation::AffirmationMap; +use crate::burnchains::bitcoin::BitcoinNetworkType; +use crate::burnchains::{Burnchain, MagicBytes, PoxConstants, BLOCKSTACK_MAGIC_MAINNET}; +use crate::chainstate::nakamoto::signer_set::NakamotoSigners; +use crate::chainstate::stacks::boot::MINERS_NAME; +use crate::chainstate::stacks::index::marf::MARFOpenOpts; +use crate::chainstate::stacks::index::storage::TrieHashCalculationMode; +use crate::chainstate::stacks::miner::{BlockBuilderSettings, MinerStatus}; +use crate::chainstate::stacks::MAX_BLOCK_LEN; +use crate::config::chain_data::MinerStats; +use crate::core::mempool::{MemPoolWalkSettings, MemPoolWalkTxTypes}; +use crate::core::{ + MemPoolDB, StacksEpoch, StacksEpochExtension, StacksEpochId, + BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT, BITCOIN_TESTNET_STACKS_25_BURN_HEIGHT, + BITCOIN_TESTNET_STACKS_25_REORGED_HEIGHT, CHAIN_ID_MAINNET, CHAIN_ID_TESTNET, + PEER_VERSION_MAINNET, PEER_VERSION_TESTNET, STACKS_EPOCHS_REGTEST, STACKS_EPOCHS_TESTNET, +}; +use 
crate::cost_estimates::fee_medians::WeightedMedianFeeRateEstimator;
+use crate::cost_estimates::fee_rate_fuzzer::FeeRateFuzzer;
+use crate::cost_estimates::fee_scalar::ScalarFeeRateEstimator;
+use crate::cost_estimates::metrics::{CostMetric, ProportionalDotProduct, UnitMetric};
+use crate::cost_estimates::{CostEstimator, FeeEstimator, PessimisticEstimator, UnitEstimator};
+use crate::net::atlas::AtlasConfig;
+use crate::net::connection::{ConnectionOptions, DEFAULT_BLOCK_PROPOSAL_MAX_AGE_SECS};
+use crate::net::{Neighbor, NeighborAddress, NeighborKey};
+use crate::types::chainstate::BurnchainHeaderHash;
+use crate::types::EpochList;
+use crate::util::hash::to_hex;
+use crate::util_lib::boot::boot_code_id;
+use crate::util_lib::db::Error as DBError;
 
 pub const DEFAULT_SATS_PER_VB: u64 = 50;
 pub const OP_TX_BLOCK_COMMIT_ESTIM_SIZE: u64 = 380;
@@ -93,6 +94,45 @@ const DEFAULT_FIRST_REJECTION_PAUSE_MS: u64 = 5_000;
 const DEFAULT_SUBSEQUENT_REJECTION_PAUSE_MS: u64 = 10_000;
 const DEFAULT_BLOCK_COMMIT_DELAY_MS: u64 = 20_000;
 const DEFAULT_TENURE_COST_LIMIT_PER_BLOCK_PERCENTAGE: u8 = 25;
+// This should be greater than the signers' timeout. This is used for issuing fallback tenure extends
+const DEFAULT_TENURE_TIMEOUT_SECS: u64 = 420;
+
+static HELIUM_DEFAULT_CONNECTION_OPTIONS: LazyLock<ConnectionOptions> =
+    LazyLock::new(|| ConnectionOptions {
+        inbox_maxlen: 100,
+        outbox_maxlen: 100,
+        timeout: 15,
+        idle_timeout: 15, // how long a HTTP connection can be idle before it's closed
+        heartbeat: 3600,
+        // can't use u64::max, because sqlite stores as i64.
+        private_key_lifetime: 9223372036854775807,
+        num_neighbors: 32, // number of neighbors whose inventories we track
+        num_clients: 750, // number of inbound p2p connections
+        soft_num_neighbors: 16, // soft-limit on the number of neighbors whose inventories we track
+        soft_num_clients: 750, // soft limit on the number of inbound p2p connections
+        max_neighbors_per_host: 1, // maximum number of neighbors per host we permit
+        max_clients_per_host: 4, // maximum number of inbound p2p connections per host we permit
+        soft_max_neighbors_per_host: 1, // soft limit on the number of neighbors per host we permit
+        soft_max_neighbors_per_org: 32, // soft limit on the number of neighbors per AS we permit (TODO: for now it must be greater than num_neighbors)
+        soft_max_clients_per_host: 4, // soft limit on how many inbound p2p connections per host we permit
+        max_http_clients: 1000, // maximum number of HTTP connections
+        max_neighbors_of_neighbor: 10, // maximum number of neighbors we'll handshake with when doing a neighbor walk (I/O for this can be expensive, so keep small-ish)
+        walk_interval: 60, // how often, in seconds, we do a neighbor walk
+        walk_seed_probability: 0.1, // 10% of the time when not in IBD, walk to a non-seed node even if we aren't connected to a seed node
+        log_neighbors_freq: 60_000, // every minute, log all peer connections
+        inv_sync_interval: 45, // how often, in seconds, we refresh block inventories
+        inv_reward_cycles: 3, // how many reward cycles to look back on, for mainnet
+        download_interval: 10, // how often, in seconds, we do a block download scan (should be less than inv_sync_interval)
+        dns_timeout: 15_000,
+        max_inflight_blocks: 6,
+        max_inflight_attachments: 6,
+        ..std::default::Default::default()
+    });
+
+pub static DEFAULT_MAINNET_CONFIG: LazyLock<Config> = LazyLock::new(|| {
+    Config::from_config_file(ConfigFile::mainnet(), false)
+        .expect("Failed to create default mainnet config")
+});
 
 #[derive(Clone, Deserialize, Default, Debug)]
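The hunk above swaps the `lazy_static!` macro for `std::sync::LazyLock`, stabilized in Rust 1.80. A self-contained sketch of the pattern, with the struct trimmed to a few illustrative fields:

```rust
use std::sync::LazyLock;

#[derive(Default)]
struct ConnectionOptions {
    inbox_maxlen: usize,
    outbox_maxlen: usize,
    timeout: u64,
}

// Equivalent of `lazy_static! { static ref OPTS: ConnectionOptions = ...; }`:
// the closure runs at most once, on first dereference, with thread-safe
// initialization guaranteed by the standard library.
static OPTS: LazyLock<ConnectionOptions> = LazyLock::new(|| ConnectionOptions {
    inbox_maxlen: 100,
    outbox_maxlen: 100,
    ..Default::default()
});

fn main() {
    assert_eq!(OPTS.inbox_maxlen, 100);
    assert_eq!(OPTS.timeout, 0); // filled in by Default
}
```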
#[serde(deny_unknown_fields)] @@ -141,7 +181,7 @@ impl ConfigFile { mode: Some("xenon".to_string()), rpc_port: Some(18332), peer_port: Some(18333), - peer_host: Some("bitcoind.testnet.stacks.co".to_string()), + peer_host: Some("0.0.0.0".to_string()), magic_bytes: Some("T2".into()), ..BurnchainConfigFile::default() }; @@ -187,9 +227,9 @@ impl ConfigFile { mode: Some("mainnet".to_string()), rpc_port: Some(8332), peer_port: Some(8333), - peer_host: Some("bitcoin.blockstack.com".to_string()), - username: Some("blockstack".to_string()), - password: Some("blockstacksystem".to_string()), + peer_host: Some("0.0.0.0".to_string()), + username: Some("bitcoin".to_string()), + password: Some("bitcoin".to_string()), magic_bytes: Some("X2".to_string()), ..BurnchainConfigFile::default() }; @@ -311,39 +351,6 @@ pub struct Config { pub atlas: AtlasConfig, } -lazy_static! { - static ref HELIUM_DEFAULT_CONNECTION_OPTIONS: ConnectionOptions = ConnectionOptions { - inbox_maxlen: 100, - outbox_maxlen: 100, - timeout: 15, - idle_timeout: 15, // how long a HTTP connection can be idle before it's closed - heartbeat: 3600, - // can't use u64::max, because sqlite stores as i64. - private_key_lifetime: 9223372036854775807, - num_neighbors: 32, // number of neighbors whose inventories we track - num_clients: 750, // number of inbound p2p connections - soft_num_neighbors: 16, // soft-limit on the number of neighbors whose inventories we track - soft_num_clients: 750, // soft limit on the number of inbound p2p connections - max_neighbors_per_host: 1, // maximum number of neighbors per host we permit - max_clients_per_host: 4, // maximum number of inbound p2p connections per host we permit - soft_max_neighbors_per_host: 1, // soft limit on the number of neighbors per host we permit - soft_max_neighbors_per_org: 32, // soft limit on the number of neighbors per AS we permit (TODO: for now it must be greater than num_neighbors) - soft_max_clients_per_host: 4, // soft limit on how many inbound p2p connections per host we permit - max_http_clients: 1000, // maximum number of HTTP connections - max_neighbors_of_neighbor: 10, // maximum number of neighbors we'll handshake with when doing a neighbor walk (I/O for this can be expensive, so keep small-ish) - walk_interval: 60, // how often, in seconds, we do a neighbor walk - walk_seed_probability: 0.1, // 10% of the time when not in IBD, walk to a non-seed node even if we aren't connected to a seed node - log_neighbors_freq: 60_000, // every minute, log all peer connections - inv_sync_interval: 45, // how often, in seconds, we refresh block inventories - inv_reward_cycles: 3, // how many reward cycles to look back on, for mainnet - download_interval: 10, // how often, in seconds, we do a block download scan (should be less than inv_sync_interval) - dns_timeout: 15_000, - max_inflight_blocks: 6, - max_inflight_attachments: 6, - .. std::default::Default::default() - }; -} - impl Config { /// get the up-to-date burnchain options from the config. 
/// If the config file can't be loaded, then return the existing config
@@ -516,10 +523,7 @@ impl Config {
     }
 
     fn check_nakamoto_config(&self, burnchain: &Burnchain) {
-        let epochs = StacksEpoch::get_epochs(
-            self.burnchain.get_bitcoin_network().1,
-            self.burnchain.epochs.as_ref(),
-        );
+        let epochs = self.burnchain.get_epoch_list();
         let Some(epoch_30) = epochs.get(StacksEpochId::Epoch30) else {
             // no Epoch 3.0, so just return
             return;
         };
@@ -636,8 +640,8 @@ impl Config {
             BitcoinNetworkType::Mainnet => {
                 Err("Cannot configure epochs in mainnet mode".to_string())
             }
-            BitcoinNetworkType::Testnet => Ok(stacks::core::STACKS_EPOCHS_TESTNET.to_vec()),
-            BitcoinNetworkType::Regtest => Ok(stacks::core::STACKS_EPOCHS_REGTEST.to_vec()),
+            BitcoinNetworkType::Testnet => Ok(STACKS_EPOCHS_TESTNET.to_vec()),
+            BitcoinNetworkType::Regtest => Ok(STACKS_EPOCHS_REGTEST.to_vec()),
         }?;
         let mut matched_epochs = vec![];
         for configured_epoch in conf_epochs.iter() {
@@ -1283,6 +1287,10 @@ impl BurnchainConfig {
             other => panic!("Invalid stacks-node mode: {other}"),
         }
     }
+
+    pub fn get_epoch_list(&self) -> EpochList {
+        StacksEpoch::get_epochs(self.get_bitcoin_network().1, self.epochs.as_ref())
+    }
 }
 
 #[derive(Clone, Deserialize, Default, Debug)]
@@ -1430,7 +1438,7 @@ impl BurnchainConfigFile {
         // check magic bytes and set if not defined
         let mainnet_magic = ConfigFile::mainnet().burnchain.unwrap().magic_bytes;
         if self.magic_bytes.is_none() {
-            self.magic_bytes = mainnet_magic.clone();
+            self.magic_bytes.clone_from(&mainnet_magic);
         }
         if self.magic_bytes != mainnet_magic {
             return Err(format!(
@@ -1492,21 +1500,15 @@ impl BurnchainConfigFile {
                 .unwrap_or(default_burnchain_config.commit_anchor_block_within),
             peer_host: match self.peer_host.as_ref() {
                 Some(peer_host) => {
-                    // Using std::net::LookupHost would be preferable, but it's
-                    // unfortunately unstable at this point.
-                    // https://doc.rust-lang.org/1.6.0/std/net/struct.LookupHost.html
-                    let mut sock_addrs = format!("{peer_host}:1")
+                    format!("{}:1", &peer_host)
                         .to_socket_addrs()
-                        .map_err(|e| format!("Invalid burnchain.peer_host: {e}"))?;
-                    let sock_addr = match sock_addrs.next() {
-                        Some(addr) => addr,
-                        None => {
-                            return Err(format!(
-                                "No IP address could be queried for '{peer_host}'"
-                            ));
-                        }
-                    };
-                    format!("{}", sock_addr.ip())
+                        .map_err(|e| format!("Invalid burnchain.peer_host: {}", &e))?
+                        .next()
+                        .ok_or_else(|| {
+                            format!("No IP address could be queried for '{}'", &peer_host)
+                        })?;
+                    peer_host.clone()
                 }
                 None => default_burnchain_config.peer_host,
             },
@@ -1648,6 +1650,7 @@ pub struct NodeConfig {
     pub use_test_genesis_chainstate: Option<bool>,
     pub always_use_affirmation_maps: bool,
     pub require_affirmed_anchor_blocks: bool,
+    pub assume_present_anchor_blocks: bool,
     /// Fault injection for failing to push blocks
     pub fault_injection_block_push_fail_probability: Option,
     // fault injection for hiding blocks.
@@ -1931,6 +1934,7 @@ impl Default for NodeConfig {
             use_test_genesis_chainstate: None,
             always_use_affirmation_maps: true,
             require_affirmed_anchor_blocks: true,
+            assume_present_anchor_blocks: true,
             fault_injection_block_push_fail_probability: None,
             fault_injection_hide_blocks: false,
             chain_liveness_poll_time_secs: 300,
@@ -2145,6 +2149,8 @@ pub struct MinerConfig {
     pub block_commit_delay: Duration,
     /// The percentage of the remaining tenure cost limit to consume each block.
     pub tenure_cost_limit_per_block_percentage: Option<u8>,
+    /// Duration to wait before attempting to issue a tenure extend
+    pub tenure_timeout: Duration,
 }
 
 impl Default for MinerConfig {
@@ -2181,6 +2187,7 @@ impl Default for MinerConfig {
             tenure_cost_limit_per_block_percentage: Some(
                 DEFAULT_TENURE_COST_LIMIT_PER_BLOCK_PERCENTAGE,
             ),
+            tenure_timeout: Duration::from_secs(DEFAULT_TENURE_TIMEOUT_SECS),
         }
     }
 }
@@ -2234,6 +2241,7 @@ pub struct ConnectionOptionsFile {
     pub antientropy_retry: Option<u64>,
     pub reject_blocks_pushed: Option<bool>,
     pub stackerdb_hint_replicas: Option,
+    pub block_proposal_max_age_secs: Option<u64>,
 }
 
 impl ConnectionOptionsFile {
@@ -2382,6 +2390,9 @@ impl ConnectionOptionsFile {
                 .transpose()?
                 .map(HashMap::from_iter)
                 .unwrap_or(default.stackerdb_hint_replicas),
+            block_proposal_max_age_secs: self
+                .block_proposal_max_age_secs
+                .unwrap_or(DEFAULT_BLOCK_PROPOSAL_MAX_AGE_SECS),
             ..default
         })
     }
@@ -2417,6 +2428,7 @@ pub struct NodeConfigFile {
     pub use_test_genesis_chainstate: Option<bool>,
     pub always_use_affirmation_maps: Option<bool>,
     pub require_affirmed_anchor_blocks: Option<bool>,
+    pub assume_present_anchor_blocks: Option<bool>,
     /// At most, how often should the chain-liveness thread
     /// wake up the chains-coordinator. Defaults to 300s (5 min).
     pub chain_liveness_poll_time_secs: Option<u64>,
@@ -2498,6 +2510,10 @@ impl NodeConfigFile {
             // miners should always try to mine, even if they don't have the anchored
             // blocks in the canonical affirmation map. Followers, however, can stall.
             require_affirmed_anchor_blocks: self.require_affirmed_anchor_blocks.unwrap_or(!miner),
+            // as of epoch 3.0, all prepare phases have anchor blocks.
+            // at the start of epoch 3.0, the chain stalls without anchor blocks.
+            // only set this to false if you're doing some very extreme testing.
+            assume_present_anchor_blocks: self.assume_present_anchor_blocks.unwrap_or(true),
             // chainstate fault_injection activation for hide_blocks.
             // you can't set this in the config file.
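Both new knobs follow the same plumbing convention as the rest of this file: an `Option` in the TOML-facing struct, collapsed onto a typed default when the runtime config is built. A reduced sketch of that shape (struct names reused for orientation; everything else trimmed):

```rust
use std::time::Duration;

const DEFAULT_TENURE_TIMEOUT_SECS: u64 = 420;

struct MinerConfigFile {
    tenure_timeout_secs: Option<u64>, // raw value parsed from TOML, if present
}

struct MinerConfig {
    tenure_timeout: Duration, // always populated at runtime
}

impl MinerConfigFile {
    fn into_config(self) -> MinerConfig {
        MinerConfig {
            // Missing in the file? Fall back to the compiled-in default.
            tenure_timeout: self
                .tenure_timeout_secs
                .map(Duration::from_secs)
                .unwrap_or(Duration::from_secs(DEFAULT_TENURE_TIMEOUT_SECS)),
        }
    }
}

fn main() {
    let from_toml = MinerConfigFile { tenure_timeout_secs: None };
    assert_eq!(from_toml.into_config().tenure_timeout, Duration::from_secs(420));
}
```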
             fault_injection_hide_blocks: false,
@@ -2566,6 +2582,7 @@ pub struct MinerConfigFile {
     pub subsequent_rejection_pause_ms: Option<u64>,
     pub block_commit_delay_ms: Option<u64>,
     pub tenure_cost_limit_per_block_percentage: Option<u8>,
+    pub tenure_timeout_secs: Option<u64>,
 }
 
 impl MinerConfigFile {
@@ -2706,6 +2723,7 @@ impl MinerConfigFile {
             subsequent_rejection_pause_ms: self.subsequent_rejection_pause_ms.unwrap_or(miner_default_config.subsequent_rejection_pause_ms),
             block_commit_delay: self.block_commit_delay_ms.map(Duration::from_millis).unwrap_or(miner_default_config.block_commit_delay),
             tenure_cost_limit_per_block_percentage,
+            tenure_timeout: self.tenure_timeout_secs.map(Duration::from_secs).unwrap_or(miner_default_config.tenure_timeout),
         })
     }
 }
diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs
index 46ff54924b..c6369ecfc3 100644
--- a/stackslib/src/core/mempool.rs
+++ b/stackslib/src/core/mempool.rs
@@ -78,7 +78,7 @@ pub static MEMPOOL_NAKAMOTO_MAX_TRANSACTION_AGE: Duration =
     Duration::from_secs(MEMPOOL_MAX_TRANSACTION_AGE * 10 * 60);
 
 // name of table for storing the counting bloom filter
-pub const BLOOM_COUNTER_TABLE: &'static str = "txid_bloom_counter";
+pub const BLOOM_COUNTER_TABLE: &str = "txid_bloom_counter";
 
 // bloom filter error rate
 pub const BLOOM_COUNTER_ERROR_RATE: f64 = 0.001;
@@ -390,7 +390,12 @@ pub trait ProposalCallbackReceiver: Send {
 pub trait MemPoolEventDispatcher {
     fn get_proposal_callback_receiver(&self) -> Option<Box<dyn ProposalCallbackReceiver>>;
-    fn mempool_txs_dropped(&self, txids: Vec<Txid>, reason: MemPoolDropReason);
+    fn mempool_txs_dropped(
+        &self,
+        txids: Vec<Txid>,
+        new_txid: Option<Txid>,
+        reason: MemPoolDropReason,
+    );
     fn mined_block_event(
         &self,
         target_burn_height: u64,
@@ -582,13 +587,13 @@ impl MemPoolWalkSettings {
 }
 
 impl FromRow<Txid> for Txid {
-    fn from_row<'a>(row: &'a Row) -> Result<Txid, db_error> {
+    fn from_row(row: &Row) -> Result<Txid, db_error> {
         row.get(0).map_err(db_error::SqliteError)
     }
 }
 
 impl FromRow<MemPoolTxMetadata> for MemPoolTxMetadata {
-    fn from_row<'a>(row: &'a Row) -> Result<MemPoolTxMetadata, db_error> {
+    fn from_row(row: &Row) -> Result<MemPoolTxMetadata, db_error> {
         let txid = Txid::from_column(row, "txid")?;
         let tenure_consensus_hash = ConsensusHash::from_column(row, "consensus_hash")?;
         let tenure_block_header_hash = BlockHeaderHash::from_column(row, "block_header_hash")?;
@@ -624,7 +629,7 @@ impl FromRow<MemPoolTxMetadata> for MemPoolTxMetadata {
 }
 
 impl FromRow<MemPoolTxInfo> for MemPoolTxInfo {
-    fn from_row<'a>(row: &'a Row) -> Result<MemPoolTxInfo, db_error> {
+    fn from_row(row: &Row) -> Result<MemPoolTxInfo, db_error> {
         let md = MemPoolTxMetadata::from_row(row)?;
         let tx_bytes: Vec<u8> = row.get_unwrap("tx");
         let tx = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..])
@@ -639,7 +644,7 @@ impl FromRow<MemPoolTxInfo> for MemPoolTxInfo {
 }
 
 impl FromRow<MemPoolTxInfoPartial> for MemPoolTxInfoPartial {
-    fn from_row<'a>(row: &'a Row) -> Result<MemPoolTxInfoPartial, db_error> {
+    fn from_row(row: &Row) -> Result<MemPoolTxInfoPartial, db_error> {
         let txid = Txid::from_column(row, "txid")?;
         let fee_rate: Option<f64> = match row.get("fee_rate") {
             Ok(rate) => Some(rate),
@@ -662,7 +667,7 @@ impl FromRow<MemPoolTxInfoPartial> for MemPoolTxInfoPartial {
 }
 
 impl FromRow<(u64, u64)> for (u64, u64) {
-    fn from_row<'a>(row: &'a Row) -> Result<(u64, u64), db_error> {
+    fn from_row(row: &Row) -> Result<(u64, u64), db_error> {
         let t1: i64 = row.get_unwrap(0);
         let t2: i64 = row.get_unwrap(1);
         if t1 < 0 || t2 < 0 {
@@ -672,7 +677,7 @@ impl FromRow<(u64, u64)> for (u64, u64) {
     }
 }
 
-const MEMPOOL_INITIAL_SCHEMA: &'static [&'static str] = &[r#"
+const MEMPOOL_INITIAL_SCHEMA: &[&str] = &[r#"
     CREATE TABLE mempool(
         txid TEXT NOT NULL,
         origin_address TEXT NOT NULL,
@@ -697,7 +702,7 @@ const MEMPOOL_INITIAL_SCHEMA: &'static [&'static str] = &[r#"
     );
     "#];
 
-const MEMPOOL_SCHEMA_2_COST_ESTIMATOR:
&'static [&'static str] = &[ +const MEMPOOL_SCHEMA_2_COST_ESTIMATOR: &[&str] = &[ r#" CREATE TABLE fee_estimates( txid TEXT NOT NULL, @@ -722,7 +727,7 @@ const MEMPOOL_SCHEMA_2_COST_ESTIMATOR: &'static [&'static str] = &[ "#, ]; -const MEMPOOL_SCHEMA_3_BLOOM_STATE: &'static [&'static str] = &[ +const MEMPOOL_SCHEMA_3_BLOOM_STATE: &[&str] = &[ r#" CREATE TABLE IF NOT EXISTS removed_txids( txid TEXT PRIMARY KEY NOT NULL, @@ -743,7 +748,7 @@ const MEMPOOL_SCHEMA_3_BLOOM_STATE: &'static [&'static str] = &[ "#, ]; -const MEMPOOL_SCHEMA_4_BLACKLIST: &'static [&'static str] = &[ +const MEMPOOL_SCHEMA_4_BLACKLIST: &[&str] = &[ r#" -- List of transactions that will never be stored to the mempool again, for as long as the rows exist. -- `arrival_time` indicates when the entry was created. This is used to garbage-collect the list. @@ -782,7 +787,7 @@ const MEMPOOL_SCHEMA_4_BLACKLIST: &'static [&'static str] = &[ "#, ]; -const MEMPOOL_SCHEMA_5: &'static [&'static str] = &[ +const MEMPOOL_SCHEMA_5: &[&str] = &[ r#" ALTER TABLE mempool ADD COLUMN fee_rate NUMBER; "#, @@ -798,7 +803,7 @@ const MEMPOOL_SCHEMA_5: &'static [&'static str] = &[ "#, ]; -const MEMPOOL_SCHEMA_6_NONCES: &'static [&'static str] = &[ +const MEMPOOL_SCHEMA_6_NONCES: &[&str] = &[ r#" CREATE TABLE nonces( address TEXT PRIMARY KEY NOT NULL, @@ -810,7 +815,7 @@ const MEMPOOL_SCHEMA_6_NONCES: &'static [&'static str] = &[ "#, ]; -const MEMPOOL_SCHEMA_7_TIME_ESTIMATES: &'static [&'static str] = &[ +const MEMPOOL_SCHEMA_7_TIME_ESTIMATES: &[&str] = &[ r#" -- ALLOW NULL ALTER TABLE mempool ADD COLUMN time_estimate_ms INTEGER; @@ -820,7 +825,7 @@ const MEMPOOL_SCHEMA_7_TIME_ESTIMATES: &'static [&'static str] = &[ "#, ]; -const MEMPOOL_INDEXES: &'static [&'static str] = &[ +const MEMPOOL_INDEXES: &[&str] = &[ "CREATE INDEX IF NOT EXISTS by_txid ON mempool(txid);", "CREATE INDEX IF NOT EXISTS by_height ON mempool(height);", "CREATE INDEX IF NOT EXISTS by_txid_and_height ON mempool(txid,height);", @@ -1214,6 +1219,12 @@ impl CandidateCache { fn len(&self) -> usize { self.cache.len() + self.next.len() } + + /// Is the cache empty? 
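The `from_row` rewrites earlier in this file are clippy's `needless_lifetimes` fix: `fn from_row<'a>(row: &'a Row)` names a lifetime that is used only once, so it can be elided without changing meaning. A sketch of the decoder-trait shape, assuming the `rusqlite` crate and a stand-in error type (the real trait lives in `util_lib::db`):

```rust
use rusqlite::Row;

#[derive(Debug)]
enum DbError {
    Sqlite(rusqlite::Error),
}

// One decoder impl per storable type, mirroring util_lib::db::FromRow.
trait FromRow<T> {
    // Elided form; the explicit `fn from_row<'a>(row: &'a Row) -> ...`
    // means exactly the same thing and just carries extra noise.
    fn from_row(row: &Row) -> Result<T, DbError>;
}

struct TxidHex(String);

impl FromRow<TxidHex> for TxidHex {
    fn from_row(row: &Row) -> Result<TxidHex, DbError> {
        row.get(0).map(TxidHex).map_err(DbError::Sqlite)
    }
}
```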
+    #[cfg_attr(test, mutants::skip)]
+    fn is_empty(&self) -> bool {
+        self.cache.is_empty() && self.next.is_empty()
+    }
 }
 
 /// Evaluates the pair of nonces, to determine an order
@@ -1835,13 +1846,10 @@ impl MemPoolDB {
                 continue;
             }
 
-            let do_consider = if settings.filter_origins.len() > 0 {
-                settings
-                    .filter_origins
-                    .contains(&tx_info.metadata.origin_address)
-            } else {
-                true
-            };
+            let do_consider = settings.filter_origins.is_empty()
+                || settings
+                    .filter_origins
+                    .contains(&tx_info.metadata.origin_address);
 
             if !do_consider {
                 debug!("Will skip mempool tx, since it does not have an allowed origin";
@@ -1933,7 +1941,7 @@ impl MemPoolDB {
         drop(query_stmt_null);
         drop(query_stmt_fee);
 
-        if retry_store.len() > 0 {
+        if !retry_store.is_empty() {
             let tx = self.tx_begin()?;
             for (address, nonce) in retry_store.into_iter() {
                 nonce_cache.update(address, nonce, &tx);
@@ -1953,7 +1961,7 @@
         &self.db
     }
 
-    pub fn tx_begin<'a>(&'a mut self) -> Result<MemPoolTx<'a>, db_error> {
+    pub fn tx_begin(&mut self) -> Result<MemPoolTx<'_>, db_error> {
         let tx = tx_begin_immediate(&mut self.db)?;
         Ok(MemPoolTx::new(
             tx,
@@ -1964,7 +1972,7 @@ pub fn db_has_tx(conn: &DBConn, txid: &Txid) -> Result<bool, db_error> {
         query_row(conn, "SELECT 1 FROM mempool WHERE txid = ?1", params![txid])
-            .and_then(|row_opt: Option<i64>| Ok(row_opt.is_some()))
+            .map(|row_opt: Option<i64>| row_opt.is_some())
     }
 
     pub fn get_tx(conn: &DBConn, txid: &Txid) -> Result<Option<MemPoolTxInfo>, db_error> {
@@ -2229,7 +2237,7 @@
         // broadcast drop event if a tx is being replaced
         if let (Some(prior_tx), Some(event_observer)) = (prior_tx, event_observer) {
-            event_observer.mempool_txs_dropped(vec![prior_tx.txid], replace_reason);
+            event_observer.mempool_txs_dropped(vec![prior_tx.txid], Some(txid), replace_reason);
         };
 
         Ok(())
@@ -2275,7 +2283,7 @@
         if let Some(event_observer) = event_observer {
             let sql = "SELECT txid FROM mempool WHERE accept_time < ?1";
             let txids = query_rows(tx, sql, args)?;
-            event_observer.mempool_txs_dropped(txids, MemPoolDropReason::STALE_COLLECT);
+            event_observer.mempool_txs_dropped(txids, None, MemPoolDropReason::STALE_COLLECT);
         }
 
         let sql = "DELETE FROM mempool WHERE accept_time < ?1";
@@ -2297,7 +2305,7 @@
         if let Some(event_observer) = event_observer {
             let sql = "SELECT txid FROM mempool WHERE height < ?1";
             let txids = query_rows(tx, sql, args)?;
-            event_observer.mempool_txs_dropped(txids, None, MemPoolDropReason::STALE_COLLECT);
+            event_observer.mempool_txs_dropped(txids, None, MemPoolDropReason::STALE_COLLECT);
         }
 
         let sql = "DELETE FROM mempool WHERE height < ?1";
@@ -2572,11 +2580,7 @@
     /// Blacklist transactions from the mempool
     /// Do not call directly; it's `pub` only for testing
-    pub fn inner_blacklist_txs<'a>(
-        tx: &DBTx<'a>,
-        txids: &[Txid],
-        now: u64,
-    ) -> Result<(), db_error> {
+    pub fn inner_blacklist_txs(tx: &DBTx<'_>, txids: &[Txid], now: u64) -> Result<(), db_error> {
         for txid in txids {
             let sql = "INSERT OR REPLACE INTO tx_blacklist (txid, arrival_time) VALUES (?1, ?2)";
             let args = params![txid, &u64_to_sql(now)?];
@@ -2587,8 +2591,8 @@
     /// garbage-collect the tx blacklist -- delete any transactions whose blacklist timeout has
     /// been exceeded
-    pub fn garbage_collect_tx_blacklist<'a>(
-        tx: &DBTx<'a>,
+    pub fn garbage_collect_tx_blacklist(
+        tx: &DBTx<'_>,
         now: u64,
         timeout: u64,
         max_size: u64,
@@ -2649,7 +2653,7 @@
     /// Inner code body for dropping transactions.
     /// Note that the bloom filter will *NOT* be updated.  That's the caller's job, if desired.
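Many hunks in this file are clippy's `len_zero` cleanups: `len() > 0` becomes `!is_empty()`, and the `filter_origins` branch collapses into a single boolean expression. The two forms are equivalent, which a quick check confirms:

```rust
use std::collections::HashSet;

fn do_consider_old(filter_origins: &HashSet<String>, origin: &str) -> bool {
    if filter_origins.len() > 0 {
        filter_origins.contains(origin)
    } else {
        true
    }
}

fn do_consider_new(filter_origins: &HashSet<String>, origin: &str) -> bool {
    // An empty filter admits everything; otherwise membership decides.
    filter_origins.is_empty() || filter_origins.contains(origin)
}

fn main() {
    let empty = HashSet::new();
    let mut some = HashSet::new();
    some.insert("addr1".to_string());

    for origin in ["addr1", "addr2"] {
        assert_eq!(do_consider_old(&empty, origin), do_consider_new(&empty, origin));
        assert_eq!(do_consider_old(&some, origin), do_consider_new(&some, origin));
    }
}
```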
- fn inner_drop_txs<'a>(tx: &DBTx<'a>, txids: &[Txid]) -> Result<(), db_error> { + fn inner_drop_txs(tx: &DBTx<'_>, txids: &[Txid]) -> Result<(), db_error> { let sql = "DELETE FROM mempool WHERE txid = ?"; for txid in txids.iter() { tx.execute(sql, &[txid])?; diff --git a/stackslib/src/core/mod.rs b/stackslib/src/core/mod.rs index 0f43c40756..899f9d4a2f 100644 --- a/stackslib/src/core/mod.rs +++ b/stackslib/src/core/mod.rs @@ -219,10 +219,10 @@ pub const BLOCK_LIMIT_MAINNET_21: ExecutionCost = ExecutionCost { // Block limit for the testnet in Stacks 2.0. pub const HELIUM_BLOCK_LIMIT_20: ExecutionCost = ExecutionCost { - write_length: 15_0_000_000, - write_count: 5_0_000, + write_length: 150_000_000, + write_count: 50_000, read_length: 1_000_000_000, - read_count: 5_0_000, + read_count: 50_000, // allow much more runtime in helium blocks than mainnet runtime: 100_000_000_000, }; diff --git a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs index a209ef0677..963820a741 100644 --- a/stackslib/src/core/tests/mod.rs +++ b/stackslib/src/core/tests/mod.rs @@ -71,11 +71,11 @@ use crate::util_lib::bloom::*; use crate::util_lib::db::{tx_begin_immediate, DBConn, FromRow}; use crate::util_lib::strings::StacksString; -const FOO_CONTRACT: &'static str = "(define-public (foo) (ok 1)) +const FOO_CONTRACT: &str = "(define-public (foo) (ok 1)) (define-public (bar (x uint)) (ok x))"; -const SK_1: &'static str = "a1289f6438855da7decf9b61b852c882c398cff1446b2a0f823538aa2ebef92e01"; -const SK_2: &'static str = "4ce9a8f7539ea93753a36405b16e8b57e15a552430410709c2b6d65dca5c02e201"; -const SK_3: &'static str = "cb95ddd0fe18ec57f4f3533b95ae564b3f1ae063dbf75b46334bd86245aef78501"; +const SK_1: &str = "a1289f6438855da7decf9b61b852c882c398cff1446b2a0f823538aa2ebef92e01"; +const SK_2: &str = "4ce9a8f7539ea93753a36405b16e8b57e15a552430410709c2b6d65dca5c02e201"; +const SK_3: &str = "cb95ddd0fe18ec57f4f3533b95ae564b3f1ae063dbf75b46334bd86245aef78501"; #[test] fn mempool_db_init() { @@ -1211,10 +1211,10 @@ fn test_iterate_candidates_concurrent_write_lock() { mempool_tx.commit().unwrap(); } - assert!(expected_addr_nonces.len() > 0); + assert!(!expected_addr_nonces.is_empty()); let all_addr_nonces = db_get_all_nonces(mempool.conn()).unwrap(); - assert_eq!(all_addr_nonces.len(), 0); + assert!(all_addr_nonces.is_empty()); // start a thread that holds a write-lock on the mempool let write_thread = std::thread::spawn(move || { @@ -1266,7 +1266,7 @@ fn test_iterate_candidates_concurrent_write_lock() { assert_eq!(all_addr_nonces.len(), expected_addr_nonces.len()); for (addr, nonce) in all_addr_nonces { - assert!(expected_addr_nonces.get(&addr).is_some()); + assert!(expected_addr_nonces.contains_key(&addr)); assert_eq!(nonce, 24); } } @@ -1397,7 +1397,7 @@ fn mempool_db_load_store_replace_tx(#[case] behavior: MempoolCollectionBehavior) let chainstate_path = chainstate_path(&path_name); let mut mempool = MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); - let mut txs = codec_all_transactions( + let txs = codec_all_transactions( &TransactionVersion::Testnet, 0x80000000, &TransactionAnchorMode::Any, @@ -1409,7 +1409,7 @@ fn mempool_db_load_store_replace_tx(#[case] behavior: MempoolCollectionBehavior) let mut mempool_tx = mempool.tx_begin().unwrap(); eprintln!("add all txs"); - for (i, mut tx) in txs.drain(..).enumerate() { + for (i, mut tx) in txs.into_iter().enumerate() { // make sure each address is unique per tx (not the case in codec_all_transactions) let origin_address = StacksAddress { version: 
22, @@ -1992,8 +1992,7 @@ fn test_txtags() { let txtags = mempool.get_txtags(&seed).unwrap(); let len_txtags = all_txtags.len(); - let last_txtags = - &all_txtags[len_txtags.saturating_sub(BLOOM_COUNTER_DEPTH as usize)..len_txtags]; + let last_txtags = &all_txtags[len_txtags.saturating_sub(BLOOM_COUNTER_DEPTH)..len_txtags]; let mut expected_txtag_set = HashSet::new(); for txtags in last_txtags.iter() { @@ -2240,7 +2239,7 @@ fn test_find_next_missing_transactions() { txid.clone(), tx_bytes, tx_fee, - block_height as u64, + block_height, &origin_addr, origin_nonce, &sponsor_addr, @@ -2375,9 +2374,9 @@ fn test_find_next_missing_transactions() { ) .unwrap(); assert!(txs.len() <= page_size as usize); - assert!(num_visited <= page_size as u64); + assert!(num_visited <= page_size); - if txs.len() == 0 { + if txs.is_empty() { assert!(next_page_opt.is_none()); break; } @@ -2414,9 +2413,9 @@ fn test_find_next_missing_transactions() { eprintln!("find_next_missing_transactions with empty bloom filter took {} ms to serve {} transactions", ts_after.saturating_sub(ts_before), page_size); assert!(txs.len() <= page_size as usize); - assert!(num_visited <= page_size as u64); + assert!(num_visited <= page_size); - if txs.len() == 0 { + if txs.is_empty() { assert!(next_page_opt.is_none()); break; } diff --git a/stackslib/src/cost_estimates/fee_medians.rs b/stackslib/src/cost_estimates/fee_medians.rs index 12bd2fb9b8..38d200d8a2 100644 --- a/stackslib/src/cost_estimates/fee_medians.rs +++ b/stackslib/src/cost_estimates/fee_medians.rs @@ -21,7 +21,7 @@ use crate::util_lib::db::{ sql_pragma, sqlite_open, table_exists, tx_begin_immediate_sqlite, u64_to_sql, }; -const CREATE_TABLE: &'static str = " +const CREATE_TABLE: &str = " CREATE TABLE median_fee_estimator ( measure_key INTEGER PRIMARY KEY AUTOINCREMENT, high NUMBER NOT NULL, @@ -200,7 +200,7 @@ impl FeeEstimator for WeightedMedianFeeRateEstimator { maybe_add_minimum_fee_rate(&mut working_fee_rates, self.full_block_weight); // If fee rates non-empty, then compute an update. - if working_fee_rates.len() > 0 { + if !working_fee_rates.is_empty() { // Values must be sorted. 
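The `HELIUM_BLOCK_LIMIT_20` hunk above only regroups digit separators; Rust ignores underscores inside numeric literals, so the constants' values are unchanged and the new grouping simply reads as conventional thousands separators:

```rust
fn main() {
    // Underscores are purely visual: both spellings denote the same number.
    assert_eq!(15_0_000_000u64, 150_000_000u64);
    assert_eq!(5_0_000u64, 50_000u64);
}
```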
working_fee_rates.sort_by(|a, b| { a.fee_rate @@ -244,7 +244,7 @@ pub fn fee_rate_estimate_from_sorted_weighted_fees( for rate_and_weight in sorted_fee_rates { cumulative_weight += rate_and_weight.weight as f64; let percentile_n: f64 = - (cumulative_weight as f64 - rate_and_weight.weight as f64 / 2f64) / total_weight as f64; + (cumulative_weight - rate_and_weight.weight as f64 / 2f64) / total_weight; percentiles.push(percentile_n); } assert_eq!(percentiles.len(), sorted_fee_rates.len()); diff --git a/stackslib/src/cost_estimates/fee_scalar.rs b/stackslib/src/cost_estimates/fee_scalar.rs index ff7911058f..c3ad8bd40c 100644 --- a/stackslib/src/cost_estimates/fee_scalar.rs +++ b/stackslib/src/cost_estimates/fee_scalar.rs @@ -21,7 +21,7 @@ use crate::util_lib::db::{ }; const SINGLETON_ROW_ID: i64 = 1; -const CREATE_TABLE: &'static str = " +const CREATE_TABLE: &str = " CREATE TABLE scalar_fee_estimator ( estimate_key NUMBER PRIMARY KEY, high NUMBER NOT NULL, diff --git a/stackslib/src/cost_estimates/pessimistic.rs b/stackslib/src/cost_estimates/pessimistic.rs index cdb3ceb7da..9894180480 100644 --- a/stackslib/src/cost_estimates/pessimistic.rs +++ b/stackslib/src/cost_estimates/pessimistic.rs @@ -37,7 +37,7 @@ struct Samples { } const SAMPLE_SIZE: usize = 10; -const CREATE_TABLE: &'static str = " +const CREATE_TABLE: &str = " CREATE TABLE pessimistic_estimator ( estimate_key TEXT PRIMARY KEY, current_value NUMBER NOT NULL, diff --git a/stackslib/src/lib.rs b/stackslib/src/lib.rs index 31f97628a6..df8f664cba 100644 --- a/stackslib/src/lib.rs +++ b/stackslib/src/lib.rs @@ -63,20 +63,21 @@ pub mod clarity_cli; /// A high level library for interacting with the Clarity vm pub mod clarity_vm; pub mod cli; +pub mod config; pub mod core; pub mod cost_estimates; pub mod deps; pub mod monitoring; // set via _compile-time_ envars -const GIT_BRANCH: Option<&'static str> = option_env!("GIT_BRANCH"); -const GIT_COMMIT: Option<&'static str> = option_env!("GIT_COMMIT"); -const GIT_TREE_CLEAN: Option<&'static str> = option_env!("GIT_TREE_CLEAN"); +const GIT_BRANCH: Option<&str> = option_env!("GIT_BRANCH"); +const GIT_COMMIT: Option<&str> = option_env!("GIT_COMMIT"); +const GIT_TREE_CLEAN: Option<&str> = option_env!("GIT_TREE_CLEAN"); #[cfg(debug_assertions)] -const BUILD_TYPE: &'static str = "debug"; +const BUILD_TYPE: &str = "debug"; #[cfg(not(debug_assertions))] -const BUILD_TYPE: &'static str = "release"; +const BUILD_TYPE: &str = "release"; pub fn version_string(pkg_name: &str, pkg_version: &str) -> String { let git_branch = GIT_BRANCH diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 7f324c52c8..af597808c0 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -309,6 +309,8 @@ fn main() { process::exit(1); } + let common_opts = cli::drain_common_opts(&mut argv, 1); + if argv[1] == "--version" { println!( "{}", @@ -796,128 +798,7 @@ check if the associated microblocks can be downloaded } if argv[1] == "try-mine" { - if argv.len() < 3 { - eprintln!( - "Usage: {} try-mine [min-fee [max-time]] - -Given a , try to ''mine'' an anchored block. This invokes the miner block -assembly, but does not attempt to broadcast a block commit. This is useful for determining -what transactions a given chain state would include in an anchor block, or otherwise -simulating a miner. 
-", - argv[0] - ); - process::exit(1); - } - - let start = get_epoch_time_ms(); - let burnchain_path = format!("{}/mainnet/burnchain", &argv[2]); - let sort_db_path = format!("{}/mainnet/burnchain/sortition", &argv[2]); - let chain_state_path = format!("{}/mainnet/chainstate/", &argv[2]); - - let mut min_fee = u64::MAX; - let mut max_time = u64::MAX; - - if argv.len() >= 4 { - min_fee = argv[3].parse().expect("Could not parse min_fee"); - } - if argv.len() >= 5 { - max_time = argv[4].parse().expect("Could not parse max_time"); - } - - let sort_db = SortitionDB::open(&sort_db_path, false, PoxConstants::mainnet_default()) - .unwrap_or_else(|_| panic!("Failed to open {sort_db_path}")); - let chain_id = CHAIN_ID_MAINNET; - let (chain_state, _) = StacksChainState::open(true, chain_id, &chain_state_path, None) - .expect("Failed to open stacks chain state"); - let chain_tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()) - .expect("Failed to get sortition chain tip"); - - let estimator = Box::new(UnitEstimator); - let metric = Box::new(UnitMetric); - - let mut mempool_db = MemPoolDB::open(true, chain_id, &chain_state_path, estimator, metric) - .expect("Failed to open mempool db"); - - let header_tip = NakamotoChainState::get_canonical_block_header(chain_state.db(), &sort_db) - .unwrap() - .unwrap(); - let parent_header = StacksChainState::get_anchored_block_header_info( - chain_state.db(), - &header_tip.consensus_hash, - &header_tip.anchored_header.block_hash(), - ) - .expect("Failed to load chain tip header info") - .expect("Failed to load chain tip header info"); - - let sk = StacksPrivateKey::new(); - let mut tx_auth = TransactionAuth::from_p2pkh(&sk).unwrap(); - tx_auth.set_origin_nonce(0); - - let mut coinbase_tx = StacksTransaction::new( - TransactionVersion::Mainnet, - tx_auth, - TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None, None), - ); - - coinbase_tx.chain_id = chain_id; - coinbase_tx.anchor_mode = TransactionAnchorMode::OnChainOnly; - let mut tx_signer = StacksTransactionSigner::new(&coinbase_tx); - tx_signer.sign_origin(&sk).unwrap(); - let coinbase_tx = tx_signer.get_tx().unwrap(); - - let mut settings = BlockBuilderSettings::limited(); - settings.max_miner_time_ms = max_time; - - let result = StacksBlockBuilder::build_anchored_block( - &chain_state, - &sort_db.index_handle(&chain_tip.sortition_id), - &mut mempool_db, - &parent_header, - chain_tip.total_burn, - VRFProof::empty(), - Hash160([0; 20]), - &coinbase_tx, - settings, - None, - &Burnchain::new(&burnchain_path, "bitcoin", "main").unwrap(), - ); - - let stop = get_epoch_time_ms(); - - println!( - "{} mined block @ height = {} off of {} ({}/{}) in {}ms. Min-fee: {}, Max-time: {}", - if result.is_ok() { - "Successfully" - } else { - "Failed to" - }, - parent_header.stacks_block_height + 1, - StacksBlockHeader::make_index_block_hash( - &parent_header.consensus_hash, - &parent_header.anchored_header.block_hash() - ), - &parent_header.consensus_hash, - &parent_header.anchored_header.block_hash(), - stop.saturating_sub(start), - min_fee, - max_time - ); - - if let Ok((block, execution_cost, size)) = result { - let mut total_fees = 0; - for tx in block.txs.iter() { - total_fees += tx.get_tx_fee(); - } - println!( - "Block {}: {} uSTX, {} bytes, cost {:?}", - block.block_hash(), - total_fees, - size, - &execution_cost - ); - } - + cli::command_try_mine(&argv[1..], common_opts.config.as_ref()); process::exit(0); } @@ -1142,7 +1023,7 @@ simulating a miner. 
} i += 1; let line = line.unwrap().trim().to_string(); - if line.len() == 0 { + if line.is_empty() { continue; } let vals: Vec<_> = line.split(" => ").map(|x| x.trim()).collect(); @@ -1436,7 +1317,7 @@ simulating a miner. let old_sortition_db = SortitionDB::open(old_sort_path, true, PoxConstants::mainnet_default()).unwrap(); - // initial argon balances -- see testnet/stacks-node/conf/testnet-follower-conf.toml + // initial argon balances -- see sample/conf/testnet-follower-conf.toml let initial_balances = vec![ ( StacksAddress::from_string("ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2") @@ -1704,7 +1585,7 @@ simulating a miner. null_event_dispatcher, ) .unwrap(); - if receipts.len() == 0 { + if receipts.is_empty() { break; } } @@ -1719,50 +1600,49 @@ simulating a miner. } if argv[1] == "replay-block" { - cli::command_replay_block(&argv[1..], None); + cli::command_replay_block(&argv[1..], common_opts.config.as_ref()); process::exit(0); } if argv[1] == "replay-naka-block" { - let chain_config = - if let Some(network_flag_ix) = argv.iter().position(|arg| arg == "--network") { - let Some(network_choice) = argv.get(network_flag_ix + 1) else { - eprintln!("Must supply network choice after `--network` option"); - process::exit(1); - }; - - let network_config = match network_choice.to_lowercase().as_str() { - "testnet" => cli::StacksChainConfig::default_testnet(), - "mainnet" => cli::StacksChainConfig::default_mainnet(), - other => { - eprintln!("Unknown network choice `{other}`"); - process::exit(1); - } - }; - - argv.remove(network_flag_ix + 1); - argv.remove(network_flag_ix); - - Some(network_config) - } else { - None - }; - - cli::command_replay_block_nakamoto(&argv[1..], chain_config.as_ref()); + cli::command_replay_block_nakamoto(&argv[1..], common_opts.config.as_ref()); process::exit(0); } if argv[1] == "replay-mock-mining" { - cli::command_replay_mock_mining(&argv[1..], None); + cli::command_replay_mock_mining(&argv[1..], common_opts.config.as_ref()); process::exit(0); } + if argv[1] == "dump-consts" { + dump_consts(); + } + if argv.len() < 4 { eprintln!("Usage: {} blockchain network working_dir", argv[0]); process::exit(1); } } +#[cfg_attr(test, mutants::skip)] +pub fn dump_consts() { + use stacks_common::consts; + let json_out = json!({ + "miner_reward_maturity": consts::MINER_REWARD_MATURITY, + "chain_id_mainnet": consts::CHAIN_ID_MAINNET, + "chain_id_testnet": consts::CHAIN_ID_TESTNET, + "signer_slots_per_user": consts::SIGNER_SLOTS_PER_USER, + "network_id_mainnet": consts::NETWORK_ID_MAINNET, + "network_id_testnet": consts::NETWORK_ID_TESTNET, + "microstacks_per_stacks": consts::MICROSTACKS_PER_STACKS, + "stacks_epoch_max": consts::STACKS_EPOCH_MAX, + "peer_version_mainnet_major": consts::PEER_VERSION_MAINNET_MAJOR, + "peer_version_testnet_major": consts::PEER_VERSION_TESTNET_MAJOR, + }); + println!("{}", serde_json::to_string_pretty(&json_out).unwrap()); + process::exit(0); +} + #[cfg_attr(test, mutants::skip)] pub fn tip_mine() { let argv: Vec = env::args().collect(); diff --git a/stackslib/src/monitoring/mod.rs b/stackslib/src/monitoring/mod.rs index 7f1aa9db26..6db895249c 100644 --- a/stackslib/src/monitoring/mod.rs +++ b/stackslib/src/monitoring/mod.rs @@ -388,7 +388,7 @@ pub fn test_convert_uint256_to_f64() { let original = ((Uint512::from_uint256(&Uint256::max()) * Uint512::from_u64(10)) / Uint512::from_u64(100)) .to_uint256(); - assert_approx_eq!(convert_uint256_to_f64_percentage(original, 7), 10 as f64); + assert_approx_eq!(convert_uint256_to_f64_percentage(original, 7), 
10_f64); let original = ((Uint512::from_uint256(&Uint256::max()) * Uint512::from_u64(122)) / Uint512::from_u64(1000)) diff --git a/stackslib/src/net/api/getheaders.rs b/stackslib/src/net/api/getheaders.rs index fc585fd2e9..98a9fb3062 100644 --- a/stackslib/src/net/api/getheaders.rs +++ b/stackslib/src/net/api/getheaders.rs @@ -90,7 +90,7 @@ impl StacksHeaderStream { index_block_hash: tip.clone(), offset: 0, total_bytes: 0, - num_headers: num_headers, + num_headers, end_of_stream: false, corked: false, chainstate_db: db, @@ -242,7 +242,7 @@ impl HttpChunkGenerator for StacksHeaderStream { // then write ']' test_debug!("Opening header stream"); self.total_bytes += 1; - return Ok(vec!['[' as u8]); + return Ok(vec![b'[']); } if self.num_headers == 0 { test_debug!("End of header stream"); @@ -270,7 +270,7 @@ impl HttpChunkGenerator for StacksHeaderStream { self.num_headers -= 1; if self.num_headers > 0 { - header_bytes.push(',' as u8); + header_bytes.push(b','); } else { self.end_of_stream = true; } @@ -298,7 +298,7 @@ impl HttpChunkGenerator for StacksHeaderStream { test_debug!("Corking header stream"); self.corked = true; self.total_bytes += 1; - return Ok(vec![']' as u8]); + return Ok(vec![b']']); } test_debug!("Header stream terminated"); diff --git a/stackslib/src/net/api/getmicroblocks_indexed.rs b/stackslib/src/net/api/getmicroblocks_indexed.rs index 43a3a73e04..4a1b310ae0 100644 --- a/stackslib/src/net/api/getmicroblocks_indexed.rs +++ b/stackslib/src/net/api/getmicroblocks_indexed.rs @@ -94,8 +94,8 @@ impl StacksIndexedMicroblockStream { Ok(StacksIndexedMicroblockStream { microblock_hash: mblock_info.microblock_hash, - parent_index_block_hash: parent_index_block_hash, - num_items_buf: num_items_buf, + parent_index_block_hash, + num_items_buf, num_items_ptr: 0, chainstate_db: chainstate.reopen_db()?, }) diff --git a/stackslib/src/net/api/getstackers.rs b/stackslib/src/net/api/getstackers.rs index 3b253aeb21..d9b1d007a5 100644 --- a/stackslib/src/net/api/getstackers.rs +++ b/stackslib/src/net/api/getstackers.rs @@ -57,8 +57,8 @@ pub enum GetStackersErrors { } impl GetStackersErrors { - pub const NOT_AVAILABLE_ERR_TYPE: &'static str = "not_available_try_again"; - pub const OTHER_ERR_TYPE: &'static str = "other"; + pub const NOT_AVAILABLE_ERR_TYPE: &str = "not_available_try_again"; + pub const OTHER_ERR_TYPE: &str = "other"; pub fn error_type_string(&self) -> &'static str { match self { diff --git a/stackslib/src/net/api/gettenure.rs b/stackslib/src/net/api/gettenure.rs index 24c3c87d71..9888b5563f 100644 --- a/stackslib/src/net/api/gettenure.rs +++ b/stackslib/src/net/api/gettenure.rs @@ -304,7 +304,7 @@ impl HttpChunkGenerator for NakamotoTenureStream { fn generate_next_chunk(&mut self) -> Result, String> { let next_block_chunk = self.block_stream.generate_next_chunk()?; - if next_block_chunk.len() > 0 { + if !next_block_chunk.is_empty() { // have block data to send return Ok(next_block_chunk); } @@ -358,7 +358,7 @@ impl StacksHttpResponse { let ptr = &mut tenure_bytes.as_slice(); let mut blocks = vec![]; - while ptr.len() > 0 { + while !ptr.is_empty() { let block = NakamotoBlock::consensus_deserialize(ptr)?; blocks.push(block); } diff --git a/stackslib/src/net/api/gettransaction_unconfirmed.rs b/stackslib/src/net/api/gettransaction_unconfirmed.rs index 9a76ee002b..9628817b40 100644 --- a/stackslib/src/net/api/gettransaction_unconfirmed.rs +++ b/stackslib/src/net/api/gettransaction_unconfirmed.rs @@ -130,7 +130,7 @@ impl RPCRequestHandler for RPCGetTransactionUnconfirmedRequestHandler { 
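The `getheaders` stream changes above apply clippy's `char_lit_as_u8` fix: a byte literal such as `b'['` is already a `u8`, so the `as` cast disappears. The values are identical:

```rust
fn main() {
    assert_eq!(b'[', '[' as u8);
    assert_eq!(b',', ',' as u8);
    assert_eq!(b']', ']' as u8);

    // The header stream builds its single-byte chunks this way:
    let open: Vec<u8> = vec![b'['];
    assert_eq!(open, vec![0x5b]);
}
```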
return Ok(UnconfirmedTransactionResponse { status: UnconfirmedTransactionStatus::Microblock { block_hash: mblock_hash, - seq: seq, + seq, }, tx: to_hex(&transaction.serialize_to_vec()), }); diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index c832695103..8a8b138d69 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -359,7 +359,9 @@ impl NakamotoBlockProposal { while *TEST_VALIDATE_STALL.lock().unwrap() == Some(true) { std::thread::sleep(std::time::Duration::from_millis(10)); } - info!("Block validation is no longer stalled due to testing directive."); + info!( + "Block validation is no longer stalled due to testing directive. Continuing..." + ); } } let start = Instant::now(); @@ -562,7 +564,10 @@ impl NakamotoBlockProposal { // Clone signatures from block proposal // These have already been validated by `validate_nakamoto_block_burnchain()`` block.header.miner_signature = self.block.header.miner_signature.clone(); - block.header.signer_signature = self.block.header.signer_signature.clone(); + block + .header + .signer_signature + .clone_from(&self.block.header.signer_signature); // Clone the timestamp from the block proposal, which has already been validated block.header.timestamp = self.block.header.timestamp; @@ -742,6 +747,20 @@ impl RPCRequestHandler for RPCBlockProposalRequestHandler { NetError::SendError("Proposal currently being evaluated".into()), )); } + + if block_proposal + .block + .header + .timestamp + .saturating_add(network.get_connection_opts().block_proposal_max_age_secs) + < get_epoch_time_secs() + { + return Err(( + 422, + NetError::SendError("Block proposal is too old to process.".into()), + )); + } + let (chainstate, _) = chainstate.reopen().map_err(|e| (400, NetError::from(e)))?; let sortdb = sortdb.reopen().map_err(|e| (400, NetError::from(e)))?; let receiver = rpc_args diff --git a/stackslib/src/net/api/postblock_v3.rs b/stackslib/src/net/api/postblock_v3.rs index aff20d962f..e1c794ea2d 100644 --- a/stackslib/src/net/api/postblock_v3.rs +++ b/stackslib/src/net/api/postblock_v3.rs @@ -32,7 +32,7 @@ use crate::net::httpcore::{ use crate::net::relay::Relayer; use crate::net::{Error as NetError, NakamotoBlocksData, StacksMessageType, StacksNodeState}; -pub static PATH: &'static str = "/v3/blocks/upload/"; +pub static PATH: &str = "/v3/blocks/upload/"; #[derive(Clone, Default)] pub struct RPCPostBlockRequestHandler { diff --git a/stackslib/src/net/api/postmempoolquery.rs b/stackslib/src/net/api/postmempoolquery.rs index 25da52a66d..185fe16a64 100644 --- a/stackslib/src/net/api/postmempoolquery.rs +++ b/stackslib/src/net/api/postmempoolquery.rs @@ -175,7 +175,7 @@ impl HttpChunkGenerator for StacksMemPoolStream { "max_txs" => self.max_txs ); - if next_txs.len() > 0 { + if !next_txs.is_empty() { // have another tx to send let chunk = next_txs[0].serialize_to_vec(); if let Some(next_last_randomized_txid) = next_last_randomized_txid_opt { diff --git a/stackslib/src/net/api/tests/gettenure.rs b/stackslib/src/net/api/tests/gettenure.rs index c4f179acc9..a6a23fb4af 100644 --- a/stackslib/src/net/api/tests/gettenure.rs +++ b/stackslib/src/net/api/tests/gettenure.rs @@ -191,7 +191,7 @@ fn test_stream_nakamoto_tenure() { let ptr = &mut all_block_bytes.as_slice(); let mut blocks = vec![]; - while ptr.len() > 0 { + while !ptr.is_empty() { let block = NakamotoBlock::consensus_deserialize(ptr).unwrap(); blocks.push(block); } diff --git 
a/stackslib/src/net/api/tests/mod.rs b/stackslib/src/net/api/tests/mod.rs index c6c62dd1fe..35e12b5593 100644 --- a/stackslib/src/net/api/tests/mod.rs +++ b/stackslib/src/net/api/tests/mod.rs @@ -95,7 +95,7 @@ mod postmicroblock; mod poststackerdbchunk; mod posttransaction; -const TEST_CONTRACT: &'static str = " +const TEST_CONTRACT: &str = " (define-trait test-trait ( (do-test () (response uint uint)) @@ -149,7 +149,7 @@ const TEST_CONTRACT: &'static str = " })) "; -const TEST_CONTRACT_UNCONFIRMED: &'static str = " +const TEST_CONTRACT_UNCONFIRMED: &str = " (define-read-only (ro-test) (ok 1)) (define-constant cst-unconfirmed 456) (define-data-var bar-unconfirmed uint u1) @@ -159,7 +159,7 @@ const TEST_CONTRACT_UNCONFIRMED: &'static str = " "; /// This helper function drives I/O between a sender and receiver Http conversation. -fn convo_send_recv(sender: &mut ConversationHttp, receiver: &mut ConversationHttp) -> () { +fn convo_send_recv(sender: &mut ConversationHttp, receiver: &mut ConversationHttp) { let (mut pipe_read, mut pipe_write) = Pipe::new(); pipe_read.set_nonblocking(true); diff --git a/stackslib/src/net/api/tests/postblock_proposal.rs b/stackslib/src/net/api/tests/postblock_proposal.rs index 481d0b2047..4d8551d375 100644 --- a/stackslib/src/net/api/tests/postblock_proposal.rs +++ b/stackslib/src/net/api/tests/postblock_proposal.rs @@ -186,7 +186,13 @@ impl MemPoolEventDispatcher for ProposalTestObserver { Some(Box::new(Arc::clone(&self.proposal_observer))) } - fn mempool_txs_dropped(&self, txids: Vec, reason: mempool::MemPoolDropReason) {} + fn mempool_txs_dropped( + &self, + txids: Vec, + new_txid: Option, + reason: mempool::MemPoolDropReason, + ) { + } fn mined_block_event( &self, @@ -334,9 +340,9 @@ fn test_try_make_response() { request.add_header("authorization".into(), "password".into()); requests.push(request); - // Set the timestamp to a value in the past + // Set the timestamp to a value in the past (but NOT BEFORE timeout) let mut early_time_block = good_block.clone(); - early_time_block.header.timestamp -= 10000; + early_time_block.header.timestamp -= 400; rpc_test .peer_1 .miner @@ -382,16 +388,42 @@ fn test_try_make_response() { request.add_header("authorization".into(), "password".into()); requests.push(request); + // Set the timestamp to a value in the past (BEFORE the timeout) + let mut stale_block = good_block.clone(); + stale_block.header.timestamp -= 10000; + rpc_test.peer_1.miner.sign_nakamoto_block(&mut stale_block); + + // post the invalid block proposal + let proposal = NakamotoBlockProposal { + block: stale_block, + chain_id: 0x80000000, + }; + + let mut request = StacksHttpRequest::new_for_peer( + rpc_test.peer_1.to_peer_host(), + "POST".into(), + "/v3/block_proposal".into(), + HttpRequestContents::new().payload_json(serde_json::to_value(proposal).unwrap()), + ) + .expect("failed to construct request"); + request.add_header("authorization".into(), "password".into()); + requests.push(request); + // execute the requests let observer = ProposalTestObserver::new(); let proposal_observer = Arc::clone(&observer.proposal_observer); info!("Run requests with observer"); - let mut responses = rpc_test.run_with_observer(requests, Some(&observer)); + let responses = rpc_test.run_with_observer(requests, Some(&observer)); - let response = responses.remove(0); + for response in responses.iter().take(3) { + assert_eq!(response.preamble().status_code, 202); + } + let response = &responses[3]; + assert_eq!(response.preamble().status_code, 422); - // Wait for the results of 
all 3 requests + // Wait for the results of all 3 PROCESSED requests + let start = std::time::Instant::now(); loop { info!("Wait for results to be non-empty"); if proposal_observer @@ -407,6 +439,10 @@ fn test_try_make_response() { } else { break; } + assert!( + start.elapsed().as_secs() < 60, + "Timed out waiting for results" + ); } let observer = proposal_observer.lock().unwrap(); diff --git a/stackslib/src/net/api/tests/postmempoolquery.rs b/stackslib/src/net/api/tests/postmempoolquery.rs index 6954024844..8f921525a3 100644 --- a/stackslib/src/net/api/tests/postmempoolquery.rs +++ b/stackslib/src/net/api/tests/postmempoolquery.rs @@ -178,7 +178,7 @@ fn test_stream_mempool_txs() { txid.clone(), tx_bytes, tx_fee, - block_height as u64, + block_height, &origin_addr, origin_nonce, &sponsor_addr, diff --git a/stackslib/src/net/asn.rs b/stackslib/src/net/asn.rs index f38c6c54d4..c28e82484b 100644 --- a/stackslib/src/net/asn.rs +++ b/stackslib/src/net/asn.rs @@ -214,9 +214,9 @@ impl ASEntry4 { let asn = asn_opt.unwrap(); Ok(Some(ASEntry4 { - prefix: prefix, - mask: mask, - asn: asn, + prefix, + mask, + asn, org: 0, // TODO })) } diff --git a/stackslib/src/net/atlas/db.rs b/stackslib/src/net/atlas/db.rs index d6bdbb301e..d11dd9995d 100644 --- a/stackslib/src/net/atlas/db.rs +++ b/stackslib/src/net/atlas/db.rs @@ -55,7 +55,7 @@ use crate::util_lib::db::{ DBConn, Error as db_error, FromColumn, FromRow, }; -pub const ATLASDB_VERSION: &'static str = "2"; +pub const ATLASDB_VERSION: &str = "2"; /// The maximum number of atlas attachment instances that should be /// checked at once (this is used to limit the return size of @@ -66,7 +66,7 @@ pub const ATLASDB_VERSION: &'static str = "2"; /// Attachment as well (which is larger). pub const MAX_PROCESS_PER_ROUND: u32 = 1_000; -const ATLASDB_INITIAL_SCHEMA: &'static [&'static str] = &[ +const ATLASDB_INITIAL_SCHEMA: &[&str] = &[ r#" CREATE TABLE attachments( hash TEXT UNIQUE PRIMARY KEY, @@ -90,7 +90,7 @@ const ATLASDB_INITIAL_SCHEMA: &'static [&'static str] = &[ "CREATE TABLE db_config(version TEXT NOT NULL);", ]; -const ATLASDB_SCHEMA_2: &'static [&'static str] = &[ +const ATLASDB_SCHEMA_2: &[&str] = &[ // We have to allow status to be null, because SQLite won't let us add // a not null column without a default. The default defeats the point of // having not-null here anyways, so we leave this field nullable. 
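The `/v3/block_proposal` changes a few hunks above, together with the test additions here, gate proposals on the new `block_proposal_max_age_secs` connection option: a proposal whose header timestamp is older than the cutoff is refused up front with HTTP 422 rather than queued for validation, while fresher ones are still accepted with 202. A stand-alone sketch of the predicate (the 600-second cutoff in `main` is illustrative, not the shipped default):

```rust
use std::time::{SystemTime, UNIX_EPOCH};

/// True if the proposal should be rejected as stale.
/// `proposal_timestamp` is the block header timestamp in epoch seconds.
fn proposal_too_old(proposal_timestamp: u64, max_age_secs: u64) -> bool {
    let now = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock before Unix epoch")
        .as_secs();
    // saturating_add mirrors the handler's overflow-safe comparison
    proposal_timestamp.saturating_add(max_age_secs) < now
}

fn main() {
    let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs();
    assert!(!proposal_too_old(now - 400, 600)); // recent enough: processed (202)
    assert!(proposal_too_old(now - 10_000, 600)); // stale: rejected (422)
}
```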
@@ -105,7 +105,7 @@ const ATLASDB_SCHEMA_2: &'static [&'static str] = &[
     "#,
 ];
 
-const ATLASDB_INDEXES: &'static [&'static str] = &[
+const ATLASDB_INDEXES: &[&str] = &[
     "CREATE INDEX IF NOT EXISTS index_was_instantiated ON attachments(was_instantiated);",
     "CREATE INDEX IF NOT EXISTS index_instance_status ON attachment_instances(status);",
 ];
@@ -127,14 +127,14 @@ pub enum AttachmentInstanceStatus {
 }
 
 impl FromRow<Attachment> for Attachment {
-    fn from_row<'a>(row: &'a Row) -> Result<Attachment, db_error> {
+    fn from_row(row: &Row) -> Result<Attachment, db_error> {
         let content: Vec<u8> = row.get_unwrap("content");
         Ok(Attachment { content })
     }
 }
 
 impl FromRow<AttachmentInstance> for AttachmentInstance {
-    fn from_row<'a>(row: &'a Row) -> Result<AttachmentInstance, db_error> {
+    fn from_row(row: &Row) -> Result<AttachmentInstance, db_error> {
         let hex_content_hash: String = row.get_unwrap("content_hash");
         let attachment_index: u32 = row.get_unwrap("attachment_index");
         let block_height =
@@ -160,7 +160,7 @@ impl FromRow<AttachmentInstance> for AttachmentInstance {
 }
 
 impl FromRow<(u32, u32)> for (u32, u32) {
-    fn from_row<'a>(row: &'a Row) -> Result<(u32, u32), db_error> {
+    fn from_row(row: &Row) -> Result<(u32, u32), db_error> {
         let t1: u32 = row.get_unwrap(0);
         let t2: u32 = row.get_unwrap(1);
         Ok((t1, t2))
@@ -445,7 +445,7 @@ impl AtlasDB {
         &self.conn
     }
 
-    pub fn tx_begin<'a>(&'a mut self) -> Result, db_error> {
+    pub fn tx_begin(&mut self) -> Result, db_error> {
         if !self.readwrite {
             return Err(db_error::ReadOnly);
         }
diff --git a/stackslib/src/net/atlas/download.rs b/stackslib/src/net/atlas/download.rs
index a9dad242a5..f877a0da3a 100644
--- a/stackslib/src/net/atlas/download.rs
+++ b/stackslib/src/net/atlas/download.rs
@@ -102,7 +102,7 @@ impl AttachmentsDownloader {
         let mut events_to_deregister = vec![];
 
         // Handle initial batch
-        if self.initial_batch.len() > 0 {
+        if !self.initial_batch.is_empty() {
             let mut resolved = self.enqueue_initial_attachments(&mut network.atlasdb)?;
             resolved_attachments.append(&mut resolved);
         }
@@ -703,7 +703,7 @@ impl BatchedDNSLookupsState {
         let mut state = BatchedDNSLookupsResults::default();
 
         for url_str in urls.drain(..)
{ - if url_str.len() == 0 { + if url_str.is_empty() { continue; } let url = match url_str.parse_to_block_url() { @@ -932,7 +932,7 @@ impl BatchedRequestsState } }); - if pending_requests.len() > 0 { + if !pending_requests.is_empty() { // We need to keep polling for (event_id, request) in pending_requests.drain() { state.remaining.insert(event_id, request); @@ -1314,10 +1314,11 @@ impl ReliabilityReport { } pub fn score(&self) -> u32 { - match self.total_requests_sent { - 0 => 0 as u32, - n => self.total_requests_success * 1000 / (n * 1000) + n, + let n = self.total_requests_sent; + if n == 0 { + return n; } + self.total_requests_success * 1000 / (n * 1000) + n } } diff --git a/stackslib/src/net/atlas/mod.rs b/stackslib/src/net/atlas/mod.rs index 45100d984b..c382aa618d 100644 --- a/stackslib/src/net/atlas/mod.rs +++ b/stackslib/src/net/atlas/mod.rs @@ -229,7 +229,7 @@ impl AttachmentInstance { metadata, contract_id: contract_id.clone(), tx_id, - canonical_stacks_tip_height: canonical_stacks_tip_height, + canonical_stacks_tip_height, }; return Some(instance); } diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 1d8e5d10d2..1d1e58d4ee 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -136,7 +136,7 @@ pub struct NeighborStats { impl NeighborStats { pub fn new(outbound: bool) -> NeighborStats { NeighborStats { - outbound: outbound, + outbound, first_contact_time: 0, last_contact_time: 0, last_send_time: 0, @@ -162,9 +162,9 @@ impl NeighborStats { /// Add a neighbor health point for this peer. /// This updates the recent list of instances where this peer either successfully replied to a /// message, or failed to do so (indicated by `success`). - pub fn add_healthpoint(&mut self, success: bool) -> () { + pub fn add_healthpoint(&mut self, success: bool) { let hp = NeighborHealthPoint { - success: success, + success, time: get_epoch_time_secs(), }; self.healthpoints.push_back(hp); @@ -176,7 +176,7 @@ impl NeighborStats { /// Record that we recently received a block of the given size. /// Keeps track of the last `NUM_BANDWIDTH_POINTS` such events, so we can estimate the current /// bandwidth consumed by block pushes. - pub fn add_block_push(&mut self, message_size: u64) -> () { + pub fn add_block_push(&mut self, message_size: u64) { self.block_push_rx_counts .push_back((get_epoch_time_secs(), message_size)); while self.block_push_rx_counts.len() > NUM_BANDWIDTH_POINTS { @@ -187,7 +187,7 @@ impl NeighborStats { /// Record that we recently received a microblock of the given size. /// Keeps track of the last `NUM_BANDWIDTH_POINTS` such events, so we can estimate the current /// bandwidth consumed by microblock pushes. - pub fn add_microblocks_push(&mut self, message_size: u64) -> () { + pub fn add_microblocks_push(&mut self, message_size: u64) { self.microblocks_push_rx_counts .push_back((get_epoch_time_secs(), message_size)); while self.microblocks_push_rx_counts.len() > NUM_BANDWIDTH_POINTS { @@ -198,7 +198,7 @@ impl NeighborStats { /// Record that we recently received a transaction of the given size. /// Keeps track of the last `NUM_BANDWIDTH_POINTS` such events, so we can estimate the current /// bandwidth consumed by transaction pushes. 
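The `NeighborStats::add_*_push` family above shares one bookkeeping scheme: append a `(timestamp, size)` sample and cap the window at `NUM_BANDWIDTH_POINTS` entries, which the bandwidth estimators later consume. A reduced sketch of that scheme; the constant's value and the `bandwidth()` helper here are illustrative stand-ins, not the real implementation in `net::chat`:

```rust
use std::collections::VecDeque;

const NUM_BANDWIDTH_POINTS: usize = 32; // illustrative; the real value lives in net::chat

#[derive(Default)]
struct PushWindow {
    rx_counts: VecDeque<(u64, u64)>, // (epoch seconds, message size in bytes)
}

impl PushWindow {
    fn add_push(&mut self, now_secs: u64, message_size: u64) {
        self.rx_counts.push_back((now_secs, message_size));
        while self.rx_counts.len() > NUM_BANDWIDTH_POINTS {
            self.rx_counts.pop_front();
        }
    }

    /// Rough bytes-per-second over the retained samples.
    fn bandwidth(&self) -> f64 {
        let (Some(first), Some(last)) = (self.rx_counts.front(), self.rx_counts.back()) else {
            return 0.0;
        };
        let total: u64 = self.rx_counts.iter().map(|(_, sz)| sz).sum();
        let span = last.0.saturating_sub(first.0).max(1);
        total as f64 / span as f64
    }
}

fn main() {
    let mut w = PushWindow::default();
    for t in 0..40 {
        w.add_push(t, 1_000);
    }
    assert!(w.rx_counts.len() <= NUM_BANDWIDTH_POINTS);
    assert!(w.bandwidth() > 0.0);
}
```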
- pub fn add_transaction_push(&mut self, message_size: u64) -> () { + pub fn add_transaction_push(&mut self, message_size: u64) { self.transaction_push_rx_counts .push_back((get_epoch_time_secs(), message_size)); while self.transaction_push_rx_counts.len() > NUM_BANDWIDTH_POINTS { @@ -209,7 +209,7 @@ impl NeighborStats { /// Record that we recently received a stackerdb chunk push of the given size. /// Keeps track of the last `NUM_BANDWIDTH_POINTS` such events, so we can estimate the current /// bandwidth consumed by stackerdb chunk pushes. - pub fn add_stackerdb_push(&mut self, message_size: u64) -> () { + pub fn add_stackerdb_push(&mut self, message_size: u64) { self.stackerdb_push_rx_counts .push_back((get_epoch_time_secs(), message_size)); while self.stackerdb_push_rx_counts.len() > NUM_BANDWIDTH_POINTS { @@ -220,7 +220,7 @@ impl NeighborStats { /// Record that we recently received a Nakamoto blcok push of the given size. /// Keeps track of the last `NUM_BANDWIDTH_POINTS` such events, so we can estimate the current /// bandwidth consumed by Nakamoto block pushes - pub fn add_nakamoto_block_push(&mut self, message_size: u64) -> () { + pub fn add_nakamoto_block_push(&mut self, message_size: u64) { self.nakamoto_block_push_rx_counts .push_back((get_epoch_time_secs(), message_size)); while self.nakamoto_block_push_rx_counts.len() > NUM_BANDWIDTH_POINTS { @@ -228,7 +228,7 @@ impl NeighborStats { } } - pub fn add_relayer(&mut self, addr: &NeighborAddress, num_bytes: u64) -> () { + pub fn add_relayer(&mut self, addr: &NeighborAddress, num_bytes: u64) { if let Some(stats) = self.relayed_messages.get_mut(addr) { stats.num_messages += 1; stats.num_bytes += num_bytes; @@ -236,7 +236,7 @@ impl NeighborStats { } else { let info = RelayStats { num_messages: 1, - num_bytes: num_bytes, + num_bytes, last_seen: get_epoch_time_secs(), }; self.relayed_messages.insert(addr.clone(), info); @@ -427,8 +427,8 @@ impl NeighborKey { handshake_data: &HandshakeData, ) -> NeighborKey { NeighborKey { - peer_version: peer_version, - network_id: network_id, + peer_version, + network_id, addrbytes: handshake_data.addrbytes.clone(), port: handshake_data.port, } @@ -436,8 +436,8 @@ impl NeighborKey { pub fn from_socketaddr(peer_version: u32, network_id: u32, addr: &SocketAddr) -> NeighborKey { NeighborKey { - peer_version: peer_version, - network_id: network_id, + peer_version, + network_id, addrbytes: PeerAddress::from_socketaddr(addr), port: addr.port(), } @@ -613,7 +613,7 @@ impl ConversationP2P { get_epoch_time_secs().saturating_sub(self.instantiated) } - pub fn set_public_key(&mut self, pubkey_opt: Option) -> () { + pub fn set_public_key(&mut self, pubkey_opt: Option) { self.connection.set_public_key(pubkey_opt); } @@ -1181,7 +1181,8 @@ impl ConversationP2P { &mut self, stacker_db_data: &StackerDBHandshakeData, ) { - self.db_smart_contracts = stacker_db_data.smart_contracts.clone(); + self.db_smart_contracts + .clone_from(&stacker_db_data.smart_contracts); } /// Forget about this peer's stacker DB replication state @@ -1202,7 +1203,7 @@ impl ConversationP2P { let natpunch_data = NatPunchData { addrbytes: self.peer_addrbytes.clone(), port: self.peer_port, - nonce: nonce, + nonce, }; let msg = StacksMessage::from_chain_view( self.version, @@ -1442,7 +1443,7 @@ impl ConversationP2P { peer_dbconn, self.network_id, epoch.network_epoch, - (get_epoch_time_secs() as u64).saturating_sub(self.connection.options.max_neighbor_age), + get_epoch_time_secs().saturating_sub(self.connection.options.max_neighbor_age), 
MAX_NEIGHBORS_DATA_LEN, chain_view.burn_block_height, false, @@ -2138,7 +2139,7 @@ impl ConversationP2P { ); return self .reply_nack(local_peer, chain_view, preamble, NackErrorCodes::Throttled) - .and_then(|handle| Ok(Some(handle))); + .map(|handle| Some(handle)); } Ok(None) } @@ -2176,7 +2177,7 @@ impl ConversationP2P { debug!("{:?}: Neighbor {:?} exceeded max microblocks-push bandwidth of {} bytes/sec (currently at {})", self, &self.to_neighbor_key(), self.connection.options.max_microblocks_push_bandwidth, self.stats.get_microblocks_push_bandwidth()); return self .reply_nack(local_peer, chain_view, preamble, NackErrorCodes::Throttled) - .and_then(|handle| Ok(Some(handle))); + .map(|handle| Some(handle)); } Ok(None) } @@ -2213,7 +2214,7 @@ impl ConversationP2P { debug!("{:?}: Neighbor {:?} exceeded max transaction-push bandwidth of {} bytes/sec (currently at {})", self, &self.to_neighbor_key(), self.connection.options.max_transaction_push_bandwidth, self.stats.get_transaction_push_bandwidth()); return self .reply_nack(local_peer, chain_view, preamble, NackErrorCodes::Throttled) - .and_then(|handle| Ok(Some(handle))); + .map(|handle| Some(handle)); } Ok(None) } @@ -2251,7 +2252,7 @@ impl ConversationP2P { debug!("{:?}: Neighbor {:?} exceeded max stackerdb-push bandwidth of {} bytes/sec (currently at {})", self, &self.to_neighbor_key(), self.connection.options.max_stackerdb_push_bandwidth, self.stats.get_stackerdb_push_bandwidth()); return self .reply_nack(local_peer, chain_view, preamble, NackErrorCodes::Throttled) - .and_then(|handle| Ok(Some(handle))); + .map(|handle| Some(handle)); } Ok(None) @@ -2290,7 +2291,7 @@ impl ConversationP2P { debug!("{:?}: Neighbor {:?} exceeded max Nakamoto block push bandwidth of {} bytes/sec (currently at {})", self, &self.to_neighbor_key(), self.connection.options.max_nakamoto_block_push_bandwidth, self.stats.get_nakamoto_block_push_bandwidth()); return self .reply_nack(local_peer, chain_view, preamble, NackErrorCodes::Throttled) - .and_then(|handle| Ok(Some(handle))); + .map(|handle| Some(handle)); } Ok(None) @@ -2568,7 +2569,7 @@ impl ConversationP2P { StacksMessageType::HandshakeAccept(ref data) => { debug!("{:?}: Got HandshakeAccept", &self); self.handle_handshake_accept(network.get_chain_view(), &msg.preamble, data, None) - .and_then(|_| Ok(None)) + .map(|_| None) } StacksMessageType::StackerDBHandshakeAccept(ref data, ref db_data) => { debug!("{:?}: Got StackerDBHandshakeAccept", &self); @@ -2578,7 +2579,7 @@ impl ConversationP2P { data, Some(db_data), ) - .and_then(|_| Ok(None)) + .map(|_| None) } StacksMessageType::Ping(_) => { debug!("{:?}: Got Ping", &self); @@ -2652,7 +2653,7 @@ impl ConversationP2P { data, None, ) - .and_then(|_| Ok(None)) + .map(|_| None) } else { debug!("{:?}: Unsolicited unauthenticated HandshakeAccept", &self); @@ -2670,7 +2671,7 @@ impl ConversationP2P { data, Some(db_data), ) - .and_then(|_| Ok(None)) + .map(|_| None) } else { debug!( "{:?}: Unsolicited unauthenticated StackerDBHandshakeAccept", @@ -2851,8 +2852,8 @@ impl ConversationP2P { match dns_client.poll_lookup(&dns_request.host, dns_request.port) { Ok(query_result_opt) => { // just take one of the addresses, if there are any - self.data_ip = query_result_opt - .map(|query_result| match query_result.result { + self.data_ip = + query_result_opt.and_then(|query_result| match query_result.result { Ok(mut ips) => ips.pop(), Err(e) => { warn!( @@ -2864,8 +2865,7 @@ impl ConversationP2P { self.dns_deadline = u128::MAX; None } - }) - .flatten(); + }); if let Some(ip) = 
self.data_ip.as_ref() { debug!("{}: Resolved data URL {} to {}", &self, &self.data_url, &ip); } else { @@ -3044,7 +3044,7 @@ impl ConversationP2P { } /// Remove all timed-out messages, and ding the remote peer as unhealthy - pub fn clear_timeouts(&mut self) -> () { + pub fn clear_timeouts(&mut self) { let num_drained = self.connection.drain_timeouts(); for _ in 0..num_drained { self.stats.add_healthpoint(false); @@ -3190,7 +3190,7 @@ mod test { sender: &mut ConversationP2P, mut sender_handles: Vec<&mut ReplyHandleP2P>, receiver: &mut ConversationP2P, - ) -> () { + ) { let (mut pipe_read, mut pipe_write) = Pipe::new(); pipe_read.set_nonblocking(true); @@ -3252,7 +3252,7 @@ mod test { for i in prev_snapshot.block_height..chain_view.burn_block_height + 1 { let mut next_snapshot = prev_snapshot.clone(); - let big_i = Uint256::from_u64(i as u64); + let big_i = Uint256::from_u64(i); let mut big_i_bytes_32 = [0u8; 32]; let mut big_i_bytes_20 = [0u8; 20]; big_i_bytes_32.copy_from_slice(&big_i.to_u8_slice()); @@ -3347,7 +3347,7 @@ mod test { network_id: 0, chain_name: "bitcoin".to_string(), network_name: "testnet".to_string(), - working_dir: format!("/tmp/stacks-test-databases-{}", test_name), + working_dir: format!("/tmp/stacks-test-databases-{test_name}"), consensus_hash_lifetime: 24, stable_confirmations: 7, first_block_height: 12300, @@ -5575,7 +5575,7 @@ mod test { let getblocksdata_1 = GetBlocksInv { consensus_hash: convo_1_ancestor.consensus_hash, - num_blocks: 10 as u16, + num_blocks: 10, }; let getblocksdata_1_msg = convo_1 .sign_message( diff --git a/stackslib/src/net/codec.rs b/stackslib/src/net/codec.rs index bd8154e414..ec342209a7 100644 --- a/stackslib/src/net/codec.rs +++ b/stackslib/src/net/codec.rs @@ -65,8 +65,8 @@ impl Preamble { payload_len: u32, ) -> Preamble { Preamble { - peer_version: peer_version, - network_id: network_id, + peer_version, + network_id, seq: 0, burn_block_height: block_height, burn_block_hash: burn_block_hash.clone(), @@ -74,7 +74,7 @@ impl Preamble { burn_stable_block_hash: stable_burn_block_hash.clone(), additional_data: 0, signature: MessageSignature::empty(), - payload_len: payload_len, + payload_len, } } @@ -234,8 +234,8 @@ impl StacksMessageCodec for GetBlocksInv { } Ok(GetBlocksInv { - consensus_hash: consensus_hash, - num_blocks: num_blocks, + consensus_hash, + num_blocks, }) } } @@ -435,10 +435,7 @@ impl StacksMessageCodec for PoxInvData { } let pox_bitvec: Vec = read_next_exact::<_, u8>(fd, bitvec_len(bitlen).into())?; - Ok(PoxInvData { - bitlen: bitlen, - pox_bitvec: pox_bitvec, - }) + Ok(PoxInvData { bitlen, pox_bitvec }) } } @@ -454,9 +451,7 @@ impl StacksMessageCodec for BlocksAvailableData { fd, BLOCKS_AVAILABLE_MAX_LEN, )?; - Ok(BlocksAvailableData { - available: available, - }) + Ok(BlocksAvailableData { available }) } } @@ -502,7 +497,7 @@ impl BlocksData { BlocksData { blocks: vec![] } } - pub fn push(&mut self, ch: ConsensusHash, block: StacksBlock) -> () { + pub fn push(&mut self, ch: ConsensusHash, block: StacksBlock) { self.blocks.push(BlocksDatum(ch, block)) } } @@ -624,14 +619,14 @@ impl HandshakeData { }; HandshakeData { - addrbytes: addrbytes, - port: port, + addrbytes, + port, services: local_peer.services, node_public_key: StacksPublicKeyBuffer::from_public_key( &Secp256k1PublicKey::from_private(&local_peer.private_key), ), expire_block_height: local_peer.private_key_expire, - data_url: data_url, + data_url, } } } @@ -675,7 +670,7 @@ impl HandshakeAcceptData { pub fn new(local_peer: &LocalPeer, heartbeat_interval: u32) -> 
HandshakeAcceptData { HandshakeAcceptData { handshake: HandshakeData::from_local_peer(local_peer), - heartbeat_interval: heartbeat_interval, + heartbeat_interval, } } } @@ -1384,7 +1379,7 @@ impl StacksMessage { 0, ); StacksMessage { - preamble: preamble, + preamble, relayers: vec![], payload: message, } @@ -1414,7 +1409,7 @@ impl StacksMessage { peer_version: self.preamble.peer_version, network_id: self.preamble.network_id, addrbytes: addrbytes.clone(), - port: port, + port, } } @@ -1431,7 +1426,7 @@ impl StacksMessage { /// Sign the StacksMessage. The StacksMessage must _not_ have any relayers (i.e. we're /// originating this message). pub fn sign(&mut self, seq: u32, private_key: &Secp256k1PrivateKey) -> Result<(), net_error> { - if self.relayers.len() > 0 { + if !self.relayers.is_empty() { return Err(net_error::InvalidMessage); } self.preamble.seq = seq; @@ -1498,8 +1493,7 @@ impl StacksMessage { self.payload.consensus_serialize(&mut message_bits)?; let mut p = self.preamble.clone(); - p.verify(&message_bits, &secp256k1_pubkey) - .and_then(|_m| Ok(())) + p.verify(&message_bits, &secp256k1_pubkey).map(|_m| ()) } } @@ -1573,8 +1567,8 @@ impl ProtocolFamily for StacksP2P { let (relayers, payload) = StacksMessage::deserialize_body(&mut cursor)?; let message = StacksMessage { preamble: preamble.clone(), - relayers: relayers, - payload: payload, + relayers, + payload, }; Ok((message, cursor.position() as usize)) } @@ -1588,7 +1582,7 @@ impl ProtocolFamily for StacksP2P { preamble .clone() .verify(&bytes[0..(preamble.payload_len as usize)], key) - .and_then(|_m| Ok(())) + .map(|_m| ()) } fn write_message( @@ -1666,7 +1660,7 @@ pub mod test { pub fn check_codec_and_corruption( obj: &T, bytes: &Vec, - ) -> () { + ) { // obj should serialize to bytes let mut write_buf: Vec = Vec::with_capacity(bytes.len()); obj.consensus_serialize(&mut write_buf).unwrap(); @@ -1687,7 +1681,7 @@ pub mod test { } // short message shouldn't parse, but should EOF - if write_buf.len() > 0 { + if !write_buf.is_empty() { let mut short_buf = write_buf.clone(); let short_len = short_buf.len() - 1; short_buf.truncate(short_len); diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index 0e58adb36e..fe047e2984 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -48,6 +48,9 @@ use crate::net::{ StacksHttp, StacksP2P, }; +/// The default maximum age in seconds of a block that can be validated by the block proposal endpoint +pub const DEFAULT_BLOCK_PROPOSAL_MAX_AGE_SECS: u64 = 600; + /// Receiver notification handle. /// When a message with the expected `seq` value arrives, send it to an expected receiver (possibly /// in another thread) via the given `receiver_input` channel. @@ -63,7 +66,7 @@ impl ReceiverNotify

{ ReceiverNotify { expected_seq: seq, receiver_input: input, - ttl: ttl, + ttl, } } @@ -104,7 +107,7 @@ impl NetworkReplyHandle

{ receiver_output: Some(output), request_pipe_write: Some(write), deadline: 0, - socket_event_id: socket_event_id, + socket_event_id, } } @@ -113,12 +116,12 @@ impl NetworkReplyHandle

{ receiver_output: None, request_pipe_write: Some(write), deadline: 0, - socket_event_id: socket_event_id, + socket_event_id, } } /// deadline is in seconds - pub fn set_deadline(&mut self, dl: u64) -> () { + pub fn set_deadline(&mut self, dl: u64) { self.deadline = dl; } @@ -434,6 +437,8 @@ pub struct ConnectionOptions { pub nakamoto_unconfirmed_downloader_interval_ms: u128, /// The authorization token to enable privileged RPC endpoints pub auth_token: Option, + /// The maximum age in seconds of a block that can be validated by the block proposal endpoint + pub block_proposal_max_age_secs: u64, /// StackerDB replicas to talk to for a particular smart contract pub stackerdb_hint_replicas: HashMap>, @@ -568,6 +573,7 @@ impl std::default::Default for ConnectionOptions { nakamoto_inv_sync_burst_interval_ms: 1_000, // wait 1 second after a sortition before running inventory sync nakamoto_unconfirmed_downloader_interval_ms: 5_000, // run unconfirmed downloader once every 5 seconds auth_token: None, + block_proposal_max_age_secs: DEFAULT_BLOCK_PROPOSAL_MAX_AGE_SECS, stackerdb_hint_replicas: HashMap::new(), // no faults on by default @@ -689,7 +695,7 @@ impl ConnectionInbox

{ } Err(net_error::UnderflowError(_)) => { // not enough data to form a preamble yet - if bytes_consumed == 0 && bytes.len() > 0 { + if bytes_consumed == 0 && !bytes.is_empty() { // preamble is too long return Err(net_error::DeserializeError( "Preamble size would exceed maximum allowed size".to_string(), @@ -773,7 +779,7 @@ impl ConnectionInbox

{ self.payload_ptr = 0; self.buf = trailer; - if self.buf.len() > 0 { + if !self.buf.is_empty() { test_debug!( "Buffer has {} bytes remaining: {:?}", self.buf.len(), @@ -956,7 +962,7 @@ impl ConnectionInbox

{ // we can buffer bytes faster than we can process messages, so be sure to drain the buffer // before returning. - if self.buf.len() > 0 { + if !self.buf.is_empty() { loop { let mut consumed_message = false; @@ -1084,7 +1090,7 @@ impl ConnectionOutbox

{ pub fn new(outbox_maxlen: usize) -> ConnectionOutbox

{ ConnectionOutbox { outbox: VecDeque::with_capacity(outbox_maxlen), - outbox_maxlen: outbox_maxlen, + outbox_maxlen, pending_message_fd: None, socket_out_buf: vec![], socket_out_ptr: 0, @@ -1093,7 +1099,7 @@ impl ConnectionOutbox

{ } fn begin_next_message(&mut self) -> Option { - if self.outbox.len() == 0 { + if self.outbox.is_empty() { // nothing to send return None; } @@ -1109,8 +1115,8 @@ impl ConnectionOutbox

{ pending_message_fd } - fn finish_message(&mut self) -> () { - assert!(self.outbox.len() > 0); + fn finish_message(&mut self) { + assert!(!self.outbox.is_empty()); // wake up any receivers when (if) we get a reply let mut inflight_message = self.outbox.pop_front(); @@ -1301,7 +1307,7 @@ impl NetworkConnection

{ public_key_opt: Option, ) -> NetworkConnection

{ NetworkConnection { - protocol: protocol, + protocol, options: (*options).clone(), inbox: ConnectionInbox::new(options.inbox_maxlen, public_key_opt), @@ -1470,7 +1476,7 @@ impl NetworkConnection

{ } /// set the public key - pub fn set_public_key(&mut self, pubk: Option) -> () { + pub fn set_public_key(&mut self, pubk: Option) { self.inbox.public_key = pubk; } @@ -1562,7 +1568,7 @@ mod test { let mut i = 0; // push the message, and force pipes to go out of scope to close the write end - while pipes.len() > 0 { + while !pipes.is_empty() { let mut p = pipes.remove(0); protocol.write_message(&mut p, &messages[i]).unwrap(); i += 1; @@ -1725,7 +1731,7 @@ mod test { let mut rhs = vec![]; // push the message, and force pipes to go out of scope to close the write end - while handles.len() > 0 { + while !handles.is_empty() { let mut rh = handles.remove(0); protocol.write_message(&mut rh, &messages[i]).unwrap(); i += 1; @@ -1867,7 +1873,7 @@ mod test { &BurnchainHeaderHash([0x11; 32]), 12339, &BurnchainHeaderHash([0x22; 32]), - StacksMessageType::Ping(PingData { nonce: nonce }), + StacksMessageType::Ping(PingData { nonce }), ); let privkey = Secp256k1PrivateKey::new(); ping.sign(request_id, &privkey).unwrap(); @@ -1989,12 +1995,12 @@ mod test { let mut serialized_ping = vec![]; ping.consensus_serialize(&mut serialized_ping).unwrap(); assert_eq!( - conn.outbox.socket_out_buf[0..(conn.outbox.socket_out_ptr as usize)], - serialized_ping[0..(conn.outbox.socket_out_ptr as usize)] + conn.outbox.socket_out_buf[0..conn.outbox.socket_out_ptr], + serialized_ping[0..conn.outbox.socket_out_ptr] ); let mut half_ping = - conn.outbox.socket_out_buf.clone()[0..(conn.outbox.socket_out_ptr as usize)].to_vec(); + conn.outbox.socket_out_buf.clone()[0..conn.outbox.socket_out_ptr].to_vec(); let mut ping_buf_05 = vec![0; 2 * ping_size - (ping_size + ping_size / 2)]; // flush the remaining half-ping @@ -2097,7 +2103,7 @@ mod test { let pinger = thread::spawn(move || { let mut i = 0; - while pipes.len() > 0 { + while !pipes.is_empty() { let mut p = pipes.remove(0); i += 1; @@ -2203,7 +2209,7 @@ mod test { let pinger = thread::spawn(move || { let mut rhs = vec![]; - while handle_vec.len() > 0 { + while !handle_vec.is_empty() { let mut handle = handle_vec.remove(0); handle.flush().unwrap(); rhs.push(handle); @@ -2317,7 +2323,7 @@ mod test { let pinger = thread::spawn(move || { let mut rhs = vec![]; - while handle_vec.len() > 0 { + while !handle_vec.is_empty() { let mut handle = handle_vec.remove(0); handle.flush().unwrap(); rhs.push(handle); diff --git a/stackslib/src/net/db.rs b/stackslib/src/net/db.rs index ff6b5a9a05..35471183f3 100644 --- a/stackslib/src/net/db.rs +++ b/stackslib/src/net/db.rs @@ -45,12 +45,12 @@ use crate::util_lib::db::{ }; use crate::util_lib::strings::UrlString; -pub const PEERDB_VERSION: &'static str = "3"; +pub const PEERDB_VERSION: &str = "3"; const NUM_SLOTS: usize = 8; impl FromColumn for PeerAddress { - fn from_column<'a>(row: &'a Row, column_name: &str) -> Result { + fn from_column(row: &Row, column_name: &str) -> Result { let addrbytes_bin: String = row.get_unwrap(column_name); if addrbytes_bin.len() != 128 { error!("Unparsable peer address {}", addrbytes_bin); @@ -74,7 +74,7 @@ impl FromColumn for PeerAddress { } impl FromRow for QualifiedContractIdentifier { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { let cid_str: String = row.get_unwrap("smart_contract_id"); let cid = QualifiedContractIdentifier::parse(&cid_str).map_err(|_e| db_error::ParseError)?; @@ -157,20 +157,20 @@ impl LocalPeer { info!( "Will be authenticating p2p messages with the following"; "public key" => &Secp256k1PublicKey::from_private(&pkey).to_hex(), - "services" => 
&to_hex(&(services as u16).to_be_bytes()), + "services" => &to_hex(&services.to_be_bytes()), "Stacker DBs" => stacker_dbs.iter().map(|cid| format!("{}", &cid)).collect::>().join(",") ); LocalPeer { - network_id: network_id, - parent_network_id: parent_network_id, + network_id, + parent_network_id, nonce: my_nonce, private_key: pkey, private_key_expire: key_expire, addrbytes: addr, - port: port, - services: services as u16, - data_url: data_url, + port, + services, + data_url, public_ip_address: None, stacker_dbs, } @@ -203,7 +203,7 @@ impl LocalPeer { } impl FromRow for LocalPeer { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { let network_id: u32 = row.get_unwrap("network_id"); let parent_network_id: u32 = row.get_unwrap("parent_network_id"); let nonce_hex: String = row.get_unwrap("nonce"); @@ -237,15 +237,15 @@ impl FromRow for LocalPeer { }; Ok(LocalPeer { - network_id: network_id, - parent_network_id: parent_network_id, + network_id, + parent_network_id, private_key: privkey, nonce: nonce_buf, private_key_expire: privkey_expire, - addrbytes: addrbytes, - port: port, - services: services, - data_url: data_url, + addrbytes, + port, + services, + data_url, public_ip_address: None, stacker_dbs, }) @@ -253,7 +253,7 @@ impl FromRow for LocalPeer { } impl FromRow for ASEntry4 { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { let prefix: u32 = row.get_unwrap("prefix"); let mask: u8 = row.get_unwrap("mask"); let asn: u32 = row.get_unwrap("asn"); @@ -269,7 +269,7 @@ impl FromRow for ASEntry4 { } impl FromRow for Neighbor { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { let peer_version: u32 = row.get_unwrap("peer_version"); let network_id: u32 = row.get_unwrap("network_id"); let addrbytes: PeerAddress = PeerAddress::from_column(row, "addrbytes")?; @@ -289,20 +289,20 @@ impl FromRow for Neighbor { Ok(Neighbor { addr: NeighborKey { - peer_version: peer_version, - network_id: network_id, - addrbytes: addrbytes, - port: port, + peer_version, + network_id, + addrbytes, + port, }, - public_key: public_key, + public_key, expire_block: expire_block_height, - last_contact_time: last_contact_time, - asn: asn, - org: org, - allowed: allowed, - denied: denied, - in_degree: in_degree, - out_degree: out_degree, + last_contact_time, + asn, + org, + allowed, + denied, + in_degree, + out_degree, }) } } @@ -316,7 +316,7 @@ impl FromRow for Neighbor { // This is done to ensure that the frontier represents live, long-lived peers to the greatest // extent possible. 
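Two more recurring cleanups appear in this file: the explicit `'static` lifetime on `const` items is redundant (consts and statics are implicitly `'static`, per Clippy's `redundant_static_lifetimes`), and `field: field` initializers collapse to field-init shorthand. A compilable sketch, with a made-up `PeerKeyLite` standing in for the real peer types:

// was: const VERSION: &'static str = "3"; the 'static is implied.
const VERSION: &str = "3";
const SCHEMA: &[&str] = &["CREATE TABLE frontier(peer_version INTEGER NOT NULL);"];

struct PeerKeyLite {
    network_id: u32,
    port: u16,
}

fn make_key(network_id: u32, port: u16) -> PeerKeyLite {
    // field-init shorthand: `network_id` rather than `network_id: network_id`
    PeerKeyLite { network_id, port }
}

fn main() {
    let k = make_key(1, 20444);
    println!("v{VERSION}: {} schema stmts, net {} port {}", SCHEMA.len(), k.network_id, k.port);
}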
-const PEERDB_INITIAL_SCHEMA: &'static [&'static str] = &[ +const PEERDB_INITIAL_SCHEMA: &[&str] = &[ r#" CREATE TABLE frontier( peer_version INTEGER NOT NULL, @@ -374,10 +374,10 @@ const PEERDB_INITIAL_SCHEMA: &'static [&'static str] = &[ );"#, ]; -const PEERDB_INDEXES: &'static [&'static str] = +const PEERDB_INDEXES: &[&str] = &["CREATE INDEX IF NOT EXISTS peer_address_index ON frontier(network_id,addrbytes,port);"]; -const PEERDB_SCHEMA_2: &'static [&'static str] = &[ +const PEERDB_SCHEMA_2: &[&str] = &[ r#"PRAGMA foreign_keys = ON;"#, r#" CREATE TABLE stackerdb_peers( @@ -401,7 +401,7 @@ const PEERDB_SCHEMA_2: &'static [&'static str] = &[ "#, ]; -const PEERDB_SCHEMA_3: &'static [&'static str] = &[ +const PEERDB_SCHEMA_3: &[&str] = &[ r#" ALTER TABLE frontier ADD COLUMN public BOOL NOT NULL DEFAULT 0; "#, @@ -668,10 +668,7 @@ impl PeerDB { let conn = sqlite_open(path, open_flags, false)?; - let mut db = PeerDB { - conn: conn, - readwrite: readwrite, - }; + let mut db = PeerDB { conn, readwrite }; if create_flag { // instantiate! @@ -753,10 +750,7 @@ impl PeerDB { let conn = sqlite_open(path, open_flags, true)?; - let db = PeerDB { - conn: conn, - readwrite: readwrite, - }; + let db = PeerDB { conn, readwrite }; Ok(db) } @@ -773,10 +767,7 @@ impl PeerDB { }; let conn = sqlite_open(path, open_flags, true)?; - let db = PeerDB { - conn: conn, - readwrite: readwrite, - }; + let db = PeerDB { conn, readwrite }; Ok(db) } @@ -794,7 +785,7 @@ impl PeerDB { let conn = Connection::open_in_memory().map_err(|e| db_error::SqliteError(e))?; let mut db = PeerDB { - conn: conn, + conn, readwrite: true, }; @@ -821,7 +812,7 @@ impl PeerDB { &self.conn } - pub fn tx_begin<'a>(&'a mut self) -> Result, db_error> { + pub fn tx_begin(&mut self) -> Result, db_error> { if !self.readwrite { return Err(db_error::ReadOnly); } @@ -885,7 +876,7 @@ impl PeerDB { /// Re-key and return the new local peer pub fn rekey(&mut self, new_expire_block: u64) -> Result { - if new_expire_block > ((1 as u64) << 63) - 1 { + if new_expire_block > (1 << 63) - 1 { return Err(db_error::Overflow); } @@ -1246,14 +1237,14 @@ impl PeerDB { // we're preemptively allowing let nk = NeighborKey { peer_version: 0, - network_id: network_id, + network_id, addrbytes: peer_addr.clone(), port: peer_port, }; let empty_key = StacksPublicKey::from_private(&StacksPrivateKey::new()); let mut empty_neighbor = Neighbor::empty(&nk, &empty_key, 0); - empty_neighbor.allowed = allow_deadline as i64; + empty_neighbor.allowed = allow_deadline; debug!("Preemptively allow peer {:?}", &nk); if !PeerDB::try_insert_peer(tx, &empty_neighbor, &[])? 
{ @@ -1292,7 +1283,7 @@ impl PeerDB { // we're preemptively denying let nk = NeighborKey { peer_version: 0, - network_id: network_id, + network_id, addrbytes: peer_addr.clone(), port: peer_port, }; @@ -2830,7 +2821,7 @@ mod test { }, public_key: Secp256k1PublicKey::from_private(&Secp256k1PrivateKey::new()), expire_block: (i + 23456) as u64, - last_contact_time: (1552509642 + (i as u64)) as u64, + last_contact_time: (1552509642 + (i as u64)), allowed: (now_secs + 600) as i64, denied: -1, asn: (34567 + i) as u32, @@ -2850,7 +2841,7 @@ mod test { }, public_key: Secp256k1PublicKey::from_private(&Secp256k1PrivateKey::new()), expire_block: (i + 23456) as u64, - last_contact_time: (1552509642 + (i as u64)) as u64, + last_contact_time: (1552509642 + (i as u64)), allowed: 0, denied: -1, asn: (34567 + i) as u32, @@ -2934,7 +2925,7 @@ mod test { }, public_key: Secp256k1PublicKey::from_private(&Secp256k1PrivateKey::new()), expire_block: (i + 23456) as u64, - last_contact_time: (1552509642 + (i as u64)) as u64, + last_contact_time: (1552509642 + (i as u64)), allowed: -1, denied: -1, asn: (34567 + i) as u32, @@ -2955,7 +2946,7 @@ mod test { }, public_key: Secp256k1PublicKey::from_private(&Secp256k1PrivateKey::new()), expire_block: (i + 23456) as u64, - last_contact_time: (1552509642 + (i as u64)) as u64, + last_contact_time: (1552509642 + (i as u64)), allowed: -1, denied: -1, asn: (34567 + i) as u32, diff --git a/stackslib/src/net/dns.rs b/stackslib/src/net/dns.rs index aedb73bd62..b610f2a156 100644 --- a/stackslib/src/net/dns.rs +++ b/stackslib/src/net/dns.rs @@ -43,9 +43,9 @@ pub struct DNSRequest { impl DNSRequest { pub fn new(host: String, port: u16, timeout: u128) -> DNSRequest { DNSRequest { - host: host, - port: port, - timeout: timeout, + host, + port, + timeout, } } @@ -56,7 +56,7 @@ impl DNSRequest { } impl Hash for DNSRequest { - fn hash(&self, state: &mut H) -> () { + fn hash(&self, state: &mut H) { self.host.hash(state); self.port.hash(state); } @@ -76,15 +76,12 @@ pub struct DNSResponse { impl DNSResponse { pub fn new(request: DNSRequest, result: Result, String>) -> DNSResponse { - DNSResponse { - request: request, - result: result, - } + DNSResponse { request, result } } pub fn error(request: DNSRequest, errstr: String) -> DNSResponse { DNSResponse { - request: request, + request, result: Err(errstr), } } @@ -122,13 +119,13 @@ impl DNSResolver { queries: VecDeque::new(), inbound: socket_chan_rx, outbound: dns_chan_tx, - max_inflight: max_inflight, + max_inflight, hardcoded: HashMap::new(), }; (resolver, client) } - pub fn add_hardcoded(&mut self, host: &str, port: u16, addrs: Vec) -> () { + pub fn add_hardcoded(&mut self, host: &str, port: u16, addrs: Vec) { self.hardcoded.insert((host.to_string(), port), addrs); } @@ -153,7 +150,7 @@ impl DNSResolver { } }; - if addrs.len() == 0 { + if addrs.is_empty() { return DNSResponse::error(req, "DNS resolve error: got zero addresses".to_string()); } test_debug!("{}:{} resolved to {:?}", &req.host, req.port, &addrs); @@ -269,7 +266,7 @@ impl DNSClient { Ok(()) } - fn clear_timeouts(&mut self) -> () { + fn clear_timeouts(&mut self) { let mut to_remove = vec![]; for req in self.requests.keys() { if req.is_timed_out() { @@ -277,7 +274,7 @@ impl DNSClient { to_remove.push(req.clone()); } } - for req in to_remove.drain(..) 
{ + for req in to_remove.into_iter() { self.requests.insert( req.clone(), Some(DNSResponse::error(req, "DNS lookup timed out".to_string())), @@ -350,7 +347,7 @@ impl DNSClient { Ok(Some(resp)) } - pub fn clear_all_requests(&mut self) -> () { + pub fn clear_all_requests(&mut self) { self.requests.clear() } } diff --git a/stackslib/src/net/download/epoch2x.rs b/stackslib/src/net/download/epoch2x.rs index c57d9d19bc..d58321118e 100644 --- a/stackslib/src/net/download/epoch2x.rs +++ b/stackslib/src/net/download/epoch2x.rs @@ -112,14 +112,14 @@ impl BlockRequestKey { canonical_stacks_tip_height: u64, ) -> BlockRequestKey { BlockRequestKey { - neighbor: neighbor, - data_url: data_url, - consensus_hash: consensus_hash, - anchor_block_hash: anchor_block_hash, - index_block_hash: index_block_hash, - parent_block_header: parent_block_header, - parent_consensus_hash: parent_consensus_hash, - sortition_height: sortition_height, + neighbor, + data_url, + consensus_hash, + anchor_block_hash, + index_block_hash, + parent_block_header, + parent_consensus_hash, + sortition_height, download_start: get_epoch_time_secs(), kind, canonical_stacks_tip_height, @@ -267,13 +267,13 @@ impl BlockDownloader { finished_scan_at: 0, last_inv_update_at: 0, - max_inflight_requests: max_inflight_requests, + max_inflight_requests, blocks_to_try: HashMap::new(), microblocks_to_try: HashMap::new(), parsed_urls: HashMap::new(), dns_lookups: HashMap::new(), - dns_timeout: dns_timeout, + dns_timeout, getblock_requests: HashMap::new(), getmicroblocks_requests: HashMap::new(), @@ -285,13 +285,13 @@ impl BlockDownloader { broken_neighbors: vec![], blocked_urls: HashMap::new(), - download_interval: download_interval, + download_interval, requested_blocks: HashMap::new(), requested_microblocks: HashMap::new(), } } - pub fn reset(&mut self) -> () { + pub fn reset(&mut self) { debug!("Downloader reset"); self.state = BlockDownloaderState::DNSLookupBegin; @@ -313,7 +313,7 @@ impl BlockDownloader { // preserve download accounting } - pub fn restart_scan(&mut self, sortition_start: u64) -> () { + pub fn restart_scan(&mut self, sortition_start: u64) { // prepare to restart a full-chain scan for block downloads self.block_sortition_height = sortition_start; self.microblock_sortition_height = sortition_start; @@ -327,15 +327,15 @@ impl BlockDownloader { &mut self, pox_id: &PoxId, dns_client: &mut DNSClient, - mut urls: Vec, + urls: Vec, ) -> Result<(), net_error> { assert_eq!(self.state, BlockDownloaderState::DNSLookupBegin); // optimistic concurrency control: remember the current PoX Id self.pox_id = pox_id.clone(); self.dns_lookups.clear(); - for url_str in urls.drain(..) { - if url_str.len() == 0 { + for url_str in urls.into_iter() { + if url_str.is_empty() { continue; } let url = url_str.parse_to_block_url()?; // NOTE: should always succeed, since a UrlString shouldn't decode unless it's a valid URL or the empty string @@ -418,7 +418,7 @@ impl BlockDownloader { Ok(inflight == 0) } - pub fn getblocks_begin(&mut self, requests: HashMap) -> () { + pub fn getblocks_begin(&mut self, requests: HashMap) { assert_eq!(self.state, BlockDownloaderState::GetBlocksBegin); // don't touch blocks-to-try -- that's managed by the peer network directly. @@ -537,7 +537,7 @@ impl BlockDownloader { }); // are we done? 
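The `drain(..)` loops above become `into_iter()` because the vectors are never used again after the loop: `drain` borrows the vector mutably and leaves an empty (now useless) allocation behind, while `into_iter` simply consumes it. A small sketch with hypothetical request names:

fn main() {
    let to_remove = vec!["req-a".to_string(), "req-b".to_string()];

    // into_iter() consumes the Vec outright, matching how these loops use it.
    for req in to_remove.into_iter() {
        println!("timed out: {req}");
    }

    // drain(..) still has a place when the allocation must survive the loop:
    let mut queue = vec![1, 2, 3];
    let batch: Vec<i32> = queue.drain(..).collect();
    assert!(queue.is_empty() && batch.len() == 3); // queue is reusable here
}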
- if pending_block_requests.len() == 0 { + if pending_block_requests.is_empty() { self.state = BlockDownloaderState::GetMicroblocksBegin; return Ok(true); } @@ -550,7 +550,7 @@ impl BlockDownloader { } /// Start fetching microblocks - pub fn getmicroblocks_begin(&mut self, requests: HashMap) -> () { + pub fn getmicroblocks_begin(&mut self, requests: HashMap) { assert_eq!(self.state, BlockDownloaderState::GetMicroblocksBegin); self.getmicroblocks_requests = requests; @@ -626,7 +626,7 @@ impl BlockDownloader { Some(http_response) => { match StacksHttpResponse::decode_microblocks(http_response) { Ok(microblocks) => { - if microblocks.len() == 0 { + if microblocks.is_empty() { // we wouldn't have asked for a 0-length stream info!("Got unexpected zero-length microblock stream from {:?} ({:?})", &block_key.neighbor, &block_key.data_url; "consensus_hash" => %block_key.consensus_hash @@ -675,7 +675,7 @@ impl BlockDownloader { }); // are we done? - if pending_microblock_requests.len() == 0 { + if pending_microblock_requests.is_empty() { self.state = BlockDownloaderState::Done; return Ok(true); } @@ -910,7 +910,7 @@ impl BlockDownloader { block_sortition_height: u64, ibd: bool, force: bool, - ) -> () { + ) { if force || (ibd && self.state == BlockDownloaderState::DNSLookupBegin) || (self.empty_block_download_passes > 0 @@ -945,7 +945,7 @@ impl BlockDownloader { mblock_sortition_height: u64, ibd: bool, force: bool, - ) -> () { + ) { if force || (ibd && self.state == BlockDownloaderState::DNSLookupBegin) || (self.empty_microblock_download_passes > 0 @@ -972,7 +972,7 @@ impl BlockDownloader { } /// Set a hint that we should re-scan for blocks - pub fn hint_download_rescan(&mut self, target_sortition_height: u64, ibd: bool) -> () { + pub fn hint_download_rescan(&mut self, target_sortition_height: u64, ibd: bool) { self.hint_block_sortition_height_available(target_sortition_height, ibd, false); self.hint_microblock_sortition_height_available(target_sortition_height, ibd, false); } @@ -997,7 +997,7 @@ impl BlockDownloader { if microblocks { // being requested now? 
for (_, reqs) in self.microblocks_to_try.iter() { - if reqs.len() > 0 { + if !reqs.is_empty() { if reqs[0].index_block_hash == *index_hash { return true; } @@ -1012,7 +1012,7 @@ impl BlockDownloader { } } else { for (_, reqs) in self.blocks_to_try.iter() { - if reqs.len() > 0 { + if !reqs.is_empty() { if reqs[0].index_block_hash == *index_hash { return true; } @@ -1048,7 +1048,7 @@ impl PeerNetwork { } /// Pass a hint to the downloader to re-scan - pub fn hint_download_rescan(&mut self, target_height: u64, ibd: bool) -> () { + pub fn hint_download_rescan(&mut self, target_height: u64, ibd: bool) { match self.block_downloader { Some(ref mut dl) => dl.hint_download_rescan(target_height, ibd), None => {} @@ -1060,10 +1060,10 @@ impl PeerNetwork { match self.events.get(neighbor_key) { Some(ref event_id) => match self.peers.get(event_id) { Some(ref convo) => { - if convo.data_url.len() > 0 { - Some(convo.data_url.clone()) - } else { + if convo.data_url.is_empty() { None + } else { + Some(convo.data_url.clone()) } } None => None, @@ -1214,7 +1214,7 @@ impl PeerNetwork { start_sortition_height + scan_batch_size ); - let mut availability = + let availability = PeerNetwork::with_inv_state(self, |ref mut network, ref mut inv_state| { BlockDownloader::get_block_availability( &network.local_peer, @@ -1240,7 +1240,7 @@ impl PeerNetwork { ); for (i, (consensus_hash, block_hash_opt, mut neighbors)) in - availability.drain(..).enumerate() + availability.into_iter().enumerate() { test_debug!( "{:?}: consider availability of {}/{:?}", @@ -1255,11 +1255,8 @@ impl PeerNetwork { break; } - let block_hash = match block_hash_opt { - Some(h) => h, - None => { - continue; - } + let Some(block_hash) = block_hash_opt else { + continue; }; let mut parent_block_header_opt = None; @@ -1450,18 +1447,15 @@ impl PeerNetwork { (&mut neighbors[..]).shuffle(&mut thread_rng()); let mut requests = VecDeque::new(); - for nk in neighbors.drain(..) 
{ - let data_url = match self.get_data_url(&nk) { - Some(url) => url, - None => { - debug!( - "{:?}: Unable to request {} from {}: no data URL", - &self.local_peer, &target_index_block_hash, &nk - ); - continue; - } + for nk in neighbors.into_iter() { + let Some(data_url) = self.get_data_url(&nk) else { + debug!( + "{:?}: Unable to request {} from {}: no data URL", + &self.local_peer, &target_index_block_hash, &nk + ); + continue; }; - if data_url.len() == 0 { + if data_url.is_empty() { // peer doesn't yet know its public IP address, and isn't given a data URL // directly debug!( @@ -1584,7 +1578,7 @@ impl PeerNetwork { let (need_blocks, block_sortition_height, microblock_sortition_height) = match self.block_downloader { Some(ref mut downloader) => ( - downloader.blocks_to_try.len() == 0, + downloader.blocks_to_try.is_empty(), downloader.block_sortition_height, downloader.microblock_sortition_height, ), @@ -1659,7 +1653,7 @@ impl PeerNetwork { } } - if next_microblocks_to_try.len() == 0 { + if next_microblocks_to_try.is_empty() { // have no microblocks to try in the first place, so just advance to the // next batch debug!( @@ -1711,7 +1705,7 @@ impl PeerNetwork { let requests = next_blocks_to_try.remove(&height).expect( "BUG: hashmap both contains and does not contain sortition height", ); - if requests.len() == 0 { + if requests.is_empty() { height += 1; continue; } @@ -1773,7 +1767,7 @@ impl PeerNetwork { let requests = next_microblocks_to_try.remove(&mblock_height).expect( "BUG: hashmap both contains and does not contain sortition height", ); - if requests.len() == 0 { + if requests.is_empty() { debug!("No microblock requests for {}", mblock_height); mblock_height += 1; continue; @@ -1849,7 +1843,7 @@ impl PeerNetwork { } } - if downloader.blocks_to_try.len() == 0 { + if downloader.blocks_to_try.is_empty() { // nothing in this range, so advance sortition range to try for next time next_block_sortition_height = next_block_sortition_height + (network.burnchain.pox_constants.reward_cycle_length as u64); @@ -1858,7 +1852,7 @@ impl PeerNetwork { &network.local_peer, next_block_sortition_height ); } - if downloader.microblocks_to_try.len() == 0 { + if downloader.microblocks_to_try.is_empty() { // nothing in this range, so advance sortition range to try for next time next_microblock_sortition_height = next_microblock_sortition_height + (network.burnchain.pox_constants.reward_cycle_length as u64); @@ -1926,7 +1920,7 @@ impl PeerNetwork { match requestables.pop_front() { Some(requestable) => { if let Some(Some(ref sockaddrs)) = dns_lookups.get(requestable.get_url()) { - assert!(sockaddrs.len() > 0); + assert!(!sockaddrs.is_empty()); let peerhost = match PeerHost::try_from_url(requestable.get_url()) { Some(ph) => ph, @@ -1983,9 +1977,9 @@ impl PeerNetwork { pub fn block_getblocks_begin(&mut self) -> Result<(), net_error> { test_debug!("{:?}: block_getblocks_begin", &self.local_peer); PeerNetwork::with_downloader_state(self, |ref mut network, ref mut downloader| { - let mut priority = PeerNetwork::prioritize_requests(&downloader.blocks_to_try); + let priority = PeerNetwork::prioritize_requests(&downloader.blocks_to_try); let mut requests = HashMap::new(); - for sortition_height in priority.drain(..) 
{ + for sortition_height in priority.into_iter() { match downloader.blocks_to_try.get_mut(&sortition_height) { Some(ref mut keys) => { match PeerNetwork::begin_request(network, &downloader.dns_lookups, keys) { @@ -2021,9 +2015,9 @@ impl PeerNetwork { pub fn block_getmicroblocks_begin(&mut self) -> Result<(), net_error> { test_debug!("{:?}: block_getmicroblocks_begin", &self.local_peer); PeerNetwork::with_downloader_state(self, |ref mut network, ref mut downloader| { - let mut priority = PeerNetwork::prioritize_requests(&downloader.microblocks_to_try); + let priority = PeerNetwork::prioritize_requests(&downloader.microblocks_to_try); let mut requests = HashMap::new(); - for sortition_height in priority.drain(..) { + for sortition_height in priority.into_iter() { match downloader.microblocks_to_try.get_mut(&sortition_height) { Some(ref mut keys) => { match PeerNetwork::begin_request(network, &downloader.dns_lookups, keys) { @@ -2182,21 +2176,21 @@ impl PeerNetwork { let mut microblocks_empty = vec![]; for (height, requests) in downloader.blocks_to_try.iter() { - if requests.len() == 0 { + if requests.is_empty() { blocks_empty.push(*height); } } for (height, requests) in downloader.microblocks_to_try.iter() { - if requests.len() == 0 { + if requests.is_empty() { microblocks_empty.push(*height); } } - for height in blocks_empty.drain(..) { + for height in blocks_empty.into_iter() { downloader.blocks_to_try.remove(&height); } - for height in microblocks_empty.drain(..) { + for height in microblocks_empty.into_iter() { downloader.microblocks_to_try.remove(&height); } @@ -2278,9 +2272,8 @@ impl PeerNetwork { debug!("Re-trying blocks:"); for (height, requests) in downloader.blocks_to_try.iter() { assert!( - requests.len() > 0, - "Empty block requests at height {}", - height + !requests.is_empty(), + "Empty block requests at height {height}" ); debug!( " Height {}: anchored block {} available from {} peers: {:?}", @@ -2295,9 +2288,8 @@ impl PeerNetwork { } for (height, requests) in downloader.microblocks_to_try.iter() { assert!( - requests.len() > 0, - "Empty microblock requests at height {}", - height + !requests.is_empty(), + "Empty microblock requests at height {height}" ); debug!( " Height {}: microblocks {} available from {} peers: {:?}", @@ -2319,7 +2311,7 @@ impl PeerNetwork { } /// Initialize the downloader - pub fn init_block_downloader(&mut self) -> () { + pub fn init_block_downloader(&mut self) { self.block_downloader = Some(BlockDownloader::new( self.connection_opts.dns_timeout, self.connection_opts.download_interval, @@ -2328,7 +2320,7 @@ impl PeerNetwork { } /// Initialize the attachment downloader - pub fn init_attachments_downloader(&mut self, initial_batch: Vec) -> () { + pub fn init_attachments_downloader(&mut self, initial_batch: Vec) { self.attachments_downloader = Some(AttachmentsDownloader::new(initial_batch)); } diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader.rs b/stackslib/src/net/download/nakamoto/tenure_downloader.rs index e2716e8252..1d4d680c43 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader.rs @@ -355,16 +355,13 @@ impl NakamotoTenureDownloader { /// Determine how many blocks must be in this tenure. /// Returns None if we don't have the start and end blocks yet. 
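The downloader hunks above also replace `match opt { Some(h) => h, None => { continue; } }` with `let ... else`, which binds the success value in the enclosing scope and forces the else arm to diverge. A compressed sketch of that control flow, using placeholder URLs:

fn main() {
    let candidates = vec![Some("http://peer-a/data"), None, Some("http://peer-b/data")];
    for data_url_opt in candidates {
        // let-else replaces the four-line match: bind the Some case, or diverge.
        let Some(data_url) = data_url_opt else {
            continue;
        };
        if data_url.is_empty() {
            continue;
        }
        println!("requesting from {data_url}");
    }
}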
pub fn tenure_length(&self) -> Option { - self.tenure_end_block - .as_ref() - .map(|tenure_end_block| { - let Some(tc_payload) = tenure_end_block.try_get_tenure_change_payload() else { - return None; - }; - - Some(u64::from(tc_payload.previous_tenure_blocks)) - }) - .flatten() + self.tenure_end_block.as_ref().and_then(|tenure_end_block| { + let Some(tc_payload) = tenure_end_block.try_get_tenure_change_payload() else { + return None; + }; + + Some(u64::from(tc_payload.previous_tenure_blocks)) + }) } /// Add downloaded tenure blocks to this machine. diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs index 08714f5cbf..e5b796181a 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs @@ -687,11 +687,11 @@ impl NakamotoTenureDownloaderSet { self.clear_downloader(naddr); } } - for done_naddr in finished.drain(..) { + for done_naddr in finished.into_iter() { debug!("Remove finished downloader for {done_naddr}"); self.clear_downloader(&done_naddr); } - for done_tenure in finished_tenures.drain(..) { + for done_tenure in finished_tenures.into_iter() { self.completed_tenures.insert(done_tenure); } diff --git a/stackslib/src/net/http/request.rs b/stackslib/src/net/http/request.rs index 9231071e18..6535f4a14a 100644 --- a/stackslib/src/net/http/request.rs +++ b/stackslib/src/net/http/request.rs @@ -66,13 +66,13 @@ impl HttpRequestPreamble { keep_alive: bool, ) -> HttpRequestPreamble { HttpRequestPreamble { - version: version, - verb: verb, + version, + verb, path_and_query_str, host: PeerHost::from_host_port(hostname, port), content_type: None, content_length: None, - keep_alive: keep_alive, + keep_alive, headers: BTreeMap::new(), } } @@ -98,7 +98,7 @@ impl HttpRequestPreamble { ) -> HttpRequestPreamble { HttpRequestPreamble { version: HttpVersion::Http11, - verb: verb, + verb, path_and_query_str, host: peerhost, content_type: None, @@ -443,14 +443,14 @@ impl StacksMessageCodec for HttpRequestPreamble { }; Ok(HttpRequestPreamble { - version: version, - verb: verb, + version, + verb, path_and_query_str, host: peerhost.unwrap(), - content_type: content_type, - content_length: content_length, - keep_alive: keep_alive, - headers: headers, + content_type, + content_length, + keep_alive, + headers, }) } } diff --git a/stackslib/src/net/http/response.rs b/stackslib/src/net/http/response.rs index 77bcaa730f..3ebed7e9d2 100644 --- a/stackslib/src/net/http/response.rs +++ b/stackslib/src/net/http/response.rs @@ -133,7 +133,7 @@ impl HttpResponseContents { HttpResponseContents::RAM(ref mut buf) => { // dump directly into the pipewrite // TODO: zero-copy? - if buf.len() > 0 { + if !buf.is_empty() { fd.write_all(&buf[..]).map_err(Error::WriteError)?; buf.clear(); } @@ -159,12 +159,12 @@ impl HttpResponsePreamble { keep_alive: bool, ) -> HttpResponsePreamble { HttpResponsePreamble { - client_http_version: client_http_version, - status_code: status_code, - reason: reason, - keep_alive: keep_alive, + client_http_version, + status_code, + reason, + keep_alive, content_length: content_length_opt, - content_type: content_type, + content_type, headers: BTreeMap::new(), } } @@ -280,7 +280,7 @@ impl HttpResponsePreamble { /// Add a header. /// Reserved headers will not be directly added to self.headers. 
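Two combinator rewrites recur through these files: on `Result`, `.and_then(|x| Ok(...))` never introduces a new failure path, so it is just `.map(...)`; on `Option`, `.map(f).flatten()` where `f` itself returns an `Option` is exactly `.and_then(f)`, which is what the `tenure_length` hunk above does. A toy demonstration of both equivalences:

fn main() {
    // Result: an and_then whose closure always returns Ok is just map.
    let r: Result<u32, String> = Ok(7);
    let a: Result<Option<u32>, String> = r.clone().and_then(|h| Ok(Some(h)));
    let b: Result<Option<u32>, String> = r.map(Some);
    assert_eq!(a, b);

    // Option: map(f).flatten() collapses to and_then(f).
    let ips: Option<Vec<u32>> = Some(vec![]);
    let first_a = ips.clone().map(|mut v| v.pop()).flatten();
    let first_b = ips.and_then(|mut v| v.pop());
    assert_eq!(first_a, first_b); // both None: the list was empty
}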
- pub fn add_header(&mut self, key: String, value: String) -> () { + pub fn add_header(&mut self, key: String, value: String) { let hdr = key.to_lowercase(); if HttpReservedHeader::is_reserved(&hdr) { match HttpReservedHeader::try_from_str(&hdr, &value) { @@ -336,7 +336,7 @@ impl HttpResponsePreamble { } } - pub fn add_CORS_headers(&mut self) -> () { + pub fn add_CORS_headers(&mut self) { self.headers .insert("Access-Control-Allow-Origin".to_string(), "*".to_string()); } @@ -590,12 +590,12 @@ impl StacksMessageCodec for HttpResponsePreamble { Ok(HttpResponsePreamble { client_http_version, - status_code: status_code, - reason: reason, - keep_alive: keep_alive, + status_code, + reason, + keep_alive, content_type: content_type.unwrap_or(HttpContentType::Bytes), // per the RFC - content_length: content_length, - headers: headers, + content_length, + headers, }) } } diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index 9b2dd1e106..1688b95b25 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -59,10 +59,10 @@ use crate::net::{Error as NetError, MessageSequence, ProtocolFamily, StacksNodeS const CHUNK_BUF_LEN: usize = 32768; /// canonical stacks tip height header -pub const STACKS_HEADER_HEIGHT: &'static str = "X-Canonical-Stacks-Tip-Height"; +pub const STACKS_HEADER_HEIGHT: &str = "X-Canonical-Stacks-Tip-Height"; /// request ID header -pub const STACKS_REQUEST_ID: &'static str = "X-Request-Id"; +pub const STACKS_REQUEST_ID: &str = "X-Request-Id"; /// Request ID to use or expect from non-Stacks HTTP clients. /// In particular, if a HTTP response does not contain the x-request-id header, then it's assumed @@ -80,12 +80,12 @@ pub enum TipRequest { impl TipRequest {} -impl ToString for TipRequest { - fn to_string(&self) -> String { +impl fmt::Display for TipRequest { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { - Self::UseLatestAnchoredTip => "".to_string(), - Self::UseLatestUnconfirmedTip => "latest".to_string(), - Self::SpecificTip(ref tip) => format!("{}", tip), + Self::UseLatestAnchoredTip => write!(f, ""), + Self::UseLatestUnconfirmedTip => write!(f, "latest"), + Self::SpecificTip(ref tip) => write!(f, "{tip}"), } } } @@ -316,7 +316,7 @@ impl HttpRequestContentsExtensions for HttpRequestContents { /// Use a particular tip request fn for_tip(mut self, tip_req: TipRequest) -> Self { if tip_req != TipRequest::UseLatestAnchoredTip { - self.query_arg("tip".to_string(), format!("{}", &tip_req.to_string())) + self.query_arg("tip".to_string(), tip_req.to_string()) } else { let _ = self.take_query_arg(&"tip".to_string()); self @@ -475,11 +475,11 @@ impl StacksHttpRequest { } let (decoded_path, _) = decode_request_path(&preamble.path_and_query_str)?; let full_query_string = contents.get_full_query_string(); - if full_query_string.len() > 0 { - preamble.path_and_query_str = format!("{}?{}", &decoded_path, &full_query_string); + preamble.path_and_query_str = if full_query_string.is_empty() { + decoded_path } else { - preamble.path_and_query_str = decoded_path; - } + format!("{decoded_path}?{full_query_string}") + }; Ok(Self { preamble, @@ -1039,7 +1039,7 @@ impl StacksHttp { let payload = match handler.try_parse_request( preamble, &captures, - if query.len() > 0 { Some(&query) } else { None }, + if query.is_empty() { None } else { Some(&query) }, body, ) { Ok(p) => p, @@ -1078,7 +1078,7 @@ impl StacksHttp { let payload = match request.try_parse_request( preamble, &captures, - if query.len() > 0 { Some(&query) } else { None 
}, + if query.is_empty() { None } else { Some(&query) }, body, ) { Ok(p) => p, @@ -1256,7 +1256,7 @@ impl StacksHttp { } /// Clear any pending response state -- i.e. due to a failed request. - fn reset(&mut self) -> () { + fn reset(&mut self) { self.request_handler_index = None; self.reply = None; } diff --git a/stackslib/src/net/inv/epoch2x.rs b/stackslib/src/net/inv/epoch2x.rs index bbdd8f68ae..915d7ae419 100644 --- a/stackslib/src/net/inv/epoch2x.rs +++ b/stackslib/src/net/inv/epoch2x.rs @@ -82,7 +82,7 @@ impl PeerBlocksInv { num_sortitions: 0, num_reward_cycles: 0, last_updated_at: 0, - first_block_height: first_block_height, + first_block_height, } } @@ -96,13 +96,13 @@ impl PeerBlocksInv { ) -> PeerBlocksInv { assert_eq!(block_inv.len(), microblocks_inv.len()); PeerBlocksInv { - block_inv: block_inv, - microblocks_inv: microblocks_inv, - num_sortitions: num_sortitions, - num_reward_cycles: num_reward_cycles, - pox_inv: pox_inv, + block_inv, + microblocks_inv, + num_sortitions, + num_reward_cycles, + pox_inv, last_updated_at: get_epoch_time_secs(), - first_block_height: first_block_height, + first_block_height, } } @@ -175,8 +175,8 @@ impl PeerBlocksInv { assert!(block_height >= self.first_block_height); let sortition_height = block_height - self.first_block_height; - self.num_sortitions = if self.num_sortitions < sortition_height + (bitlen as u64) { - sortition_height + (bitlen as u64) + self.num_sortitions = if self.num_sortitions < sortition_height + bitlen { + sortition_height + bitlen } else { self.num_sortitions }; @@ -541,7 +541,7 @@ impl NeighborBlockStats { is_bootstrap_peer: bool, ) -> NeighborBlockStats { NeighborBlockStats { - nk: nk, + nk, inv: PeerBlocksInv::empty(first_block_height), pox_reward_cycle: 0, block_reward_cycle: 0, @@ -558,7 +558,7 @@ impl NeighborBlockStats { learned_data: false, learned_data_height: u64::MAX, scans: 0, - is_bootstrap_peer: is_bootstrap_peer, + is_bootstrap_peer, } } @@ -983,11 +983,11 @@ impl InvState { InvState { block_stats: HashMap::new(), - request_timeout: request_timeout, - first_block_height: first_block_height, + request_timeout, + first_block_height, last_change_at: 0, - sync_interval: sync_interval, + sync_interval, hint_learned_data: false, hint_learned_data_height: u64::MAX, @@ -1007,7 +1007,7 @@ impl InvState { peers: HashSet, bootstrap_peers: &HashSet, max_neighbors: usize, - ) -> () { + ) { for (nk, stats) in self.block_stats.iter_mut() { if stats.status != NodeStatus::Online { stats.status = NodeStatus::Online; @@ -1167,14 +1167,14 @@ impl InvState { } #[cfg(test)] - pub fn add_peer(&mut self, nk: NeighborKey, is_bootstrap_peer: bool) -> () { + pub fn add_peer(&mut self, nk: NeighborKey, is_bootstrap_peer: bool) { self.block_stats.insert( nk.clone(), NeighborBlockStats::new(nk, self.first_block_height, is_bootstrap_peer), ); } - pub fn del_peer(&mut self, nk: &NeighborKey) -> () { + pub fn del_peer(&mut self, nk: &NeighborKey) { self.block_stats.remove(&nk); } @@ -1768,9 +1768,8 @@ impl PeerNetwork { ) -> Result, net_error> { if stats.block_reward_cycle <= stats.inv.num_reward_cycles { self.make_getblocksinv(sortdb, nk, stats, stats.block_reward_cycle) - .and_then(|getblocksinv_opt| { - Ok(getblocksinv_opt - .map(|getblocksinv| (stats.block_reward_cycle, getblocksinv))) + .map(|getblocksinv_opt| { + getblocksinv_opt.map(|getblocksinv| (stats.block_reward_cycle, getblocksinv)) }) } else { Ok(None) @@ -2167,13 +2166,13 @@ impl PeerNetwork { let again = match stats.state { InvWorkState::GetPoxInvBegin => self 
.inv_getpoxinv_begin(pins, sortdb, nk, stats, request_timeout) - .and_then(|_| Ok(true))?, + .map(|_| true)?, InvWorkState::GetPoxInvFinish => { self.inv_getpoxinv_try_finish(sortdb, nk, stats, ibd)? } InvWorkState::GetBlocksInvBegin => self .inv_getblocksinv_begin(pins, sortdb, nk, stats, request_timeout) - .and_then(|_| Ok(true))?, + .map(|_| true)?, InvWorkState::GetBlocksInvFinish => { self.inv_getblocksinv_try_finish(nk, stats, ibd)? } @@ -2402,7 +2401,7 @@ impl PeerNetwork { &network.local_peer, inv_state.block_sortition_start, ); - if !inv_state.hint_learned_data && inv_state.block_stats.len() > 0 { + if !inv_state.hint_learned_data && !inv_state.block_stats.is_empty() { // did a full scan without learning anything new inv_state.last_rescanned_at = get_epoch_time_secs(); inv_state.hint_do_rescan = false; @@ -2561,7 +2560,7 @@ impl PeerNetwork { } /// Initialize inv state - pub fn init_inv_sync_epoch2x(&mut self, sortdb: &SortitionDB) -> () { + pub fn init_inv_sync_epoch2x(&mut self, sortdb: &SortitionDB) { // find out who we'll be synchronizing with for the duration of this inv sync debug!( "{:?}: Initializing peer block inventory state", @@ -2729,7 +2728,7 @@ impl PeerNetwork { // always-allowed peer? let mut finished_always_allowed_inv_sync = false; - if always_allowed.len() == 0 { + if always_allowed.is_empty() { // vacuously, we are done so we can return return true; } diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index e832b70184..c103f16eb7 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -241,7 +241,7 @@ impl InvGenerator { tenure_id_consensus_hash: &ConsensusHash, ) -> Result, NetError> { let tip_block_id = StacksBlockId::new(tip_block_ch, tip_block_bh); - if self.processed_tenures.get(&tip_block_id).is_none() { + if !self.processed_tenures.contains_key(&tip_block_id) { // this tip has no known table. // does it have an ancestor with a table? If so, then move its ancestor's table to this // tip. Otherwise, make a new table. diff --git a/stackslib/src/net/mempool/mod.rs b/stackslib/src/net/mempool/mod.rs index 2a4232ad2f..27253180d4 100644 --- a/stackslib/src/net/mempool/mod.rs +++ b/stackslib/src/net/mempool/mod.rs @@ -170,7 +170,7 @@ impl MempoolSync { continue; } // has a data URL? - if convo.data_url.len() == 0 { + if convo.data_url.is_empty() { continue; } // already resolved? 
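The `TipRequest` change a few hunks back follows the standard-library guidance to implement `fmt::Display` rather than `ToString`: the blanket `impl<T: fmt::Display> ToString for T` then supplies `to_string()` for free, and the type also works directly in `format!` and `write!`. A cut-down two-variant version of the same pattern:

use std::fmt;

enum TipRequestLite {
    UseLatestAnchoredTip,
    UseLatestUnconfirmedTip,
}

impl fmt::Display for TipRequestLite {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            Self::UseLatestAnchoredTip => write!(f, ""),
            Self::UseLatestUnconfirmedTip => write!(f, "latest"),
        }
    }
}

fn main() {
    // to_string() comes from the blanket impl; no ToString impl is written.
    assert_eq!(TipRequestLite::UseLatestUnconfirmedTip.to_string(), "latest");
    println!("tip={}", TipRequestLite::UseLatestAnchoredTip);
}

One formatting impl, and every string-producing call site comes along for free, which is why Clippy flags direct `ToString` impls.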
diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 4af4d2a397..58ab1f0b03 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -637,7 +637,7 @@ pub struct RPCHandlerArgs<'a> { pub coord_comms: Option<&'a CoordinatorChannels>, } -impl<'a> RPCHandlerArgs<'a> { +impl RPCHandlerArgs<'_> { pub fn get_estimators_ref( &self, ) -> Option<(&dyn CostEstimator, &dyn FeeEstimator, &dyn CostMetric)> { @@ -982,7 +982,7 @@ impl fmt::Debug for NeighborAddress { } impl NeighborAddress { - pub fn clear_public_key(&mut self) -> () { + pub fn clear_public_key(&mut self) { self.public_key_hash = Hash160([0u8; 20]); } @@ -1384,8 +1384,8 @@ impl NeighborKey { na: &NeighborAddress, ) -> NeighborKey { NeighborKey { - peer_version: peer_version, - network_id: network_id, + peer_version, + network_id, addrbytes: na.addrbytes.clone(), port: na.port, } @@ -1558,9 +1558,9 @@ impl NetworkResult { attachments: vec![], synced_transactions: vec![], stacker_db_sync_results: vec![], - num_state_machine_passes: num_state_machine_passes, - num_inv_sync_passes: num_inv_sync_passes, - num_download_passes: num_download_passes, + num_state_machine_passes, + num_inv_sync_passes, + num_download_passes, num_connected_peers, burn_height, coinbase_height, @@ -1581,33 +1581,28 @@ impl NetworkResult { let pushed_blocks: HashSet<_> = self .pushed_blocks .iter() - .map(|(_, block_list)| { - block_list - .iter() - .map(|block_data| { - block_data - .blocks - .iter() - .map(|block_datum| { - StacksBlockId::new(&block_datum.0, &block_datum.1.block_hash()) - }) - .collect::>() - }) - .flatten() + .flat_map(|(_, block_list)| { + block_list.iter().flat_map(|block_data| { + block_data + .blocks + .iter() + .map(|block_datum| { + StacksBlockId::new(&block_datum.0, &block_datum.1.block_hash()) + }) + .collect::>() + }) }) - .flatten() .collect(); let uploaded_blocks: HashSet<_> = self .uploaded_blocks .iter() - .map(|blk_data| { + .flat_map(|blk_data| { blk_data .blocks .iter() .map(|blk| StacksBlockId::new(&blk.0, &blk.1.block_hash())) }) - .flatten() .collect(); blocks.extend(pushed_blocks.into_iter()); @@ -1620,32 +1615,26 @@ impl NetworkResult { let mut mblocks: HashSet<_> = self .confirmed_microblocks .iter() - .map(|(_, mblocks, _)| mblocks.iter().map(|mblk| mblk.block_hash())) - .flatten() + .flat_map(|(_, mblocks, _)| mblocks.iter().map(|mblk| mblk.block_hash())) .collect(); let pushed_microblocks: HashSet<_> = self .pushed_microblocks .iter() - .map(|(_, mblock_list)| { - mblock_list - .iter() - .map(|(_, mblock_data)| { - mblock_data - .microblocks - .iter() - .map(|mblock| mblock.block_hash()) - }) - .flatten() + .flat_map(|(_, mblock_list)| { + mblock_list.iter().flat_map(|(_, mblock_data)| { + mblock_data + .microblocks + .iter() + .map(|mblock| mblock.block_hash()) + }) }) - .flatten() .collect(); let uploaded_microblocks: HashSet<_> = self .uploaded_microblocks .iter() - .map(|mblk_data| mblk_data.microblocks.iter().map(|mblk| mblk.block_hash())) - .flatten() + .flat_map(|mblk_data| mblk_data.microblocks.iter().map(|mblk| mblk.block_hash())) .collect(); mblocks.extend(pushed_microblocks.into_iter()); @@ -1800,7 +1789,7 @@ impl NetworkResult { } retain }); - mblocks.len() > 0 + !mblocks.is_empty() }); newer .confirmed_microblocks @@ -1828,7 +1817,7 @@ impl NetworkResult { } retain }); - if tx_data.len() == 0 { + if tx_data.is_empty() { continue; } @@ -1850,9 +1839,9 @@ impl NetworkResult { } retain }); - block_data.blocks.len() > 0 + !block_data.blocks.is_empty() }); - if block_list.len() == 0 { 
+ if block_list.is_empty() { continue; } @@ -1873,9 +1862,9 @@ impl NetworkResult { } retain }); - mblock_data.microblocks.len() > 0 + !mblock_data.microblocks.is_empty() }); - if microblock_data.len() == 0 { + if microblock_data.is_empty() { continue; } @@ -1896,9 +1885,9 @@ impl NetworkResult { } retain }); - naka_blocks.blocks.len() > 0 + !naka_blocks.blocks.is_empty() }); - if nakamoto_block_data.len() == 0 { + if nakamoto_block_data.is_empty() { continue; } @@ -1927,7 +1916,7 @@ impl NetworkResult { retain }); - blk_data.blocks.len() > 0 + !blk_data.blocks.is_empty() }); self.uploaded_microblocks.retain_mut(|ref mut mblock_data| { mblock_data.microblocks.retain(|mblk| { @@ -1938,7 +1927,7 @@ impl NetworkResult { retain }); - mblock_data.microblocks.len() > 0 + !mblock_data.microblocks.is_empty() }); self.uploaded_nakamoto_blocks.retain(|nblk| { let retain = !newer_naka_blocks.contains(&nblk.block_id()); @@ -2067,38 +2056,37 @@ impl NetworkResult { } pub fn has_blocks(&self) -> bool { - self.blocks.len() > 0 || self.pushed_blocks.len() > 0 + !self.blocks.is_empty() || !self.pushed_blocks.is_empty() } pub fn has_microblocks(&self) -> bool { - self.confirmed_microblocks.len() > 0 - || self.pushed_microblocks.len() > 0 - || self.uploaded_microblocks.len() > 0 + !self.confirmed_microblocks.is_empty() + || !self.pushed_microblocks.is_empty() + || !self.uploaded_microblocks.is_empty() } pub fn has_nakamoto_blocks(&self) -> bool { - self.nakamoto_blocks.len() > 0 - || self.pushed_nakamoto_blocks.len() > 0 - || self.uploaded_nakamoto_blocks.len() > 0 + !self.nakamoto_blocks.is_empty() + || !self.pushed_nakamoto_blocks.is_empty() + || !self.uploaded_nakamoto_blocks.is_empty() } pub fn has_transactions(&self) -> bool { - self.pushed_transactions.len() > 0 - || self.uploaded_transactions.len() > 0 - || self.synced_transactions.len() > 0 + !self.pushed_transactions.is_empty() + || !self.uploaded_transactions.is_empty() + || !self.synced_transactions.is_empty() } pub fn has_attachments(&self) -> bool { - self.attachments.len() > 0 + !self.attachments.is_empty() } pub fn has_stackerdb_chunks(&self) -> bool { self.stacker_db_sync_results .iter() - .fold(0, |acc, x| acc + x.chunks_to_store.len()) - > 0 - || self.uploaded_stackerdb_chunks.len() > 0 - || self.pushed_stackerdb_chunks.len() > 0 + .any(|x| !x.chunks_to_store.is_empty()) + || !self.uploaded_stackerdb_chunks.is_empty() + || !self.pushed_stackerdb_chunks.is_empty() } pub fn transactions(&self) -> Vec { @@ -2180,7 +2168,7 @@ impl NetworkResult { } } - pub fn consume_http_uploads(&mut self, msgs: Vec) -> () { + pub fn consume_http_uploads(&mut self, msgs: Vec) { for msg in msgs.into_iter() { match msg { StacksMessageType::Transaction(tx_data) => { @@ -2345,23 +2333,23 @@ pub mod test { } } - pub fn close(&mut self) -> () { + pub fn close(&mut self) { self.closed = true; } - pub fn block(&mut self) -> () { + pub fn block(&mut self) { self.block = true; } - pub fn unblock(&mut self) -> () { + pub fn unblock(&mut self) { self.block = false; } - pub fn set_read_error(&mut self, e: Option) -> () { + pub fn set_read_error(&mut self, e: Option) { self.read_error = e; } - pub fn set_write_error(&mut self, e: Option) -> () { + pub fn set_write_error(&mut self, e: Option) { self.write_error = e; } } @@ -2639,7 +2627,7 @@ pub mod test { private_key_expire: start_block + conn_opts.private_key_lifetime, initial_neighbors: vec![], asn4_entries: vec![], - burnchain: burnchain, + burnchain, connection_opts: conn_opts, server_port: 32000, http_port: 32001, @@ 
-2651,7 +2639,7 @@ pub mod test { test_name: "".into(), initial_balances: vec![], initial_lockups: vec![], - spending_account: spending_account, + spending_account, setup_code: "".into(), epochs: None, check_pox_invariants: None, @@ -2691,7 +2679,7 @@ pub mod test { config } - pub fn add_neighbor(&mut self, n: &Neighbor) -> () { + pub fn add_neighbor(&mut self, n: &Neighbor) { self.initial_neighbors.push(n.clone()); } @@ -2973,7 +2961,7 @@ pub mod test { debug!("Not setting aggregate public key"); } // add test-specific boot code - if conf.setup_code.len() > 0 { + if !conf.setup_code.is_empty() { let receipt = clarity_tx.connection().as_transaction(|clarity| { let boot_code_addr = boot_code_test_addr(); let boot_code_account = StacksAccount { @@ -3040,7 +3028,7 @@ pub mod test { if !config.initial_lockups.is_empty() { let lockups = config.initial_lockups.clone(); boot_data.get_bulk_initial_lockups = - Some(Box::new(move || Box::new(lockups.into_iter().map(|e| e)))); + Some(Box::new(move || Box::new(lockups.into_iter()))); } let (chainstate, _) = StacksChainState::open_and_exec( @@ -3204,15 +3192,15 @@ pub mod test { peer_network.local_peer = local_peer; TestPeer { - config: config, + config, network: peer_network, sortdb: Some(sortdb), miner, stacks_node: Some(stacks_node), - relayer: relayer, + relayer, mempool: Some(mempool), - chainstate_path: chainstate_path, - coord: coord, + chainstate_path, + coord, indexer: Some(indexer), malleablized_blocks: vec![], mine_malleablized_blocks: true, @@ -3240,7 +3228,7 @@ pub mod test { self.network.chain_view = chain_view; for n in self.config.initial_neighbors.iter() { - self.network.connect_peer(&n.addr).and_then(|e| Ok(()))?; + self.network.connect_peer(&n.addr).map(|_| ())?; } Ok(()) } @@ -3383,7 +3371,7 @@ pub mod test { self.coord.handle_new_stacks_block().unwrap(); self.coord.handle_new_nakamoto_stacks_block().unwrap(); - receipts_res.and_then(|receipts| Ok((net_result, receipts))) + receipts_res.map(|receipts| (net_result, receipts)) } pub fn step_dns(&mut self, dns_client: &mut DNSClient) -> Result { @@ -3852,7 +3840,7 @@ pub mod test { &mut self, microblocks: &Vec, ) -> Result { - assert!(microblocks.len() > 0); + assert!(!microblocks.is_empty()); let sortdb = self.sortdb.take().unwrap(); let mut node = self.stacks_node.take().unwrap(); let res = { @@ -3904,7 +3892,7 @@ pub mod test { &mut self, block: &StacksBlock, microblocks: &Vec, - ) -> () { + ) { let sortdb = self.sortdb.take().unwrap(); let mut node = self.stacks_node.take().unwrap(); { @@ -3988,7 +3976,7 @@ pub mod test { block: &StacksBlock, consensus_hash: &ConsensusHash, microblocks: &Vec, - ) -> () { + ) { let sortdb = self.sortdb.take().unwrap(); let mut node = self.stacks_node.take().unwrap(); { @@ -4503,7 +4491,7 @@ pub mod test { view_res } - pub fn dump_frontier(&self) -> () { + pub fn dump_frontier(&self) { let conn = self.network.peerdb.conn(); let peers = PeerDB::get_all_peers(conn).unwrap(); debug!("--- BEGIN ALL PEERS ({}) ---", peers.len()); diff --git a/stackslib/src/net/neighbors/comms.rs b/stackslib/src/net/neighbors/comms.rs index ed0e03f5c6..e41295704c 100644 --- a/stackslib/src/net/neighbors/comms.rs +++ b/stackslib/src/net/neighbors/comms.rs @@ -145,7 +145,7 @@ pub trait NeighborComms { self.remove_connecting(network, &nk); return self .neighbor_handshake(network, &nk) - .and_then(|handle| Ok(Some(handle))); + .map(|handle| Some(handle)); } if let Some(event_id) = self.get_connecting(network, &nk) { @@ -201,7 +201,7 @@ pub trait NeighborComms { 
self.remove_connecting(network, &alt_nk); return self .neighbor_handshake(network, &alt_nk) - .and_then(|handle| Ok(Some(handle))); + .map(|handle| Some(handle)); } Err(e) => { info!( @@ -247,7 +247,7 @@ pub trait NeighborComms { self.remove_connecting(network, &nk); return self .neighbor_handshake(network, &nk) - .and_then(|handle| Ok(Some(handle))); + .map(|handle| Some(handle)); } test_debug!( "{:?}: Already connected to {:?} on event {} (address: {:?})", diff --git a/stackslib/src/net/neighbors/db.rs b/stackslib/src/net/neighbors/db.rs index 0289875f11..ebf83af962 100644 --- a/stackslib/src/net/neighbors/db.rs +++ b/stackslib/src/net/neighbors/db.rs @@ -195,7 +195,7 @@ pub trait NeighborWalkDB { ) .map_err(net_error::DBError)?; - if neighbors.len() == 0 { + if neighbors.is_empty() { debug!( "{:?}: No neighbors available in the peer DB newer than {}!", network.get_local_peer(), @@ -205,7 +205,7 @@ pub trait NeighborWalkDB { &network.peerdb_conn(), network.get_local_peer().network_id, )?; - if seed_nodes.len() == 0 { + if seed_nodes.is_empty() { return Err(net_error::NoSuchNeighbor); } return Ok(seed_nodes); @@ -261,7 +261,7 @@ pub trait NeighborWalkDB { }) .collect(); - if next_neighbors.len() == 0 { + if next_neighbors.is_empty() { return Err(net_error::NoSuchNeighbor); } @@ -295,7 +295,7 @@ impl PeerDBNeighborWalk { let mut slots = PeerDB::peer_slots(conn, nk.network_id, &nk.addrbytes, nk.port) .map_err(net_error::DBError)?; - if slots.len() == 0 { + if slots.is_empty() { // not present return Ok(None); } diff --git a/stackslib/src/net/neighbors/mod.rs b/stackslib/src/net/neighbors/mod.rs index 450dc04463..cc3fd73db8 100644 --- a/stackslib/src/net/neighbors/mod.rs +++ b/stackslib/src/net/neighbors/mod.rs @@ -118,7 +118,7 @@ impl PeerNetwork { fn new_outbound_or_pingback_walk( &self, ) -> Result, net_error> { - if self.get_walk_pingbacks().len() == 0 { + if self.get_walk_pingbacks().is_empty() { debug!( "{:?}: no walk pingbacks, so instantiate a normal neighbor walk", self.get_local_peer() diff --git a/stackslib/src/net/neighbors/neighbor.rs b/stackslib/src/net/neighbors/neighbor.rs index 617860063e..64a033ce9c 100644 --- a/stackslib/src/net/neighbors/neighbor.rs +++ b/stackslib/src/net/neighbors/neighbor.rs @@ -35,7 +35,7 @@ impl Neighbor { Neighbor { addr: key.clone(), public_key: pubk.clone(), - expire_block: expire_block, + expire_block, last_contact_time: 0, allowed: 0, denied: 0, @@ -49,9 +49,9 @@ impl Neighbor { /// Update this peer in the DB. /// If there's no DB entry for this peer, then do nothing. /// Updates last-contact-time to now, since this is only called when we get back a Handshake - pub fn save_update<'a>( + pub fn save_update( &mut self, - tx: &DBTx<'a>, + tx: &DBTx<'_>, stacker_dbs: Option<&[QualifiedContractIdentifier]>, ) -> Result<(), net_error> { self.last_contact_time = get_epoch_time_secs(); @@ -66,9 +66,9 @@ impl Neighbor { /// Updates last-contact-time to now, since this is only called when we get back a Handshake /// Return true if saved. /// Return false if not saved -- i.e. the frontier is full and we should try evicting neighbors. 
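// Illustrative sketch, not part of the patch: the comms.rs hunks above replace
// `and_then(|handle| Ok(Some(handle)))` with `map`. When the closure can never
// fail, `Result::map` expresses the same transformation without re-wrapping in
// `Ok`. All names below (`lookup`, `fetch`) are hypothetical.
fn lookup(id: u32) -> Result<Option<u32>, String> {
    let fetch = |id: u32| -> Result<u32, String> { Ok(id + 1) };
    // Before: fetch(id).and_then(|v| Ok(Some(v)))
    // After: the infallible wrap-up moves into `map`; `Some` is the closure.
    fetch(id).map(Some)
}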
- pub fn save<'a>( + pub fn save( &mut self, - tx: &DBTx<'a>, + tx: &DBTx<'_>, stacker_dbs: Option<&[QualifiedContractIdentifier]>, ) -> Result { self.last_contact_time = get_epoch_time_secs(); diff --git a/stackslib/src/net/neighbors/walk.rs b/stackslib/src/net/neighbors/walk.rs index d4f1cd089b..f16483b361 100644 --- a/stackslib/src/net/neighbors/walk.rs +++ b/stackslib/src/net/neighbors/walk.rs @@ -78,23 +78,23 @@ impl NeighborWalkResult { } } - pub fn add_new(&mut self, nk: NeighborKey) -> () { + pub fn add_new(&mut self, nk: NeighborKey) { self.new_connections.insert(nk); } - pub fn add_broken(&mut self, nk: NeighborKey) -> () { + pub fn add_broken(&mut self, nk: NeighborKey) { self.broken_connections.insert(nk); } - pub fn add_dead(&mut self, nk: NeighborKey) -> () { + pub fn add_dead(&mut self, nk: NeighborKey) { self.dead_connections.insert(nk); } - pub fn add_replaced(&mut self, nk: NeighborKey) -> () { + pub fn add_replaced(&mut self, nk: NeighborKey) { self.replaced_neighbors.insert(nk); } - pub fn clear(&mut self) -> () { + pub fn clear(&mut self) { self.new_connections.clear(); self.dead_connections.clear(); self.broken_connections.clear(); @@ -348,7 +348,7 @@ impl NeighborWalk { network: &PeerNetwork, ) -> Result, net_error> { let event_ids: Vec<_> = network.iter_peer_event_ids().collect(); - if event_ids.len() == 0 { + if event_ids.is_empty() { debug!( "{:?}: failed to begin inbound neighbor walk: no one's connected to us", network.get_local_peer() @@ -429,7 +429,7 @@ impl NeighborWalk { comms: NC, network: &PeerNetwork, ) -> Result, net_error> { - if network.get_walk_pingbacks().len() == 0 { + if network.get_walk_pingbacks().is_empty() { debug!("{:?}: no walk pingbacks", network.get_local_peer()); return Err(net_error::NoSuchNeighbor); } @@ -532,7 +532,7 @@ impl NeighborWalk { } /// Clear the walk's connection state - fn clear_connections(&mut self, _local_peer: &LocalPeer) -> () { + fn clear_connections(&mut self, _local_peer: &LocalPeer) { test_debug!("{:?}: Walk clear connections", _local_peer); self.pending_neighbor_addrs = None; self.comms.reset(); @@ -1043,7 +1043,7 @@ impl NeighborWalk { } } - if still_pending.len() > 0 { + if !still_pending.is_empty() { // try again self.pending_neighbor_addrs = Some(still_pending); return Ok(false); @@ -1390,7 +1390,7 @@ impl NeighborWalk { exclude: Option<&Neighbor>, ) -> Option { let mut rnd = thread_rng(); - if frontier.len() == 0 || (exclude.is_some() && frontier.len() == 1) { + if frontier.is_empty() || (exclude.is_some() && frontier.len() == 1) { return None; } // select a random neighbor index, if exclude is set, and matches this @@ -1456,7 +1456,7 @@ impl NeighborWalk { let mut rnd = thread_rng(); // step to a node in cur_neighbor's frontier, per MHRWDA - let next_neighbor_opt = if self.frontier.len() == 0 { + let next_neighbor_opt = if self.frontier.is_empty() { // stay here for now -- we don't yet know this neighbor's // frontier if self.walk_outbound { @@ -1467,7 +1467,7 @@ impl NeighborWalk { } else { // continuing the walk let next_neighbor = - Self::pick_random_neighbor(&self.frontier, None).expect("BUG: empty frontier size"); // won't panic since self.frontier.len() > 0 + Self::pick_random_neighbor(&self.frontier, None).expect("BUG: empty frontier size"); // won't panic since !self.frontier.is_empty() let walk_prob: f64 = rnd.gen(); if walk_prob < self @@ -1603,7 +1603,7 @@ impl NeighborWalk { } self.network_pingbacks = still_pending; - if self.network_pingbacks.len() > 0 { + if !self.network_pingbacks.is_empty() { 
// still connecting debug!( "{:?}: Still trying to pingback-handshake with {} neighbors", diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 13f7ad7fac..1e38b4d872 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -474,7 +474,7 @@ impl PeerNetwork { ); let pub_ip = connection_opts.public_ip_address.clone(); let pub_ip_learned = pub_ip.is_none(); - local_peer.public_ip_address = pub_ip.clone(); + local_peer.public_ip_address.clone_from(&pub_ip); if connection_opts.disable_inbound_handshakes { debug!("{:?}: disable inbound handshakes", &local_peer); @@ -765,7 +765,7 @@ impl PeerNetwork { } /// Create a transaction against the PeerDB - pub fn peerdb_tx_begin<'a>(&'a mut self) -> Result, db_error> { + pub fn peerdb_tx_begin(&mut self) -> Result, db_error> { self.peerdb.tx_begin() } @@ -1042,7 +1042,7 @@ impl PeerNetwork { /// sent this peer the message in the first place. pub fn broadcast_message( &mut self, - mut neighbor_keys: Vec, + neighbor_keys: Vec, relay_hints: Vec, message_payload: StacksMessageType, ) { @@ -1053,7 +1053,7 @@ impl PeerNetwork { neighbor_keys.len(), &relay_hints ); - for nk in neighbor_keys.drain(..) { + for nk in neighbor_keys.into_iter() { if let Some(event_id) = self.events.get(&nk) { let event_id = *event_id; if let Some(convo) = self.peers.get_mut(&event_id) { @@ -1400,9 +1400,9 @@ impl PeerNetwork { } Ok(()) } - NetworkRequest::Relay(neighbor_key, msg) => self - .relay_signed_message(&neighbor_key, msg) - .and_then(|_| Ok(())), + NetworkRequest::Relay(neighbor_key, msg) => { + self.relay_signed_message(&neighbor_key, msg).map(|_| ()) + } NetworkRequest::Broadcast(relay_hints, msg) => { // pick some neighbors. Note that only some messages can be broadcasted. let neighbor_keys = match msg { @@ -1410,8 +1410,8 @@ impl PeerNetwork { // send to each neighbor that needs one let mut all_neighbors = HashSet::new(); for BlocksDatum(_, block) in data.blocks.iter() { - let mut neighbors = self.sample_broadcast_peers(&relay_hints, block)?; - for nk in neighbors.drain(..) { + let neighbors = self.sample_broadcast_peers(&relay_hints, block)?; + for nk in neighbors.into_iter() { all_neighbors.insert(nk); } } @@ -1421,9 +1421,8 @@ impl PeerNetwork { // send to each neighbor that needs at least one let mut all_neighbors = HashSet::new(); for mblock in data.microblocks.iter() { - let mut neighbors = - self.sample_broadcast_peers(&relay_hints, mblock)?; - for nk in neighbors.drain(..) { + let neighbors = self.sample_broadcast_peers(&relay_hints, mblock)?; + for nk in neighbors.into_iter() { all_neighbors.insert(nk); } } @@ -1746,7 +1745,7 @@ impl PeerNetwork { self.can_register_peer(nk, outbound).and_then(|_| { let other_events = self.get_pubkey_events(pubkh); - if other_events.len() > 0 { + if !other_events.is_empty() { for event_id in other_events.into_iter() { if let Some(convo) = self.peers.get(&event_id) { // only care if we're trying to connect in the same direction @@ -1885,7 +1884,7 @@ impl PeerNetwork { } /// Deregister a socket from our p2p network instance. 
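// Illustrative sketch, not part of the patch: two idioms from the p2p.rs hunks
// above. An owned `Vec` parameter can be consumed with `into_iter()` instead of
// `drain(..)`, dropping the `mut` binding; and `clone_from` assigns a clone
// while potentially reusing the destination's allocation. Toy types only.
fn consume(keys: Vec<String>) {
    for k in keys.into_iter() {
        // each `k` is owned here; no mutable borrow of `keys` is needed
        println!("{k}");
    }
}

fn reuse_buffer(dst: &mut Option<String>, src: &Option<String>) {
    // Equivalent in effect to `*dst = src.clone()`, but may reuse dst's storage.
    dst.clone_from(src);
}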
- fn deregister_socket(&mut self, event_id: usize, socket: mio_net::TcpStream) -> () { + fn deregister_socket(&mut self, event_id: usize, socket: mio_net::TcpStream) { match self.network { Some(ref mut network) => { let _ = network.deregister(event_id, &socket); @@ -1895,7 +1894,7 @@ impl PeerNetwork { } /// Deregister a socket/event pair - pub fn deregister_peer(&mut self, event_id: usize) -> () { + pub fn deregister_peer(&mut self, event_id: usize) { debug!("{:?}: Disconnect event {}", &self.local_peer, event_id); let mut nk_remove: Vec<(NeighborKey, Hash160)> = vec![]; @@ -1956,7 +1955,7 @@ impl PeerNetwork { } /// Deregister by neighbor key - pub fn deregister_neighbor(&mut self, neighbor_key: &NeighborKey) -> () { + pub fn deregister_neighbor(&mut self, neighbor_key: &NeighborKey) { debug!("Disconnect from {:?}", neighbor_key); let event_id = match self.events.get(&neighbor_key) { None => { @@ -1968,7 +1967,7 @@ impl PeerNetwork { } /// Deregister and ban a neighbor - pub fn deregister_and_ban_neighbor(&mut self, neighbor: &NeighborKey) -> () { + pub fn deregister_and_ban_neighbor(&mut self, neighbor: &NeighborKey) { debug!("Disconnect from and ban {:?}", neighbor); match self.events.get(neighbor) { Some(event_id) => { @@ -2294,7 +2293,7 @@ impl PeerNetwork { /// -- Drop broken connections. /// -- Update our frontier. /// -- Prune our frontier if it gets too big. - fn process_neighbor_walk(&mut self, walk_result: NeighborWalkResult) -> () { + fn process_neighbor_walk(&mut self, walk_result: NeighborWalkResult) { for broken in walk_result.broken_connections.iter() { self.deregister_and_ban_neighbor(broken); } @@ -2313,7 +2312,7 @@ impl PeerNetwork { /// Queue up pings to everyone we haven't spoken to in a while to let them know that we're still /// alive. - pub fn queue_ping_heartbeats(&mut self) -> () { + pub fn queue_ping_heartbeats(&mut self) { let now = get_epoch_time_secs(); let mut relay_handles = HashMap::new(); for (_, convo) in self.peers.iter_mut() { @@ -2418,7 +2417,7 @@ impl PeerNetwork { } /// Prune inbound and outbound connections if we can - pub(crate) fn prune_connections(&mut self) -> () { + pub(crate) fn prune_connections(&mut self) { if cfg!(test) && self.connection_opts.disable_network_prune { return; } @@ -2552,7 +2551,7 @@ impl PeerNetwork { // flush each outgoing conversation let mut relay_handles = std::mem::replace(&mut self.relay_handles, HashMap::new()); for (event_id, handle_list) in relay_handles.iter_mut() { - if handle_list.len() == 0 { + if handle_list.is_empty() { debug!("No handles for event {}", event_id); drained.push(*event_id); continue; @@ -2564,7 +2563,7 @@ impl PeerNetwork { event_id ); - while handle_list.len() > 0 { + while !handle_list.is_empty() { debug!("Flush {} relay handles", handle_list.len()); let res = self.with_p2p_convo(*event_id, |_network, convo, client_sock| { if let Some(handle) = handle_list.front_mut() { @@ -2620,7 +2619,7 @@ impl PeerNetwork { } } } - for empty in drained.drain(..) { + for empty in drained.into_iter() { relay_handles.remove(&empty); } @@ -2655,7 +2654,7 @@ impl PeerNetwork { /// Return Err(..) 
on failure #[cfg_attr(test, mutants::skip)] fn begin_learn_public_ip(&mut self) -> Result { - if self.peers.len() == 0 { + if self.peers.is_empty() { return Err(net_error::NoSuchNeighbor); } @@ -2729,7 +2728,7 @@ impl PeerNetwork { } /// Disconnect from all peers - fn disconnect_all(&mut self) -> () { + fn disconnect_all(&mut self) { let mut all_event_ids = vec![]; for (eid, _) in self.peers.iter() { all_event_ids.push(*eid); @@ -2949,8 +2948,8 @@ impl PeerNetwork { old_pox_id, mut blocks, mut microblocks, - mut broken_http_peers, - mut broken_p2p_peers, + broken_http_peers, + broken_p2p_peers, ) = match self.download_blocks(sortdb, chainstate, dns_client, ibd) { Ok(x) => x, Err(net_error::NotConnected) => { @@ -3004,7 +3003,7 @@ impl PeerNetwork { } let _ = PeerNetwork::with_network_state(self, |ref mut network, ref mut network_state| { - for dead_event in broken_http_peers.drain(..) { + for dead_event in broken_http_peers.into_iter() { debug!( "{:?}: De-register dead/broken HTTP connection {}", &network.local_peer, dead_event @@ -3016,7 +3015,7 @@ impl PeerNetwork { Ok(()) }); - for broken_neighbor in broken_p2p_peers.drain(..) { + for broken_neighbor in broken_p2p_peers.into_iter() { debug!( "{:?}: De-register dead/broken neighbor {:?}", &self.local_peer, &broken_neighbor @@ -3269,13 +3268,13 @@ impl PeerNetwork { } let reward_cycle_start = self.antientropy_start_reward_cycle; - let reward_cycle_finish = - self.antientropy_start_reward_cycle - .saturating_sub(self.connection_opts.inv_reward_cycles) as u64; + let reward_cycle_finish = self + .antientropy_start_reward_cycle + .saturating_sub(self.connection_opts.inv_reward_cycles); self.antientropy_start_reward_cycle = reward_cycle_finish; - if neighbor_keys.len() == 0 { + if neighbor_keys.is_empty() { return; } @@ -3831,7 +3830,7 @@ impl PeerNetwork { match dns_client_opt { Some(ref mut dns_client) => { - let mut dead_events = PeerNetwork::with_attachments_downloader( + let dead_events = PeerNetwork::with_attachments_downloader( self, |network, attachments_downloader| { let mut dead_events = vec![]; @@ -3854,7 +3853,7 @@ impl PeerNetwork { let _ = PeerNetwork::with_network_state( self, |ref mut network, ref mut network_state| { - for event_id in dead_events.drain(..) { + for event_id in dead_events.into_iter() { debug!( "Atlas: Deregistering faulty connection (event_id: {})", event_id @@ -4119,7 +4118,8 @@ impl PeerNetwork { /// Get the local peer from the peer DB, but also preserve the public IP address pub fn load_local_peer(&self) -> Result { let mut lp = PeerDB::get_local_peer(&self.peerdb.conn())?; - lp.public_ip_address = self.local_peer.public_ip_address.clone(); + lp.public_ip_address + .clone_from(&self.local_peer.public_ip_address); Ok(lp) } @@ -4831,8 +4831,8 @@ impl PeerNetwork { // prune back our connections if it's been a while // (only do this if we're done with all other tasks). // Also, process banned peers. - if let Ok(mut dead_events) = self.process_bans() { - for dead in dead_events.drain(..) 
{ + if let Ok(dead_events) = self.process_bans() { + for dead in dead_events.into_iter() { debug!( "{:?}: Banned connection on event {}", &self.local_peer, dead @@ -5657,7 +5657,7 @@ mod test { p2p.process_connecting_sockets(&mut p2p_poll_state); let mut banned = p2p.process_bans().unwrap(); - if banned.len() > 0 { + if !banned.is_empty() { test_debug!("Banned {} peer(s)", banned.len()); } @@ -5687,7 +5687,7 @@ mod test { } let banned = rx.recv().unwrap(); - assert!(banned.len() >= 1); + assert!(!banned.is_empty()); p2p_thread.join().unwrap(); test_debug!("dispatcher thread joined"); diff --git a/stackslib/src/net/poll.rs b/stackslib/src/net/poll.rs index 0362745f90..a4c2c46465 100644 --- a/stackslib/src/net/poll.rs +++ b/stackslib/src/net/poll.rs @@ -74,9 +74,9 @@ impl NetworkState { let events = mio::Events::with_capacity(event_capacity); Ok(NetworkState { - poll: poll, - events: events, - event_capacity: event_capacity, + poll, + events, + event_capacity, servers: vec![], count: 1, event_map: HashMap::new(), diff --git a/stackslib/src/net/prune.rs b/stackslib/src/net/prune.rs index c33b7fea76..96edb12c2a 100644 --- a/stackslib/src/net/prune.rs +++ b/stackslib/src/net/prune.rs @@ -269,11 +269,11 @@ impl PeerNetwork { while num_outbound - (ret.len() as u64) > self.connection_opts.soft_num_neighbors { let mut weighted_sample: HashMap = HashMap::new(); for (org, neighbor_info) in org_neighbors.iter() { - if neighbor_info.len() > 0 { + if !neighbor_info.is_empty() { weighted_sample.insert(*org, neighbor_info.len()); } } - if weighted_sample.len() == 0 { + if weighted_sample.is_empty() { // nothing to do break; } @@ -397,7 +397,7 @@ impl PeerNetwork { } /// Prune our frontier. Ignore connections in the preserve set. - pub fn prune_frontier(&mut self, preserve: &HashSet) -> () { + pub fn prune_frontier(&mut self, preserve: &HashSet) { let num_outbound = PeerNetwork::count_outbound_conversations(&self.peers); let num_inbound = (self.peers.len() as u64).saturating_sub(num_outbound); debug!( @@ -449,7 +449,7 @@ impl PeerNetwork { #[cfg(test)] { - if pruned_by_ip.len() > 0 || pruned_by_org.len() > 0 { + if !pruned_by_ip.is_empty() || !pruned_by_org.is_empty() { let (mut inbound, mut outbound) = self.dump_peer_table(); inbound.sort(); diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index b93171916c..9121bac2c9 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -274,7 +274,7 @@ impl RelayerStats { } /// Add in new stats gleaned from the PeerNetwork's network result - pub fn merge_relay_stats(&mut self, mut stats: HashMap) -> () { + pub fn merge_relay_stats(&mut self, mut stats: HashMap) { for (mut addr, new_stats) in stats.drain() { addr.clear_public_key(); let inserted = if let Some(stats) = self.relay_stats.get_mut(&addr) { @@ -291,7 +291,7 @@ impl RelayerStats { } to_remove.push(*ts); } - for ts in to_remove.drain(..) { + for ts in to_remove.into_iter() { self.relay_updates.remove(&ts); } } @@ -307,7 +307,7 @@ impl RelayerStats { } /// Record that we've seen a relayed message from one of our neighbors. 
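// Illustrative sketch, not part of the patch: the `add_relayed_message` hunk
// below rewrites `while relayed.len() > 0` as `while !relayed.is_empty()`.
// A minimal prune loop in the same shape, with a hypothetical queue of
// (timestamp, payload) pairs:
fn prune_stale(relayed: &mut std::collections::VecDeque<(u64, String)>, cutoff: u64) {
    // `is_empty()` states the intent directly and avoids computing a length.
    while let Some(&(ts, _)) = relayed.front() {
        if ts >= cutoff {
            break;
        }
        // head entry is stale; drop it and re-check the new front
        relayed.pop_front();
    }
}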
- pub fn add_relayed_message(&mut self, nk: NeighborKey, msg: &R) -> () { + pub fn add_relayed_message(&mut self, nk: NeighborKey, msg: &R) { let h = msg.get_digest(); let now = get_epoch_time_secs(); let inserted = if let Some(relayed) = self.recent_messages.get_mut(&nk) { @@ -319,7 +319,7 @@ impl RelayerStats { } // prune stale - while relayed.len() > 0 { + while !relayed.is_empty() { let head_ts = match relayed.front() { Some((ts, _)) => *ts, None => { @@ -342,7 +342,7 @@ impl RelayerStats { let mut to_remove = vec![]; for (ts, old_nk) in self.recent_updates.iter() { self.recent_messages.remove(old_nk); - if self.recent_messages.len() <= (MAX_RELAYER_STATS as usize) - 1 { + if self.recent_messages.len() <= MAX_RELAYER_STATS - 1 { break; } to_remove.push(*ts); @@ -363,7 +363,7 @@ impl RelayerStats { } /// Process a neighbor ban -- remove any state for this neighbor - pub fn process_neighbor_ban(&mut self, nk: &NeighborKey) -> () { + pub fn process_neighbor_ban(&mut self, nk: &NeighborKey) { let addr = NeighborAddress::from_neighbor_key((*nk).clone(), Hash160([0u8; 20])); self.recent_messages.remove(nk); self.relay_stats.remove(&addr); @@ -406,7 +406,7 @@ impl RelayerStats { // look up ASNs let mut asns = HashMap::new(); for nk in neighbors.iter() { - if asns.get(nk).is_none() { + if !asns.contains_key(nk) { match PeerDB::asn_lookup(conn, &nk.addrbytes)? { Some(asn) => asns.insert((*nk).clone(), asn), None => asns.insert((*nk).clone(), 0), @@ -516,10 +516,10 @@ impl RelayerStats { if norm <= 1 { // there is one or zero options - if rankings_vec.len() > 0 { - return vec![rankings_vec[0].0.clone()]; - } else { + if rankings_vec.is_empty() { return vec![]; + } else { + return vec![rankings_vec[0].0.clone()]; } } @@ -1150,7 +1150,7 @@ impl Relayer { for (anchored_block_hash, (relayers, mblocks_map)) in new_microblocks.into_iter() { for (_, mblock) in mblocks_map.into_iter() { - if mblocks_data.get(&anchored_block_hash).is_none() { + if !mblocks_data.contains_key(&anchored_block_hash) { mblocks_data.insert(anchored_block_hash.clone(), vec![]); } @@ -1437,7 +1437,7 @@ impl Relayer { for (consensus_hash, microblock_stream, _download_time) in network_result.confirmed_microblocks.iter() { - if microblock_stream.len() == 0 { + if microblock_stream.is_empty() { continue; } let anchored_block_hash = microblock_stream[0].header.prev_block.clone(); @@ -1798,7 +1798,7 @@ impl Relayer { } } - if accepted_blocks.len() > 0 { + if !accepted_blocks.is_empty() { pushed_blocks.push(AcceptedNakamotoBlocks { relayers: relayers.clone(), blocks: accepted_blocks, @@ -2078,7 +2078,9 @@ impl Relayer { Relayer::preprocess_pushed_microblocks(&sort_ic, network_result, chainstate)?; bad_neighbors.append(&mut new_bad_neighbors); - if new_blocks.len() > 0 || new_microblocks.len() > 0 || new_confirmed_microblocks.len() > 0 + if !new_blocks.is_empty() + || !new_microblocks.is_empty() + || !new_confirmed_microblocks.is_empty() { info!( "Processing newly received Stacks blocks: {}, microblocks: {}, confirmed microblocks: {}", @@ -2237,7 +2239,7 @@ impl Relayer { } filtered_tx_data.push((relayers, tx)); } - if filtered_tx_data.len() > 0 { + if !filtered_tx_data.is_empty() { filtered_pushed_transactions.insert(nk, filtered_tx_data); } } @@ -2608,7 +2610,7 @@ impl Relayer { let new_block_chs = new_blocks.iter().map(|(ch, _)| ch.clone()).collect(); let available = Relayer::load_blocks_available_data(sortdb, new_block_chs) .unwrap_or(BlocksAvailableMap::new()); - if available.len() > 0 { + if !available.is_empty() { debug!("{:?}: 
Blocks available: {}", &_local_peer, available.len()); if let Err(e) = self.p2p.advertize_blocks(available, new_blocks) { warn!("Failed to advertize new blocks: {:?}", &e); @@ -2622,7 +2624,7 @@ impl Relayer { .collect(); let mblocks_available = Relayer::load_blocks_available_data(sortdb, new_mblock_chs) .unwrap_or(BlocksAvailableMap::new()); - if mblocks_available.len() > 0 { + if !mblocks_available.is_empty() { debug!( "{:?}: Confirmed microblock streams available: {}", &_local_peer, @@ -2637,7 +2639,7 @@ impl Relayer { } // have the p2p thread forward all new unconfirmed microblocks - if new_microblocks.len() > 0 { + if !new_microblocks.is_empty() { debug!( "{:?}: Unconfirmed microblocks: {}", &_local_peer, @@ -2685,7 +2687,7 @@ impl Relayer { // attempt to relay messages (note that this is all best-effort). // punish bad peers - if bad_block_neighbors.len() > 0 { + if !bad_block_neighbors.is_empty() { debug!( "{:?}: Ban {} peers", &_local_peer, @@ -2776,7 +2778,7 @@ impl Relayer { for blocks_and_relayers in accepted_blocks.into_iter() { let AcceptedNakamotoBlocks { relayers, blocks } = blocks_and_relayers; - if blocks.len() == 0 { + if blocks.is_empty() { continue; } @@ -2817,7 +2819,7 @@ impl Relayer { &relayers ); - if relay_blocks.len() == 0 { + if relay_blocks.is_empty() { continue; } @@ -2883,7 +2885,7 @@ impl Relayer { .unwrap_or(u64::MAX); // don't panic if we somehow receive more than u64::MAX blocks // punish bad peers - if bad_neighbors.len() > 0 { + if !bad_neighbors.is_empty() { debug!("{:?}: Ban {} peers", &local_peer, bad_neighbors.len()); if let Err(e) = self.p2p.ban_peers(bad_neighbors) { warn!("Failed to ban bad-block peers: {:?}", &e); @@ -2891,7 +2893,7 @@ impl Relayer { } // relay if not IBD - if !ibd && accepted_blocks.len() > 0 { + if !ibd && !accepted_blocks.is_empty() { self.relay_epoch3_blocks(local_peer, sortdb, accepted_blocks); } num_new_nakamoto_blocks @@ -2932,7 +2934,7 @@ impl Relayer { ) .unwrap_or(vec![]); - if new_txs.len() > 0 { + if !new_txs.is_empty() { debug!( "{:?}: Send {} transactions to neighbors", &_local_peer, @@ -3123,8 +3125,7 @@ impl PeerNetwork { recipient: &NeighborKey, wanted: &[(ConsensusHash, BurnchainHeaderHash)], mut msg_builder: S, - ) -> () - where + ) where S: FnMut(BlocksAvailableData) -> StacksMessageType, { for i in (0..wanted.len()).step_by(BLOCKS_AVAILABLE_MAX_LEN as usize) { @@ -3165,7 +3166,7 @@ impl PeerNetwork { recipient: &NeighborKey, consensus_hash: ConsensusHash, block: StacksBlock, - ) -> () { + ) { let blk_hash = block.block_hash(); let ch = consensus_hash.clone(); let payload = BlocksData { @@ -3204,11 +3205,11 @@ impl PeerNetwork { recipient: &NeighborKey, index_block_hash: StacksBlockId, microblocks: Vec, - ) -> () { + ) { let idx_bhh = index_block_hash.clone(); let payload = MicroblocksData { index_anchor_block: index_block_hash, - microblocks: microblocks, + microblocks, }; let message = match self.sign_for_neighbor(recipient, StacksMessageType::Microblocks(payload)) { @@ -3354,7 +3355,7 @@ impl PeerNetwork { availability_data: BlocksAvailableMap, blocks: HashMap, ) -> Result<(usize, usize), net_error> { - let (mut outbound_recipients, mut inbound_recipients) = + let (outbound_recipients, inbound_recipients) = self.find_block_recipients(&availability_data)?; debug!( "{:?}: Advertize {} blocks to {} inbound peers, {} outbound peers", @@ -3367,7 +3368,7 @@ impl PeerNetwork { let num_inbound = inbound_recipients.len(); let num_outbound = outbound_recipients.len(); - for recipient in outbound_recipients.drain(..) 
{ + for recipient in outbound_recipients.into_iter() { debug!( "{:?}: Advertize {} blocks to outbound peer {}", &self.local_peer, @@ -3380,7 +3381,7 @@ impl PeerNetwork { &blocks, )?; } - for recipient in inbound_recipients.drain(..) { + for recipient in inbound_recipients.into_iter() { debug!( "{:?}: Advertize {} blocks to inbound peer {}", &self.local_peer, @@ -3405,14 +3406,14 @@ impl PeerNetwork { availability_data: BlocksAvailableMap, microblocks: HashMap)>, ) -> Result<(usize, usize), net_error> { - let (mut outbound_recipients, mut inbound_recipients) = + let (outbound_recipients, inbound_recipients) = self.find_block_recipients(&availability_data)?; debug!("{:?}: Advertize {} confirmed microblock streams to {} inbound peers, {} outbound peers", &self.local_peer, availability_data.len(), outbound_recipients.len(), inbound_recipients.len()); let num_inbound = inbound_recipients.len(); let num_outbound = outbound_recipients.len(); - for recipient in outbound_recipients.drain(..) { + for recipient in outbound_recipients.into_iter() { debug!( "{:?}: Advertize {} confirmed microblock streams to outbound peer {}", &self.local_peer, @@ -3425,7 +3426,7 @@ impl PeerNetwork { µblocks, )?; } - for recipient in inbound_recipients.drain(..) { + for recipient in inbound_recipients.into_iter() { debug!( "{:?}: Advertize {} confirmed microblock streams to inbound peer {}", &self.local_peer, @@ -3441,7 +3442,7 @@ impl PeerNetwork { /// Update accounting information for relayed messages from a network result. /// This influences selecting next-hop neighbors to get data from us. - pub fn update_relayer_stats(&mut self, network_result: &NetworkResult) -> () { + pub fn update_relayer_stats(&mut self, network_result: &NetworkResult) { // synchronize for (_, convo) in self.peers.iter_mut() { let stats = convo.get_stats_mut().take_relayers(); diff --git a/stackslib/src/net/rpc.rs b/stackslib/src/net/rpc.rs index 3a44de7953..e515934738 100644 --- a/stackslib/src/net/rpc.rs +++ b/stackslib/src/net/rpc.rs @@ -481,7 +481,7 @@ impl ConversationHttp { self.pending_response.is_none() && self.connection.inbox_len() == 0 && self.connection.outbox_len() == 0 - && self.reply_streams.len() == 0 + && self.reply_streams.is_empty() } /// Is the conversation out of pending data? 
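// Illustrative sketch, not part of the patch: many hunks in this file drop an
// explicit `-> ()` return type. The unit type is the default, so removing it
// changes nothing about the compiled code; clippy's `unused_unit` lint flags it.
// Hypothetical struct:
struct Timeouts {
    pending: Vec<u64>,
}

impl Timeouts {
    // Before: pub fn clear(&mut self) -> () { ... }
    pub fn clear(&mut self) {
        self.pending.clear();
    }
}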
@@ -593,7 +593,7 @@ impl ConversationHttp { /// Remove all timed-out messages, and ding the remote peer as unhealthy #[cfg_attr(test, mutants::skip)] - pub fn clear_timeouts(&mut self) -> () { + pub fn clear_timeouts(&mut self) { self.connection.drain_timeouts(); } diff --git a/stackslib/src/net/server.rs b/stackslib/src/net/server.rs index fdad3b85df..78f0f6fbb5 100644 --- a/stackslib/src/net/server.rs +++ b/stackslib/src/net/server.rs @@ -82,7 +82,7 @@ impl HttpPeer { } } - pub fn set_server_handle(&mut self, h: usize, addr: SocketAddr) -> () { + pub fn set_server_handle(&mut self, h: usize, addr: SocketAddr) { self.http_server_handle = h; self.http_server_addr = addr; } @@ -287,7 +287,7 @@ impl HttpPeer { /// Deregister a socket/event pair #[cfg_attr(test, mutants::skip)] - pub fn deregister_http(&mut self, network_state: &mut NetworkState, event_id: usize) -> () { + pub fn deregister_http(&mut self, network_state: &mut NetworkState, event_id: usize) { test_debug!("Remove HTTP event {}", event_id); self.peers.remove(&event_id); @@ -306,7 +306,7 @@ impl HttpPeer { } /// Remove slow/unresponsive peers - fn disconnect_unresponsive(&mut self, network_state: &mut NetworkState) -> () { + fn disconnect_unresponsive(&mut self, network_state: &mut NetworkState) { let now = get_epoch_time_secs(); let mut to_remove = vec![]; for (event_id, (socket, _, _, ts)) in self.connecting.iter() { @@ -338,7 +338,7 @@ impl HttpPeer { } } - for event_id in to_remove.drain(0..) { + for event_id in to_remove.into_iter() { self.deregister_http(network_state, event_id); } } @@ -522,7 +522,7 @@ impl HttpPeer { network_state: &mut NetworkState, node_state: &mut StacksNodeState, poll_state: &mut NetworkPollState, - ) -> () { + ) { for event_id in poll_state.ready.iter() { if self.connecting.contains_key(event_id) { let (socket, data_url, initial_request_opt, _) = @@ -779,7 +779,7 @@ mod test { let mut resp = vec![]; match sock.read_to_end(&mut resp) { Ok(_) => { - if resp.len() == 0 { + if resp.is_empty() { test_debug!("Client {} did not receive any data", i); client_sx.send(Err(net_error::PermanentlyDrained)).unwrap(); return; @@ -1106,7 +1106,7 @@ mod test { }, |client_id, http_response_bytes_res| { match http_response_bytes_res { - Ok(bytes) => bytes.len() == 0, // should not have gotten any data + Ok(bytes) => bytes.is_empty(), // should not have gotten any data Err(net_error::PermanentlyDrained) => true, Err(err) => { // should have failed diff --git a/stackslib/src/net/stackerdb/db.rs b/stackslib/src/net/stackerdb/db.rs index 2b735668ac..0faf5bbe03 100644 --- a/stackslib/src/net/stackerdb/db.rs +++ b/stackslib/src/net/stackerdb/db.rs @@ -39,7 +39,7 @@ use crate::util_lib::db::{ FromRow, }; -const STACKER_DB_SCHEMA: &'static [&'static str] = &[ +const STACKER_DB_SCHEMA: &[&str] = &[ r#" PRAGMA foreign_keys = ON; "#, @@ -191,7 +191,7 @@ fn inner_get_slot_validation( query_row(conn, &sql, args).map_err(|e| e.into()) } -impl<'a> StackerDBTx<'a> { +impl StackerDBTx<'_> { pub fn commit(self) -> Result<(), db_error> { self.sql_tx.commit().map_err(db_error::from) } @@ -527,10 +527,7 @@ impl StackerDBs { /// Open a transaction on the Stacker DB. 
/// The config would be obtained from a DBSelector instance - pub fn tx_begin<'a>( - &'a mut self, - config: StackerDBConfig, - ) -> Result, db_error> { + pub fn tx_begin(&mut self, config: StackerDBConfig) -> Result, db_error> { let sql_tx = tx_begin_immediate(&mut self.conn)?; Ok(StackerDBTx { sql_tx, config }) } diff --git a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs index 9d1b25af51..899990402d 100644 --- a/stackslib/src/net/stackerdb/mod.rs +++ b/stackslib/src/net/stackerdb/mod.rs @@ -347,7 +347,7 @@ impl StackerDBs { &e ); } - } else if (new_config != stackerdb_config && new_config.signers.len() > 0) + } else if (new_config != stackerdb_config && !new_config.signers.is_empty()) || (new_config == stackerdb_config && new_config.signers.len() != self.get_slot_versions(&stackerdb_contract_id)?.len()) diff --git a/stackslib/src/net/stackerdb/sync.rs b/stackslib/src/net/stackerdb/sync.rs index 237f582d26..7dfeb809c7 100644 --- a/stackslib/src/net/stackerdb/sync.rs +++ b/stackslib/src/net/stackerdb/sync.rs @@ -225,7 +225,7 @@ impl StackerDBSync { let mut eviction_index = None; if self.last_eviction_time + 60 < get_epoch_time_secs() { self.last_eviction_time = get_epoch_time_secs(); - if self.replicas.len() > 0 { + if !self.replicas.is_empty() { eviction_index = Some(thread_rng().gen_range(0..self.replicas.len())); } } @@ -558,7 +558,7 @@ impl StackerDBSync { self.chunk_fetch_priorities .retain(|(chunk, ..)| chunk.slot_id != slot_id); - if self.chunk_fetch_priorities.len() > 0 { + if !self.chunk_fetch_priorities.is_empty() { let next_chunk_fetch_priority = self.next_chunk_fetch_priority % self.chunk_fetch_priorities.len(); self.next_chunk_fetch_priority = next_chunk_fetch_priority; @@ -611,7 +611,7 @@ impl StackerDBSync { self.chunk_push_priorities .retain(|(chunk, ..)| chunk.chunk_data.slot_id != slot_id); - if self.chunk_push_priorities.len() > 0 { + if !self.chunk_push_priorities.is_empty() { let next_chunk_push_priority = self.next_chunk_push_priority % self.chunk_push_priorities.len(); self.next_chunk_push_priority = next_chunk_push_priority; @@ -700,7 +700,7 @@ impl StackerDBSync { /// Returns Err(NoSuchNeighbor) if we don't have anyone to talk to /// Returns Err(..) on DB query error pub fn connect_begin(&mut self, network: &mut PeerNetwork) -> Result { - if self.replicas.len() == 0 { + if self.replicas.is_empty() { // find some from the peer DB let replicas = self.find_qualified_replicas(network)?; self.replicas = replicas; @@ -713,7 +713,7 @@ impl StackerDBSync { network.get_num_p2p_convos(); "replicas" => ?self.replicas ); - if self.replicas.len() == 0 { + if self.replicas.is_empty() { // nothing to do return Err(net_error::NoSuchNeighbor); } @@ -776,7 +776,7 @@ impl StackerDBSync { } } } - Ok(self.connected_replicas.len() > 0) + Ok(!self.connected_replicas.is_empty()) } /// Finish up connecting to our replicas. 
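// Illustrative sketch, not part of the patch: the `tx_begin` and `StackerDBTx`
// hunks above elide lifetimes that appear only once, using the `'_` placeholder
// instead of a named `<'a>`. A minimal guard type showing both spots
// (all names hypothetical):
struct Db {
    rows: Vec<String>,
}

struct Tx<'conn> {
    db: &'conn mut Db,
}

impl Db {
    // Before: pub fn tx_begin<'a>(&'a mut self) -> Tx<'a>
    pub fn tx_begin(&mut self) -> Tx<'_> {
        // the guard borrows `self` for exactly as long as the caller holds it
        Tx { db: self }
    }
}

impl Tx<'_> {
    // Before: impl<'conn> Tx<'conn> { ... }
    fn commit(self) -> usize {
        self.db.rows.len()
    }
}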
@@ -866,7 +866,7 @@ impl StackerDBSync { return Ok(false); } - if self.connected_replicas.len() == 0 { + if self.connected_replicas.is_empty() { // no one to talk to debug!( "{:?}: {}: connect_try_finish: no valid replicas", @@ -996,7 +996,7 @@ impl StackerDBSync { /// Return Ok(true) if we processed all requested chunks /// Return Ok(false) if there are still some requests to make pub fn getchunks_begin(&mut self, network: &mut PeerNetwork) -> Result { - if self.chunk_fetch_priorities.len() == 0 { + if self.chunk_fetch_priorities.is_empty() { // done debug!( "{:?}: {}: getchunks_begin: no chunks prioritized", @@ -1083,7 +1083,7 @@ impl StackerDBSync { self.next_chunk_fetch_priority = cur_priority; - Ok(self.chunk_fetch_priorities.len() == 0) + Ok(self.chunk_fetch_priorities.is_empty()) } /// Collect chunk replies from neighbors @@ -1157,13 +1157,13 @@ impl StackerDBSync { /// Returns true if there are no more chunks to push. /// Returns false if there are pub fn pushchunks_begin(&mut self, network: &mut PeerNetwork) -> Result { - if self.chunk_push_priorities.len() == 0 && self.push_round != self.rounds { + if self.chunk_push_priorities.is_empty() && self.push_round != self.rounds { // only do this once per round let priorities = self.make_chunk_push_schedule(&network)?; self.chunk_push_priorities = priorities; self.push_round = self.rounds; } - if self.chunk_push_priorities.len() == 0 { + if self.chunk_push_priorities.is_empty() { // done debug!( "{:?}:{}: pushchunks_begin: no chunks prioritized", @@ -1334,7 +1334,7 @@ impl StackerDBSync { network: &PeerNetwork, ) -> Result<(), net_error> { // figure out the new expected versions - let mut expected_versions = vec![0u32; self.num_slots as usize]; + let mut expected_versions = vec![0u32; self.num_slots]; for (_, chunk_inv) in self.chunk_invs.iter() { for (slot_id, slot_version) in chunk_inv.slot_versions.iter().enumerate() { expected_versions[slot_id] = (*slot_version).max(expected_versions[slot_id]); diff --git a/stackslib/src/net/stackerdb/tests/sync.rs b/stackslib/src/net/stackerdb/tests/sync.rs index 5f6e8a7bed..511201f245 100644 --- a/stackslib/src/net/stackerdb/tests/sync.rs +++ b/stackslib/src/net/stackerdb/tests/sync.rs @@ -135,7 +135,7 @@ fn setup_stackerdb(peer: &mut TestPeer, idx: usize, fill: bool, num_slots: usize thread_rng().fill(&mut inner_data[..]); let mut chunk_data = StackerDBChunkData::new(i as u32, 1, inner_data); - chunk_data.sign(&pks[i as usize]).unwrap(); + chunk_data.sign(&pks[i]).unwrap(); let chunk_md = chunk_data.get_slot_metadata(); tx.try_replace_chunk(contract_id, &chunk_md, &chunk_data.data) @@ -167,13 +167,13 @@ fn load_stackerdb(peer: &TestPeer, idx: usize) -> Vec<(SlotMetadata, Vec)> { let chunk_metadata = peer .network .stackerdbs - .get_slot_metadata(&peer.config.stacker_dbs[idx], i as u32) + .get_slot_metadata(&peer.config.stacker_dbs[idx], i) .unwrap() .unwrap(); let chunk = peer .network .stackerdbs - .get_latest_chunk(&peer.config.stacker_dbs[idx], i as u32) + .get_latest_chunk(&peer.config.stacker_dbs[idx], i) .unwrap() .unwrap_or(vec![]); ret.push((chunk_metadata, chunk)); @@ -246,14 +246,14 @@ fn test_stackerdb_replica_2_neighbors_1_chunk() { assert_eq!(peer_1_db_chunks.len(), 1); assert_eq!(peer_1_db_chunks[0].0.slot_id, 0); assert_eq!(peer_1_db_chunks[0].0.slot_version, 1); - assert!(peer_1_db_chunks[0].1.len() > 0); + assert!(!peer_1_db_chunks[0].1.is_empty()); // verify that peer 2 did NOT get the data let peer_2_db_chunks = load_stackerdb(&peer_2, idx_2); assert_eq!(peer_2_db_chunks.len(), 
1); assert_eq!(peer_2_db_chunks[0].0.slot_id, 0); assert_eq!(peer_2_db_chunks[0].0.slot_version, 0); - assert!(peer_2_db_chunks[0].1.len() == 0); + assert!(peer_2_db_chunks[0].1.is_empty()); let peer_1_db_configs = peer_1.config.get_stacker_db_configs(); let peer_2_db_configs = peer_2.config.get_stacker_db_configs(); @@ -362,14 +362,14 @@ fn test_stackerdb_replica_2_neighbors_1_chunk_stale_view() { assert_eq!(peer_1_db_chunks.len(), 1); assert_eq!(peer_1_db_chunks[0].0.slot_id, 0); assert_eq!(peer_1_db_chunks[0].0.slot_version, 1); - assert!(peer_1_db_chunks[0].1.len() > 0); + assert!(!peer_1_db_chunks[0].1.is_empty()); // verify that peer 2 did NOT get the data let peer_2_db_chunks = load_stackerdb(&peer_2, idx_2); assert_eq!(peer_2_db_chunks.len(), 1); assert_eq!(peer_2_db_chunks[0].0.slot_id, 0); assert_eq!(peer_2_db_chunks[0].0.slot_version, 0); - assert!(peer_2_db_chunks[0].1.len() == 0); + assert!(peer_2_db_chunks[0].1.is_empty()); let peer_1_db_configs = peer_1.config.get_stacker_db_configs(); let peer_2_db_configs = peer_2.config.get_stacker_db_configs(); @@ -404,7 +404,7 @@ fn test_stackerdb_replica_2_neighbors_1_chunk_stale_view() { check_sync_results(&res); for sync_res in res.stacker_db_sync_results.iter() { assert_eq!(sync_res.chunks_to_store.len(), 0); - if sync_res.stale.len() > 0 { + if !sync_res.stale.is_empty() { peer_1_stale = true; } } @@ -433,7 +433,7 @@ fn test_stackerdb_replica_2_neighbors_1_chunk_stale_view() { check_sync_results(&res); for sync_res in res.stacker_db_sync_results.iter() { assert_eq!(sync_res.chunks_to_store.len(), 0); - if sync_res.stale.len() > 0 { + if !sync_res.stale.is_empty() { peer_2_stale = true; } } @@ -593,7 +593,7 @@ fn inner_test_stackerdb_replica_2_neighbors_10_chunks(push_only: bool, base_port for i in 0..10 { assert_eq!(peer_1_db_chunks[i].0.slot_id, i as u32); assert_eq!(peer_1_db_chunks[i].0.slot_version, 1); - assert!(peer_1_db_chunks[i].1.len() > 0); + assert!(!peer_1_db_chunks[i].1.is_empty()); } // verify that peer 2 did NOT get the data @@ -602,7 +602,7 @@ fn inner_test_stackerdb_replica_2_neighbors_10_chunks(push_only: bool, base_port for i in 0..10 { assert_eq!(peer_2_db_chunks[i].0.slot_id, i as u32); assert_eq!(peer_2_db_chunks[i].0.slot_version, 0); - assert!(peer_2_db_chunks[i].1.len() == 0); + assert!(peer_2_db_chunks[i].1.is_empty()); } let peer_1_db_configs = peer_1.config.get_stacker_db_configs(); @@ -725,7 +725,7 @@ fn test_stackerdb_push_relayer() { for i in 0..10 { assert_eq!(peer_1_db_chunks[i].0.slot_id, i as u32); assert_eq!(peer_1_db_chunks[i].0.slot_version, 1); - assert!(peer_1_db_chunks[i].1.len() > 0); + assert!(!peer_1_db_chunks[i].1.is_empty()); } // verify that peer 2 and 3 did NOT get the data @@ -734,7 +734,7 @@ fn test_stackerdb_push_relayer() { for i in 0..10 { assert_eq!(peer_2_db_chunks[i].0.slot_id, i as u32); assert_eq!(peer_2_db_chunks[i].0.slot_version, 0); - assert!(peer_2_db_chunks[i].1.len() == 0); + assert!(peer_2_db_chunks[i].1.is_empty()); } let peer_3_db_chunks = load_stackerdb(&peer_3, idx_2); @@ -742,7 +742,7 @@ fn test_stackerdb_push_relayer() { for i in 0..10 { assert_eq!(peer_3_db_chunks[i].0.slot_id, i as u32); assert_eq!(peer_3_db_chunks[i].0.slot_version, 0); - assert!(peer_3_db_chunks[i].1.len() == 0); + assert!(peer_3_db_chunks[i].1.is_empty()); } let peer_1_db_configs = peer_1.config.get_stacker_db_configs(); @@ -921,7 +921,7 @@ fn test_stackerdb_push_relayer_late_chunks() { for i in 0..10 { assert_eq!(peer_1_db_chunks[i].0.slot_id, i as u32); 
assert_eq!(peer_1_db_chunks[i].0.slot_version, 1); - assert!(peer_1_db_chunks[i].1.len() > 0); + assert!(!peer_1_db_chunks[i].1.is_empty()); } // verify that peer 2 and 3 did NOT get the data @@ -930,7 +930,7 @@ fn test_stackerdb_push_relayer_late_chunks() { for i in 0..10 { assert_eq!(peer_2_db_chunks[i].0.slot_id, i as u32); assert_eq!(peer_2_db_chunks[i].0.slot_version, 0); - assert!(peer_2_db_chunks[i].1.len() == 0); + assert!(peer_2_db_chunks[i].1.is_empty()); } let peer_3_db_chunks = load_stackerdb(&peer_3, idx_2); @@ -938,7 +938,7 @@ fn test_stackerdb_push_relayer_late_chunks() { for i in 0..10 { assert_eq!(peer_3_db_chunks[i].0.slot_id, i as u32); assert_eq!(peer_3_db_chunks[i].0.slot_version, 0); - assert!(peer_3_db_chunks[i].1.len() == 0); + assert!(peer_3_db_chunks[i].1.is_empty()); } let peer_1_db_configs = peer_1.config.get_stacker_db_configs(); @@ -1124,7 +1124,7 @@ fn inner_test_stackerdb_10_replicas_10_neighbors_line_10_chunks(push_only: bool, for j in 0..10 { assert_eq!(peer_db_chunks[j].0.slot_id, j as u32); assert_eq!(peer_db_chunks[j].0.slot_version, 1); - assert!(peer_db_chunks[j].1.len() > 0); + assert!(!peer_db_chunks[j].1.is_empty()); } } else { // everyone else gets nothing @@ -1136,7 +1136,7 @@ fn inner_test_stackerdb_10_replicas_10_neighbors_line_10_chunks(push_only: bool, for j in 0..10 { assert_eq!(peer_db_chunks[j].0.slot_id, j as u32); assert_eq!(peer_db_chunks[j].0.slot_version, 0); - assert!(peer_db_chunks[j].1.len() == 0); + assert!(peer_db_chunks[j].1.is_empty()); } } diff --git a/stackslib/src/net/tests/convergence.rs b/stackslib/src/net/tests/convergence.rs index 8494f4ea46..627db94758 100644 --- a/stackslib/src/net/tests/convergence.rs +++ b/stackslib/src/net/tests/convergence.rs @@ -840,7 +840,7 @@ fn test_walk_inbound_line_15() { }) } -fn dump_peers(peers: &Vec) -> () { +fn dump_peers(peers: &Vec) { test_debug!("\n=== PEER DUMP ==="); for i in 0..peers.len() { let mut neighbor_index = vec![]; @@ -870,7 +870,7 @@ fn dump_peers(peers: &Vec) -> () { test_debug!("\n"); } -fn dump_peer_histograms(peers: &Vec) -> () { +fn dump_peer_histograms(peers: &Vec) { let mut outbound_hist: HashMap = HashMap::new(); let mut inbound_hist: HashMap = HashMap::new(); let mut all_hist: HashMap = HashMap::new(); @@ -933,7 +933,7 @@ fn dump_peer_histograms(peers: &Vec) -> () { test_debug!("\n"); } -fn run_topology_test(peers: &mut Vec) -> () { +fn run_topology_test(peers: &mut Vec) { run_topology_test_ex(peers, |_| false, false) } @@ -941,8 +941,7 @@ fn run_topology_test_ex( peers: &mut Vec, mut finished_check: F, use_finished_check: bool, -) -> () -where +) where F: FnMut(&Vec) -> bool, { let peer_count = peers.len(); diff --git a/stackslib/src/net/tests/download/epoch2x.rs b/stackslib/src/net/tests/download/epoch2x.rs index 2d53c89f9a..9c995f1f32 100644 --- a/stackslib/src/net/tests/download/epoch2x.rs +++ b/stackslib/src/net/tests/download/epoch2x.rs @@ -250,7 +250,7 @@ pub fn run_get_blocks_and_microblocks( mut done_func: D, ) -> Vec where - T: FnOnce(&mut Vec) -> (), + T: FnOnce(&mut Vec), F: FnOnce( usize, &mut Vec, @@ -259,7 +259,7 @@ where Option, Option>, )>, - P: FnMut(&mut Vec) -> (), + P: FnMut(&mut Vec), C: FnMut(&mut TestPeer) -> bool, D: FnMut(&mut Vec) -> bool, { diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index a479dad07a..7469d3c33b 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -102,7 +102,7 @@ impl NakamotoDownloadStateMachine { } 
} -impl<'a> NakamotoStagingBlocksConnRef<'a> { +impl NakamotoStagingBlocksConnRef<'_> { pub fn load_nakamoto_tenure( &self, tip: &StacksBlockId, @@ -474,8 +474,8 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { .unwrap() .unwrap(); - assert!(unconfirmed_tenure.len() > 0); - assert!(last_confirmed_tenure.len() > 0); + assert!(!unconfirmed_tenure.is_empty()); + assert!(!last_confirmed_tenure.is_empty()); assert_eq!( unconfirmed_tenure.first().as_ref().unwrap().block_id(), @@ -1182,7 +1182,7 @@ fn test_tenure_start_end_from_inventory() { for (i, wt) in wanted_tenures.iter().enumerate() { if i >= (rc_len - 1).into() { // nothing here - assert!(available.get(&wt.tenure_id_consensus_hash).is_none()); + assert!(!available.contains_key(&wt.tenure_id_consensus_hash)); continue; } diff --git a/stackslib/src/net/tests/httpcore.rs b/stackslib/src/net/tests/httpcore.rs index 4bcf52605c..3aec8d5e5d 100644 --- a/stackslib/src/net/tests/httpcore.rs +++ b/stackslib/src/net/tests/httpcore.rs @@ -316,7 +316,7 @@ fn test_http_request_type_codec() { str::from_utf8(&expected_bytes).unwrap() ); - if expected_http_body.len() > 0 { + if !expected_http_body.is_empty() { expected_http_preamble.set_content_type(HttpContentType::Bytes); expected_http_preamble.set_content_length(expected_http_body.len() as u32) } @@ -767,11 +767,11 @@ fn test_http_response_type_codec() { match preamble { StacksHttpPreamble::Response(ref mut req) => { assert_eq!(req.headers.len(), 5); - assert!(req.headers.get("access-control-allow-headers").is_some()); - assert!(req.headers.get("access-control-allow-methods").is_some()); - assert!(req.headers.get("access-control-allow-origin").is_some()); - assert!(req.headers.get("server").is_some()); - assert!(req.headers.get("date").is_some()); + assert!(req.headers.contains_key("access-control-allow-headers")); + assert!(req.headers.contains_key("access-control-allow-methods")); + assert!(req.headers.contains_key("access-control-allow-origin")); + assert!(req.headers.contains_key("server")); + assert!(req.headers.contains_key("date")); req.headers.clear(); } StacksHttpPreamble::Request(_) => { diff --git a/stackslib/src/net/tests/inv/epoch2x.rs b/stackslib/src/net/tests/inv/epoch2x.rs index e31b6dc593..aed43bdcba 100644 --- a/stackslib/src/net/tests/inv/epoch2x.rs +++ b/stackslib/src/net/tests/inv/epoch2x.rs @@ -1248,7 +1248,7 @@ fn test_inv_sync_start_reward_cycle() { let mut peer_1 = TestPeer::new(peer_1_config); - let num_blocks = (GETPOXINV_MAX_BITLEN * 2) as u64; + let num_blocks = GETPOXINV_MAX_BITLEN * 2; for i in 0..num_blocks { let (burn_ops, stacks_block, microblocks) = peer_1.make_default_tenure(); peer_1.next_burnchain_block(burn_ops.clone()); @@ -1298,7 +1298,7 @@ fn test_inv_sync_check_peer_epoch2x_synced() { let mut peer_1 = TestPeer::new(peer_1_config); - let num_blocks = (GETPOXINV_MAX_BITLEN * 2) as u64; + let num_blocks = GETPOXINV_MAX_BITLEN * 2; for i in 0..num_blocks { let (burn_ops, stacks_block, microblocks) = peer_1.make_default_tenure(); peer_1.next_burnchain_block(burn_ops.clone()); @@ -1340,7 +1340,7 @@ fn test_sync_inv_2_peers_plain() { peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); peer_2.add_neighbor(&mut peer_1.to_neighbor(), None, true); - let num_blocks = (GETPOXINV_MAX_BITLEN * 2) as u64; + let num_blocks = GETPOXINV_MAX_BITLEN * 2; let first_stacks_block_height = { let sn = SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) @@ -1517,7 +1517,7 @@ fn test_sync_inv_2_peers_stale() { peer_1.add_neighbor(&mut 
peer_2.to_neighbor(), None, true); peer_2.add_neighbor(&mut peer_1.to_neighbor(), None, true); - let num_blocks = (GETPOXINV_MAX_BITLEN * 2) as u64; + let num_blocks = GETPOXINV_MAX_BITLEN * 2; let first_stacks_block_height = { let sn = SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) @@ -1625,7 +1625,7 @@ fn test_sync_inv_2_peers_unstable() { peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); peer_2.add_neighbor(&mut peer_1.to_neighbor(), None, true); - let num_blocks = (GETPOXINV_MAX_BITLEN * 2) as u64; + let num_blocks = GETPOXINV_MAX_BITLEN * 2; let first_stacks_block_height = { let sn = @@ -1838,7 +1838,7 @@ fn test_sync_inv_2_peers_different_pox_vectors() { peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); peer_2.add_neighbor(&mut peer_1.to_neighbor(), None, true); - let num_blocks = (GETPOXINV_MAX_BITLEN * 3) as u64; + let num_blocks = GETPOXINV_MAX_BITLEN * 3; let first_stacks_block_height = { let sn = diff --git a/stackslib/src/net/tests/inv/nakamoto.rs b/stackslib/src/net/tests/inv/nakamoto.rs index 5f889cde3e..3a29d453ae 100644 --- a/stackslib/src/net/tests/inv/nakamoto.rs +++ b/stackslib/src/net/tests/inv/nakamoto.rs @@ -596,7 +596,7 @@ fn check_inv_state( tenure_inv.get(bit.try_into().unwrap()).unwrap_or(false) }; - let burn_block_height = (*tenure_rc as u64) * u64::from(rc_len) + (bit as u64); + let burn_block_height = *tenure_rc * u64::from(rc_len) + (bit as u64); if burn_block_height < nakamoto_start_burn_height { // inv doesn't cover epoch 2 assert!( @@ -912,7 +912,7 @@ fn test_nakamoto_inv_sync_state_machine() { .map(|e_id| *e_id) .collect(); - if event_ids.len() > 0 && other_event_ids.len() > 0 { + if !event_ids.is_empty() && !other_event_ids.is_empty() { break; } } @@ -938,7 +938,7 @@ fn test_nakamoto_inv_sync_state_machine() { loop { let _ = other_peer.step_with_ibd(false); let ev_ids: Vec<_> = other_peer.network.iter_peer_event_ids().collect(); - if ev_ids.len() == 0 { + if ev_ids.is_empty() { // disconnected panic!("Disconnected"); } @@ -1043,7 +1043,7 @@ fn test_nakamoto_inv_sync_across_epoch_change() { .map(|e_id| *e_id) .collect(); - if event_ids.len() > 0 && other_event_ids.len() > 0 { + if !event_ids.is_empty() && !other_event_ids.is_empty() { break; } } diff --git a/stackslib/src/net/tests/mempool/mod.rs b/stackslib/src/net/tests/mempool/mod.rs index d3f30aca19..558dddb63e 100644 --- a/stackslib/src/net/tests/mempool/mod.rs +++ b/stackslib/src/net/tests/mempool/mod.rs @@ -307,7 +307,7 @@ fn test_mempool_sync_2_peers() { // peer 2 has none of the old ones for tx in peer_2_mempool_txs { assert_eq!(&tx.tx, txs.get(&tx.tx.txid()).unwrap()); - assert!(old_txs.get(&tx.tx.txid()).is_none()); + assert!(!old_txs.contains_key(&tx.tx.txid())); } } @@ -1144,7 +1144,7 @@ fn test_mempool_sync_2_peers_nakamoto_paginated() { .map(|e_id| *e_id) .collect(); - if event_ids.len() > 0 && other_event_ids.len() > 0 { + if !event_ids.is_empty() && !other_event_ids.is_empty() { break; } } diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 6729dbc4a8..3a07ed006c 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -352,10 +352,10 @@ impl NakamotoBootPlan { /// Make a peer and transition it into the Nakamoto epoch. /// The node needs to be stacking; otherwise, Nakamoto won't activate. 
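// Illustrative sketch, not part of the patch: the test hunks above swap
// `map.get(k).is_none()` for `!map.contains_key(k)`, which asks the question
// directly without materializing an unused reference. Toy index:
use std::collections::HashMap;

fn seen_before(index: &HashMap<String, u64>, txid: &str) -> bool {
    // Before: index.get(txid).is_some()
    index.contains_key(txid)
}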
- fn boot_nakamoto_peers<'a>( + fn boot_nakamoto_peers( mut self, - observer: Option<&'a TestEventObserver>, - ) -> (TestPeer<'a>, Vec) { + observer: Option<&TestEventObserver>, + ) -> (TestPeer<'_>, Vec>) { let mut peer_config = TestPeerConfig::new(&self.test_name, 0, 0); peer_config.network_id = self.network_id; peer_config.private_key = self.private_key.clone(); @@ -662,11 +662,11 @@ impl NakamotoBootPlan { debug!("========================\n\n"); } - pub fn boot_into_nakamoto_peers<'a>( + pub fn boot_into_nakamoto_peers( self, boot_plan: Vec, - observer: Option<&'a TestEventObserver>, - ) -> (TestPeer<'a>, Vec) { + observer: Option<&TestEventObserver>, + ) -> (TestPeer<'_>, Vec>) { let test_signers = self.test_signers.clone(); let pox_constants = self.pox_constants.clone(); let test_stackers = self.test_stackers.clone(); @@ -690,7 +690,7 @@ impl NakamotoBootPlan { match plan_tenure { NakamotoBootTenure::NoSortition(boot_steps) => { - assert!(boot_steps.len() > 0); + assert!(!boot_steps.is_empty()); // just extend the last sortition let (burn_ops, tenure_change_extend, miner_key) = peer.begin_nakamoto_tenure(TenureChangeCause::Extended); @@ -732,7 +732,7 @@ impl NakamotoBootPlan { match next_step { NakamotoBootStep::TenureExtend(transactions) => { - assert!(transactions.len() > 0); + assert!(!transactions.is_empty()); if let Some(last_block) = last_block_opt { let tenure_extension = tenure_change.extend( next_consensus_hash.clone(), @@ -749,7 +749,7 @@ impl NakamotoBootPlan { debug!("\n\nExtend current tenure in empty tenure {} (blocks so far: {}, blocks_since_last_tenure = {}, steps so far: {})\n\n", &next_consensus_hash, blocks_so_far.len(), blocks_since_last_tenure, i); } NakamotoBootStep::Block(transactions) => { - assert!(transactions.len() > 0); + assert!(!transactions.is_empty()); debug!("\n\nMake block {} with {} transactions in empty tenure {}\n\n", blocks_so_far.len(), transactions.len(), &next_consensus_hash); txs.extend_from_slice(&transactions[..]); num_expected_transactions += transactions.len(); @@ -789,7 +789,7 @@ impl NakamotoBootPlan { all_blocks.push(blocks); } NakamotoBootTenure::Sortition(boot_steps) => { - assert!(boot_steps.len() > 0); + assert!(!boot_steps.is_empty()); let (burn_ops, mut tenure_change, miner_key) = peer.begin_nakamoto_tenure(TenureChangeCause::BlockFound); let (burn_ht, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); @@ -833,7 +833,7 @@ impl NakamotoBootPlan { match next_step { NakamotoBootStep::TenureExtend(transactions) => { - assert!(transactions.len() > 0); + assert!(!transactions.is_empty()); if let Some(last_block) = last_block_opt { let tenure_extension = tenure_change.extend( consensus_hash.clone(), @@ -850,7 +850,7 @@ impl NakamotoBootPlan { debug!("\n\nExtend current tenure {} (blocks so far: {}, steps so far: {})\n\n", &consensus_hash, blocks_so_far.len(), i); } NakamotoBootStep::Block(transactions) => { - assert!(transactions.len() > 0); + assert!(!transactions.is_empty()); debug!("\n\nMake block {} with {} transactions in tenure {}\n\n", blocks_so_far.len(), transactions.len(), &consensus_hash); txs.extend_from_slice(&transactions[..]); num_expected_transactions += transactions.len(); @@ -1017,11 +1017,11 @@ impl NakamotoBootPlan { (peer, other_peers) } - pub fn boot_into_nakamoto_peer<'a>( + pub fn boot_into_nakamoto_peer( self, boot_plan: Vec, - observer: Option<&'a TestEventObserver>, - ) -> TestPeer<'a> { + observer: Option<&TestEventObserver>, + ) -> TestPeer<'_> { self.boot_into_nakamoto_peers(boot_plan, 
observer).0 } } diff --git a/stackslib/src/net/tests/neighbors.rs b/stackslib/src/net/tests/neighbors.rs index 6a1ef7a4e9..d1be0fdf70 100644 --- a/stackslib/src/net/tests/neighbors.rs +++ b/stackslib/src/net/tests/neighbors.rs @@ -581,7 +581,7 @@ fn test_step_walk_1_neighbor_bootstrapping() { assert_eq!(w.result.replaced_neighbors.len(), 0); // peer 2 never gets added to peer 1's frontier - assert!(w.frontier.get(&neighbor_2.addr).is_none()); + assert!(!w.frontier.contains_key(&neighbor_2.addr)); } None => {} }; @@ -597,7 +597,7 @@ fn test_step_walk_1_neighbor_bootstrapping() { i += 1; } - debug!("Completed walk round {} step(s)", i); + debug!("Completed walk round {i} step(s)"); // peer 1 contacted peer 2 let stats_1 = peer_1 @@ -673,7 +673,7 @@ fn test_step_walk_1_neighbor_behind() { assert_eq!(w.result.replaced_neighbors.len(), 0); // peer 1 never gets added to peer 2's frontier - assert!(w.frontier.get(&neighbor_1.addr).is_none()); + assert!(!w.frontier.contains_key(&neighbor_1.addr)); } None => {} }; diff --git a/stackslib/src/net/tests/relay/epoch2x.rs b/stackslib/src/net/tests/relay/epoch2x.rs index f4fc8d9eb8..1106721e38 100644 --- a/stackslib/src/net/tests/relay/epoch2x.rs +++ b/stackslib/src/net/tests/relay/epoch2x.rs @@ -819,7 +819,7 @@ fn http_rpc(peer_http: u16, request: StacksHttpRequest) -> Result { - if resp.len() == 0 { + if resp.is_empty() { test_debug!("Client did not receive any data"); return Err(net_error::PermanentlyDrained); } @@ -934,7 +934,7 @@ fn push_microblocks( ); let msg = StacksMessageType::Microblocks(MicroblocksData { index_anchor_block: StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_hash), - microblocks: microblocks, + microblocks, }); push_message(peer, dest, relay_hints, msg) } @@ -955,7 +955,7 @@ fn broadcast_microblocks( ); let msg = StacksMessageType::Microblocks(MicroblocksData { index_anchor_block: StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_hash), - microblocks: microblocks, + microblocks, }); broadcast_message(peer, relay_hints, msg) } @@ -1200,11 +1200,7 @@ fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks( let original_block_data = original_blocks_and_microblocks.borrow(); let mut next_idx = idx.borrow_mut(); let data_to_push = { - if block_data.len() > 0 { - let (consensus_hash, block, microblocks) = - block_data[*next_idx].clone(); - Some((consensus_hash, block, microblocks)) - } else { + if block_data.is_empty() { // start over (can happen if a message gets // dropped due to a timeout) test_debug!("Reset block transmission (possible timeout)"); @@ -1213,6 +1209,10 @@ fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks( let (consensus_hash, block, microblocks) = block_data[*next_idx].clone(); Some((consensus_hash, block, microblocks)) + } else { + let (consensus_hash, block, microblocks) = + block_data[*next_idx].clone(); + Some((consensus_hash, block, microblocks)) } }; @@ -1259,7 +1259,7 @@ fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks( if pushed_block && pushed_microblock { block_data.remove(*next_idx); - if block_data.len() > 0 { + if !block_data.is_empty() { *next_idx = thread_rng().gen::() % block_data.len(); } *sent_blocks = false; @@ -2123,8 +2123,8 @@ fn test_get_blocks_and_microblocks_peers_broadcast() { let ((tip_consensus_hash, tip_block, _), idx) = { let block_data = blocks_and_microblocks.borrow(); - let idx = blocks_idx.borrow(); - (block_data[(*idx as usize).saturating_sub(1)].clone(), *idx) + let idx: usize = 
*blocks_idx.borrow(); + (block_data[idx.saturating_sub(1)].clone(), idx) }; if idx > 0 { @@ -2596,7 +2596,7 @@ fn test_get_blocks_and_microblocks_2_peers_buffered_messages() { peers[1].network.pending_messages.iter() { debug!("Pending at {} is ({}, {})", *i, event_id, pending.len()); - if pending.len() >= 1 { + if !pending.is_empty() { update_sortition = true; } } @@ -2968,7 +2968,7 @@ fn process_new_blocks_rejects_problematic_asts() { let mut bad_block = bad_block.0; bad_block.txs.push(bad_tx.clone()); - let txid_vecs = bad_block + let txid_vecs: Vec<_> = bad_block .txs .iter() .map(|tx| tx.txid().as_bytes().to_vec()) @@ -3024,7 +3024,7 @@ fn process_new_blocks_rejects_problematic_asts() { bad_mblock.txs.push(bad_tx.clone()); // force it in anyway - let txid_vecs = bad_mblock + let txid_vecs: Vec<_> = bad_mblock .txs .iter() .map(|tx| tx.txid().as_bytes().to_vec()) diff --git a/stackslib/src/net/tests/relay/nakamoto.rs b/stackslib/src/net/tests/relay/nakamoto.rs index 606f1f3fb2..f52c59bfb5 100644 --- a/stackslib/src/net/tests/relay/nakamoto.rs +++ b/stackslib/src/net/tests/relay/nakamoto.rs @@ -138,7 +138,7 @@ impl ExitedPeer { self.mempool = Some(mempool); self.indexer = Some(indexer); - receipts_res.and_then(|receipts| Ok((net_result, receipts))) + receipts_res.map(|receipts| (net_result, receipts)) } } diff --git a/stackslib/src/net/unsolicited.rs b/stackslib/src/net/unsolicited.rs index 231e0a91af..e7f1c256a4 100644 --- a/stackslib/src/net/unsolicited.rs +++ b/stackslib/src/net/unsolicited.rs @@ -1149,7 +1149,7 @@ impl PeerNetwork { unsolicited: HashMap>, ) -> PendingMessages { unsolicited.into_iter().filter_map(|(event_id, messages)| { - if messages.len() == 0 { + if messages.is_empty() { // no messages for this event return None; } @@ -1256,7 +1256,7 @@ impl PeerNetwork { } true }); - messages.len() > 0 + !messages.is_empty() }); unsolicited } @@ -1283,7 +1283,7 @@ impl PeerNetwork { buffer: bool, ) -> HashMap<(usize, NeighborKey), Vec> { unsolicited.retain(|(event_id, neighbor_key), messages| { - if messages.len() == 0 { + if messages.is_empty() { // no messages for this node return false; } @@ -1319,7 +1319,7 @@ impl PeerNetwork { } true }); - messages.len() > 0 + !messages.is_empty() }); unsolicited } diff --git a/stackslib/src/util_lib/bloom.rs b/stackslib/src/util_lib/bloom.rs index d1632f0b14..bd9706fd59 100644 --- a/stackslib/src/util_lib/bloom.rs +++ b/stackslib/src/util_lib/bloom.rs @@ -373,7 +373,7 @@ impl BloomCounter { Ok(BloomCounter { hasher, table_name: table_name.to_string(), - num_bins: num_bins, + num_bins, num_hashes, counts_rowid: counts_rowid as u32, }) diff --git a/stackslib/src/util_lib/db.rs b/stackslib/src/util_lib/db.rs index 53f597daa2..0deb4c7154 100644 --- a/stackslib/src/util_lib/db.rs +++ b/stackslib/src/util_lib/db.rs @@ -162,15 +162,15 @@ impl From for Error { } pub trait FromRow { - fn from_row<'a>(row: &'a Row) -> Result; + fn from_row(row: &Row) -> Result; } pub trait FromColumn { - fn from_column<'a>(row: &'a Row, column_name: &str) -> Result; + fn from_column(row: &Row, column_name: &str) -> Result; } impl FromRow for u64 { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { let x: i64 = row.get(0)?; if x < 0 { return Err(Error::ParseError); @@ -180,28 +180,28 @@ impl FromRow for u64 { } impl FromRow for u32 { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { let x: u32 = row.get(0)?; Ok(x) } } impl FromRow for String { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: 
&Row) -> Result { let x: String = row.get(0)?; Ok(x) } } impl FromRow> for Vec { - fn from_row<'a>(row: &'a Row) -> Result, Error> { + fn from_row(row: &Row) -> Result, Error> { let x: Vec = row.get(0)?; Ok(x) } } impl FromColumn for u64 { - fn from_column<'a>(row: &'a Row, column_name: &str) -> Result { + fn from_column(row: &Row, column_name: &str) -> Result { let x: i64 = row.get(column_name)?; if x < 0 { return Err(Error::ParseError); @@ -211,7 +211,7 @@ impl FromColumn for u64 { } impl FromRow for StacksAddress { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { let addr_str: String = row.get(0)?; let addr = StacksAddress::from_string(&addr_str).ok_or(Error::ParseError)?; Ok(addr) @@ -219,7 +219,7 @@ impl FromRow for StacksAddress { } impl FromColumn> for u64 { - fn from_column<'a>(row: &'a Row, column_name: &str) -> Result, Error> { + fn from_column(row: &Row, column_name: &str) -> Result, Error> { let x: Option = row.get(column_name)?; match x { Some(x) => { @@ -234,31 +234,28 @@ impl FromColumn> for u64 { } impl FromRow for i64 { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { let x: i64 = row.get(0)?; Ok(x) } } impl FromColumn for i64 { - fn from_column<'a>(row: &'a Row, column_name: &str) -> Result { + fn from_column(row: &Row, column_name: &str) -> Result { let x: i64 = row.get(column_name)?; Ok(x) } } impl FromColumn for QualifiedContractIdentifier { - fn from_column<'a>( - row: &'a Row, - column_name: &str, - ) -> Result { + fn from_column(row: &Row, column_name: &str) -> Result { let value: String = row.get(column_name)?; QualifiedContractIdentifier::parse(&value).map_err(|_| Error::ParseError) } } impl FromRow for bool { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { let x: bool = row.get(0)?; Ok(x) } @@ -266,7 +263,7 @@ impl FromRow for bool { /// Make public keys loadable from a sqlite database impl FromColumn for Secp256k1PublicKey { - fn from_column<'a>(row: &'a Row, column_name: &str) -> Result { + fn from_column(row: &Row, column_name: &str) -> Result { let pubkey_hex: String = row.get(column_name)?; let pubkey = Secp256k1PublicKey::from_hex(&pubkey_hex).map_err(|_e| Error::ParseError)?; Ok(pubkey) @@ -275,7 +272,7 @@ impl FromColumn for Secp256k1PublicKey { /// Make private keys loadable from a sqlite database impl FromColumn for Secp256k1PrivateKey { - fn from_column<'a>(row: &'a Row, column_name: &str) -> Result { + fn from_column(row: &Row, column_name: &str) -> Result { let privkey_hex: String = row.get(column_name)?; let privkey = Secp256k1PrivateKey::from_hex(&privkey_hex).map_err(|_e| Error::ParseError)?; @@ -510,14 +507,14 @@ where let mut rows = stmt.query(sql_args)?; let mut row_data = vec![]; while let Some(row) = rows.next().map_err(|e| Error::SqliteError(e))? 
{ - if row_data.len() > 0 { + if !row_data.is_empty() { return Err(Error::Overflow); } let i: i64 = row.get(0)?; row_data.push(i); } - if row_data.len() == 0 { + if row_data.is_empty() { return Err(Error::NotFoundError); } @@ -553,7 +550,7 @@ fn inner_sql_pragma( pub fn sql_vacuum(conn: &Connection) -> Result<(), Error> { conn.execute("VACUUM", NO_PARAMS) .map_err(Error::SqliteError) - .and_then(|_| Ok(())) + .map(|_| ()) } /// Returns true if the database table `table_name` exists in the active @@ -630,7 +627,7 @@ impl<'a, C, T: MarfTrieId> IndexDBConn<'a, C, T> { } } -impl<'a, C, T: MarfTrieId> Deref for IndexDBConn<'a, C, T> { +impl Deref for IndexDBConn<'_, C, T> { type Target = DBConn; fn deref(&self) -> &DBConn { self.conn() @@ -664,7 +661,7 @@ pub fn tx_busy_handler(run_count: i32) -> bool { /// Begin an immediate-mode transaction, and handle busy errors with exponential backoff. /// Handling busy errors when the tx begins is preferable to doing it when the tx commits, since /// then we don't have to worry about any extra rollback logic. -pub fn tx_begin_immediate<'a>(conn: &'a mut Connection) -> Result, Error> { +pub fn tx_begin_immediate(conn: &mut Connection) -> Result, Error> { tx_begin_immediate_sqlite(conn).map_err(Error::from) } @@ -672,7 +669,7 @@ pub fn tx_begin_immediate<'a>(conn: &'a mut Connection) -> Result, Erro /// Handling busy errors when the tx begins is preferable to doing it when the tx commits, since /// then we don't have to worry about any extra rollback logic. /// Sames as `tx_begin_immediate` except that it returns a rusqlite error. -pub fn tx_begin_immediate_sqlite<'a>(conn: &'a mut Connection) -> Result, sqlite_error> { +pub fn tx_begin_immediate_sqlite(conn: &mut Connection) -> Result, sqlite_error> { conn.busy_handler(Some(tx_busy_handler))?; let tx = Transaction::new(conn, TransactionBehavior::Immediate)?; update_lock_table(tx.deref()); @@ -681,6 +678,7 @@ pub fn tx_begin_immediate_sqlite<'a>(conn: &'a mut Connection) -> Result IndexDBTx<'a, C, T> { IndexDBTx { _index: Some(tx), block_linkage: None, - context: context, + context, } } @@ -944,7 +942,7 @@ impl<'a, C: Clone, T: MarfTrieId> IndexDBTx<'a, C, T> { } } -impl<'a, C: Clone, T: MarfTrieId> Drop for IndexDBTx<'a, C, T> { +impl Drop for IndexDBTx<'_, C, T> { fn drop(&mut self) { if let Some((ref parent, ref child)) = self.block_linkage { let index_tx = self diff --git a/stackslib/src/util_lib/mod.rs b/stackslib/src/util_lib/mod.rs index 83a7ab2a25..87031676db 100644 --- a/stackslib/src/util_lib/mod.rs +++ b/stackslib/src/util_lib/mod.rs @@ -16,7 +16,7 @@ pub mod test { pub fn with_timeout(timeout_secs: u64, test_func: F) where - F: FnOnce() -> () + std::marker::Send + 'static + panic::UnwindSafe, + F: FnOnce() + std::marker::Send + 'static + panic::UnwindSafe, { let (sx, rx) = sync_channel(1); diff --git a/stackslib/src/util_lib/signed_structured_data.rs b/stackslib/src/util_lib/signed_structured_data.rs index ac5c0224d8..14882c2fb9 100644 --- a/stackslib/src/util_lib/signed_structured_data.rs +++ b/stackslib/src/util_lib/signed_structured_data.rs @@ -216,7 +216,7 @@ pub mod pox4 { ); result .expect("FATAL: failed to execute contract call") - .expect_buff(32 as usize) + .expect_buff(32) .expect("FATAL: expected buff result") }) } diff --git a/stackslib/src/util_lib/strings.rs b/stackslib/src/util_lib/strings.rs index 0486e6bf81..d1fb48c86b 100644 --- a/stackslib/src/util_lib/strings.rs +++ b/stackslib/src/util_lib/strings.rs @@ -58,7 +58,7 @@ pub struct StacksString(Vec); pub struct VecDisplay<'a, 
T: fmt::Display>(pub &'a [T]); -impl<'a, T: fmt::Display> fmt::Display for VecDisplay<'a, T> { +impl fmt::Display for VecDisplay<'_, T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "[")?; for (ix, val) in self.0.iter().enumerate() { @@ -139,7 +139,7 @@ impl StacksMessageCodec for UrlString { } // must be a valid block URL, or empty string - if self.as_bytes().len() > 0 { + if !self.as_bytes().is_empty() { let _ = self.parse_to_block_url()?; } @@ -172,7 +172,7 @@ impl StacksMessageCodec for UrlString { })?; // must be a valid block URL, or empty string - if url.len() > 0 { + if !url.is_empty() { let _ = url.parse_to_block_url()?; } Ok(url) @@ -207,7 +207,7 @@ impl StacksString { // This is 0x20 through 0x7e, inclusive, as well as '\t' and '\n' // TODO: DRY up with vm::representations for c in s.as_bytes().iter() { - if (*c < 0x20 && *c != ('\t' as u8) && *c != ('\n' as u8)) || (*c > 0x7e) { + if (*c < 0x20 && *c != b'\t' && *c != b'\n') || *c > 0x7e { return false; } } @@ -254,7 +254,7 @@ impl UrlString { ))); } - if url.username().len() > 0 || url.password().is_some() { + if !url.username().is_empty() || url.password().is_some() { return Err(codec_error::DeserializeError( "Invalid URL: must not contain a username/password".to_string(), )); diff --git a/stx-genesis/chainstate-test.txt b/stx-genesis/chainstate-test.txt index 614cf3d9f4..6eedf241d1 100644 --- a/stx-genesis/chainstate-test.txt +++ b/stx-genesis/chainstate-test.txt @@ -69,4 +69,5 @@ SM1ZH700J7CEDSEHM5AJ4C4MKKWNESTS35DD3SZM5,13888889,2267 SM260QHD6ZM2KKPBKZB8PFE5XWP0MHSKTD1B7BHYR,208333333,45467 SM260QHD6ZM2KKPBKZB8PFE5XWP0MHSKTD1B7BHYR,208333333,6587 SM260QHD6ZM2KKPBKZB8PFE5XWP0MHSKTD1B7BHYR,208333333,2267 +SP2CTPPV8BHBVSQR727A3MK00ZD85RNY903KAG9F3,12345678,35 -----END STX VESTING----- \ No newline at end of file diff --git a/stx-genesis/chainstate-test.txt.sha256 b/stx-genesis/chainstate-test.txt.sha256 index 56782ae494..69ac95c254 100644 --- a/stx-genesis/chainstate-test.txt.sha256 +++ b/stx-genesis/chainstate-test.txt.sha256 @@ -1 +1 @@ -014402b47d53b0716402c172fa746adf308b03a826ebea91944a5eb6a304a823 \ No newline at end of file +088c3caea982a8f6f74dda48ec5f06f51f7605def9760a971b1acd763ee6b7cf \ No newline at end of file diff --git a/stx-genesis/src/lib.rs b/stx-genesis/src/lib.rs index 883eb8302b..27eba59e16 100644 --- a/stx-genesis/src/lib.rs +++ b/stx-genesis/src/lib.rs @@ -212,10 +212,10 @@ mod tests { #[test] fn test_names_read() { for name in GenesisData::new(false).read_names() { - assert!(name.owner.len() > 0); + assert!(!name.owner.is_empty()); } for name in GenesisData::new(true).read_names() { - assert!(name.owner.len() > 0); + assert!(!name.owner.is_empty()); } } diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index 0c68d22ee7..3d253c8b89 100644 --- a/testnet/stacks-node/Cargo.toml +++ b/testnet/stacks-node/Cargo.toml @@ -14,7 +14,6 @@ serde_derive = "1" serde_json = { version = "1.0", features = ["arbitrary_precision", "raw_value"] } stacks = { package = "stackslib", path = "../../stackslib" } stx-genesis = { path = "../../stx-genesis"} -toml = "0.5.6" base64 = "0.12.0" backtrace = "0.3.50" libc = "0.2.151" @@ -38,7 +37,6 @@ thiserror = { workspace = true } tikv-jemallocator = {workspace = true} [dev-dependencies] -ring = "0.16.19" warp = "0.3.5" tokio = "1.15" reqwest = { version = "0.11", default-features = false, features = ["blocking", "json", "rustls", "rustls-tls"] } diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs 
b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 727483886e..f3aaa95ab5 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -48,6 +48,12 @@ use stacks::chainstate::burn::Opcodes; use stacks::chainstate::coordinator::comm::CoordinatorChannels; #[cfg(test)] use stacks::chainstate::stacks::address::PoxAddress; +use stacks::config::BurnchainConfig; +#[cfg(test)] +use stacks::config::{ + OP_TX_ANY_ESTIM_SIZE, OP_TX_DELEGATE_STACKS_ESTIM_SIZE, OP_TX_PRE_STACKS_ESTIM_SIZE, + OP_TX_STACK_STX_ESTIM_SIZE, OP_TX_TRANSFER_STACKS_ESTIM_SIZE, OP_TX_VOTE_AGG_ESTIM_SIZE, +}; use stacks::core::{EpochList, StacksEpochId}; use stacks::monitoring::{increment_btc_blocks_received_counter, increment_btc_ops_sent_counter}; use stacks::net::http::{HttpRequestContents, HttpResponsePayload}; @@ -74,12 +80,6 @@ use url::Url; use super::super::operations::BurnchainOpSigner; use super::super::Config; use super::{BurnchainController, BurnchainTip, Error as BurnchainControllerError}; -use crate::config::BurnchainConfig; -#[cfg(test)] -use crate::config::{ - OP_TX_ANY_ESTIM_SIZE, OP_TX_DELEGATE_STACKS_ESTIM_SIZE, OP_TX_PRE_STACKS_ESTIM_SIZE, - OP_TX_STACK_STX_ESTIM_SIZE, OP_TX_TRANSFER_STACKS_ESTIM_SIZE, OP_TX_VOTE_AGG_ESTIM_SIZE, -}; /// The number of bitcoin blocks that can have /// passed since the UTXO cache was last refreshed before @@ -2806,13 +2806,13 @@ mod tests { use std::io::Write; use stacks::burnchains::BurnchainSigner; + use stacks::config::DEFAULT_SATS_PER_VB; use stacks_common::deps_common::bitcoin::blockdata::script::Builder; use stacks_common::types::chainstate::{BlockHeaderHash, StacksAddress, VRFSeed}; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::Secp256k1PrivateKey; use super::*; - use crate::config::DEFAULT_SATS_PER_VB; #[test] fn test_get_satoshis_per_byte() { diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 4d6eec8922..da1668cdd2 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -26,6 +26,8 @@ use clarity::vm::analysis::contract_interface_builder::build_contract_interface; use clarity::vm::costs::ExecutionCost; use clarity::vm::events::{FTEventType, NFTEventType, STXEventType}; use clarity::vm::types::{AssetIdentifier, QualifiedContractIdentifier, Value}; +#[cfg(any(test, feature = "testing"))] +use lazy_static::lazy_static; use rand::Rng; use rusqlite::{params, Connection}; use serde_json::json; @@ -49,6 +51,7 @@ use stacks::chainstate::stacks::miner::TransactionEvent; use stacks::chainstate::stacks::{ StacksBlock, StacksMicroblock, StacksTransaction, TransactionPayload, }; +use stacks::config::{EventKeyType, EventObserverConfig}; use stacks::core::mempool::{MemPoolDropReason, MemPoolEventDispatcher, ProposalCallbackReceiver}; use stacks::libstackerdb::StackerDBChunkData; use stacks::net::api::postblock_proposal::{ @@ -59,6 +62,8 @@ use stacks::net::http::HttpRequestContents; use stacks::net::httpcore::{send_http_request, StacksHttpRequest}; use stacks::net::stackerdb::StackerDBEventDispatcher; use stacks::util::hash::to_hex; +#[cfg(any(test, feature = "testing"))] +use stacks::util::tests::TestFlag; use stacks::util_lib::db::Error as db_error; use stacks_common::bitvec::BitVec; use stacks_common::codec::StacksMessageCodec; @@ -68,7 +73,11 @@ use stacks_common::util::hash::{bytes_to_hex, Sha512Trunc256Sum}; use 
stacks_common::util::secp256k1::MessageSignature; use url::Url; -use super::config::{EventKeyType, EventObserverConfig}; +#[cfg(any(test, feature = "testing"))] +lazy_static! { + /// Do not announce a signed/mined block to the network when set to true. + pub static ref TEST_SKIP_BLOCK_ANNOUNCEMENT: TestFlag = TestFlag::default(); +} #[derive(Debug, Clone)] struct EventObserver { @@ -153,6 +162,7 @@ pub struct MinedNakamotoBlockEvent { pub block_size: u64, pub cost: ExecutionCost, pub miner_signature: MessageSignature, + pub miner_signature_hash: Sha512Trunc256Sum, pub signer_signature_hash: Sha512Trunc256Sum, pub tx_events: Vec, pub signer_bitvec: String, @@ -943,9 +953,14 @@ impl ProposalCallbackReceiver for ProposalCallbackHandler { } impl MemPoolEventDispatcher for EventDispatcher { - fn mempool_txs_dropped(&self, txids: Vec, reason: MemPoolDropReason) { + fn mempool_txs_dropped( + &self, + txids: Vec, + new_txid: Option, + reason: MemPoolDropReason, + ) { if !txids.is_empty() { - self.process_dropped_mempool_txs(txids, reason) + self.process_dropped_mempool_txs(txids, new_txid, reason) } } @@ -1299,6 +1314,11 @@ impl EventDispatcher { let mature_rewards = serde_json::Value::Array(mature_rewards_vec); + #[cfg(any(test, feature = "testing"))] + if test_skip_block_announcement(&block) { + return; + } + for (observer_id, filtered_events_ids) in dispatch_matrix.iter().enumerate() { let filtered_events: Vec<_> = filtered_events_ids .iter() @@ -1514,6 +1534,7 @@ impl EventDispatcher { cost: consumed.clone(), tx_events, miner_signature: block.header.miner_signature, + miner_signature_hash: block.header.miner_signature_hash(), signer_signature_hash: block.header.signer_signature_hash(), signer_signature: block.header.signer_signature.clone(), signer_bitvec, @@ -1568,7 +1589,12 @@ impl EventDispatcher { } } - pub fn process_dropped_mempool_txs(&self, txs: Vec, reason: MemPoolDropReason) { + pub fn process_dropped_mempool_txs( + &self, + txs: Vec, + new_txid: Option, + reason: MemPoolDropReason, + ) { // lazily assemble payload only if we have observers let interested_observers = self.filter_observers(&self.mempool_observers_lookup, true); @@ -1581,10 +1607,22 @@ impl EventDispatcher { .map(|tx| serde_json::Value::String(format!("0x{tx}"))) .collect(); - let payload = json!({ - "dropped_txids": serde_json::Value::Array(dropped_txids), - "reason": reason.to_string(), - }); + let payload = match new_txid { + Some(id) => { + json!({ + "dropped_txids": serde_json::Value::Array(dropped_txids), + "reason": reason.to_string(), + "new_txid": format!("0x{}", &id), + }) + } + None => { + json!({ + "dropped_txids": serde_json::Value::Array(dropped_txids), + "reason": reason.to_string(), + "new_txid": null, + }) + } + }; for observer in interested_observers.iter() { observer.send_dropped_mempool_txs(&payload); @@ -1695,6 +1733,18 @@ impl EventDispatcher { } } +#[cfg(any(test, feature = "testing"))] +fn test_skip_block_announcement(block: &StacksBlockEventData) -> bool { + if TEST_SKIP_BLOCK_ANNOUNCEMENT.get() { + warn!( + "Skipping new block announcement due to testing directive"; + "block_hash" => %block.block_hash + ); + return true; + } + false +} + #[cfg(test)] mod test { use std::net::TcpListener; diff --git a/testnet/stacks-node/src/globals.rs b/testnet/stacks-node/src/globals.rs index c285c6a168..2a9a601723 100644 --- a/testnet/stacks-node/src/globals.rs +++ b/testnet/stacks-node/src/globals.rs @@ -10,10 +10,10 @@ use stacks::chainstate::coordinator::comm::CoordinatorChannels; use 
stacks::chainstate::stacks::db::unconfirmed::UnconfirmedTxMap; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::miner::MinerStatus; +use stacks::config::MinerConfig; use stacks::net::NetworkResult; use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, ConsensusHash}; -use crate::config::MinerConfig; use crate::neon::Counters; use crate::neon_node::LeaderKeyRegistrationState; use crate::run_loop::RegisteredKey; diff --git a/testnet/stacks-node/src/main.rs b/testnet/stacks-node/src/main.rs index 4fa1c5e5a7..bd2a8aed03 100644 --- a/testnet/stacks-node/src/main.rs +++ b/testnet/stacks-node/src/main.rs @@ -16,8 +16,6 @@ use stacks_common::util::hash::hex_bytes; pub mod monitoring; pub mod burnchains; -pub mod chain_data; -pub mod config; pub mod event_dispatcher; pub mod genesis_data; pub mod globals; @@ -41,19 +39,19 @@ use stacks::chainstate::coordinator::{get_next_recipients, OnChainRewardSetProvi use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::db::blocks::DummyEventDispatcher; use stacks::chainstate::stacks::db::StacksChainState; +use stacks::config::chain_data::MinerStats; +pub use stacks::config::{Config, ConfigFile}; #[cfg(not(any(target_os = "macos", target_os = "windows", target_arch = "arm")))] use tikv_jemallocator::Jemalloc; pub use self::burnchains::{ BitcoinRegtestController, BurnchainController, BurnchainTip, MocknetController, }; -pub use self::config::{Config, ConfigFile}; pub use self::event_dispatcher::EventDispatcher; pub use self::keychain::Keychain; pub use self::node::{ChainTip, Node}; pub use self::run_loop::{helium, neon}; pub use self::tenure::Tenure; -use crate::chain_data::MinerStats; use crate::neon_node::{BlockMinerThread, TipCandidate}; use crate::run_loop::boot_nakamoto; @@ -473,7 +471,7 @@ testnet\t\tStart a node that will join and stream blocks from the public testnet start\t\tStart a node with a config of your own. Can be used for joining a network, starting new chain, etc. \t\tArguments: -\t\t --config: path of the config (such as https://github.com/blockstack/stacks-blockchain/blob/master/testnet/stacks-node/conf/testnet-follower-conf.toml). +\t\t --config: path of the config (such as https://github.com/blockstack/stacks-blockchain/blob/master/sample/conf/testnet-follower-conf.toml). 
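Reviewer note: the globals.rs and main.rs hunks above delete the node-local config and chain_data modules in favor of the copies that now live in stackslib, while keeping pub use stacks::config::{Config, ConfigFile} so existing import paths stay valid. A toy sketch of that re-export pattern, with hypothetical module names (newhome stands in for stackslib; this is not the actual crate layout):

    // After moving a module, a `pub use` at the old site preserves the
    // public path, so downstream `use crate_root::config::Config` still compiles.
    mod newhome {
        pub mod config {
            pub struct Config {
                pub chain_id: u32,
            }
        }
    }

    pub use newhome::config;

    fn main() {
        // 0x80000000 is used here only as a familiar testnet-style chain id.
        let c = config::Config { chain_id: 0x8000_0000 };
        println!("chain id: {:#x}", c.chain_id);
    }
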
\t\tExample: \t\t stacks-node start --config /path/to/config.toml diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 6a4ea39b60..d9edf97e90 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -139,7 +139,7 @@ pub struct BlockMinerThread { burnchain: Burnchain, /// Last block mined last_block_mined: Option, - /// Number of blocks mined since a tenure change/extend + /// Number of blocks mined since a tenure change/extend was attempted mined_blocks: u64, /// Copy of the node's registered VRF key registered_key: RegisteredKey, @@ -160,6 +160,8 @@ pub struct BlockMinerThread { /// Handle to the p2p thread for block broadcast p2p_handle: NetworkHandle, signer_set_cache: Option, + /// The time at which tenure change/extend was attempted + tenure_change_time: Instant, } impl BlockMinerThread { @@ -187,6 +189,7 @@ impl BlockMinerThread { reason, p2p_handle: rt.get_p2p_handle(), signer_set_cache: None, + tenure_change_time: Instant::now(), } } @@ -1186,7 +1189,9 @@ impl BlockMinerThread { if self.last_block_mined.is_some() { // Check if we can extend the current tenure let tenure_extend_timestamp = coordinator.get_tenure_extend_timestamp(); - if get_epoch_time_secs() <= tenure_extend_timestamp { + if get_epoch_time_secs() <= tenure_extend_timestamp + && self.tenure_change_time.elapsed() <= self.config.miner.tenure_timeout + { return Ok(NakamotoTenureInfo { coinbase_tx: None, tenure_change_tx: None, @@ -1195,6 +1200,8 @@ impl BlockMinerThread { info!("Miner: Time-based tenure extend"; "current_timestamp" => get_epoch_time_secs(), "tenure_extend_timestamp" => tenure_extend_timestamp, + "tenure_change_time_elapsed" => self.tenure_change_time.elapsed().as_secs(), + "tenure_timeout_secs" => self.config.miner.tenure_timeout.as_secs(), ); self.tenure_extend_reset(); } @@ -1265,6 +1272,7 @@ impl BlockMinerThread { } fn tenure_extend_reset(&mut self) { + self.tenure_change_time = Instant::now(); self.reason = MinerReason::Extended { burn_view_consensus_hash: self.burn_block.consensus_hash, }; diff --git a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs index 00c21ec003..834c59fa95 100644 --- a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs +++ b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs @@ -35,16 +35,16 @@ use stacks::types::PublicKey; use stacks::util::get_epoch_time_secs; use stacks::util::hash::{MerkleHashFunc, Sha512Trunc256Sum}; use stacks::util::secp256k1::MessageSignature; +#[cfg(test)] +use stacks_common::util::tests::TestFlag; use super::Error as NakamotoNodeError; use crate::event_dispatcher::StackerDBChannel; -#[cfg(test)] -use crate::neon::TestFlag; #[cfg(test)] /// Fault injection flag to prevent the miner from seeing enough signer signatures. /// Used to test that the signers will broadcast a block if it gets enough signatures -pub static TEST_IGNORE_SIGNERS: LazyLock = LazyLock::new(TestFlag::default); +pub static TEST_IGNORE_SIGNERS: LazyLock> = LazyLock::new(TestFlag::default); /// How long should the coordinator poll on the event receiver before /// waking up to check timeouts? 
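Reviewer note on the miner.rs changes above: the early return that suppresses a tenure extend is now gated on a second condition. Even if the coordinator's tenure_extend_timestamp has not passed, the miner falls through to a time-based tenure extend once config.miner.tenure_timeout has elapsed since the last tenure change/extend attempt, and tenure_extend_reset() restarts that clock. A condensed sketch of the decision; TenureClock is an illustrative stand-in for the relevant BlockMinerThread fields:

    use std::time::{Duration, Instant};

    struct TenureClock {
        // Mirrors BlockMinerThread::tenure_change_time in the patch.
        tenure_change_time: Instant,
        // Mirrors config.miner.tenure_timeout (a Duration, per the log line above).
        tenure_timeout: Duration,
    }

    impl TenureClock {
        // De Morgan of the early-return guard in the hunk: extend when the
        // coordinator timestamp has passed OR the attempt timeout has elapsed.
        fn should_extend(&self, now_secs: u64, tenure_extend_timestamp: u64) -> bool {
            now_secs > tenure_extend_timestamp
                || self.tenure_change_time.elapsed() > self.tenure_timeout
        }

        // Mirrors tenure_extend_reset(): restart the clock on each attempt.
        fn reset(&mut self) {
            self.tenure_change_time = Instant::now();
        }
    }

    fn main() {
        let mut clock = TenureClock {
            tenure_change_time: Instant::now(),
            tenure_timeout: Duration::from_secs(180),
        };
        // Fresh clock, extend timestamp still in the future: keep mining.
        assert!(!clock.should_extend(100, 200));
        clock.reset();
    }
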
diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index c74ce3d878..2d4dc7fadd 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -187,6 +187,8 @@ use stacks::chainstate::stacks::{ StacksMicroblock, StacksPublicKey, StacksTransaction, StacksTransactionSigner, TransactionAnchorMode, TransactionPayload, TransactionVersion, }; +use stacks::config::chain_data::MinerStats; +use stacks::config::NodeConfig; use stacks::core::mempool::MemPoolDB; use stacks::core::{EpochList, FIRST_BURNCHAIN_CONSENSUS_HASH, STACKS_EPOCH_3_0_MARKER}; use stacks::cost_estimates::metrics::{CostMetric, UnitMetric}; @@ -220,8 +222,6 @@ use crate::burnchains::bitcoin_regtest_controller::{ addr2str, burnchain_params_from_config, BitcoinRegtestController, OngoingBlockCommit, }; use crate::burnchains::{make_bitcoin_indexer, Error as BurnchainControllerError}; -use crate::chain_data::MinerStats; -use crate::config::NodeConfig; use crate::globals::{NeonGlobals as Globals, RelayerDirective}; use crate::nakamoto_node::signer_coordinator::SignerCoordinator; use crate::run_loop::neon::RunLoop; diff --git a/testnet/stacks-node/src/run_loop/boot_nakamoto.rs b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs index 648c6d7470..171ebcb2cb 100644 --- a/testnet/stacks-node/src/run_loop/boot_nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs @@ -22,9 +22,8 @@ use std::{fs, thread}; use stacks::burnchains::Burnchain; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::coordinator::comm::CoordinatorChannels; -use stacks::core::StacksEpochExtension; use stacks::net::p2p::PeerNetwork; -use stacks_common::types::{StacksEpoch, StacksEpochId}; +use stacks_common::types::StacksEpochId; use crate::event_dispatcher::EventDispatcher; use crate::globals::NeonGlobals; @@ -233,10 +232,7 @@ impl BootRunLoop { fn reached_epoch_30_transition(config: &Config) -> Result { let burn_height = Self::get_burn_height(config)?; - let epochs = StacksEpoch::get_epochs( - config.burnchain.get_bitcoin_network().1, - config.burnchain.epochs.as_ref(), - ); + let epochs = config.burnchain.get_epoch_list(); let epoch_3 = epochs .get(StacksEpochId::Epoch30) .ok_or("No Epoch-3.0 defined")?; diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs index 16f5a12b2d..335fb325d8 100644 --- a/testnet/stacks-node/src/run_loop/nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -319,6 +319,7 @@ impl RunLoop { let mut fee_estimator = moved_config.make_fee_estimator(); let coord_config = ChainsCoordinatorConfig { + assume_present_anchor_blocks: moved_config.node.assume_present_anchor_blocks, always_use_affirmation_maps: moved_config.node.always_use_affirmation_maps, require_affirmed_anchor_blocks: moved_config .node diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index b2171b4e8b..4ecc84b73b 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -21,6 +21,8 @@ use stacks::chainstate::stacks::db::{ChainStateBootData, StacksChainState}; use stacks::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready, MinerStatus}; use stacks::core::StacksEpochId; use stacks::net::atlas::{AtlasConfig, AtlasDB, Attachment}; +#[cfg(test)] +use stacks::util::tests::TestFlag; use stacks::util_lib::db::Error as db_error; use stacks_common::deps_common::ctrlc as termination; use 
stacks_common::deps_common::ctrlc::SignalId; @@ -94,30 +96,6 @@ impl std::ops::Deref for RunLoopCounter { } } -#[cfg(test)] -#[derive(Clone)] -pub struct TestFlag(pub Arc>>); - -#[cfg(test)] -impl Default for TestFlag { - fn default() -> Self { - Self(Arc::new(std::sync::Mutex::new(None))) - } -} - -#[cfg(test)] -impl TestFlag { - /// Set the test flag to the given value - pub fn set(&self, value: bool) { - *self.0.lock().unwrap() = Some(value); - } - - /// Get the test flag value. Defaults to false if the flag is not set. - pub fn get(&self) -> bool { - self.0.lock().unwrap().unwrap_or(false) - } -} - #[derive(Clone, Default)] pub struct Counters { pub blocks_processed: RunLoopCounter, @@ -135,7 +113,7 @@ pub struct Counters { pub naka_signer_pushed_blocks: RunLoopCounter, #[cfg(test)] - pub naka_skip_commit_op: TestFlag, + pub naka_skip_commit_op: TestFlag, } impl Counters { @@ -637,6 +615,7 @@ impl RunLoop { let mut fee_estimator = moved_config.make_fee_estimator(); let coord_config = ChainsCoordinatorConfig { + assume_present_anchor_blocks: moved_config.node.assume_present_anchor_blocks, always_use_affirmation_maps: moved_config.node.always_use_affirmation_maps, require_affirmed_anchor_blocks: moved_config .node @@ -1168,19 +1147,8 @@ impl RunLoop { let mut sortition_db_height = rc_aligned_height; let mut burnchain_height = sortition_db_height; - let mut num_sortitions_in_last_cycle = 1; // prepare to fetch the first reward cycle! - let mut target_burnchain_block_height = cmp::min( - burnchain_config.reward_cycle_to_block_height( - burnchain_config - .block_height_to_reward_cycle(burnchain_height) - .expect("BUG: block height is not in a reward cycle") - + 1, - ), - burnchain.get_headers_height() - 1, - ); - debug!("Runloop: Begin main runloop starting a burnchain block {sortition_db_height}"); let mut last_tenure_sortition_height = 0; @@ -1208,17 +1176,13 @@ impl RunLoop { let remote_chain_height = burnchain.get_headers_height() - 1; - // wait for the p2p state-machine to do at least one pass - debug!("Runloop: Wait until Stacks block downloads reach a quiescent state before processing more burnchain blocks"; "remote_chain_height" => remote_chain_height, "local_chain_height" => burnchain_height); - - // wait until it's okay to process the next reward cycle's sortitions - let ibd = match self.get_pox_watchdog().pox_sync_wait( + // wait until it's okay to process the next reward cycle's sortitions. + let (ibd, target_burnchain_block_height) = match self.get_pox_watchdog().pox_sync_wait( &burnchain_config, &burnchain_tip, remote_chain_height, - num_sortitions_in_last_cycle, ) { - Ok(ibd) => ibd, + Ok(x) => x, Err(e) => { debug!("Runloop: PoX sync wait routine aborted: {e:?}"); continue; @@ -1232,9 +1196,6 @@ impl RunLoop { 0.0 }; - // will recalculate this in the following loop - num_sortitions_in_last_cycle = 0; - // Download each burnchain block and process their sortitions. This, in turn, will // cause the node's p2p and relayer threads to go fetch and download Stacks blocks and // process them. 
This loop runs for one reward cycle, so that the next pass of the @@ -1282,8 +1243,6 @@ impl RunLoop { "Runloop: New burnchain block height {next_sortition_height} > {sortition_db_height}" ); - let mut sort_count = 0; - debug!("Runloop: block mining until we process all sortitions"); signal_mining_blocked(globals.get_miner_status()); @@ -1301,9 +1260,6 @@ impl RunLoop { "Failed to find block in fork processed by burnchain indexer", ) }; - if block.sortition { - sort_count += 1; - } let sortition_id = &block.sortition_id; @@ -1350,9 +1306,8 @@ impl RunLoop { debug!("Runloop: enable miner after processing sortitions"); signal_mining_ready(globals.get_miner_status()); - num_sortitions_in_last_cycle = sort_count; debug!( - "Runloop: Synchronized sortitions up to block height {next_sortition_height} from {sortition_db_height} (chain tip height is {burnchain_height}); {num_sortitions_in_last_cycle} sortitions" + "Runloop: Synchronized sortitions up to block height {next_sortition_height} from {sortition_db_height} (chain tip height is {burnchain_height})" ); sortition_db_height = next_sortition_height; @@ -1371,22 +1326,6 @@ impl RunLoop { } } - // advance one reward cycle at a time. - // If we're still downloading, then this is simply target_burnchain_block_height + reward_cycle_len. - // Otherwise, this is burnchain_tip + reward_cycle_len - let next_target_burnchain_block_height = cmp::min( - burnchain_config.reward_cycle_to_block_height( - burnchain_config - .block_height_to_reward_cycle(target_burnchain_block_height) - .expect("FATAL: burnchain height before system start") - + 1, - ), - remote_chain_height, - ); - - debug!("Runloop: Advance target burnchain block height from {target_burnchain_block_height} to {next_target_burnchain_block_height} (sortition height {sortition_db_height})"); - target_burnchain_block_height = next_target_burnchain_block_height; - if sortition_db_height >= burnchain_height && !ibd { let canonical_stacks_tip_height = SortitionDB::get_canonical_burn_chain_tip(burnchain.sortdb_ref().conn()) diff --git a/testnet/stacks-node/src/syncctl.rs b/testnet/stacks-node/src/syncctl.rs index 395d829c8f..488234d21d 100644 --- a/testnet/stacks-node/src/syncctl.rs +++ b/testnet/stacks-node/src/syncctl.rs @@ -1,20 +1,28 @@ -use std::collections::VecDeque; +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; use std::sync::Arc; use stacks::burnchains::{Burnchain, Error as burnchain_error}; -use stacks::chainstate::stacks::db::StacksChainState; use stacks_common::util::{get_epoch_time_secs, sleep_ms}; use crate::burnchains::BurnchainTip; use crate::Config; -// amount of time to wait for an inv or download sync to complete. 
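Reviewer note: with the neon.rs hunk above, the run loop no longer tracks num_sortitions_in_last_cycle or pre-computes target_burnchain_block_height itself; pox_sync_wait now returns both values as (ibd, target_burnchain_block_height), derived from reward-cycle arithmetic in the syncctl.rs rewrite below. A simplified sketch of that computation, assuming fixed-length cycles starting at height 0 (the real code goes through Burnchain::block_height_to_reward_cycle and reward_cycle_to_block_height):

    fn max_sync_height(sortition_height: u64, burnchain_height: u64, rc_len: u64) -> u64 {
        let sortition_rc = sortition_height / rc_len;
        let burnchain_rc = burnchain_height / rc_len;
        if sortition_rc < burnchain_rc {
            // Still catching up: advance at most one reward cycle per pass.
            ((sortition_rc + 1) * rc_len).min(burnchain_height)
        } else {
            // Caught up: sync to whichever tip is higher.
            sortition_height.max(burnchain_height)
        }
    }

    fn main() {
        // Several cycles behind: capped at the next cycle boundary (height 200).
        assert_eq!(max_sync_height(150, 1000, 100), 200);
        // Same cycle as the burnchain tip: sync to the tip.
        assert_eq!(max_sync_height(1000, 1040, 100), 1040);
    }
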
-// These _really should_ complete before the PoX sync watchdog permits processing the next reward -// cycle, so this number is intentionally high (like, there's something really wrong with your -// network if your node is actualy waiting a day in-between reward cycles). -const SYNC_WAIT_SECS: u64 = 24 * 3600; - #[derive(Clone)] pub struct PoxSyncWatchdogComms { /// how many passes in the p2p state machine have taken place since startup? @@ -56,22 +64,6 @@ impl PoxSyncWatchdogComms { self.last_ibd.load(Ordering::SeqCst) } - /// Wait for at least one inv-sync state-machine passes - pub fn wait_for_inv_sync_pass(&self, timeout: u64) -> Result { - let current = self.get_inv_sync_passes(); - - let now = get_epoch_time_secs(); - while current >= self.get_inv_sync_passes() { - if now + timeout < get_epoch_time_secs() { - debug!("PoX watchdog comms: timed out waiting for one inv-sync pass"); - return Ok(false); - } - self.interruptable_sleep(1)?; - std::hint::spin_loop(); - } - Ok(true) - } - fn interruptable_sleep(&self, secs: u64) -> Result<(), burnchain_error> { let deadline = secs + get_epoch_time_secs(); while get_epoch_time_secs() < deadline { @@ -83,21 +75,6 @@ impl PoxSyncWatchdogComms { Ok(()) } - pub fn wait_for_download_pass(&self, timeout: u64) -> Result { - let current = self.get_download_passes(); - - let now = get_epoch_time_secs(); - while current >= self.get_download_passes() { - if now + timeout < get_epoch_time_secs() { - debug!("PoX watchdog comms: timed out waiting for one download pass"); - return Ok(false); - } - self.interruptable_sleep(1)?; - std::hint::spin_loop(); - } - Ok(true) - } - pub fn should_keep_running(&self) -> bool { self.should_keep_running.load(Ordering::SeqCst) } @@ -124,82 +101,25 @@ impl PoxSyncWatchdogComms { /// unless it's reasonably sure that it has processed all Stacks blocks for this reward cycle. /// This struct monitors the Stacks chainstate to make this determination. pub struct PoxSyncWatchdog { - /// number of attachable but unprocessed staging blocks over time - new_attachable_blocks: VecDeque, - /// number of newly-processed staging blocks over time - new_processed_blocks: VecDeque, - /// last time we asked for attachable blocks - last_attachable_query: u64, - /// last time we asked for processed blocks - last_processed_query: u64, - /// number of samples to take - max_samples: u64, - /// maximum number of blocks to count per query (affects performance!) - max_staging: u64, - /// when did we first start watching? - watch_start_ts: u64, - /// when did we first see a flatline in block-processing rate? - last_block_processed_ts: u64, - /// estimated time for a block to get downloaded. Used to infer how long to wait for the first - /// blocks to show up when waiting for this reward cycle. - estimated_block_download_time: f64, - /// estimated time for a block to get processed -- from when it shows up as attachable to when - /// it shows up as processed. Used to infer how long to wait for the last block to get - /// processed before unblocking burnchain sync for the next reward cycle. 
- estimated_block_process_time: f64, - /// time between burnchain syncs in stead state + /// time between burnchain syncs in steady state steady_state_burnchain_sync_interval: u64, - /// when to re-sync under steady state - steady_state_resync_ts: u64, - /// chainstate handle - chainstate: StacksChainState, /// handle to relayer thread that informs the watchdog when the P2P state-machine does stuff relayer_comms: PoxSyncWatchdogComms, /// should this sync watchdog always download? used in integration tests. unconditionally_download: bool, } -const PER_SAMPLE_WAIT_MS: u64 = 1000; - impl PoxSyncWatchdog { pub fn new( config: &Config, watchdog_comms: PoxSyncWatchdogComms, ) -> Result { - let mainnet = config.is_mainnet(); - let chain_id = config.burnchain.chain_id; - let chainstate_path = config.get_chainstate_path_str(); let burnchain_poll_time = config.burnchain.poll_time_secs; - let download_timeout = config.connection_options.timeout; - let max_samples = config.node.pox_sync_sample_secs; let unconditionally_download = config.node.pox_sync_sample_secs == 0; - let marf_opts = config.node.get_marf_opts(); - - let (chainstate, _) = - match StacksChainState::open(mainnet, chain_id, &chainstate_path, Some(marf_opts)) { - Ok(cs) => cs, - Err(e) => { - return Err(format!( - "Failed to open chainstate at '{chainstate_path}': {e:?}" - )); - } - }; Ok(PoxSyncWatchdog { unconditionally_download, - new_attachable_blocks: VecDeque::new(), - new_processed_blocks: VecDeque::new(), - last_attachable_query: 0, - last_processed_query: 0, - max_samples, - max_staging: 10, - watch_start_ts: 0, - last_block_processed_ts: 0, - estimated_block_download_time: download_timeout as f64, - estimated_block_process_time: 5.0, steady_state_burnchain_sync_interval: burnchain_poll_time, - steady_state_resync_ts: 0, - chainstate, relayer_comms: watchdog_comms, }) } @@ -208,39 +128,9 @@ impl PoxSyncWatchdog { self.relayer_comms.clone() } - /// How many recently-added Stacks blocks are in an attachable state, up to $max_staging? - fn count_attachable_stacks_blocks(&mut self) -> Result { - // number of staging blocks that have arrived since the last sortition - let cnt = StacksChainState::count_attachable_staging_blocks( - self.chainstate.db(), - self.max_staging, - self.last_attachable_query, - ) - .map_err(|e| format!("Failed to count attachable staging blocks: {e:?}"))?; - - self.last_attachable_query = get_epoch_time_secs(); - Ok(cnt) - } - - /// How many recently-processed Stacks blocks are there, up to $max_staging? - /// ($max_staging is necessary to limit the runtime of this method, since the underlying SQL - /// uses COUNT(*), which in Sqlite is a _O(n)_ operation for _n_ rows) - fn count_processed_stacks_blocks(&mut self) -> Result { - // number of staging blocks that have arrived since the last sortition - let cnt = StacksChainState::count_processed_staging_blocks( - self.chainstate.db(), - self.max_staging, - self.last_processed_query, - ) - .map_err(|e| format!("Failed to count attachable staging blocks: {e:?}"))?; - - self.last_processed_query = get_epoch_time_secs(); - Ok(cnt) - } - /// Are we in the initial burnchain block download? i.e. is the burn tip snapshot far enough away /// from the burnchain height that we should be eagerly downloading snapshots? 
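Reviewer note, for context on infer_initial_burnchain_block_download (whose visibility changes just below): the watchdog treats the node as still being in initial burnchain block download while the last-processed burn height trails the burnchain tip by more than the stable-confirmation window. A sketch under that assumption about the comparison; the real method takes a Burnchain reference and logs its decision:

    // Hypothetical free-function restatement of the IBD heuristic, assuming
    // the processed tip must be within `stable_confirmations` of the burnchain
    // tip to count as caught up.
    fn infer_ibd(last_processed_height: u64, burnchain_height: u64, stable_confirmations: u64) -> bool {
        last_processed_height + stable_confirmations < burnchain_height
    }

    fn main() {
        // 100 blocks behind a tip of 1000: clearly still syncing.
        assert!(infer_ibd(900, 1000, 7));
        // Within the 7-block confirmation window: caught up.
        assert!(!infer_ibd(995, 1000, 7));
    }
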
- pub fn infer_initial_burnchain_block_download( + fn infer_initial_burnchain_block_download( burnchain: &Burnchain, last_processed_height: u64, burnchain_height: u64, @@ -261,182 +151,23 @@ impl PoxSyncWatchdog { ibd } - /// Calculate the first derivative of a list of points - fn derivative(sample_list: &VecDeque) -> Vec { - let mut deltas = vec![]; - let mut prev = 0; - for (i, sample) in sample_list.iter().enumerate() { - if i == 0 { - prev = *sample; - continue; - } - let delta = *sample - prev; - prev = *sample; - deltas.push(delta); - } - deltas - } - - /// Is a derivative approximately flat, with a maximum absolute deviation from 0? - /// Return whether or not the sample is mostly flat, and how many points were over the given - /// error bar in either direction. - fn is_mostly_flat(deriv: &[i64], error: i64) -> (bool, usize) { - let mut total_deviates = 0; - let mut ret = true; - for d in deriv.iter() { - if d.abs() > error { - total_deviates += 1; - ret = false; - } - } - (ret, total_deviates) - } - - /// low and high pass filter average -- take average without the smallest and largest values - fn hilo_filter_avg(samples: &[i64]) -> f64 { - // take average with low and high pass - let mut min = i64::MAX; - let mut max = i64::MIN; - for s in samples.iter() { - if *s < 0 { - // nonsensical result (e.g. due to clock drift?) - continue; - } - if *s < min { - min = *s; - } - if *s > max { - max = *s; - } - } - - let mut count = 0; - let mut sum = 0; - for s in samples.iter() { - if *s < 0 { - // nonsensical result - continue; - } - if *s == min { - continue; - } - if *s == max { - continue; - } - count += 1; - sum += *s; - } - - if count == 0 { - // no viable samples - 1.0 - } else { - (sum as f64) / (count as f64) - } - } - - /// estimate how long a block remains in an unprocessed state - fn estimate_block_process_time( - chainstate: &StacksChainState, - burnchain: &Burnchain, - tip_height: u64, - ) -> f64 { - let this_reward_cycle = burnchain - .block_height_to_reward_cycle(tip_height) - .unwrap_or_else(|| panic!("BUG: no reward cycle for {tip_height}")); - let prev_reward_cycle = this_reward_cycle.saturating_sub(1); - - let start_height = burnchain.reward_cycle_to_block_height(prev_reward_cycle); - let end_height = burnchain.reward_cycle_to_block_height(this_reward_cycle); - - if this_reward_cycle > 0 { - assert!(start_height < end_height); - } else { - // no samples yet - return 1.0; - } - - let block_wait_times = - StacksChainState::measure_block_wait_time(chainstate.db(), start_height, end_height) - .expect("BUG: failed to query chainstate block-processing times"); - - PoxSyncWatchdog::hilo_filter_avg(&block_wait_times) - } - - /// estimate how long a block takes to download - fn estimate_block_download_time( - chainstate: &StacksChainState, - burnchain: &Burnchain, - tip_height: u64, - ) -> f64 { - let this_reward_cycle = burnchain - .block_height_to_reward_cycle(tip_height) - .unwrap_or_else(|| panic!("BUG: no reward cycle for {tip_height}")); - let prev_reward_cycle = this_reward_cycle.saturating_sub(1); - - let start_height = burnchain.reward_cycle_to_block_height(prev_reward_cycle); - let end_height = burnchain.reward_cycle_to_block_height(this_reward_cycle); - - if this_reward_cycle > 0 { - assert!(start_height < end_height); - } else { - // no samples yet - return 1.0; - } - - let block_download_times = StacksChainState::measure_block_download_time( - chainstate.db(), - start_height, - end_height, - ) - .expect("BUG: failed to query chainstate block-download times"); - - 
PoxSyncWatchdog::hilo_filter_avg(&block_download_times) - } - - /// Reset internal state. Performed when it's okay to begin syncing the burnchain. - /// Updates estimate for block-processing time and block-downloading time. - fn reset(&mut self, burnchain: &Burnchain, tip_height: u64) { - // find the average (with low/high pass filter) time a block spends in the DB without being - // processed, during this reward cycle - self.estimated_block_process_time = - PoxSyncWatchdog::estimate_block_process_time(&self.chainstate, burnchain, tip_height); - - // find the average (with low/high pass filter) time a block spends downloading - self.estimated_block_download_time = - PoxSyncWatchdog::estimate_block_download_time(&self.chainstate, burnchain, tip_height); - - debug!( - "Estimated block download time: {}s. Estimated block processing time: {}s", - self.estimated_block_download_time, self.estimated_block_process_time - ); - - self.new_attachable_blocks.clear(); - self.new_processed_blocks.clear(); - self.last_block_processed_ts = 0; - self.watch_start_ts = 0; - self.steady_state_resync_ts = 0; - } - - /// Wait until all of the Stacks blocks for the given reward cycle are seemingly downloaded and - /// processed. Do so by watching the _rate_ at which attachable Stacks blocks arrive and get - /// processed. - /// Returns whether or not we're still in the initial block download -- i.e. true if we're - /// still downloading burnchain blocks, or we haven't reached steady-state block-processing. + /// Wait until the next PoX anchor block arrives. + /// We know for a fact that they all exist for Epochs 2.5 and earlier, in both mainnet and + /// testnet. + /// Return (still-in-ibd?, maximum-burnchain-sync-height) on success. pub fn pox_sync_wait( &mut self, burnchain: &Burnchain, burnchain_tip: &BurnchainTip, // this is the highest burnchain snapshot we've sync'ed to burnchain_height: u64, // this is the absolute burnchain block height - num_sortitions_in_last_cycle: u64, - ) -> Result { - if self.watch_start_ts == 0 { - self.watch_start_ts = get_epoch_time_secs(); - } - if self.steady_state_resync_ts == 0 { - self.steady_state_resync_ts = - get_epoch_time_secs() + self.steady_state_burnchain_sync_interval; - } + ) -> Result<(bool, u64), burnchain_error> { + let burnchain_rc = burnchain + .block_height_to_reward_cycle(burnchain_height) + .expect("FATAL: burnchain height is before system start"); + + let sortition_rc = burnchain + .block_height_to_reward_cycle(burnchain_tip.block_snapshot.block_height) + .expect("FATAL: sortition height is before system start"); let ibbd = PoxSyncWatchdog::infer_initial_burnchain_block_download( burnchain, @@ -444,220 +175,23 @@ impl PoxSyncWatchdog { burnchain_height, ); - // unconditionally download the first reward cycle - if burnchain_tip.block_snapshot.block_height - < burnchain.first_block_height + (burnchain.pox_constants.reward_cycle_length as u64) - { - debug!("PoX watchdog in first reward cycle -- sync immediately"); - self.relayer_comms.set_ibd(ibbd); + let max_sync_height = if sortition_rc < burnchain_rc { + burnchain + .reward_cycle_to_block_height(sortition_rc + 1) + .min(burnchain_height) + } else { + burnchain_tip + .block_snapshot + .block_height + .max(burnchain_height) + }; + self.relayer_comms.set_ibd(ibbd); + if !self.unconditionally_download { self.relayer_comms .interruptable_sleep(self.steady_state_burnchain_sync_interval)?; - - return Ok(ibbd); - } - - if self.unconditionally_download { - debug!("PoX watchdog set to unconditionally download 
(ibd={ibbd})"); - self.relayer_comms.set_ibd(ibbd); - return Ok(ibbd); - } - - let mut waited = false; - if ibbd { - // we are far behind the burnchain tip (i.e. not in the last reward cycle), - // so make sure the downloader knows about blocks it doesn't have yet so we can go and - // fetch its blocks before proceeding. - if num_sortitions_in_last_cycle > 0 { - debug!("PoX watchdog: Wait for at least one inventory state-machine pass..."); - self.relayer_comms.wait_for_inv_sync_pass(SYNC_WAIT_SECS)?; - waited = true; - } else { - debug!("PoX watchdog: In initial block download, and no sortitions to consider in this reward cycle -- sync immediately"); - self.relayer_comms.set_ibd(ibbd); - return Ok(ibbd); - } - } else { - debug!("PoX watchdog: not in initial burn block download, so not waiting for an inventory state-machine pass"); } - if burnchain_tip.block_snapshot.block_height - + (burnchain.pox_constants.reward_cycle_length as u64) - >= burnchain_height - { - // unconditionally download if we're within the last reward cycle (after the poll timeout) - if !waited { - debug!( - "PoX watchdog in last reward cycle -- sync after {} seconds", - self.steady_state_burnchain_sync_interval - ); - self.relayer_comms.set_ibd(ibbd); - - self.relayer_comms - .interruptable_sleep(self.steady_state_burnchain_sync_interval)?; - } else { - debug!("PoX watchdog in last reward cycle -- sync immediately"); - self.relayer_comms.set_ibd(ibbd); - } - return Ok(ibbd); - } - - // have we reached steady-state behavior? i.e. have we stopped processing both burnchain - // and Stacks blocks? - let mut steady_state = false; - debug!("PoX watchdog: Wait until chainstate reaches steady-state block-processing..."); - - let ibbd = loop { - if !self.relayer_comms.should_keep_running() { - break false; - } - let ibbd = PoxSyncWatchdog::infer_initial_burnchain_block_download( - burnchain, - burnchain_tip.block_snapshot.block_height, - burnchain_height, - ); - - let expected_first_block_deadline = - self.watch_start_ts + (self.estimated_block_download_time as u64); - let expected_last_block_deadline = self.last_block_processed_ts - + (self.estimated_block_download_time as u64) - + (self.estimated_block_process_time as u64); - - match ( - self.count_attachable_stacks_blocks(), - self.count_processed_stacks_blocks(), - ) { - (Ok(num_available), Ok(num_processed)) => { - self.new_attachable_blocks.push_back(num_available as i64); - self.new_processed_blocks.push_back(num_processed as i64); - - if (self.new_attachable_blocks.len() as u64) > self.max_samples { - self.new_attachable_blocks.pop_front(); - } - if (self.new_processed_blocks.len() as u64) > self.max_samples { - self.new_processed_blocks.pop_front(); - } - - if (self.new_attachable_blocks.len() as u64) < self.max_samples - || (self.new_processed_blocks.len() as u64) < self.max_samples - { - // still getting initial samples - if self.new_processed_blocks.len() % 10 == 0 { - debug!( - "PoX watchdog: Still warming up: {} out of {} samples...", - &self.new_attachable_blocks.len(), - &self.max_samples - ); - } - sleep_ms(PER_SAMPLE_WAIT_MS); - continue; - } - - if self.watch_start_ts > 0 - && get_epoch_time_secs() < expected_first_block_deadline - { - // still waiting for that first block in this reward cycle - debug!("PoX watchdog: Still warming up: waiting until {expected_first_block_deadline}s for first Stacks block download (estimated download time: {}s)...", self.estimated_block_download_time); - sleep_ms(PER_SAMPLE_WAIT_MS); - continue; - } - - if self.watch_start_ts > 
0 - && (self.new_attachable_blocks.len() as u64) < self.max_samples - && self.watch_start_ts - + self.max_samples - + self.steady_state_burnchain_sync_interval - * (burnchain.stable_confirmations as u64) - < get_epoch_time_secs() - { - debug!( - "PoX watchdog: could not calculate {} samples in {} seconds. Assuming suspend/resume, or assuming load is too high.", - self.max_samples, - self.max_samples + self.steady_state_burnchain_sync_interval * (burnchain.stable_confirmations as u64) - ); - self.reset(burnchain, burnchain_tip.block_snapshot.block_height); - - self.watch_start_ts = get_epoch_time_secs(); - self.steady_state_resync_ts = - get_epoch_time_secs() + self.steady_state_burnchain_sync_interval; - continue; - } - - // take first derivative of samples -- see if the download and processing rate has gone to 0 - let attachable_delta = PoxSyncWatchdog::derivative(&self.new_attachable_blocks); - let processed_delta = PoxSyncWatchdog::derivative(&self.new_processed_blocks); - - let (flat_attachable, attachable_deviants) = - PoxSyncWatchdog::is_mostly_flat(&attachable_delta, 0); - let (flat_processed, processed_deviants) = - PoxSyncWatchdog::is_mostly_flat(&processed_delta, 0); - - debug!("PoX watchdog: flat-attachable?: {flat_attachable}, flat-processed?: {flat_processed}, estimated block-download time: {}s, estimated block-processing time: {}s", - self.estimated_block_download_time, self.estimated_block_process_time); - - if flat_attachable && flat_processed && self.last_block_processed_ts == 0 { - // we're flat-lining -- this may be the end of this cycle - self.last_block_processed_ts = get_epoch_time_secs(); - } - - if self.last_block_processed_ts > 0 - && get_epoch_time_secs() < expected_last_block_deadline - { - debug!("PoX watchdog: Still processing blocks; waiting until at least min({},{expected_last_block_deadline})s before burnchain synchronization (estimated block-processing time: {}s)", - get_epoch_time_secs() + 1, self.estimated_block_process_time); - sleep_ms(PER_SAMPLE_WAIT_MS); - continue; - } - - if ibbd { - // doing initial burnchain block download right now. 
- // only proceed to fetch the next reward cycle's burnchain blocks if we're neither downloading nor - // attaching blocks recently - debug!("PoX watchdog: In initial burnchain block download: flat-attachable = {flat_attachable}, flat-processed = {flat_processed}, min-attachable: {attachable_deviants}, min-processed: {processed_deviants}"); - - if !flat_attachable || !flat_processed { - sleep_ms(PER_SAMPLE_WAIT_MS); - continue; - } - } else { - let now = get_epoch_time_secs(); - if now < self.steady_state_resync_ts { - // steady state - if !steady_state { - debug!("PoX watchdog: In steady-state; waiting until at least {} before burnchain synchronization", self.steady_state_resync_ts); - steady_state = flat_attachable && flat_processed; - } - sleep_ms(PER_SAMPLE_WAIT_MS); - continue; - } else { - // steady state - if !steady_state { - debug!("PoX watchdog: In steady-state, but ready burnchain synchronization as of {}", self.steady_state_resync_ts); - steady_state = flat_attachable && flat_processed; - } - } - } - } - (err_attach, err_processed) => { - // can only happen on DB query failure - error!("PoX watchdog: Failed to count recently attached ('{err_attach:?}') and/or processed ('{err_processed:?}') staging blocks"); - panic!(); - } - }; - - if ibbd || !steady_state { - debug!("PoX watchdog: Wait for at least one downloader state-machine pass before resetting..."); - self.relayer_comms.wait_for_download_pass(SYNC_WAIT_SECS)?; - } else { - debug!("PoX watchdog: in steady-state, so not waiting for download pass"); - } - - self.reset(burnchain, burnchain_tip.block_snapshot.block_height); - break ibbd; - }; - - let ret = ibbd || !steady_state; - self.relayer_comms.set_ibd(ret); - Ok(ret) + Ok((ibbd, max_sync_height)) } } diff --git a/testnet/stacks-node/src/tests/bitcoin_regtest.rs b/testnet/stacks-node/src/tests/bitcoin_regtest.rs index 3e69ac18cc..ef193f56f7 100644 --- a/testnet/stacks-node/src/tests/bitcoin_regtest.rs +++ b/testnet/stacks-node/src/tests/bitcoin_regtest.rs @@ -7,12 +7,12 @@ use stacks::chainstate::burn::operations::BlockstackOperationType::{ LeaderBlockCommit, LeaderKeyRegister, }; use stacks::chainstate::stacks::StacksPrivateKey; +use stacks::config::InitialBalance; use stacks::core::StacksEpochId; use stacks_common::util::hash::hex_bytes; use super::PUBLISH_CONTRACT; use crate::burnchains::bitcoin_regtest_controller::BitcoinRPCRequest; -use crate::config::InitialBalance; use crate::helium::RunLoop; use crate::tests::to_addr; use crate::Config; diff --git a/testnet/stacks-node/src/tests/epoch_205.rs b/testnet/stacks-node/src/tests/epoch_205.rs index 1964612bd4..e555b6a8aa 100644 --- a/testnet/stacks-node/src/tests/epoch_205.rs +++ b/testnet/stacks-node/src/tests/epoch_205.rs @@ -13,6 +13,7 @@ use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::{ StacksBlockHeader, StacksPrivateKey, StacksTransaction, TransactionPayload, }; +use stacks::config::{EventKeyType, InitialBalance}; use stacks::core::{ self, EpochList, StacksEpoch, StacksEpochId, PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, PEER_VERSION_EPOCH_2_1, @@ -22,7 +23,6 @@ use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, VRF use stacks_common::util::hash::hex_bytes; use stacks_common::util::sleep_ms; -use crate::config::{EventKeyType, InitialBalance}; use crate::tests::bitcoin_regtest::BitcoinCoreController; use crate::tests::neon_integrations::*; use crate::tests::{ diff --git a/testnet/stacks-node/src/tests/epoch_21.rs 
b/testnet/stacks-node/src/tests/epoch_21.rs index 55d3ee0b7b..d50cac0117 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -24,6 +24,7 @@ use stacks::chainstate::stacks::miner::{ }; use stacks::chainstate::stacks::StacksBlockHeader; use stacks::clarity_cli::vm_execute as execute; +use stacks::config::{Config, InitialBalance}; use stacks::core::{self, EpochList, BURNCHAIN_TX_SEARCH_WINDOW}; use stacks::util_lib::boot::boot_code_id; use stacks_common::types::chainstate::{ @@ -35,7 +36,6 @@ use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::sleep_ms; use crate::burnchains::bitcoin_regtest_controller::UTXO; -use crate::config::{Config, InitialBalance}; use crate::neon::RunLoopCounter; use crate::operations::BurnchainOpSigner; use crate::stacks_common::address::AddressHashMode; diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index 3bf521d7cb..493fb36fcd 100644 --- a/testnet/stacks-node/src/tests/epoch_22.rs +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -8,6 +8,7 @@ use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready}; use stacks::clarity_cli::vm_execute as execute; +use stacks::config::{EventKeyType, EventObserverConfig, InitialBalance}; use stacks::core::{self, EpochList, STACKS_EPOCH_MAX}; use stacks::util_lib::boot::boot_code_id; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; @@ -17,7 +18,6 @@ use stacks_common::util::secp256k1::Secp256k1PublicKey; use stacks_common::util::sleep_ms; use super::neon_integrations::get_account; -use crate::config::{EventKeyType, EventObserverConfig, InitialBalance}; use crate::neon_node::StacksNode; use crate::stacks_common::types::Address; use crate::stacks_common::util::hash::bytes_to_hex; diff --git a/testnet/stacks-node/src/tests/epoch_23.rs b/testnet/stacks-node/src/tests/epoch_23.rs index 92b6a97b8f..085e5a49cb 100644 --- a/testnet/stacks-node/src/tests/epoch_23.rs +++ b/testnet/stacks-node/src/tests/epoch_23.rs @@ -18,10 +18,10 @@ use std::{env, thread}; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use stacks::burnchains::{Burnchain, PoxConstants}; +use stacks::config::InitialBalance; use stacks::core::{self, EpochList, STACKS_EPOCH_MAX}; use stacks_common::util::sleep_ms; -use crate::config::InitialBalance; use crate::tests::bitcoin_regtest::BitcoinCoreController; use crate::tests::neon_integrations::*; use crate::tests::*; diff --git a/testnet/stacks-node/src/tests/epoch_24.rs b/testnet/stacks-node/src/tests/epoch_24.rs index 5e4ff9852a..8780d08012 100644 --- a/testnet/stacks-node/src/tests/epoch_24.rs +++ b/testnet/stacks-node/src/tests/epoch_24.rs @@ -26,6 +26,7 @@ use stacks::chainstate::stacks::boot::RawRewardSetEntry; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::{Error, StacksTransaction, TransactionPayload}; use stacks::clarity_cli::vm_execute as execute; +use stacks::config::InitialBalance; use stacks::core::{self, EpochList, StacksEpochId}; use stacks_common::address::{AddressHashMode, C32_ADDRESS_VERSION_TESTNET_SINGLESIG}; use stacks_common::consts::STACKS_EPOCH_MAX; @@ -35,7 +36,6 @@ use stacks_common::util::hash::{bytes_to_hex, hex_bytes, Hash160}; use stacks_common::util::secp256k1::Secp256k1PublicKey; use stacks_common::util::sleep_ms; -use 
crate::config::InitialBalance; use crate::stacks_common::codec::StacksMessageCodec; use crate::tests::bitcoin_regtest::BitcoinCoreController; use crate::tests::neon_integrations::{ diff --git a/testnet/stacks-node/src/tests/epoch_25.rs b/testnet/stacks-node/src/tests/epoch_25.rs index bedf8721cb..e840b0fcd3 100644 --- a/testnet/stacks-node/src/tests/epoch_25.rs +++ b/testnet/stacks-node/src/tests/epoch_25.rs @@ -17,11 +17,11 @@ use std::{env, thread}; use clarity::vm::types::PrincipalData; use stacks::burnchains::{Burnchain, PoxConstants}; +use stacks::config::InitialBalance; use stacks::core::{self, EpochList, StacksEpochId}; use stacks_common::consts::STACKS_EPOCH_MAX; use stacks_common::types::chainstate::StacksPrivateKey; -use crate::config::InitialBalance; use crate::tests::bitcoin_regtest::BitcoinCoreController; use crate::tests::nakamoto_integrations::wait_for; use crate::tests::neon_integrations::{ diff --git a/testnet/stacks-node/src/tests/integrations.rs b/testnet/stacks-node/src/tests/integrations.rs index 79c3394352..7f893835d1 100644 --- a/testnet/stacks-node/src/tests/integrations.rs +++ b/testnet/stacks-node/src/tests/integrations.rs @@ -24,6 +24,7 @@ use stacks::chainstate::stacks::{ }; use stacks::clarity_vm::clarity::ClarityConnection; use stacks::codec::StacksMessageCodec; +use stacks::config::InitialBalance; use stacks::core::mempool::MAXIMUM_MEMPOOL_TX_CHAINING; use stacks::core::{ EpochList, StacksEpoch, StacksEpochId, CHAIN_ID_TESTNET, PEER_VERSION_EPOCH_2_0, @@ -40,7 +41,6 @@ use super::{ make_contract_call, make_contract_publish, make_stacks_transfer, to_addr, ADDR_4, SK_1, SK_2, SK_3, }; -use crate::config::InitialBalance; use crate::helium::RunLoop; use crate::tests::make_sponsored_stacks_transfer_on_testnet; diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 281feae99a..3edc88c96b 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -57,6 +57,7 @@ use stacks::chainstate::stacks::{ TransactionPostConditionMode, TransactionPublicKeyEncoding, TransactionSpendingCondition, TransactionVersion, MAX_BLOCK_LEN, }; +use stacks::config::{EventKeyType, InitialBalance}; use stacks::core::mempool::MAXIMUM_MEMPOOL_TX_CHAINING; use stacks::core::{ EpochList, StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_10, HELIUM_BLOCK_LIMIT_20, @@ -95,7 +96,6 @@ use stacks_signer::signerdb::{BlockInfo, BlockState, ExtraBlockInfo, SignerDb}; use stacks_signer::v0::SpawnedSigner; use super::bitcoin_regtest::BitcoinCoreController; -use crate::config::{EventKeyType, InitialBalance}; use crate::nakamoto_node::miner::{ TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL, TEST_SKIP_P2P_BROADCAST, }; @@ -734,10 +734,8 @@ pub fn next_block_and_wait_for_commits( .map(|x| x.load(Ordering::SeqCst)) .collect(); - let mut block_processed_time: Vec> = - (0..commits_before.len()).map(|_| None).collect(); - let mut commit_sent_time: Vec> = - (0..commits_before.len()).map(|_| None).collect(); + let mut block_processed_time: Vec> = vec![None; commits_before.len()]; + let mut commit_sent_time: Vec> = vec![None; commits_before.len()]; next_block_and(btc_controller, timeout_secs, || { for i in 0..commits_submitted.len() { let commits_sent = commits_submitted[i].load(Ordering::SeqCst); @@ -2875,6 +2873,7 @@ fn block_proposal_api_endpoint() { const HTTP_ACCEPTED: u16 = 202; const HTTP_TOO_MANY: u16 = 429; const HTTP_NOT_AUTHORIZED: u16 = 401; + 
const HTTP_UNPROCESSABLE: u16 = 422;
     let test_cases = [
         (
             "Valid Nakamoto block proposal",
@@ -2924,6 +2923,16 @@ fn block_proposal_api_endpoint() {
             Some(Err(ValidateRejectCode::ChainstateError)),
         ),
         ("Not authorized", sign(&proposal), HTTP_NOT_AUTHORIZED, None),
+        (
+            "Unprocessable entity",
+            {
+                let mut p = proposal.clone();
+                p.block.header.timestamp = 0;
+                sign(&p)
+            },
+            HTTP_UNPROCESSABLE,
+            None,
+        ),
     ];
 
     // Build HTTP client
@@ -3404,7 +3413,7 @@ fn vote_for_aggregate_key_burn_op() {
 /// This test boots a follower node using the block downloader
 #[test]
 #[ignore]
-fn follower_bootup() {
+fn follower_bootup_simple() {
     if env::var("BITCOIND_TEST") != Ok("1".into()) {
         return;
     }
@@ -6845,6 +6854,7 @@ fn continue_tenure_extend() {
     let prom_bind = "127.0.0.1:6000".to_string();
     naka_conf.node.prometheus_bind = Some(prom_bind.clone());
     naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1);
+    naka_conf.connection_options.block_proposal_max_age_secs = u64::MAX;
     let http_origin = naka_conf.node.data_url.clone();
     let sender_sk = Secp256k1PrivateKey::new();
     // setup sender + recipient for a test stx transfer
@@ -8840,7 +8850,8 @@ fn mock_mining() {
 
     info!("Booting follower-thread, waiting for the follower to sync to the chain tip");
 
-    wait_for(120, || {
+    // use a high timeout to avoid problems with the github workflow
+    wait_for(600, || {
         let Some(miner_node_info) = get_chain_info_opt(&naka_conf) else {
             return Ok(false);
         };
@@ -9403,7 +9414,7 @@ fn v3_blockbyheight_api_endpoint() {
     assert!(block_data.status().is_success());
 
     let block_bytes_vec = block_data.bytes().unwrap().to_vec();
-    assert!(block_bytes_vec.len() > 0);
+    assert!(!block_bytes_vec.is_empty());
 
     // does the block id of the returned blob matches ?
     let block_id = NakamotoBlockHeader::consensus_deserialize(&mut block_bytes_vec.as_slice())
         .unwrap()
@@ -9422,6 +9433,178 @@ fn v3_blockbyheight_api_endpoint() {
 
     run_loop_thread.join().unwrap();
 }
 
+/// Verify that lockup events are attached to a phantom tx receipt
+/// if the block does not have a coinbase tx
+#[test]
+#[ignore]
+fn nakamoto_lockup_events() {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
+
+    let (mut conf, _miner_account) = naka_neon_integration_conf(None);
+    let password = "12345".to_string();
+    conf.connection_options.auth_token = Some(password.clone());
+    conf.miner.wait_on_interim_blocks = Duration::from_secs(1);
+    let stacker_sk = setup_stacker(&mut conf);
+    let signer_sk = Secp256k1PrivateKey::new();
+    let signer_addr = tests::to_addr(&signer_sk);
+    let _signer_pubkey = Secp256k1PublicKey::from_private(&signer_sk);
+    let sender_sk = Secp256k1PrivateKey::new();
+    // setup sender + recipient for some test stx transfers
+    // these are necessary for the interim blocks to get mined at all
+    let sender_addr = tests::to_addr(&sender_sk);
+    let send_amt = 100;
+    let send_fee = 180;
+    conf.add_initial_balance(
+        PrincipalData::from(sender_addr).to_string(),
+        (send_amt + send_fee) * 100,
+    );
+    conf.add_initial_balance(PrincipalData::from(signer_addr).to_string(), 100000);
+    let recipient = PrincipalData::from(StacksAddress::burn_address(false));
+
+    // only subscribe to the block proposal events
+    test_observer::spawn();
+    test_observer::register_any(&mut conf);
+
+    let mut btcd_controller = BitcoinCoreController::new(conf.clone());
+    btcd_controller
+        .start_bitcoind()
+        .expect("Failed starting bitcoind");
+    let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None);
+    btc_regtest_controller.bootstrap_chain(201);
+
+    let mut
run_loop = boot_nakamoto::BootRunLoop::new(conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. + } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); + let mut signers = TestSigners::new(vec![signer_sk]); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &conf, + &blocks_processed, + &[stacker_sk], + &[signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("------------------------- Reached Epoch 3.0 -------------------------"); + blind_signer(&conf, &signers, proposals_submitted); + let burnchain = conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (chainstate, _) = StacksChainState::open( + conf.is_mainnet(), + conf.burnchain.chain_id, + &conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + // TODO (hack) instantiate the sortdb in the burnchain + _ = btc_regtest_controller.sortdb_mut(); + + info!("------------------------- Setup finished, run test -------------------------"); + + next_block_and_mine_commit( + &mut btc_regtest_controller, + 60, + &coord_channel, + &commits_submitted, + ) + .unwrap(); + + let http_origin = format!("http://{}", &conf.node.rpc_bind); + + let get_stacks_height = || { + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + tip.stacks_block_height + }; + let initial_block_height = get_stacks_height(); + + // This matches the data in `stx-genesis/chainstate-test.txt` + // Recipient: ST2CTPPV8BHBVSQR727A3MK00ZD85RNY9015WGW2D + let unlock_recipient = "ST2CTPPV8BHBVSQR727A3MK00ZD85RNY9015WGW2D"; + let unlock_height = 35_u64; + let interims_to_mine = unlock_height - initial_block_height; + + info!( + "----- Mining to unlock height -----"; + "unlock_height" => unlock_height, + "initial_height" => initial_block_height, + "interims_to_mine" => interims_to_mine, + ); + + // submit a tx so that the miner will mine an extra stacks block + let mut sender_nonce = 0; + + for _ in 0..interims_to_mine { + let height_before = get_stacks_height(); + info!("----- Mining interim block -----"; + "height" => %height_before, + "nonce" => %sender_nonce, + ); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; + + wait_for(30, || Ok(get_stacks_height() > height_before)).unwrap(); + } + + let blocks = test_observer::get_blocks(); + let block = blocks.last().unwrap(); + assert_eq!( + block.get("block_height").unwrap().as_u64().unwrap(), + unlock_height + ); + + let events = block.get("events").unwrap().as_array().unwrap(); + let mut found_event = false; + for event in events { + let mint_event = event.get("stx_mint_event"); + if mint_event.is_some() { + found_event = true; + let mint_event = mint_event.unwrap(); + let recipient = mint_event.get("recipient").unwrap().as_str().unwrap(); + assert_eq!(recipient, unlock_recipient); + let amount = mint_event.get("amount").unwrap().as_str().unwrap(); + assert_eq!(amount, "12345678"); + let txid = event.get("txid").unwrap().as_str().unwrap(); + assert_eq!( + txid, + "0x63dd5773338782755e4947a05a336539137dfe13b19a0eac5154306850aca8ef" + ); + } + } + assert!(found_event); + + 
info!("------------------------- Test finished, clean up -------------------------"); + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} + #[test] #[ignore] /// This test spins up a nakamoto-neon node. @@ -9696,7 +9879,7 @@ fn test_shadow_recovery() { // fix node let shadow_blocks = shadow_chainstate_repair(&mut chainstate, &mut sortdb).unwrap(); - assert!(shadow_blocks.len() > 0); + assert!(!shadow_blocks.is_empty()); wait_for(30, || { let Some(info) = get_chain_info_opt(&naka_conf) else { diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index e3d592d23c..a3ce78eb24 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -5,7 +5,6 @@ use std::sync::{mpsc, Arc}; use std::time::{Duration, Instant}; use std::{cmp, env, fs, io, thread}; -use clarity::consts::BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP; use clarity::vm::ast::stack_depth_checker::AST_CALL_STACK_DEPTH_BUFFER; use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; @@ -39,8 +38,9 @@ use stacks::chainstate::stacks::{ StacksPublicKey, StacksTransaction, TransactionContractCall, TransactionPayload, }; use stacks::clarity_cli::vm_execute as execute; -use stacks::cli::{self, StacksChainConfig}; +use stacks::cli; use stacks::codec::StacksMessageCodec; +use stacks::config::{EventKeyType, EventObserverConfig, FeeEstimatorName, InitialBalance}; use stacks::core::mempool::MemPoolWalkTxTypes; use stacks::core::{ self, EpochList, StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_20, BLOCK_LIMIT_MAINNET_205, @@ -83,7 +83,6 @@ use super::{ SK_2, SK_3, }; use crate::burnchains::bitcoin_regtest_controller::{self, addr2str, BitcoinRPCRequest, UTXO}; -use crate::config::{EventKeyType, EventObserverConfig, FeeEstimatorName, InitialBalance}; use crate::neon_node::RelayerThread; use crate::operations::BurnchainOpSigner; use crate::stacks_common::types::PrivateKey; @@ -199,13 +198,13 @@ pub mod test_observer { use stacks::chainstate::stacks::events::StackerDBChunksEvent; use stacks::chainstate::stacks::StacksTransaction; use stacks::codec::StacksMessageCodec; + use stacks::config::{EventKeyType, EventObserverConfig}; use stacks::net::api::postblock_proposal::BlockValidateResponse; use stacks::util::hash::hex_bytes; use stacks_common::types::chainstate::StacksBlockId; use warp::Filter; use {tokio, warp}; - use crate::config::{EventKeyType, EventObserverConfig}; use crate::event_dispatcher::{MinedBlockEvent, MinedMicroblockEvent, MinedNakamotoBlockEvent}; use crate::Config; @@ -579,7 +578,7 @@ pub mod test_observer { PROPOSAL_RESPONSES.lock().unwrap().clear(); } - /// Parse the StacksTransactions from a block (does not include burn ops) + /// Parse the StacksTransactions from a block (does not include burn ops or phantom txs) /// panics on any failures to parse pub fn parse_transactions(block: &serde_json::Value) -> Vec { block @@ -589,15 +588,20 @@ pub mod test_observer { .unwrap() .iter() .filter_map(|tx_json| { + // Filter out burn ops if let Some(burnchain_op_val) = tx_json.get("burnchain_op") { if !burnchain_op_val.is_null() { return None; } } + // Filter out phantom txs let tx_hex = tx_json.get("raw_tx").unwrap().as_str().unwrap(); let tx_bytes = hex_bytes(&tx_hex[2..]).unwrap(); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); + if 
tx.is_phantom() { + return None; + } Some(tx) }) .collect() @@ -3379,7 +3383,7 @@ fn make_signed_microblock( ) -> StacksMicroblock { let mut rng = rand::thread_rng(); - let txid_vecs = txs.iter().map(|tx| tx.txid().as_bytes().to_vec()).collect(); + let txid_vecs: Vec<_> = txs.iter().map(|tx| tx.txid().as_bytes().to_vec()).collect(); let merkle_tree = MerkleTree::::new(&txid_vecs); let tx_merkle_root = merkle_tree.root(); @@ -12691,22 +12695,9 @@ fn mock_miner_replay() { let blocks_dir = blocks_dir.into_os_string().into_string().unwrap(); let db_path = format!("{}/neon", conf.node.working_dir); let args: Vec = vec!["replay-mock-mining".into(), db_path, blocks_dir]; - let SortitionDB { - first_block_height, - first_burn_header_hash, - .. - } = *btc_regtest_controller.sortdb_mut(); - let replay_config = StacksChainConfig { - chain_id: conf.burnchain.chain_id, - first_block_height, - first_burn_header_hash, - first_burn_header_timestamp: BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP.into(), - pox_constants: burnchain_config.pox_constants, - epochs: conf.burnchain.epochs.expect("Missing `epochs` in config"), - }; info!("Replaying mock mined blocks..."); - cli::command_replay_mock_mining(&args, Some(&replay_config)); + cli::command_replay_mock_mining(&args, Some(&conf)); // ---------- Test finished, clean up ---------- diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index ff128d0a03..ebb0990411 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -42,27 +42,30 @@ use libsigner::v0::messages::{ use libsigner::{SignerEntries, SignerEventTrait}; use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; +use stacks::chainstate::nakamoto::NakamotoBlock; use stacks::chainstate::stacks::boot::{NakamotoSignerEntry, SIGNERS_NAME}; use stacks::chainstate::stacks::StacksPrivateKey; +use stacks::config::{Config as NeonConfig, EventKeyType, EventObserverConfig, InitialBalance}; use stacks::net::api::postblock_proposal::{ BlockValidateOk, BlockValidateReject, BlockValidateResponse, }; use stacks::types::chainstate::{StacksAddress, StacksPublicKey}; -use stacks::types::PublicKey; +use stacks::types::{PrivateKey, PublicKey}; +use stacks::util::get_epoch_time_secs; use stacks::util::hash::MerkleHashFunc; use stacks::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; use stacks_common::codec::StacksMessageCodec; use stacks_common::consts::SIGNER_SLOTS_PER_USER; use stacks_common::types::StacksEpochId; use stacks_common::util::hash::Sha512Trunc256Sum; +use stacks_common::util::tests::TestFlag; use stacks_signer::client::{ClientError, SignerSlotID, StackerDB, StacksClient}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; use stacks_signer::runloop::{SignerResult, State, StateInfo}; use stacks_signer::{Signer, SpawnedSigner}; use super::nakamoto_integrations::{check_nakamoto_empty_block_heuristics, wait_for}; -use crate::config::{Config as NeonConfig, EventKeyType, EventObserverConfig, InitialBalance}; -use crate::neon::{Counters, RunLoopCounter, TestFlag}; +use crate::neon::{Counters, RunLoopCounter}; use crate::run_loop::boot_nakamoto; use crate::tests::bitcoin_regtest::BitcoinCoreController; use crate::tests::nakamoto_integrations::{ @@ -90,7 +93,7 @@ pub struct RunningNodes { pub nakamoto_blocks_mined: RunLoopCounter, pub nakamoto_blocks_rejected: RunLoopCounter, pub 
nakamoto_blocks_signer_pushed: RunLoopCounter, - pub nakamoto_test_skip_commit_op: TestFlag, + pub nakamoto_test_skip_commit_op: TestFlag, pub coord_channel: Arc>, pub conf: NeonConfig, } @@ -261,6 +264,33 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest info_before.stacks_tip_height - && blocks_mined > mined_before) + && (!use_nakamoto_blocks_mined || blocks_mined > mined_before)) }) .unwrap(); let mined_block_elapsed_time = mined_block_time.elapsed(); @@ -515,6 +546,27 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest Result, ClientError> { + let valid_signer_set = + u32::try_from(reward_cycle % 2).expect("FATAL: reward_cycle % 2 exceeds u32::MAX"); + let signer_stackerdb_contract_id = boot_code_id(SIGNERS_NAME, false); + + let slots = self + .stacks_client + .get_stackerdb_signer_slots(&signer_stackerdb_contract_id, valid_signer_set)?; + + Ok(slots + .iter() + .position(|(address, _)| address == signer_address) + .map(|pos| { + SignerSlotID(u32::try_from(pos).expect("FATAL: number of signers exceeds u32::MAX")) + })) + } + fn get_signer_indices(&self, reward_cycle: u64) -> Vec { self.get_signer_slots(reward_cycle) .expect("FATAL: failed to get signer slots from stackerdb") @@ -589,24 +641,21 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest { - if accepted.signer_signature_hash == *signer_signature_hash - && expected_signers.iter().any(|pk| { - pk.verify( - accepted.signer_signature_hash.bits(), - &accepted.signature, - ) - .expect("Failed to verify signature") - }) - { - Some(accepted.signature) - } else { - None - } + if let SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)) = message + { + if accepted.signer_signature_hash == *signer_signature_hash + && expected_signers.iter().any(|pk| { + pk.verify( + accepted.signer_signature_hash.bits(), + &accepted.signature, + ) + .expect("Failed to verify signature") + }) + { + return Some(accepted.signature); } - _ => None, } + None }) .collect::>(); Ok(signatures.len() > expected_signers.len() * 7 / 10) @@ -670,11 +719,10 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest BlockAccepted { - let block_response = self.get_latest_block_response(slot_id); - match block_response { - BlockResponse::Accepted(accepted) => accepted, - _ => panic!("Latest block response from slot #{slot_id} isn't a block acceptance"), - } + self.get_latest_block_response(slot_id) + .as_block_accepted() + .expect("Latest block response from slot #{slot_id} isn't a block acceptance") + .clone() } /// Get /v2/info from the node @@ -683,6 +731,61 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest, + reward_cycle: u64, + hash: Sha512Trunc256Sum, + ) { + let slot_ids: Vec<_> = self + .get_signer_indices(reward_cycle) + .iter() + .map(|id| id.0) + .collect(); + + let latest_msgs = StackerDB::get_messages::( + stackerdb + .get_session_mut(&MessageSlotID::BlockResponse) + .expect("Failed to get BlockResponse stackerdb session"), + &slot_ids, + ) + .expect("Failed to get messages from stackerdb"); + for msg in latest_msgs.iter() { + if let SignerMessage::BlockResponse(response) = msg { + assert_ne!(response.get_signer_signature_hash(), hash); + } + } + } + + pub fn inject_accept_signature( + &self, + block: &NakamotoBlock, + private_key: &StacksPrivateKey, + reward_cycle: u64, + ) { + let mut stackerdb = StackerDB::new( + &self.running_nodes.conf.node.rpc_bind, 
+ private_key.clone(), + false, + reward_cycle, + self.get_signer_slot_id(reward_cycle, &to_addr(private_key)) + .expect("Failed to get signer slot id") + .expect("Signer does not have a slot id"), + ); + + let signature = private_key + .sign(block.header.signer_signature_hash().bits()) + .expect("Failed to sign block"); + let accepted = BlockResponse::accepted( + block.header.signer_signature_hash(), + signature, + get_epoch_time_secs().wrapping_add(u64::MAX), + ); + stackerdb + .send_message_with_retry::(accepted.into()) + .expect("Failed to send accept signature"); + } } fn setup_stx_btc_node( diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index df20fc0087..86002e6c3a 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -23,7 +23,8 @@ use std::{env, thread}; use clarity::vm::types::PrincipalData; use libsigner::v0::messages::{ - BlockRejection, BlockResponse, MessageSlotID, MinerSlotID, RejectCode, SignerMessage, + BlockAccepted, BlockRejection, BlockResponse, MessageSlotID, MinerSlotID, RejectCode, + SignerMessage, }; use libsigner::{BlockProposal, SignerSession, StackerDBSession, VERSION_STRING}; use stacks::address::AddressHashMode; @@ -36,11 +37,13 @@ use stacks::chainstate::stacks::boot::MINERS_NAME; use stacks::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState, StacksHeaderInfo}; use stacks::chainstate::stacks::{StacksTransaction, TenureChangeCause, TransactionPayload}; use stacks::codec::StacksMessageCodec; +use stacks::config::{EventKeyType, EventObserverConfig}; use stacks::core::{StacksEpochId, CHAIN_ID_TESTNET}; use stacks::libstackerdb::StackerDBChunkData; use stacks::net::api::getsigner::GetSignerResponse; use stacks::net::api::postblock_proposal::{ - ValidateRejectCode, TEST_VALIDATE_DELAY_DURATION_SECS, TEST_VALIDATE_STALL, + BlockValidateResponse, ValidateRejectCode, TEST_VALIDATE_DELAY_DURATION_SECS, + TEST_VALIDATE_STALL, }; use stacks::net::relay::fault_injection::set_ignore_block; use stacks::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey}; @@ -58,17 +61,16 @@ use stacks_common::util::sleep_ms; use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::client::{SignerSlotID, StackerDB}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; -use stacks_signer::v0::signer::{ +use stacks_signer::v0::tests::{ TEST_IGNORE_ALL_BLOCK_PROPOSALS, TEST_PAUSE_BLOCK_BROADCAST, TEST_REJECT_ALL_BLOCK_PROPOSAL, - TEST_SKIP_BLOCK_BROADCAST, + TEST_SKIP_BLOCK_BROADCAST, TEST_SKIP_SIGNER_CLEANUP, TEST_STALL_BLOCK_VALIDATION_SUBMISSION, }; use stacks_signer::v0::SpawnedSigner; use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; use super::SignerTest; -use crate::config::{EventKeyType, EventObserverConfig}; -use crate::event_dispatcher::MinedNakamotoBlockEvent; +use crate::event_dispatcher::{MinedNakamotoBlockEvent, TEST_SKIP_BLOCK_ANNOUNCEMENT}; use crate::nakamoto_node::miner::{ TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL, }; @@ -279,7 +281,7 @@ impl SignerTest { // could be other miners mining blocks. 
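One arithmetic detail in the inject_accept_signature helper added above: the timestamp argument it builds is get_epoch_time_secs().wrapping_add(u64::MAX). Because u64 addition wraps modulo 2^64, adding u64::MAX is the same as subtracting 1, so the value passed works out to one second before the current epoch time. A two-line check of that identity (standalone, not code from this PR):

fn main() {
    let now: u64 = 1_700_000_000;
    // x + (2^64 - 1) mod 2^64 == x - 1
    assert_eq!(now.wrapping_add(u64::MAX), now - 1);
    // and at zero it wraps all the way around
    assert_eq!(0u64.wrapping_add(u64::MAX), u64::MAX);
}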
let height_before = get_chain_info(&self.running_nodes.conf).stacks_tip_height; info!("Waiting for first Nakamoto block: {}", height_before + 1); - self.mine_nakamoto_block(Duration::from_secs(30)); + self.mine_nakamoto_block(Duration::from_secs(30), false); wait_for(30, || { Ok(get_chain_info(&self.running_nodes.conf).stacks_tip_height > height_before) }) @@ -288,12 +290,17 @@ impl SignerTest { } // Only call after already past the epoch 3.0 boundary - fn mine_and_verify_confirmed_naka_block(&mut self, timeout: Duration, num_signers: usize) { + fn mine_and_verify_confirmed_naka_block( + &mut self, + timeout: Duration, + num_signers: usize, + use_nakamoto_blocks_mined: bool, + ) { info!("------------------------- Try mining one block -------------------------"); let reward_cycle = self.get_current_reward_cycle(); - self.mine_nakamoto_block(timeout); + self.mine_nakamoto_block(timeout, use_nakamoto_blocks_mined); // Verify that the signers accepted the proposed block, sending back a validate ok response let proposed_signer_signature_hash = self @@ -376,11 +383,11 @@ impl SignerTest { let total_nmb_blocks_to_mine = burnchain_height.saturating_sub(current_block_height); debug!("Mining {total_nmb_blocks_to_mine} Nakamoto block(s) to reach burnchain height {burnchain_height}"); for _ in 0..total_nmb_blocks_to_mine { - self.mine_and_verify_confirmed_naka_block(timeout, num_signers); + self.mine_and_verify_confirmed_naka_block(timeout, num_signers, false); } } - /// Propose an invalid block to the signers + /// Propose a block to the signers fn propose_block(&mut self, block: NakamotoBlock, timeout: Duration) { let miners_contract_id = boot_code_id(MINERS_NAME, false); let mut session = @@ -390,6 +397,7 @@ impl SignerTest { .btc_regtest_controller .get_headers_height(); let reward_cycle = self.get_current_reward_cycle(); + let signer_signature_hash = block.header.signer_signature_hash(); let message = SignerMessage::BlockProposal(BlockProposal { block, burn_height, @@ -406,7 +414,7 @@ impl SignerTest { let mut version = 0; let slot_id = MinerSlotID::BlockProposal.to_u8() as u32; let start = Instant::now(); - debug!("Proposing invalid block to signers"); + debug!("Proposing block to signers: {signer_signature_hash}"); while !accepted { let mut chunk = StackerDBChunkData::new(slot_id * 2, version, message.serialize_to_vec()); @@ -488,6 +496,7 @@ fn block_proposal_rejection() { header: NakamotoBlockHeader::empty(), txs: vec![], }; + block.header.timestamp = get_epoch_time_secs(); // First propose a block to the signers that does not have the correct consensus hash or BitVec. This should be rejected BEFORE // the block is submitted to the node for validation. 
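Several hunks in this file start stamping proposals with block.header.timestamp = get_epoch_time_secs() before sending them, because proposals are now screened by age (the block_proposal_max_age_secs option exercised later in this diff) before any other validation. A minimal sketch of the comparison such a gate implies, using saturating u64 arithmetic; the function and parameter names are illustrative, not the node's actual API:

/// True when a proposal's header timestamp falls outside the allowed window.
/// saturating_sub tolerates headers stamped slightly in the future.
fn proposal_is_stale(now_secs: u64, header_timestamp: u64, max_age_secs: u64) -> bool {
    now_secs.saturating_sub(header_timestamp) > max_age_secs
}

fn main() {
    let now: u64 = 1_700_000_000;
    assert!(proposal_is_stale(now, 0, 30)); // an empty header's timestamp of 0 is long stale
    assert!(!proposal_is_stale(now, now, 30)); // a freshly stamped proposal passes
    assert!(!proposal_is_stale(now, now + 5, 30)); // mild clock skew saturates to zero age
}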
@@ -588,7 +597,7 @@ fn miner_gather_signatures() { signer_test.boot_to_epoch_3(); info!("------------------------- Test Mine and Verify Confirmed Nakamoto Block -------------------------"); - signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers); + signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers, true); // Test prometheus metrics response #[cfg(feature = "monitoring_prom")] @@ -820,14 +829,8 @@ fn reloads_signer_set_in() { let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; - let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( - num_signers, - vec![(sender_addr, send_amt + send_fee)], - |_config| {}, - |_| {}, - None, - None, - ); + let mut signer_test: SignerTest = + SignerTest::new(num_signers, vec![(sender_addr, send_amt + send_fee)]); setup_epoch_3_reward_set( &signer_test.running_nodes.conf, @@ -951,7 +954,7 @@ fn forked_tenure_testing( config.first_proposal_burn_block_timing = proposal_limit; // don't allow signers to post signed blocks (limits the amount of fault injection we // need) - TEST_SKIP_BLOCK_BROADCAST.lock().unwrap().replace(true); + TEST_SKIP_BLOCK_BROADCAST.set(true); }, |config| { config.miner.tenure_cost_limit_per_block_percentage = None; @@ -1331,7 +1334,7 @@ fn bitcoind_forking_test() { for i in 0..pre_fork_tenures { info!("Mining pre-fork tenure {} of {pre_fork_tenures}", i + 1); - signer_test.mine_nakamoto_block(Duration::from_secs(30)); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); } let pre_fork_1_nonce = get_account(&http_origin, &miner_address).nonce; @@ -1403,7 +1406,7 @@ fn bitcoind_forking_test() { for i in 0..5 { info!("Mining post-fork tenure {} of 5", i + 1); - signer_test.mine_nakamoto_block(Duration::from_secs(30)); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); } let pre_fork_2_nonce = get_account(&http_origin, &miner_address).nonce; @@ -1479,7 +1482,7 @@ fn bitcoind_forking_test() { for i in 0..5 { info!("Mining post-fork tenure {} of 5", i + 1); - signer_test.mine_nakamoto_block(Duration::from_secs(30)); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); } let test_end_nonce = get_account(&http_origin, &miner_address).nonce; @@ -2415,10 +2418,7 @@ fn retry_on_rejection() { .map(StacksPublicKey::from_private) .take(num_signers) .collect(); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(rejecting_signers.clone()); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(rejecting_signers.clone()); let proposals_before = signer_test .running_nodes @@ -2465,10 +2465,7 @@ fn retry_on_rejection() { // resume signing info!("Disable unconditional rejection and wait for the block to be processed"); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(vec![]); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![]); loop { let blocks_mined = signer_test .running_nodes @@ -2512,7 +2509,7 @@ fn signers_broadcast_signed_blocks() { .running_nodes .nakamoto_blocks_mined .load(Ordering::SeqCst); - signer_test.mine_nakamoto_block(Duration::from_secs(30)); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); wait_for(30, || { let blocks_mined = signer_test @@ -2579,8 +2576,8 @@ fn signers_broadcast_signed_blocks() { #[test] #[ignore] -/// This test verifies that a miner will produce a TenureExtend transaction after the idle timeout is reached. -fn tenure_extend_after_idle() { +/// This test verifies that a miner will produce a TenureExtend transaction after the signers' idle timeout is reached. 
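Before the next test, a note on a refactor visible throughout this file: the fault-injection switches (TEST_REJECT_ALL_BLOCK_PROPOSAL, TEST_IGNORE_ALL_BLOCK_PROPOSALS, TEST_PAUSE_BLOCK_BROADCAST, and friends) migrate from Mutex<Option<T>> cells driven by .lock().unwrap().replace(..) to TestFlag<T> and a plain .set(..). A minimal sketch of what such a wrapper needs, assuming (not confirming) the shape of the real type in stacks_common::util::tests:

use std::sync::{Arc, Mutex};

#[derive(Clone, Default)]
struct TestFlag<T>(Arc<Mutex<Option<T>>>);

impl<T: Clone + Default> TestFlag<T> {
    /// Overwrite the flag's current value.
    fn set(&self, value: T) {
        *self.0.lock().unwrap() = Some(value);
    }
    /// Read the current value, falling back to T::default() when unset.
    fn get(&self) -> T {
        self.0.lock().unwrap().clone().unwrap_or_default()
    }
}

fn main() {
    // Stand-in for e.g. TEST_REJECT_ALL_BLOCK_PROPOSAL, which really holds
    // a Vec of signer public keys rather than strings.
    let reject_all: TestFlag<Vec<String>> = TestFlag::default();
    reject_all.set(vec!["signer-0".into()]); // enable the injection
    assert_eq!(reject_all.get(), vec!["signer-0".to_string()]);
    reject_all.set(Vec::new()); // disable it again
    assert!(reject_all.get().is_empty());
}

The win over the Mutex<Option<..>> pattern shows at the call sites: one method call instead of a lock/unwrap/replace chain, with the locking kept behind the abstraction.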
+fn tenure_extend_after_idle_signers() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } @@ -2613,7 +2610,7 @@ fn tenure_extend_after_idle() { signer_test.boot_to_epoch_3(); info!("---- Nakamoto booted, starting test ----"); - signer_test.mine_nakamoto_block(Duration::from_secs(30)); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); info!("---- Waiting for a tenure extend ----"); @@ -2628,6 +2625,173 @@ fn tenure_extend_after_idle() { signer_test.shutdown(); } +#[test] +#[ignore] +/// This test verifies that a miner will produce a TenureExtend transaction after the miner's idle timeout +/// even if they do not see the signers' tenure extend timestamp responses. +fn tenure_extend_after_idle_miner() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let _recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let idle_timeout = Duration::from_secs(30); + let miner_idle_timeout = idle_timeout + Duration::from_secs(10); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr, send_amt + send_fee)], + |config| { + config.tenure_idle_timeout = idle_timeout; + }, + |config| { + config.miner.tenure_timeout = miner_idle_timeout; + }, + None, + None, + ); + let _http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + + signer_test.boot_to_epoch_3(); + + info!("---- Nakamoto booted, starting test ----"); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); + + info!("---- Start a new tenure but ignore block signatures so no timestamps are recorded ----"); + let tip_height_before = get_chain_info(&signer_test.running_nodes.conf).stacks_tip_height; + TEST_IGNORE_SIGNERS.set(true); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 30, + || { + let tip_height = get_chain_info(&signer_test.running_nodes.conf).stacks_tip_height; + Ok(tip_height > tip_height_before) + }, + ) + .expect("Failed to mine the tenure change block"); + + // Now, wait for a block with a tenure change due to the new block + wait_for(30, || { + Ok(last_block_contains_tenure_change_tx( + TenureChangeCause::BlockFound, + )) + }) + .expect("Timed out waiting for a block with a tenure change"); + + info!("---- Waiting for a tenure extend ----"); + + TEST_IGNORE_SIGNERS.set(false); + // Now, wait for a block with a tenure extend + wait_for(miner_idle_timeout.as_secs() + 20, || { + Ok(last_block_contains_tenure_change_tx( + TenureChangeCause::Extended, + )) + }) + .expect("Timed out waiting for a block with a tenure extend"); + signer_test.shutdown(); +} + +#[test] +#[ignore] +/// This test verifies that a miner that attempts to produce a tenure extend too early will be rejected by the signers, +/// but will eventually succeed after the signers' idle timeout has passed. 
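The idle-timeout tests above and below lean on the wait_for polling helper, with timeouts sized to the window under test (e.g. miner_idle_timeout.as_secs() + 20). For readers outside the codebase, a simplified stand-in for that helper, which these tests actually import from tests::nakamoto_integrations; the real implementation may differ in poll interval and error type:

use std::thread::sleep;
use std::time::{Duration, Instant};

/// Poll `check` every 100ms until it returns Ok(true), propagating its
/// errors and failing once `timeout_secs` elapses.
fn wait_for<F>(timeout_secs: u64, mut check: F) -> Result<(), String>
where
    F: FnMut() -> Result<bool, String>,
{
    let deadline = Instant::now() + Duration::from_secs(timeout_secs);
    while Instant::now() < deadline {
        if check()? {
            return Ok(());
        }
        sleep(Duration::from_millis(100));
    }
    Err("timed out waiting for condition".into())
}

fn main() {
    let mut polls = 0;
    wait_for(5, || {
        polls += 1;
        Ok(polls >= 3) // succeeds on the third poll
    })
    .unwrap();
}

A related detail in the acceptance checks nearby: they compare signature counts against num_signers * 7 / 10 rather than num_signers. Multiplying before dividing matters in integer arithmetic; with 5 signers, 5 * 7 / 10 = 3, while 7 / 10 on its own truncates to 0.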
+fn tenure_extend_succeeds_after_rejected_attempt() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let _recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let idle_timeout = Duration::from_secs(30); + let miner_idle_timeout = Duration::from_secs(20); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr, send_amt + send_fee)], + |config| { + config.tenure_idle_timeout = idle_timeout; + }, + |config| { + config.miner.tenure_timeout = miner_idle_timeout; + }, + None, + None, + ); + let _http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + + signer_test.boot_to_epoch_3(); + + info!("---- Nakamoto booted, starting test ----"); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); + + info!("---- Waiting for a rejected tenure extend ----"); + // Now, wait for a block with a tenure extend proposal from the miner, but ensure it is rejected. + wait_for(30, || { + let block = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .find_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + if let SignerMessage::BlockProposal(proposal) = message { + if proposal.block.get_tenure_tx_payload().unwrap().cause + == TenureChangeCause::Extended + { + return Some(proposal.block); + } + } + None + }); + let Some(block) = &block else { + return Ok(false); + }; + let signatures = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + if let SignerMessage::BlockResponse(BlockResponse::Rejected(rejected)) = message { + if block.header.signer_signature_hash() == rejected.signer_signature_hash { + return Some(rejected.signature); + } + } + None + }) + .collect::>(); + Ok(signatures.len() >= num_signers * 7 / 10) + }) + .expect("Test timed out while waiting for a rejected tenure extend"); + + info!("---- Waiting for an accepted tenure extend ----"); + wait_for(idle_timeout.as_secs() + 10, || { + Ok(last_block_contains_tenure_change_tx( + TenureChangeCause::Extended, + )) + }) + .expect("Test timed out while waiting for an accepted tenure extend"); + signer_test.shutdown(); +} + #[test] #[ignore] /// Verify that Nakamoto blocks that don't modify the tenure's execution cost @@ -2675,7 +2839,7 @@ fn stx_transfers_dont_effect_idle_timeout() { "info_height" => info_before.stacks_tip_height, "blocks_before" => blocks_before, ); - signer_test.mine_nakamoto_block(Duration::from_secs(30)); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); info!("---- Getting current idle timeout ----"); @@ -2792,7 +2956,10 @@ fn idle_tenure_extend_active_mining() { |config| { config.tenure_idle_timeout = idle_timeout; }, - |_| {}, + |config| { + // accept all proposals in the node + config.connection_options.block_proposal_max_age_secs = u64::MAX; + }, None, None, ); @@ -2813,7 +2980,7 @@ fn 
idle_tenure_extend_active_mining() {
     // Add a delay to the block validation process
     TEST_VALIDATE_DELAY_DURATION_SECS.lock().unwrap().replace(3);
 
-    signer_test.mine_nakamoto_block(Duration::from_secs(30));
+    signer_test.mine_nakamoto_block(Duration::from_secs(60), true);
 
     info!("---- Getting current idle timeout ----");
@@ -2881,7 +3048,7 @@ fn idle_tenure_extend_active_mining() {
 
     info!("----- Submitted deploy txs, mining BTC block -----");
 
-    signer_test.mine_nakamoto_block(Duration::from_secs(30));
+    signer_test.mine_nakamoto_block(Duration::from_secs(30), true);
 
     let mut last_response = signer_test.get_latest_block_response(slot_id);
 
     // Make multiple tenures that get extended through idle timeouts
@@ -3994,7 +4161,7 @@ fn signer_set_rollover() {
         send_amt,
     );
     submit_tx(&http_origin, &transfer_tx);
-    signer_test.mine_nakamoto_block(short_timeout);
+    signer_test.mine_nakamoto_block(short_timeout, true);
     let mined_block = test_observer::get_mined_nakamoto_blocks().pop().unwrap();
     let block_sighash = mined_block.signer_signature_hash;
     let signer_signatures = mined_block.signer_signature;
@@ -4068,7 +4235,7 @@ fn signer_set_rollover() {
     })
     .expect("Timed out waiting for stacking txs to be mined");
 
-    signer_test.mine_nakamoto_block(short_timeout);
+    signer_test.mine_nakamoto_block(short_timeout, true);
 
     let next_reward_cycle = reward_cycle.saturating_add(1);
@@ -4121,7 +4288,7 @@ fn signer_set_rollover() {
         send_amt,
     );
     submit_tx(&http_origin, &transfer_tx);
-    signer_test.mine_nakamoto_block(short_timeout);
+    signer_test.mine_nakamoto_block(short_timeout, true);
     let mined_block = test_observer::get_mined_nakamoto_blocks().pop().unwrap();
 
     info!("---- Verifying that the new signers signed the block -----");
@@ -4308,7 +4475,7 @@ fn duplicate_signers() {
 
     info!("------------------------- Try mining one block -------------------------");
 
-    signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers);
+    signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers, true);
 
     info!("------------------------- Read all `BlockResponse::Accepted` messages -------------------------");
@@ -4807,7 +4974,8 @@ fn partial_tenure_fork() {
     info!("-------- Waiting miner 2 to catch up to miner 1 --------");
 
     // Wait for miner 2 to catch up to miner 1
-    wait_for(60, || {
+    // (note: use a high timeout to avoid potential failures in the github workflow)
+    wait_for(600, || {
         let info_1 = get_chain_info(&conf);
         let info_2 = get_chain_info(&conf_node_2);
         Ok(info_1.stacks_tip_height == info_2.stacks_tip_height)
@@ -5193,10 +5361,7 @@ fn locally_accepted_blocks_overriden_by_global_rejection() {
         .cloned()
         .take(num_signers / 2 + num_signers % 2)
         .collect();
-    TEST_REJECT_ALL_BLOCK_PROPOSAL
-        .lock()
-        .unwrap()
-        .replace(rejecting_signers.clone());
+    TEST_REJECT_ALL_BLOCK_PROPOSAL.set(rejecting_signers.clone());
     test_observer::clear();
     // Make a new stacks transaction to create a different block signature, but make sure to propose it
     // AFTER the signers are unfrozen so they don't inadvertently prevent the new block being accepted
@@ -5229,10 +5394,7 @@ fn locally_accepted_blocks_overriden_by_global_rejection() {
     info!("------------------------- Test Mine Nakamoto Block N+1' -------------------------");
     let info_before = signer_test.stacks_client.get_peer_info().unwrap();
-    TEST_REJECT_ALL_BLOCK_PROPOSAL
-        .lock()
-        .unwrap()
-        .replace(Vec::new());
+    TEST_REJECT_ALL_BLOCK_PROPOSAL.set(Vec::new());
 
     let transfer_tx = make_stacks_transfer(
         &sender_sk,
@@ -5388,10 +5550,7 @@ fn
locally_rejected_blocks_overriden_by_global_acceptance() { .cloned() .take(num_signers * 3 / 10) .collect(); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(rejecting_signers.clone()); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(rejecting_signers.clone()); test_observer::clear(); // submit a tx so that the miner will mine a stacks block N+1 @@ -5456,10 +5615,7 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { // Ensure that all signers accept the block proposal N+2 let info_before = signer_test.stacks_client.get_peer_info().unwrap(); let blocks_before = mined_blocks.load(Ordering::SeqCst); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(Vec::new()); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(Vec::new()); // submit a tx so that the miner will mine a stacks block N+2 and ensure ALL signers accept it let transfer_tx = make_stacks_transfer( @@ -5615,10 +5771,7 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { .cloned() .skip(num_signers * 7 / 10) .collect(); - TEST_IGNORE_ALL_BLOCK_PROPOSALS - .lock() - .unwrap() - .replace(ignoring_signers.clone()); + TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(ignoring_signers.clone()); // Clear the stackerdb chunks test_observer::clear(); @@ -5647,15 +5800,13 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { .filter_map(|chunk| { let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) .expect("Failed to deserialize SignerMessage"); - match message { - SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)) => { - non_ignoring_signers.iter().find(|key| { - key.verify(accepted.signer_signature_hash.bits(), &accepted.signature) - .is_ok() - }) - } - _ => None, + if let SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)) = message { + return non_ignoring_signers.iter().find(|key| { + key.verify(accepted.signer_signature_hash.bits(), &accepted.signature) + .is_ok() + }); } + None }) .collect::>(); Ok(accepted_signers.len() + ignoring_signers.len() == num_signers) @@ -5696,10 +5847,7 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { .stacks_client .get_peer_info() .expect("Failed to get peer info"); - TEST_IGNORE_ALL_BLOCK_PROPOSALS - .lock() - .unwrap() - .replace(Vec::new()); + TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(Vec::new()); wait_for(short_timeout, || { let info_after = signer_test .stacks_client @@ -5842,10 +5990,7 @@ fn reorg_locally_accepted_blocks_across_tenures_fails() { .cloned() .skip(num_signers * 7 / 10) .collect(); - TEST_IGNORE_ALL_BLOCK_PROPOSALS - .lock() - .unwrap() - .replace(ignoring_signers.clone()); + TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(ignoring_signers.clone()); // Clear the stackerdb chunks test_observer::clear(); @@ -5873,15 +6018,13 @@ fn reorg_locally_accepted_blocks_across_tenures_fails() { .filter_map(|chunk| { let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) .expect("Failed to deserialize SignerMessage"); - match message { - SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)) => { - non_ignoring_signers.iter().find(|key| { - key.verify(accepted.signer_signature_hash.bits(), &accepted.signature) - .is_ok() - }) - } - _ => None, + if let SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)) = message { + return non_ignoring_signers.iter().find(|key| { + key.verify(accepted.signer_signature_hash.bits(), &accepted.signature) + .is_ok() + }); } + None }) .collect::>(); Ok(accepted_signers.len() + ignoring_signers.len() == num_signers) @@ -5991,9 +6134,16 @@ fn 
miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { let send_fee = 180; let nmb_txs = 3; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let mut signer_test: SignerTest = SignerTest::new( + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![(sender_addr, (send_amt + send_fee) * nmb_txs)], + |_config| {}, + |config| { + // Accept all block proposals + config.connection_options.block_proposal_max_age_secs = u64::MAX; + }, + None, + None, ); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); signer_test.boot_to_epoch_3(); @@ -6064,7 +6214,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { info!("Forcing miner to ignore block responses for block N+1"); TEST_IGNORE_SIGNERS.set(true); info!("Delaying signer block N+1 broadcasting to the miner"); - TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap().replace(true); + TEST_PAUSE_BLOCK_BROADCAST.set(true); test_observer::clear(); let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test @@ -6115,19 +6265,15 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { .filter_map(|chunk| { let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) .expect("Failed to deserialize SignerMessage"); - match message { - SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)) => { - if block.header.signer_signature_hash() == accepted.signer_signature_hash { - Some(accepted.signature) - } else { - None - } + if let SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)) = message { + if block.header.signer_signature_hash() == accepted.signer_signature_hash { + return Some(accepted.signature); } - _ => None, } + None }) .collect::>(); - Ok(signatures.len() == num_signers) + Ok(signatures.len() >= num_signers * 7 / 10) }) .expect("Test timed out while waiting for signers signatures for first block proposal"); let block = block.unwrap(); @@ -6191,7 +6337,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { info!("Allowing miner to accept block responses again. 
"); TEST_IGNORE_SIGNERS.set(false); info!("Allowing signers to broadcast block N+1 to the miner"); - TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap().replace(false); + TEST_PAUSE_BLOCK_BROADCAST.set(false); // Assert the N+1' block was rejected let rejected_block = rejected_block.unwrap(); @@ -6217,7 +6363,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { } }) .collect::>(); - Ok(block_rejections.len() == num_signers) + Ok(block_rejections.len() >= num_signers * 7 / 10) }) .expect("FAIL: Timed out waiting for block proposal rejections"); @@ -6536,10 +6682,7 @@ fn continue_after_fast_block_no_sortition() { // Make all signers ignore block proposals let ignoring_signers = all_signers.to_vec(); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(ignoring_signers.clone()); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(ignoring_signers.clone()); info!("------------------------- Submit Miner 2 Block Commit -------------------------"); let rejections_before = signer_test @@ -6653,10 +6796,7 @@ fn continue_after_fast_block_no_sortition() { let blocks_processed_before_2 = blocks_mined2.load(Ordering::SeqCst); let nmb_old_blocks = test_observer::get_blocks().len(); // Allow signers to respond to proposals again - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(Vec::new()); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(Vec::new()); info!("------------------------- Wait for Miner B's Block N -------------------------"); // wait for the new block to be processed @@ -6820,7 +6960,7 @@ fn continue_after_tenure_extend() { signer_test.boot_to_epoch_3(); info!("------------------------- Mine Normal Tenure -------------------------"); - signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers); + signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers, true); info!("------------------------- Extend Tenure -------------------------"); signer_test @@ -7315,13 +7455,12 @@ fn block_commit_delay() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let block_proposal_timeout = Duration::from_secs(20); let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![], |config| { // make the duration long enough that the miner will be marked as malicious - config.block_proposal_timeout = block_proposal_timeout; + config.block_proposal_timeout = Duration::from_secs(600); }, |config| { // Set the block commit delay to 10 minutes to ensure no block commit is sent @@ -7361,10 +7500,7 @@ fn block_commit_delay() { .iter() .map(StacksPublicKey::from_private) .collect::>(); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(all_signers); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(all_signers); info!("------------------------- Test Mine Burn Block -------------------------"); let burn_height_before = get_chain_info(&signer_test.running_nodes.conf).burn_block_height; @@ -7399,10 +7535,7 @@ fn block_commit_delay() { .load(Ordering::SeqCst); info!("------------------------- Resume Signing -------------------------"); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(Vec::new()); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(Vec::new()); // Wait for a block to be mined wait_for(60, || { @@ -7466,7 +7599,7 @@ fn block_validation_response_timeout() { signer_test.boot_to_epoch_3(); info!("------------------------- Test Mine and Verify Confirmed Nakamoto Block -------------------------"); - signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers); + 
signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers, true); info!("------------------------- Test Block Validation Stalled -------------------------"); TEST_VALIDATE_STALL.lock().unwrap().replace(true); let validation_stall_start = Instant::now(); @@ -7514,6 +7647,7 @@ fn block_validation_response_timeout() { header: NakamotoBlockHeader::empty(), txs: vec![], }; + block.header.timestamp = get_epoch_time_secs(); let info_before = get_chain_info(&signer_test.running_nodes.conf); // Propose a block to the signers that passes initial checks but will not be submitted to the stacks node due to the submission stall @@ -7583,7 +7717,7 @@ fn block_validation_response_timeout() { ); info!("------------------------- Test Mine and Verify Confirmed Nakamoto Block -------------------------"); let info_before = info_after; - signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers); + signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers, true); wait_for(30, || { let info = get_chain_info(&signer_test.running_nodes.conf); @@ -9029,3 +9163,1655 @@ fn tenure_extend_after_2_bad_commits() { run_loop_2_thread.join().unwrap(); signer_test.shutdown(); } + +#[test] +#[ignore] +/// Test the block_proposal_max_age_secs signer configuration option. It should reject blocks that are +/// invalid but within the max age window; otherwise, it should simply drop the block without further processing. +/// +/// Test Setup: +/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. +/// +/// Test Execution: +/// The stacks node is advanced to epoch 3.0 reward set calculation to ensure the signer set is determined. +/// An invalid block proposal with a recent timestamp is forcibly written to the miner's slot to simulate the miner proposing a block. +/// The signers process the invalid block and broadcast a block response rejection to the respective .signers-XXX-YYY contract. +/// A second block proposal with an outdated timestamp is then submitted to the miner's slot to simulate the miner proposing a very old block. +/// The test confirms no further block rejection response is submitted to the .signers-XXX-YYY contract. +/// +/// Test Assertion: +/// - Each signer successfully rejects the recent invalid block proposal. +/// - No signer submits a block proposal response for the outdated block proposal. 
+/// - The stacks tip does not advance +fn block_proposal_max_age_rejections() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new_with_config_modifications( + num_signers, + vec![], + |config| { + config.block_proposal_max_age_secs = 30; + }, + |_| {}, + None, + None, + ); + signer_test.boot_to_epoch_3(); + let short_timeout = Duration::from_secs(30); + + info!("------------------------- Send Block Proposal To Signers -------------------------"); + let info_before = get_chain_info(&signer_test.running_nodes.conf); + let mut block = NakamotoBlock { + header: NakamotoBlockHeader::empty(), + txs: vec![], + }; + // First propose a stale block that is older than the block_proposal_max_age_secs + block.header.timestamp = get_epoch_time_secs().saturating_sub( + signer_test.signer_configs[0] + .block_proposal_max_age_secs + .saturating_add(1), + ); + let block_signer_signature_hash_1 = block.header.signer_signature_hash(); + signer_test.propose_block(block.clone(), short_timeout); + + // Next propose a recent invalid block + block.header.timestamp = get_epoch_time_secs(); + let block_signer_signature_hash_2 = block.header.signer_signature_hash(); + signer_test.propose_block(block, short_timeout); + + info!("------------------------- Test Block Proposal Rejected -------------------------"); + // Verify the signers rejected only the SECOND block proposal. The first was not even processed. + wait_for(30, || { + let rejections: Vec<_> = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + return None; + }; + match message { + SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { + signer_signature_hash, + signature, + .. + })) => { + assert_eq!( + signer_signature_hash, block_signer_signature_hash_2, + "We should only reject the second block" + ); + Some(signature) + } + SignerMessage::BlockResponse(BlockResponse::Accepted(BlockAccepted { + signer_signature_hash, + .. + })) => { + assert_ne!( + signer_signature_hash, block_signer_signature_hash_1, + "We should never have accepted the stale first block" + ); + None + } + _ => None, + } + }) + .collect(); + Ok(rejections.len() > num_signers * 7 / 10) + }) + .expect("Timed out waiting for block rejections"); + + info!("------------------------- Test Peer Info-------------------------"); + assert_eq!(info_before, get_chain_info(&signer_test.running_nodes.conf)); + + info!("------------------------- Test Shutdown-------------------------"); + signer_test.shutdown(); +} + +#[test] +#[ignore] +/// Test that signers do not mark a block as globally accepted if it was not announced by the node. +/// This will simulate this case via testing flags, and ensure that a block can be reorged across tenure +/// boundaries now (as it is only marked locally accepted and no longer gets marked globally accepted +/// by simply seeing the threshold number of signatures). +/// +/// Test Setup: +/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. +/// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing. +/// +/// Test Execution: +/// 1. 
The node mines 1 stacks block N (all signers sign it). +/// 2. Fewer than 30% of the signers are configured to auto-reject any block proposal, broadcast of new blocks is skipped, and the miner is configured to ignore signer responses. +/// 3. The node mines 1 stacks block N+1 (all signers sign it but the one configured to reject it), but eventually all mark the block as locally accepted. +/// 4. A new tenure starts and the miner attempts to mine a new sister block N+1' (as it does not see the threshold number of signatures or any block push from signers). +/// 5. The signers accept this sister block as a valid reorg and the node advances to block N+1'. +/// +/// Test Assertion: +/// - All signers accepted block N. +/// - Less than 30% of the signers rejected block N+1. +/// - All signers accept block N+1' as a valid reorg. +/// - The node advances to block N+1'. +fn global_acceptance_depends_on_block_announcement() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let nmb_txs = 4; + + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr, (send_amt + send_fee) * nmb_txs)], + |config| { + // Just accept all reorg attempts + config.tenure_last_block_proposal_timeout = Duration::from_secs(0); + }, + |config| { + config.miner.block_commit_delay = Duration::from_secs(0); + }, + None, + None, + ); + + let all_signers: Vec<_> = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .collect(); + + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + let short_timeout = 30; + signer_test.boot_to_epoch_3(); + + info!("------------------------- Test Mine Nakamoto Block N -------------------------"); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + + test_observer::clear(); + // submit a tx so that the miner will mine a stacks block N + let mut sender_nonce = 0; + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + let tx = submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; + info!("Submitted tx {tx} to mine block N"); + + wait_for(short_timeout, || { + Ok(signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height + > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for N to be mined and processed"); + + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + assert_eq!( + info_before.stacks_tip_height + 1, + info_after.stacks_tip_height + ); + + // Ensure that the block was accepted globally so the stacks tip has advanced to N + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n = nakamoto_blocks.last().unwrap(); + assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); + + // Make sure that ALL signers accepted the block proposal + signer_test + .wait_for_block_acceptance(short_timeout, &block_n.signer_signature_hash, &all_signers)
+ .expect("Timed out waiting for block acceptance of N"); + + info!("------------------------- Mine Nakamoto Block N+1 -------------------------"); + // Make less than 30% of the signers reject the block and ensure it is accepted by the node, but not announced. + let rejecting_signers: Vec<_> = all_signers + .iter() + .cloned() + .take(num_signers * 3 / 10) + .collect(); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(rejecting_signers.clone()); + TEST_SKIP_BLOCK_ANNOUNCEMENT.set(true); + TEST_IGNORE_SIGNERS.set(true); + TEST_SKIP_BLOCK_BROADCAST.set(true); + test_observer::clear(); + + // submit a tx so that the miner will mine a stacks block N+1 + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + let tx = submit_tx(&http_origin, &transfer_tx); + info!("Submitted tx {tx} in to mine block N+1"); + + let mut proposed_block = None; + let start_time = Instant::now(); + while proposed_block.is_none() && start_time.elapsed() < Duration::from_secs(30) { + proposed_block = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .find_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockProposal(proposal) => { + if proposal.block.header.consensus_hash + == info_before.stacks_tip_consensus_hash + { + Some(proposal.block) + } else { + None + } + } + _ => None, + } + }); + } + let proposed_block = proposed_block.expect("Failed to find proposed block within 30s"); + + // Even though one of the signers rejected the block, it will eventually accept the block as it sees the 70% threshold of signatures + signer_test + .wait_for_block_acceptance( + short_timeout, + &proposed_block.header.signer_signature_hash(), + &all_signers, + ) + .expect("Timed out waiting for block acceptance of N+1 by all signers"); + + info!( + "------------------------- Attempt to Mine Nakamoto Block N+1' -------------------------" + ); + + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(Vec::new()); + TEST_SKIP_BLOCK_ANNOUNCEMENT.set(false); + TEST_IGNORE_SIGNERS.set(false); + TEST_SKIP_BLOCK_BROADCAST.set(false); + test_observer::clear(); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let info = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + Ok(info.stacks_tip_height > info_before.stacks_tip_height) + }, + ) + .unwrap(); + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + let mut sister_block = None; + let start_time = Instant::now(); + while sister_block.is_none() && start_time.elapsed() < Duration::from_secs(30) { + sister_block = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .find_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockProposal(proposal) => { + if proposal.block.header.consensus_hash + == info_after.stacks_tip_consensus_hash + { + Some(proposal.block) + } else { + None + } + } + _ => None, + } + }); + } + let sister_block = 
sister_block.expect("Failed to find proposed sister block within 30s"); + signer_test + .wait_for_block_acceptance( + short_timeout, + &sister_block.header.signer_signature_hash(), + &all_signers, + ) + .expect("Timed out waiting for block acceptance of N+1' by all signers"); + + // Assert the block was mined and the tip has changed. + assert_eq!( + info_after.stacks_tip_height, + sister_block.header.chain_length + ); + assert_eq!(info_after.stacks_tip, sister_block.header.block_hash()); + assert_eq!( + info_after.stacks_tip_consensus_hash, + sister_block.header.consensus_hash + ); + assert_eq!( + sister_block.header.chain_length, + proposed_block.header.chain_length + ); + assert_ne!(sister_block, proposed_block); +} + +/// Test a scenario where: +/// Two miners boot to Nakamoto. +/// Sortition occurs. Miner 1 wins. +/// Miner 1 proposes a block N +/// Signers accept and the stacks tip advances to N +/// Sortition occurs. Miner 2 wins. +/// Miner 2 proposes block N+1 +/// Sortition occurs. Miner 1 wins. +/// Miner 1 proposes block N+1' +/// N+1 passes signers initial checks and is submitted to the node for validation. +/// N+1' arrives at the signers and passes initial checks, but BEFORE N+1' can be submitted for validation: +/// N+1 finishes being processed at the node and sits in the signers' queue. +/// Signers THEN submit N+1' for node validation. +/// Signers process N+1 validation response ok, followed immediately by the N+1' validation response ok. +/// Signers broadcast N+1 acceptance +/// Signers broadcast N+1' rejection +/// Miner 2 proposes a new N+2 block built upon N+1 +/// Asserts: +/// - N+1 is signed and broadcasted +/// - N+1' is rejected as a sortition view mismatch +/// - The tip advances to N+1 (Signed by Miner 1) +/// - The tip advances to N+2 (Signed by Miner 2) +#[test] +#[ignore] +fn no_reorg_due_to_successive_block_validation_ok() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let num_signers = 5; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let num_txs = 1; + let sender_nonce = 0; + + let btc_miner_1_seed = vec![1, 1, 1, 1]; + let btc_miner_2_seed = vec![2, 2, 2, 2]; + let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); + let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); + + let node_1_rpc = gen_random_port(); + let node_1_p2p = gen_random_port(); + let node_2_rpc = gen_random_port(); + let node_2_p2p = gen_random_port(); + + let localhost = "127.0.0.1"; + let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}"); + let node_2_rpc_bind = format!("{localhost}:{node_2_rpc}"); + let mut node_2_listeners = Vec::new(); + + let max_nakamoto_tenures = 30; + + info!("------------------------- Test Setup -------------------------"); + // partition the signer set so that ~half are listening and using node 1 for RPC and events, + // and the rest are using node 2 + + let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr, (send_amt + send_fee) * num_txs)], + |signer_config| { + // Let's make sure we never time out, since we need to stall some things to force our scenario + signer_config.block_proposal_validation_timeout = Duration::from_secs(u64::MAX); + signer_config.tenure_last_block_proposal_timeout = Duration::from_secs(u64::MAX); + 
signer_config.first_proposal_burn_block_timing = Duration::from_secs(u64::MAX); + let node_host = if signer_config.endpoint.port() % 2 == 0 { + &node_1_rpc_bind + } else { + &node_2_rpc_bind + }; + signer_config.node_host = node_host.to_string(); + }, + |config| { + config.node.rpc_bind = format!("{localhost}:{node_1_rpc}"); + config.node.p2p_bind = format!("{localhost}:{node_1_p2p}"); + config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); + config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); + config.miner.wait_on_interim_blocks = Duration::from_secs(5); + config.node.pox_sync_sample_secs = 30; + config.burnchain.pox_reward_length = Some(max_nakamoto_tenures); + + config.node.seed = btc_miner_1_seed.clone(); + config.node.local_peer_seed = btc_miner_1_seed.clone(); + config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); + config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + + config.events_observers.retain(|listener| { + let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { + warn!( + "Cannot parse {} to a socket, assuming it isn't a signer-listener binding", + listener.endpoint + ); + return true; + }; + if addr.port() % 2 == 0 || addr.port() == test_observer::EVENT_OBSERVER_PORT { + return true; + } + node_2_listeners.push(listener.clone()); + false + }) + }, + Some(vec![btc_miner_1_pk, btc_miner_2_pk]), + None, + ); + let conf = signer_test.running_nodes.conf.clone(); + let mut conf_node_2 = conf.clone(); + conf_node_2.node.rpc_bind = format!("{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_bind = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.data_url = format!("http://{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_address = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.seed = btc_miner_2_seed.clone(); + conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); + conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone(); + conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2])); + conf_node_2.node.miner = true; + conf_node_2.events_observers.clear(); + conf_node_2.events_observers.extend(node_2_listeners); + assert!(!conf_node_2.events_observers.is_empty()); + + let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); + let node_1_pk = StacksPublicKey::from_private(&node_1_sk); + + conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); + + conf_node_2.node.set_bootstrap_nodes( + format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), + conf.burnchain.chain_id, + conf.burnchain.peer_version, + ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + + let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let run_loop_stopper_2 = run_loop_2.get_termination_switch(); + let rl2_coord_channels = run_loop_2.coordinator_channels(); + let Counters { + naka_submitted_commits: rl2_commits, + naka_skip_commit_op: rl2_skip_commit_op, + naka_mined_blocks: blocks_mined2, + naka_rejected_blocks: rl2_rejections, + naka_proposed_blocks: rl2_proposals, + .. + } = run_loop_2.counters(); + + let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + + info!("------------------------- Pause Miner 2's Block Commits -------------------------"); + + // Make sure Miner 2 cannot win a sortition at first. 
+ rl2_skip_commit_op.set(true); + + info!("------------------------- Boot to Epoch 3.0 -------------------------"); + + let run_loop_2_thread = thread::Builder::new() + .name("run_loop_2".into()) + .spawn(move || run_loop_2.start(None, 0)) + .unwrap(); + + signer_test.boot_to_epoch_3(); + + wait_for(120, || { + let Some(node_1_info) = get_chain_info_opt(&conf) else { + return Ok(false); + }; + let Some(node_2_info) = get_chain_info_opt(&conf_node_2) else { + return Ok(false); + }; + Ok(node_1_info.stacks_tip_height == node_2_info.stacks_tip_height) + }) + .expect("Timed out waiting for bootstrapped node to catch up to the miner"); + + let mining_pk_1 = StacksPublicKey::from_private(&conf.miner.mining_key.unwrap()); + let mining_pk_2 = StacksPublicKey::from_private(&conf_node_2.miner.mining_key.unwrap()); + let mining_pkh_1 = Hash160::from_node_public_key(&mining_pk_1); + let mining_pkh_2 = Hash160::from_node_public_key(&mining_pk_2); + debug!("The mining key for miner 1 is {mining_pkh_1}"); + debug!("The mining key for miner 2 is {mining_pkh_2}"); + + info!("------------------------- Reached Epoch 3.0 -------------------------"); + + let burnchain = signer_test.running_nodes.conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + + let get_burn_height = || { + SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .unwrap() + .block_height + }; + let starting_peer_height = get_chain_info(&conf).stacks_tip_height; + let starting_burn_height = get_burn_height(); + + info!("------------------------- Pause Miner 1's Block Commits -------------------------"); + signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .set(true); + + info!("------------------------- Miner 1 Mines a Nakamoto Block N (Globally Accepted) -------------------------"); + let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + let info_before = get_chain_info(&conf); + let mined_before = test_observer::get_mined_nakamoto_blocks().len(); + + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 30, + || { + Ok(get_burn_height() > starting_burn_height + && signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height + > stacks_height_before + && blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 + && get_chain_info(&conf).stacks_tip_height > info_before.stacks_tip_height + && test_observer::get_mined_nakamoto_blocks().len() > mined_before) + }, + ) + .expect("Timed out waiting for Miner 1 to Mine Block N"); + + let blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n = blocks.last().unwrap().clone(); + let block_n_signature_hash = block_n.signer_signature_hash; + + let info_after = get_chain_info(&conf); + assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); + assert_eq!(block_n.signer_signature_hash, block_n_signature_hash); + assert_eq!( + info_after.stacks_tip_height, + info_before.stacks_tip_height + 1 + ); + + // assure we have a successful sortition that miner 1 won + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(tip.sortition); + assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_1); + + debug!("Miner 1 mined block N: {block_n_signature_hash}"); + + info!("------------------------- Pause Block Validation Response of N+1 -------------------------"); + 
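The `TEST_VALIDATE_STALL.lock().unwrap().replace(true)` call that follows is the older form of the test-flag pattern; most other flags touched by this diff (`TEST_PAUSE_BLOCK_BROADCAST`, `TEST_REJECT_ALL_BLOCK_PROPOSAL`, `rl2_skip_commit_op`, ...) have been migrated to a `.set(...)` helper. A minimal sketch of what such a wrapper could look like, assuming an `Arc<Mutex<Option<T>>>` representation; the `TestFlag` name and method set here are illustrative, not a verbatim copy of the type in stacks-common:

    use std::sync::{Arc, Mutex};

    // Hypothetical shared test flag: cloned into the code under test and
    // flipped from the test thread.
    #[derive(Clone, Default)]
    pub struct TestFlag<T>(pub Arc<Mutex<Option<T>>>);

    impl<T: Clone + Default> TestFlag<T> {
        // Equivalent to the older `FLAG.lock().unwrap().replace(value)` form.
        pub fn set(&self, value: T) {
            *self.0.lock().unwrap() = Some(value);
        }

        // Reads the current value, falling back to `T::default()` when unset.
        pub fn get(&self) -> T {
            self.0.lock().unwrap().clone().unwrap_or_default()
        }
    }

Under that shape, `FLAG.set(true)` and `FLAG.lock().unwrap().replace(true)` are behaviorally equivalent; the wrapper only hides the locking boilerplate.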
TEST_VALIDATE_STALL.lock().unwrap().replace(true); + let proposals_before_2 = rl2_proposals.load(Ordering::SeqCst); + let rejections_before_2 = rl2_rejections.load(Ordering::SeqCst); + let blocks_before = test_observer::get_blocks().len(); + let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); + let blocks_processed_before_2 = blocks_mined2.load(Ordering::SeqCst); + + // Force miner 1 to submit a block + // submit a tx so that the miner will mine an extra block + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + + let mut block_n_1 = None; + wait_for(30, || { + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks.into_iter().flat_map(|chunk| chunk.modified_slots) { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + continue; + }; + if let SignerMessage::BlockProposal(proposal) = message { + if proposal.block.header.signer_signature_hash() != block_n_signature_hash + && proposal + .block + .header + .recover_miner_pk() + .map(|pk| pk == mining_pk_1) + .unwrap() + && proposal.block.header.chain_length == block_n.stacks_height + 1 + { + block_n_1 = Some(proposal.block.clone()); + return Ok(true); + } + } + } + Ok(false) + }) + .expect("Timed out waiting for Miner 1 to propose N+1"); + let block_n_1 = block_n_1.expect("Failed to find N+1 proposal"); + let block_n_1_signature_hash = block_n_1.header.signer_signature_hash(); + + assert_eq!( + block_n_1.header.parent_block_id.to_string(), + block_n.block_id + ); + debug!("Miner 1 proposed block N+1: {block_n_1_signature_hash}"); + + info!("------------------------- Unpause Miner 2's Block Commits -------------------------"); + let rl2_commits_before = rl2_commits.load(Ordering::SeqCst); + rl2_skip_commit_op.set(false); + + wait_for(30, || { + Ok(rl2_commits.load(Ordering::SeqCst) > rl2_commits_before) + }) + .expect("Timed out waiting for Miner 2 to submit its block commit"); + let rl2_commits_before = rl2_commits.load(Ordering::SeqCst); + + info!("------------------------- Pause Block Validation Submission of N+1'-------------------------"); + TEST_STALL_BLOCK_VALIDATION_SUBMISSION.set(true); + + info!("------------------------- Start Miner 2's Tenure-------------------------"); + let burn_height_before = get_burn_height(); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 30, + || { + Ok(get_burn_height() > burn_height_before + && rl2_proposals.load(Ordering::SeqCst) > proposals_before_2 + && rl2_commits.load(Ordering::SeqCst) > rl2_commits_before) + }, + ) + .expect("Timed out waiting for burn block height to advance and Miner 2 to propose a block"); + + let mut block_n_1_prime = None; + wait_for(30, || { + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks.into_iter().flat_map(|chunk| chunk.modified_slots) { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + continue; + }; + if let SignerMessage::BlockProposal(proposal) = message { + if proposal + .block + .header + .recover_miner_pk() + .map(|pk| pk == mining_pk_2) + .unwrap() + { + block_n_1_prime = Some(proposal.block.clone()); + return Ok(true); + } + } + } + Ok(false) + }) + .expect("Timed out waiting for Miner 2 to propose N+1'"); + + let block_n_1_prime = block_n_1_prime.expect("Failed to find N+1' proposal"); + let block_n_1_prime_signature_hash = 
block_n_1_prime.header.signer_signature_hash(); + + debug!("Miner 2 proposed N+1': {block_n_1_prime_signature_hash}"); + + // assure we have a successful sortition that miner 2 won + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(tip.sortition); + assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_2); + // Make sure that the tip is still at block N + assert_eq!(tip.canonical_stacks_tip_height, block_n.stacks_height); + assert_eq!( + tip.canonical_stacks_tip_hash.to_string(), + block_n.block_hash + ); + + // Just a precaution to make sure no stacks blocks have been processed between now and our original pause + assert_eq!(rejections_before_2, rl2_rejections.load(Ordering::SeqCst)); + assert_eq!( + blocks_processed_before_1, + blocks_mined1.load(Ordering::SeqCst) + ); + assert_eq!( + blocks_processed_before_2, + blocks_mined2.load(Ordering::SeqCst) + ); + assert_eq!(blocks_before, test_observer::get_blocks().len()); + + info!("------------------------- Unpause Block Validation Response of N+1 -------------------------"); + + TEST_VALIDATE_STALL.lock().unwrap().replace(false); + + // Verify that the node accepted the proposed N+1, sending back a validate ok response + wait_for(30, || { + for proposal in test_observer::get_proposal_responses() { + if let BlockValidateResponse::Ok(response) = proposal { + if response.signer_signature_hash == block_n_1_signature_hash { + return Ok(true); + } + } + } + Ok(false) + }) + .expect("Timed out waiting for validation response for N+1"); + + debug!( + "Node finished processing proposal validation request for N+1: {block_n_1_signature_hash}" + ); + + // This is awful, but I can't guarantee signers have reached the submission stall, and we need to ensure the event order is as expected. + sleep_ms(5_000); + + info!("------------------------- Unpause Block Validation Submission and Response for N+1' -------------------------"); + TEST_STALL_BLOCK_VALIDATION_SUBMISSION.set(false); + + info!("------------------------- Confirm N+1 is Accepted ------------------------"); + wait_for(30, || { + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks.into_iter().flat_map(|chunk| chunk.modified_slots) { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + continue; + }; + if let SignerMessage::BlockResponse(BlockResponse::Accepted(BlockAccepted { + signer_signature_hash, + .. + })) = message + { + if signer_signature_hash == block_n_1_signature_hash { + return Ok(true); + } + } + } + Ok(false) + }) + .expect("Timed out waiting for N+1 acceptance."); + + debug!("Miner 1 mined block N+1: {block_n_1_signature_hash}"); + + info!("------------------------- Confirm N+1' is Rejected ------------------------"); + + wait_for(30, || { + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks.into_iter().flat_map(|chunk| chunk.modified_slots) { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + continue; + }; + if let SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { + signer_signature_hash, + .. + })) = message + { + if signer_signature_hash == block_n_1_prime_signature_hash { + return Ok(true); + } + } else if let SignerMessage::BlockResponse(BlockResponse::Accepted(BlockAccepted { + signer_signature_hash, + .. + })) = message + { + assert!( + signer_signature_hash != block_n_1_prime_signature_hash, + "N+1' was accepted after N+1 was accepted. This should not be possible."
+ ); + } + } + Ok(false) + }) + .expect("Timed out waiting for N+1' rejection."); + + info!("------------------------- Confirm N+2 Accepted ------------------------"); + + let mut block_n_2 = None; + wait_for(30, || { + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks.into_iter().flat_map(|chunk| chunk.modified_slots) { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + continue; + }; + if let SignerMessage::BlockProposal(proposal) = message { + if proposal.block.header.chain_length == block_n_1.header.chain_length + 1 + && proposal + .block + .header + .recover_miner_pk() + .map(|pk| pk == mining_pk_2) + .unwrap() + { + block_n_2 = Some(proposal.block.clone()); + return Ok(true); + } + } + } + Ok(false) + }) + .expect("Timed out waiting for Miner 2 to propose N+2"); + let block_n_2 = block_n_2.expect("Failed to find N+2 proposal"); + + wait_for(30, || { + Ok(get_chain_info(&conf).stacks_tip_height >= block_n_2.header.chain_length) + }) + .expect("Timed out waiting for the stacks tip height to advance"); + + info!("------------------------- Confirm Stacks Chain is As Expected ------------------------"); + let info_after = get_chain_info(&conf); + assert_eq!(info_after.stacks_tip_height, block_n_2.header.chain_length); + assert_eq!(info_after.stacks_tip_height, starting_peer_height + 3); + assert_eq!( + info_after.stacks_tip.to_string(), + block_n_2.header.block_hash().to_string() + ); + assert_ne!( + info_after.stacks_tip_consensus_hash, + block_n_1.header.consensus_hash + ); + assert_eq!( + info_after.stacks_tip_consensus_hash, + block_n_2.header.consensus_hash + ); + assert_eq!( + block_n_2.header.parent_block_id, + block_n_1.header.block_id() + ); + assert_eq!( + block_n_1.header.parent_block_id.to_string(), + block_n.block_id + ); + + info!("------------------------- Shutdown -------------------------"); + rl2_coord_channels + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper_2.store(false, Ordering::SeqCst); + run_loop_2_thread.join().unwrap(); + signer_test.shutdown(); +} + +#[test] +#[ignore] +/// Test that signers for an incoming reward cycle do not sign blocks for the previous reward cycle. +/// +/// Test Setup: +/// The test spins up five stacks signers that are stacked for multiple cycles, one miner Nakamoto node, and a corresponding bitcoind. +/// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing. +/// +/// Test Execution: +/// The node mines to the middle of the prepare phase of reward cycle N+1. +/// Sends a status request to the signers to ensure both the current and next reward cycle signers are active. +/// A valid Nakamoto block is proposed. +/// Two invalid Nakamoto blocks are proposed. +/// +/// Test Assertion: +/// All signers for cycle N sign the valid block. +/// No signers for cycle N+1 emit any messages. +/// All signers for cycle N reject the invalid blocks. +/// No signers for cycle N+1 emit any messages for the invalid blocks. +/// The chain advances to block N. 
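The "middle of the prepare phase" height this test mines to is plain saturating arithmetic over burnchain heights. A self-contained sketch with hypothetical numbers (the `240` and `10` below are illustrative, not taken from the test configuration):

    // Midpoint of cycle N+1's prepare phase: the prepare phase is the last
    // `prepare_length` burn blocks before cycle N+1 begins, so its middle sits
    // `prepare_length / 2` blocks below the cycle's first block.
    fn middle_of_prepare_phase(first_block_of_next_cycle: u64, prepare_length: u64) -> u64 {
        first_block_of_next_cycle.saturating_sub(prepare_length / 2)
    }

    fn main() {
        // e.g. a 10-block prepare phase with cycle N+1 starting at height 240
        assert_eq!(middle_of_prepare_phase(240, 10), 235);
    }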
+fn incoming_signers_ignore_block_proposals() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let mut signer_test: SignerTest<SpawnedSigner> = + SignerTest::new(num_signers, vec![(sender_addr, send_amt + send_fee)]); + let timeout = Duration::from_secs(200); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + signer_test.boot_to_epoch_3(); + let curr_reward_cycle = signer_test.get_current_reward_cycle(); + // Mine to the middle of the prepare phase of the next reward cycle + let next_reward_cycle = curr_reward_cycle.saturating_add(1); + let prepare_phase_len = signer_test + .running_nodes + .conf + .get_burnchain() + .pox_constants + .prepare_length as u64; + let middle_of_prepare_phase = signer_test + .running_nodes + .btc_regtest_controller + .get_burnchain() + .reward_cycle_to_block_height(next_reward_cycle) + .saturating_sub(prepare_phase_len / 2); + + info!("------------------------- Test Mine Until Middle of Prepare Phase at Block Height {middle_of_prepare_phase} -------------------------"); + signer_test.run_until_burnchain_height_nakamoto(timeout, middle_of_prepare_phase, num_signers); + + signer_test.wait_for_registered_both_reward_cycles(30); + + let current_burnchain_height = signer_test + .running_nodes + .btc_regtest_controller + .get_headers_height(); + assert_eq!(current_burnchain_height, middle_of_prepare_phase); + assert_eq!(curr_reward_cycle, signer_test.get_current_reward_cycle()); + + let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + + info!("------------------------- Test Mine A Valid Block -------------------------"); + // submit a tx so that the miner will mine an extra block + let sender_nonce = 0; + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + + // a tenure has begun, so wait until we mine a block + wait_for(30, || { + Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before) + }) + .expect("Timed out waiting for a block to be mined"); + + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let mut stackerdb = StackerDB::new( + &signer_test.running_nodes.conf.node.rpc_bind, + StacksPrivateKey::new(), // We are just reading so don't care what the key is + false, + next_reward_cycle, + SignerSlotID(0), // We are just reading so again, don't care about index. 
+ ); + + let next_signer_slot_ids: Vec<_> = signer_test + .get_signer_indices(next_reward_cycle) + .iter() + .map(|id| id.0) + .collect(); + + let mut no_next_signer_messages = || { + assert!(wait_for(30, || { + let latest_msgs = StackerDB::get_messages::<SignerMessage>( + stackerdb + .get_session_mut(&MessageSlotID::BlockResponse) + .expect("Failed to get BlockResponse stackerdb session"), + &next_signer_slot_ids, + ) + .expect("Failed to get messages from stackerdb"); + assert!( + latest_msgs.is_empty(), + "Next signers have messages in their stackerdb" + ); + Ok(false) + }) + .is_err()); + }; + + no_next_signer_messages(); + + let proposal_conf = ProposalEvalConfig { + first_proposal_burn_block_timing: Duration::from_secs(0), + block_proposal_timeout: Duration::from_secs(100), + tenure_last_block_proposal_timeout: Duration::from_secs(30), + tenure_idle_timeout: Duration::from_secs(300), + }; + let mut block = NakamotoBlock { + header: NakamotoBlockHeader::empty(), + txs: vec![], + }; + block.header.timestamp = get_epoch_time_secs(); + let signer_signature_hash_1 = block.header.signer_signature_hash(); + + info!("------------------------- Test Attempt to Mine Invalid Block {signer_signature_hash_1} -------------------------"); + + let short_timeout = Duration::from_secs(30); + let all_signers: Vec<_> = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .collect(); + test_observer::clear(); + + // Propose a block to the signers that passes initial checks but will be rejected by the stacks node + let view = SortitionsView::fetch_view(proposal_conf, &signer_test.stacks_client).unwrap(); + block.header.pox_treatment = BitVec::ones(1).unwrap(); + block.header.consensus_hash = view.cur_sortition.consensus_hash; + block.header.chain_length = + get_chain_info(&signer_test.running_nodes.conf).stacks_tip_height + 1; + let signer_signature_hash_2 = block.header.signer_signature_hash(); + + info!("------------------------- Test Attempt to Mine Invalid Block {signer_signature_hash_2} -------------------------"); + + signer_test.propose_block(block, short_timeout); + // Verify the signers rejected the second block via the endpoint + signer_test.wait_for_validate_reject_response(short_timeout, signer_signature_hash_2); + signer_test + .wait_for_block_rejections(30, &all_signers) + .expect("Timed out waiting for block rejections"); + no_next_signer_messages(); + + assert_eq!(blocks_before, mined_blocks.load(Ordering::SeqCst)); + signer_test.shutdown(); +} + +#[test] +#[ignore] +/// Test that signers for an outgoing reward cycle do not sign blocks for the incoming reward cycle. +/// +/// Test Setup: +/// The test spins up five stacks signers that are stacked for multiple cycles, one miner Nakamoto node, and a corresponding bitcoind. +/// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing. +/// +/// Test Execution: +/// The node mines to the next reward cycle. +/// Sends a status request to the signers to ensure both the current and previous reward cycle signers are active. +/// A valid Nakamoto block is proposed. +/// Two invalid Nakamoto blocks are proposed. +/// +/// Test Assertion: +/// All signers for cycle N+1 sign the valid block. +/// No signers for cycle N emit any messages. +/// All signers for cycle N+1 reject the invalid blocks. +/// No signers for cycle N emit any messages for the invalid blocks. +/// The chain advances to block N. 
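Both boundary tests assert silence the same way: they poll a condition that must never become true and treat `wait_for` timing out as the passing case (`assert!(wait_for(..).is_err())`). A self-contained sketch of that idiom; the `assert_never` helper is made up for illustration, while the tests themselves express it inline:

    use std::thread::sleep;
    use std::time::{Duration, Instant};

    // Poll `cond` for `timeout_secs`; panic if it ever becomes true. The
    // timeout elapsing is the success path.
    fn assert_never(timeout_secs: u64, mut cond: impl FnMut() -> bool) {
        let deadline = Instant::now() + Duration::from_secs(timeout_secs);
        while Instant::now() < deadline {
            assert!(!cond(), "condition became true during the quiet window");
            sleep(Duration::from_millis(100));
        }
    }

    fn main() {
        // Nothing flips the condition, so this returns quietly after ~1s.
        assert_never(1, || false);
    }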
+fn outgoing_signers_ignore_block_proposals() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let mut signer_test: SignerTest<SpawnedSigner> = + SignerTest::new(num_signers, vec![(sender_addr, send_amt + send_fee)]); + let timeout = Duration::from_secs(200); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + signer_test.boot_to_epoch_3(); + // Do not cleanup stale signers + TEST_SKIP_SIGNER_CLEANUP.set(true); + let curr_reward_cycle = signer_test.get_current_reward_cycle(); + // Mine to the start of the next reward cycle + let next_reward_cycle = curr_reward_cycle.saturating_add(1); + let next_reward_cycle_height = signer_test + .running_nodes + .btc_regtest_controller + .get_burnchain() + .reward_cycle_to_block_height(next_reward_cycle); + + info!("------------------------- Test Mine Until Next Reward Cycle at Height {next_reward_cycle_height} -------------------------"); + signer_test.run_until_burnchain_height_nakamoto(timeout, next_reward_cycle_height, num_signers); + + signer_test.wait_for_registered_both_reward_cycles(30); + + let current_burnchain_height = signer_test + .running_nodes + .btc_regtest_controller + .get_headers_height(); + assert_eq!(current_burnchain_height, next_reward_cycle_height); + assert_eq!(next_reward_cycle, signer_test.get_current_reward_cycle()); + + let old_reward_cycle = curr_reward_cycle; + + let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + + test_observer::clear(); + + info!("------------------------- Test Mine A Valid Block -------------------------"); + // submit a tx so that the miner will mine an extra block + let sender_nonce = 0; + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + + // a tenure has begun, so wait until we mine a block + wait_for(30, || { + Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before) + }) + .expect("Timed out waiting for a block to be mined"); + + let new_signature_hash = test_observer::get_mined_nakamoto_blocks() + .last() + .unwrap() + .signer_signature_hash; + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let mut stackerdb = StackerDB::new( + &signer_test.running_nodes.conf.node.rpc_bind, + StacksPrivateKey::new(), // We are just reading so don't care what the key is + false, + old_reward_cycle, + SignerSlotID(0), // We are just reading so again, don't care about index. 
+ ); + + let old_signer_slot_ids: Vec<_> = signer_test + .get_signer_indices(old_reward_cycle) + .iter() + .map(|id| id.0) + .collect(); + + let mut old_signers_ignore_block_proposals = |hash| { + assert!(wait_for(10, || { + let latest_msgs = StackerDB::get_messages::<SignerMessage>( + stackerdb + .get_session_mut(&MessageSlotID::BlockResponse) + .expect("Failed to get BlockResponse stackerdb session"), + &old_signer_slot_ids, + ) + .expect("Failed to get messages from stackerdb"); + for msg in latest_msgs.iter() { + if let SignerMessage::BlockResponse(response) = msg { + assert_ne!(response.get_signer_signature_hash(), hash); + } + } + Ok(false) + }) + .is_err()); + }; + old_signers_ignore_block_proposals(new_signature_hash); + + let proposal_conf = ProposalEvalConfig { + first_proposal_burn_block_timing: Duration::from_secs(0), + block_proposal_timeout: Duration::from_secs(100), + tenure_last_block_proposal_timeout: Duration::from_secs(30), + tenure_idle_timeout: Duration::from_secs(300), + }; + let mut block = NakamotoBlock { + header: NakamotoBlockHeader::empty(), + txs: vec![], + }; + block.header.timestamp = get_epoch_time_secs(); + let signer_signature_hash_1 = block.header.signer_signature_hash(); + + info!("------------------------- Test Attempt to Mine Invalid Block {signer_signature_hash_1} -------------------------"); + + let short_timeout = Duration::from_secs(30); + let all_signers: Vec<_> = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .collect(); + test_observer::clear(); + + // Propose a block to the signers that passes initial checks but will be rejected by the stacks node + let view = SortitionsView::fetch_view(proposal_conf, &signer_test.stacks_client).unwrap(); + block.header.pox_treatment = BitVec::ones(1).unwrap(); + block.header.consensus_hash = view.cur_sortition.consensus_hash; + block.header.chain_length = + get_chain_info(&signer_test.running_nodes.conf).stacks_tip_height + 1; + let signer_signature_hash = block.header.signer_signature_hash(); + + info!("------------------------- Test Attempt to Mine Invalid Block {signer_signature_hash} -------------------------"); + + signer_test.propose_block(block, short_timeout); + // Verify the signers rejected the second block via the endpoint + signer_test.wait_for_validate_reject_response(short_timeout, signer_signature_hash); + signer_test + .wait_for_block_rejections(30, &all_signers) + .expect("Timed out waiting for block rejections"); + old_signers_ignore_block_proposals(signer_signature_hash); + + assert_eq!(blocks_before, mined_blocks.load(Ordering::SeqCst)); + signer_test.shutdown(); +} + +#[test] +#[ignore] +/// Test that signers ignore signatures for blocks that do not belong to their own reward cycle. +/// This is a regression test for a signer bug that caused an internal signer instance to +/// broadcast a block corresponding to a different reward cycle with a higher threshold, stalling the network. +/// +/// Test Setup: +/// The test spins up four stacks signers that are stacked for one cycle, one miner Nakamoto node, and a corresponding bitcoind. +/// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing. +/// +/// Test Execution: +/// The same four stackers stack for an additional cycle. +/// A new fifth signer is added to the stacker set, stacking for the next reward cycle. +/// The node advances to the next reward cycle. +/// The last three signers are set to ignore block proposals. +/// A valid Nakamoto block N is proposed to the current signers. 
+/// A signer signature over block N is forcibly written to the outgoing signer's stackerdb instance. +/// +/// Test Assertion: +/// All signers for the previous cycle ignore the incoming block N. +/// Outgoing signers ignore the forced signature. +/// The chain does NOT advance to block N. +fn injected_signatures_are_ignored_across_boundaries() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 4; + let new_num_signers = 5_usize; + let signer_private_keys: Vec<_> = (0..num_signers).map(|_| StacksPrivateKey::new()).collect(); + let new_signer_private_key = StacksPrivateKey::new(); + let mut new_signer_private_keys = signer_private_keys.clone(); + new_signer_private_keys.push(new_signer_private_key); + + let new_signer_public_keys: Vec<_> = new_signer_private_keys + .iter() + .map(|sk| Secp256k1PublicKey::from_private(sk).to_bytes_compressed()) + .collect(); + let new_signer_addresses: Vec<_> = new_signer_private_keys.iter().map(tests::to_addr).collect(); + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + + let mut initial_balances = new_signer_addresses + .iter() + .map(|addr| (*addr, POX_4_DEFAULT_STACKER_BALANCE)) + .collect::<Vec<_>>(); + + initial_balances.push((sender_addr, (send_amt + send_fee) * 4)); + + let run_stamp = rand::random(); + + let rpc_port = 51024; + let rpc_bind = format!("127.0.0.1:{rpc_port}"); + + // Setup the new signers that will take over + let new_signer_config = build_signer_config_tomls( + &[new_signer_private_key], + &rpc_bind, + Some(Duration::from_millis(128)), // Timeout defaults to 5 seconds. Let's override it to 128 milliseconds. 
&Network::Testnet, + "12345", + run_stamp, + 3000 + num_signers, + Some(100_000), + None, + Some(9000 + num_signers), + None, + ) + .first() + .unwrap() + .clone(); + + info!("---- spawning signer ----"); + let signer_config = SignerConfig::load_from_str(&new_signer_config).unwrap(); + let new_spawned_signer = SpawnedSigner::new(signer_config.clone()); + + // Boot with some initial signer set + let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new_with_config_modifications( + num_signers, + initial_balances, + |_| {}, + |naka_conf| { + info!( + "---- Adding signer endpoint to naka conf ({}) ----", + signer_config.endpoint + ); + + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("{}", signer_config.endpoint), + events_keys: vec![ + EventKeyType::StackerDBChunks, + EventKeyType::BlockProposal, + EventKeyType::BurnchainBlocks, + ], + timeout_ms: 1000, + }); + naka_conf.node.rpc_bind = rpc_bind.clone(); + }, + None, + Some(signer_private_keys), + ); + assert_eq!( + new_spawned_signer.config.node_host, + signer_test.running_nodes.conf.node.rpc_bind + ); + + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + let short_timeout = Duration::from_secs(20); + + // Verify that naka_conf has our new signer's event observers + let endpoint = format!("{}", signer_config.endpoint); + assert!(signer_test + .running_nodes + .conf + .events_observers + .iter() + .any(|observer| observer.endpoint == endpoint)); + + info!("---- Booting to epoch 3 -----"); + signer_test.boot_to_epoch_3(); + // Do not cleanup stale signers + TEST_SKIP_SIGNER_CLEANUP.set(true); + + // verify that the first reward cycle has the old signers in the reward set + let reward_cycle = signer_test.get_current_reward_cycle(); + let signer_test_public_keys: Vec<_> = signer_test + .signer_stacks_private_keys + .iter() + .map(|sk| Secp256k1PublicKey::from_private(sk).to_bytes_compressed()) + .collect(); + + info!("---- Verifying that the current signers are the old signers ----"); + let current_signers = signer_test.get_reward_set_signers(reward_cycle); + assert_eq!(current_signers.len(), num_signers); + // Verify that the current signers are the same as the old signers + for signer in current_signers.iter() { + assert!(signer_test_public_keys.contains(&signer.signing_key.to_vec())); + } + + // advance to the next reward cycle, stacking to the new signers beforehand + let reward_cycle = signer_test.get_current_reward_cycle(); + + info!("---- Stacking new signers -----"); + + let burn_block_height = signer_test + .running_nodes + .btc_regtest_controller + .get_headers_height(); + let accounts_to_check: Vec<_> = new_signer_private_keys.iter().map(tests::to_addr).collect(); + + // Stack the new signer + let pox_addr = PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + tests::to_addr(&new_signer_private_key).bytes, + ); + let pox_addr_tuple: clarity::vm::Value = pox_addr.clone().as_clarity_tuple().unwrap().into(); + let signature = make_pox_4_signer_key_signature( + &pox_addr, + &new_signer_private_key, + reward_cycle.into(), + &Pox4SignatureTopic::StackStx, + CHAIN_ID_TESTNET, + 1_u128, + u128::MAX, + 1, + ) + .unwrap() + .to_rsv(); + + let signer_pk = Secp256k1PublicKey::from_private(&new_signer_private_key); + let stacking_tx = tests::make_contract_call( + &new_signer_private_key, + 0, + 1000, + signer_test.running_nodes.conf.burnchain.chain_id, + &StacksAddress::burn_address(false), + "pox-4", + "stack-stx", + &[ + 
clarity::vm::Value::UInt(POX_4_DEFAULT_STACKER_STX_AMT), + pox_addr_tuple.clone(), + clarity::vm::Value::UInt(burn_block_height as u128), + clarity::vm::Value::UInt(1), + clarity::vm::Value::some(clarity::vm::Value::buff_from(signature).unwrap()).unwrap(), + clarity::vm::Value::buff_from(signer_pk.to_bytes_compressed()).unwrap(), + clarity::vm::Value::UInt(u128::MAX), + clarity::vm::Value::UInt(1), + ], + ); + submit_tx(&http_origin, &stacking_tx); + + wait_for(60, || { + Ok(accounts_to_check + .iter() + .all(|acct| get_account(&http_origin, acct).nonce >= 1)) + }) + .expect("Timed out waiting for stacking txs to be mined"); + + signer_test.mine_nakamoto_block(short_timeout, true); + + let next_reward_cycle = reward_cycle.saturating_add(1); + + let next_cycle_height = signer_test + .running_nodes + .btc_regtest_controller + .get_burnchain() + .nakamoto_first_block_of_cycle(next_reward_cycle) + .saturating_add(1); + + let next_calculation = next_cycle_height.saturating_sub(3); + info!("---- Mining to next reward set calculation (block {next_calculation}) -----"); + signer_test.run_until_burnchain_height_nakamoto( + Duration::from_secs(60), + next_calculation, + new_num_signers, + ); + + // Verify that the new reward set is the new signers + let reward_set = signer_test.get_reward_set_signers(next_reward_cycle); + assert_eq!(reward_set.len(), new_num_signers); + for signer in reward_set.iter() { + assert!(new_signer_public_keys.contains(&signer.signing_key.to_vec())); + } + + info!("---- Manually mine a single burn block to force the signers to update ----"); + next_block_and_wait( + &mut signer_test.running_nodes.btc_regtest_controller, + &signer_test.running_nodes.blocks_processed, + ); + + signer_test.wait_for_registered_both_reward_cycles(60); + + info!("---- Mining to the next reward cycle (block {next_cycle_height}) -----",); + signer_test.run_until_burnchain_height_nakamoto( + Duration::from_secs(60), + next_cycle_height, + new_num_signers, + ); + let new_reward_cycle = signer_test.get_current_reward_cycle(); + assert_eq!(new_reward_cycle, reward_cycle.saturating_add(1)); + + let current_signers = signer_test.get_reward_set_signers(new_reward_cycle); + assert_eq!(current_signers.len(), new_num_signers); + + let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + // Clear the stackerdb chunks + test_observer::clear(); + + let old_reward_cycle = reward_cycle; + let curr_reward_cycle = new_reward_cycle; + + info!("------------------------- Test Propose A Valid Block -------------------------"); + // Make the last three of the signers ignore the block proposal to ensure it is not globally accepted/rejected + let all_signers: Vec<_> = new_signer_private_keys + .iter() + .map(StacksPublicKey::from_private) + .collect(); + let non_ignoring_signers: Vec<_> = all_signers + .iter() + .cloned() + .take(new_num_signers * 5 / 10) + .collect(); + let ignoring_signers: Vec<_> = all_signers + .iter() + .cloned() + .skip(new_num_signers * 5 / 10) + .collect(); + assert_eq!(ignoring_signers.len(), 3); + assert_eq!(non_ignoring_signers.len(), 2); + TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(ignoring_signers.clone()); + + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + // submit a tx so that the miner will ATTEMPT to mine a stacks block N + let transfer_tx = make_stacks_transfer( + &sender_sk, + 0, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + 
&recipient, + send_amt, + ); + let tx = submit_tx(&http_origin, &transfer_tx); + + info!("Submitted tx {tx} in an attempt to mine block N"); + let mut new_signature_hash = None; + wait_for(30, || { + let accepted_signers = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + if let SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)) = message { + new_signature_hash = Some(accepted.signer_signature_hash); + return non_ignoring_signers.iter().find(|key| { + key.verify(accepted.signer_signature_hash.bits(), &accepted.signature) + .is_ok() + }); + } + None + }) + .collect::<Vec<_>>(); + Ok(accepted_signers.len() + ignoring_signers.len() == new_num_signers) + }) + .expect("FAIL: Timed out waiting for block proposal acceptance"); + let new_signature_hash = new_signature_hash.expect("Failed to get new signature hash"); + + // The last half of the signers are the ones that are ignoring block proposals and thus haven't sent a signature yet + let forced_signer = &signer_test.signer_stacks_private_keys[ignoring_signers.len()]; + let mut stackerdb = StackerDB::new( + &signer_test.running_nodes.conf.node.rpc_bind, + forced_signer.clone(), + false, + old_reward_cycle, + signer_test + .get_signer_slot_id(old_reward_cycle, &tests::to_addr(forced_signer)) + .expect("Failed to get signer slot id") + .expect("Signer does not have a slot id"), + ); + signer_test.verify_no_block_response_found( + &mut stackerdb, + next_reward_cycle, + new_signature_hash, + ); + + // Get the last block proposal + let block_proposal = test_observer::get_stackerdb_chunks() + .iter() + .flat_map(|chunk| chunk.modified_slots.clone()) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + if let SignerMessage::BlockProposal(proposal) = message { + assert_eq!(proposal.reward_cycle, curr_reward_cycle); + assert_eq!( + proposal.block.header.signer_signature_hash(), + new_signature_hash + ); + return Some(proposal); + } + None + }) + .next() + .expect("Failed to find block proposal for reward cycle {curr_reward_cycle}"); + + let blocks_after = mined_blocks.load(Ordering::SeqCst); + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + assert_eq!(blocks_after, blocks_before); + assert_eq!(info_after, info_before); + + // Ensure that the block was NOT accepted globally so the stacks tip has NOT advanced to N + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block = nakamoto_blocks.last().unwrap(); + assert_ne!(info_after.stacks_tip.to_string(), block.block_hash); + + info!("------------------------- Test Inject Valid Signature To Old Signers -------------------------"); + // Force a signature to push the block over the old signers' threshold (see the threshold sketch below). + // If the old signers were not fixed, they would stall. 
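Back-of-envelope for why the injected signatures matter here, assuming five equally weighted signers and the 7/10 acceptance threshold used throughout these tests (real reward sets weight signers by stacked STX, so equal weights are an assumption):

    // True when `accepted` of `total` equally weighted signers reach the 7/10
    // threshold; integer math sidesteps floating point.
    fn meets_threshold(accepted: usize, total: usize) -> bool {
        accepted * 10 >= total * 7
    }

    fn main() {
        assert!(!meets_threshold(2, 5)); // the two non-ignoring signers alone stall
        assert!(meets_threshold(4, 5)); // two injected signatures cross the line
    }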
+ signer_test.inject_accept_signature(&block_proposal.block, forced_signer, old_reward_cycle); + + assert!(wait_for(10, || { + Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before) + }) + .is_err()); + + let info_after = signer_test.stacks_client.get_peer_info().unwrap(); + assert_ne!(info_after.stacks_tip.to_string(), block.block_hash); + + info!("------------------------- Test Inject Valid Signatures to New Signers -------------------------"); + // Force two signatures to force the threshold of the block over the new signers' threshold + // This signature should be accepted by current signers, but ignored by the old signers. + signer_test.inject_accept_signature(&block_proposal.block, forced_signer, new_reward_cycle); + let forced_signer = new_signer_private_keys.last().unwrap(); + signer_test.inject_accept_signature(&block_proposal.block, forced_signer, new_reward_cycle); + + wait_for(30, || { + Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before) + }) + .expect("Timed out waiting for block to be mined"); + + let info_after = signer_test.stacks_client.get_peer_info().unwrap(); + assert_eq!(info_after.stacks_tip.to_string(), block.block_hash,); + // Wait 5 seconds in case there are any lingering block pushes from the signers + std::thread::sleep(Duration::from_secs(5)); + signer_test.shutdown(); + + assert!(new_spawned_signer.stop().is_none()); +} diff --git a/testnet/stacks-node/src/tests/stackerdb.rs b/testnet/stacks-node/src/tests/stackerdb.rs index c68b477b47..6212dd6fcc 100644 --- a/testnet/stacks-node/src/tests/stackerdb.rs +++ b/testnet/stacks-node/src/tests/stackerdb.rs @@ -18,6 +18,7 @@ use std::{env, thread}; use clarity::vm::types::QualifiedContractIdentifier; use stacks::chainstate::stacks::StacksPrivateKey; +use stacks::config::{EventKeyType, InitialBalance}; use stacks::libstackerdb::{StackerDBChunkAckData, StackerDBChunkData}; use stacks_common::types::chainstate::StacksAddress; use stacks_common::util::hash::Sha512Trunc256Sum; @@ -25,7 +26,6 @@ use {reqwest, serde_json}; use super::bitcoin_regtest::BitcoinCoreController; use crate::burnchains::BurnchainController; -use crate::config::{EventKeyType, InitialBalance}; use crate::tests::neon_integrations::{ neon_integration_test_conf, next_block_and_wait, submit_tx, test_observer, wait_for_runloop, };