diff --git a/CHANGELOG.md b/CHANGELOG.md index 19ba761f9b0..096ab14015b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ ### Protocol Changes ### Non-protocol Changes +* `/debug` page now has client_config linked. You can also check your client_config directly at /debug/client_config ## 1.31.0 diff --git a/Cargo.lock b/Cargo.lock index 93288979685..2c1d23f766f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -318,9 +318,9 @@ checksum = "1485d4d2cc45e7b201ee3767015c96faa5904387c9d87c6efdd0fb511f12d305" [[package]] name = "arbitrary" -version = "1.1.0" +version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c38b6b6b79f671c25e1a3e785b7b82d7562ffc9cd3efdc98627e5668a2472490" +checksum = "3e90af4de65aa7b293ef2d09daff88501eb254f58edde2e1ac02c82d873eadad" dependencies = [ "derive_arbitrary", ] @@ -343,6 +343,12 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" +[[package]] +name = "arrayvec" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" + [[package]] name = "assert_matches" version = "1.5.0" @@ -392,6 +398,22 @@ dependencies = [ "syn", ] +[[package]] +name = "attohttpc" +version = "0.19.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "262c3f7f5d61249d8c00e5546e2685cd15ebeeb1bc0f3cc5449350a1cb07319e" +dependencies = [ + "http", + "log", + "native-tls", + "openssl", + "serde", + "serde_json", + "url", + "wildmatch", +] + [[package]] name = "atty" version = "0.2.14" @@ -444,11 +466,36 @@ dependencies = [ "tokio", ] +[[package]] +name = "aws-creds" +version = "0.30.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aeeee1a5defa63cba39097a510dfe63ef53658fc8995202a610f6a8a4d03639" +dependencies = [ + "attohttpc", + "dirs", + 
"rust-ini", + "serde", + "serde-xml-rs", + "thiserror", + "time 0.3.9", + "url", +] + +[[package]] +name = "aws-region" +version = "0.25.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f92a8af5850d0ea0916ca3e015ab86951ded0bf4b70fd27896e81ae1dfb0af37" +dependencies = [ + "thiserror", +] + [[package]] name = "backtrace" -version = "0.3.65" +version = "0.3.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11a17d453482a265fd5f8479f2a3f405566e6ca627837aaddb85af8b1ab8ef61" +checksum = "cab84319d616cfb654d03394f38ab7e6f0919e181b1b57e1fd15e7fb4077d9a7" dependencies = [ "addr2line", "cc", @@ -532,7 +579,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b64485778c4f16a6a5a9d335e80d449ac6c70cdd6a06d2af18a6f6f775a125b3" dependencies = [ "arrayref", - "arrayvec", + "arrayvec 0.5.2", "cc", "cfg-if 0.1.10", "constant_time_eq", @@ -558,6 +605,16 @@ dependencies = [ "generic-array 0.14.5", ] +[[package]] +name = "block_on_proc" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b872f3528eeeb4370ee73b51194dc1cd93680c2d0eb6c7a223889038d2c1a167" +dependencies = [ + "quote", + "syn", +] + [[package]] name = "bolero" version = "0.8.0" @@ -734,9 +791,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.9.1" +version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a45a46ab1f2412e53d3a0ade76ffad2025804294569aae387231a0cd6e0899" +checksum = "0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535" [[package]] name = "bytecheck" @@ -1021,17 +1078,15 @@ dependencies = [ [[package]] name = "console" -version = "0.15.0" +version = "0.15.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a28b32d32ca44b70c3e4acd7db1babf555fa026e385fb95f18028f88848b3c31" +checksum = "c3d79fbe8970a77e3e34151cc13d3b3e248aa0faaecb9f6091fa07ebefe5ad60" dependencies 
= [ "encode_unicode", + "lazy_static", "libc", - "once_cell", - "regex", - "terminal_size", "unicode-width", - "winapi", + "windows-sys 0.42.0", ] [[package]] @@ -1103,23 +1158,27 @@ dependencies = [ [[package]] name = "cranelift-bforest" -version = "0.84.0" +version = "0.91.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fa7c3188913c2d11a361e0431e135742372a2709a99b103e79758e11a0a797e" +checksum = "fc952b310b24444fc14ab8b9cbe3fafd7e7329e3eec84c3a9b11d2b5cf6f3be1" dependencies = [ "cranelift-entity", ] [[package]] name = "cranelift-codegen" -version = "0.84.0" +version = "0.91.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29285f70fd396a8f64455a15a6e1d390322e4a5f5186de513141313211b0a23e" +checksum = "e73470419b33011e50dbf0f6439cbccbaabe9381de172da4e1b6efcda4bb8fa7" dependencies = [ + "arrayvec 0.7.2", + "bumpalo", "cranelift-bforest", "cranelift-codegen-meta", "cranelift-codegen-shared", + "cranelift-egraph", "cranelift-entity", + "cranelift-isle", "gimli", "log", "regalloc2", @@ -1129,33 +1188,47 @@ dependencies = [ [[package]] name = "cranelift-codegen-meta" -version = "0.84.0" +version = "0.91.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "057eac2f202ec95aebfd8d495e88560ac085f6a415b3c6c28529dc5eb116a141" +checksum = "911a1872464108a11ac9965c2b079e61bbdf1bc2e0b9001264264add2e12a38f" dependencies = [ "cranelift-codegen-shared", ] [[package]] name = "cranelift-codegen-shared" -version = "0.84.0" +version = "0.91.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75d93869efd18874a9341cfd8ad66bcb08164e86357a694a0e939d29e87410b9" +checksum = "e036f3f07adb24a86fb46e977e8fe03b18bb16b1eada949cf2c48283e5f8a862" + +[[package]] +name = "cranelift-egraph" +version = "0.91.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d6c623f4b5d2a6bad32c403f03765d4484a827eb93ee78f8cb6219ef118fd59" +dependencies 
= [ + "cranelift-entity", + "fxhash", + "hashbrown 0.12.3", + "indexmap", + "log", + "smallvec", +] [[package]] name = "cranelift-entity" -version = "0.84.0" +version = "0.91.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e34bd7a1fefa902c90a921b36323f17a398b788fa56a75f07a29d83b6e28808" +checksum = "74385eb5e405b3562f0caa7bcc4ab9a93c7958dd5bcd0e910bffb7765eacd6fc" dependencies = [ "serde", ] [[package]] name = "cranelift-frontend" -version = "0.84.0" +version = "0.91.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "457018dd2d6ee300953978f63215b5edf3ae42dbdf8c7c038972f10394599f72" +checksum = "8a4ac920422ee36bff2c66257fec861765e3d95a125cdf58d8c0f3bba7e40e61" dependencies = [ "cranelift-codegen", "log", @@ -1163,11 +1236,17 @@ dependencies = [ "target-lexicon 0.12.3", ] +[[package]] +name = "cranelift-isle" +version = "0.91.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c541263fb37ad2baa53ec8c37218ee5d02fa0984670d9419dedd8002ea68ff08" + [[package]] name = "cranelift-native" -version = "0.84.0" +version = "0.91.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bba027cc41bf1d0eee2ddf16caba2ee1be682d0214520fff0129d2c6557fda89" +checksum = "1de5d7a063e8563d670aaca38de16591a9b70dc66cbad4d49a7b4ae8395fd1ce" dependencies = [ "cranelift-codegen", "libc", @@ -1176,9 +1255,9 @@ dependencies = [ [[package]] name = "cranelift-wasm" -version = "0.84.0" +version = "0.91.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b17639ced10b9916c9be120d38c872ea4f9888aa09248568b10056ef0559bfa" +checksum = "dfbc4dd03b713b5d71b582915b8c272f4813cdd8c99a3e03d9ba70c44468a6e0" dependencies = [ "cranelift-codegen", "cranelift-entity", @@ -1186,7 +1265,7 @@ dependencies = [ "itertools", "log", "smallvec", - "wasmparser 0.84.0", + "wasmparser 0.95.0", "wasmtime-types", ] @@ -1419,9 +1498,9 @@ dependencies = [ [[package]] 
name = "derive_arbitrary" -version = "1.1.0" +version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98e23c06c035dac87bd802d98f368df73a7f2cb05a66ffbd1f377e821fac4af9" +checksum = "8beee4701e2e229e8098bbdecdca12449bc3e322f137d269182fa1291e20bd00" dependencies = [ "proc-macro2", "quote", @@ -1478,9 +1557,9 @@ dependencies = [ [[package]] name = "dirs" -version = "3.0.2" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30baa043103c9d0c2a57cf537cc2f35623889dc0d405e6c3cccfadbc81c71309" +checksum = "ca3aa72a6f96ea37bbc5aa912f6788242832f75369bdfdadcb0e38423f100059" dependencies = [ "dirs-sys", ] @@ -1502,6 +1581,12 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c97b9233581d84b8e1e689cdd3a47b6f69770084fc246e86a7f78b0d9c1d4a5" +[[package]] +name = "dlv-list" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0688c2a7f92e427f44895cd63841bff7b29f8d7a1648b9e7e07a4a365b2e1257" + [[package]] name = "dtoa" version = "0.4.8" @@ -1777,11 +1862,10 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" +checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" dependencies = [ - "matches", "percent-encoding", ] @@ -2027,9 +2111,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.12.1" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db0d4cf898abf0081f964436dc980e96670a0f36863e4b83aaacdb65c9d7ccc3" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" dependencies = [ "ahash", ] @@ -2222,6 +2306,16 @@ dependencies = [ 
"unicode-normalization", ] +[[package]] +name = "idna" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + [[package]] name = "im" version = "15.1.0" @@ -2258,7 +2352,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" dependencies = [ "autocfg", - "hashbrown 0.12.1", + "hashbrown 0.12.3", "serde", ] @@ -2277,16 +2371,16 @@ dependencies = [ [[package]] name = "insta" -version = "1.14.0" +version = "1.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "689960f187c43c01650c805fb6bc6f55ab944499d86d4ffe9474ad78991d8e94" +checksum = "f6f0f08b46e4379744de2ab67aa8f7de3ffd1da3e275adc41fcc82053ede46ff" dependencies = [ "console", - "once_cell", + "lazy_static", + "linked-hash-map", "serde", - "serde_json", - "serde_yaml", "similar", + "yaml-rust", ] [[package]] @@ -2353,9 +2447,13 @@ dependencies = [ [[package]] name = "io-lifetimes" -version = "0.5.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec58677acfea8a15352d42fc87d11d63596ade9239e0a7c9352914417515dbe6" +checksum = "e7d6c6f8c91b4b9ed43484ad1a938e393caf35960fce7f82a040497207bd8e9e" +dependencies = [ + "libc", + "windows-sys 0.42.0", +] [[package]] name = "ipnet" @@ -2446,9 +2544,9 @@ checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" [[package]] name = "libc" -version = "0.2.125" +version = "0.2.139" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5916d2ae698f6de9bfb891ad7a8d65c09d232dc58cc4ac433c7da3b2fd84bc2b" +checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79" [[package]] name = "libfuzzer-sys" @@ -2511,15 +2609,15 @@ dependencies = [ [[package]] name = 
"linked-hash-map" -version = "0.5.4" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" +checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "linux-raw-sys" -version = "0.0.42" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5284f00d480e1c39af34e72f8ad60b94f47007e3481cd3b731c1d67190ddc7b7" +checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" [[package]] name = "local-channel" @@ -2546,7 +2644,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4da24a77a3d8a6d4862d95f72e6fdb9c09a643ecdb402d754004a557f2bec75" dependencies = [ "scopeguard", - "serde", ] [[package]] @@ -2632,12 +2729,38 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" +[[package]] +name = "maybe-async" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6007f9dad048e0a224f27ca599d669fca8cfa0dac804725aab542b2eb032bce6" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "md5" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771" + [[package]] name = "memchr" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" +[[package]] +name = "memfd" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b20a59d985586e4a5aef64564ac77299f8586d8be6cf9106a5a40207e8908efb" +dependencies = [ + "rustix", +] + [[package]] name = "memmap" version = "0.7.0" @@ -2672,6 +2795,25 @@ version = "0.3.16" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +[[package]] +name = "mime_guess" +version = "2.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" +dependencies = [ + "mime", + "unicase", +] + +[[package]] +name = "minidom" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dddfe21863f8d600ed2bd1096cb9b5cd6ff984be6185cf9d563fb4a107bffc5" +dependencies = [ + "rxml", +] + [[package]] name = "minimal-lexical" version = "0.2.1" @@ -2696,7 +2838,7 @@ dependencies = [ "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys", + "windows-sys 0.36.1", ] [[package]] @@ -2874,8 +3016,10 @@ dependencies = [ "chrono", "derive_more", "near-crypto", + "near-o11y", "near-primitives", "num-rational", + "once_cell", "serde", "serde_json", "sha2 0.10.2", @@ -3018,9 +3162,17 @@ dependencies = [ name = "near-dyn-configs" version = "0.0.0" dependencies = [ + "anyhow", + "near-chain-configs", "near-o11y", + "near-primitives", "once_cell", "prometheus", + "serde", + "serde_json", + "thiserror", + "tokio", + "tracing", ] [[package]] @@ -3304,7 +3456,7 @@ dependencies = [ "opentelemetry-semantic-conventions", "prometheus", "serde", - "smartstring", + "smartstring 1.0.1", "strum", "thiserror", "tokio", @@ -3584,7 +3736,6 @@ name = "near-vm-logic" version = "0.0.0" dependencies = [ "borsh", - "byteorder", "ed25519-dalek", "expect-test", "hex", @@ -3733,6 +3884,7 @@ dependencies = [ "futures", "near-amend-genesis", "near-chain-configs", + "near-client", "near-dyn-configs", "near-jsonrpc-primitives", "near-mirror", @@ -3779,7 +3931,6 @@ version = "0.0.0" dependencies = [ "assert_matches", "borsh", - "byteorder", "enum-map", "hex", "indicatif", @@ -3896,12 +4047,12 @@ checksum = 
"17b02fc0ff9a9e4b35b3342880f48e896ebf69f2967921fe8646bf5b7125956a" [[package]] name = "object" -version = "0.28.4" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e42c982f2d955fac81dd7e1d0e1426a7d702acd9c98d19ab01083a6a0328c424" +checksum = "21158b2c33aa6d4561f1c0a6ea283ca92bc54802a93b263e910746d679a7eb53" dependencies = [ "crc32fast", - "hashbrown 0.11.2", + "hashbrown 0.12.3", "indexmap", "memchr", ] @@ -4027,6 +4178,16 @@ dependencies = [ "opentelemetry", ] +[[package]] +name = "ordered-multimap" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccd746e37177e1711c20dd619a1620f34f5c8b569c53590a72dedd5344d8924a" +dependencies = [ + "dlv-list", + "hashbrown 0.12.3", +] + [[package]] name = "os_str_bytes" version = "6.0.1" @@ -4054,9 +4215,9 @@ dependencies = [ [[package]] name = "paperclip" -version = "0.7.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f399678683ec199ddca1dd54db957dd158dedb5fc90826eb2a7e6c0800c3a868" +checksum = "461c6d9997c512648e9cfd41575a3e0d3f46a1ec3c8214a32dd91b729487b1dc" dependencies = [ "anyhow", "itertools", @@ -4064,7 +4225,6 @@ dependencies = [ "paperclip-actix", "paperclip-core", "paperclip-macros", - "parking_lot 0.10.2", "semver 1.0.9", "serde", "serde_derive", @@ -4076,32 +4236,31 @@ dependencies = [ [[package]] name = "paperclip-actix" -version = "0.5.1" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29880bc57ef516c272d6fdd215ecaf96375d9a5dbac5412d849b9f9afd0d7298" +checksum = "f8e2adab1a71766521af58973719fb66bd7b92e6ce8a3acf1e83c7acc0e78e17" dependencies = [ "actix-service", "actix-web", "futures", + "mime_guess", "once_cell", "paperclip-core", "paperclip-macros", - "parking_lot 0.10.2", "serde_json", ] [[package]] name = "paperclip-core" -version = "0.5.2" +version = "0.6.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bee516533b655ba63e41e788b49a2beb1139e1eebafb143e7cb56b8cabb5da1" +checksum = "d53eecc65b19fa7884e3e6939e54ba5104ec179894728bca6f6cee67adfa145e" dependencies = [ "actix-web", "mime", "once_cell", "paperclip-macros", - "parking_lot 0.10.2", - "pin-project", + "pin-project-lite", "regex", "serde", "serde_json", @@ -4111,9 +4270,9 @@ dependencies = [ [[package]] name = "paperclip-macros" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e89990be67318e3da29c92adb3377e0251a8eee10b4f91ff349cbf2da945e9d1" +checksum = "a0e23a129dc95a45661cbbfac1b8f865094131da7937738a343d00f47d87fada" dependencies = [ "heck 0.4.0", "http", @@ -4183,7 +4342,7 @@ dependencies = [ "libc", "redox_syscall 0.2.13", "smallvec", - "windows-sys", + "windows-sys 0.36.1", ] [[package]] @@ -4200,9 +4359,9 @@ checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" [[package]] name = "percent-encoding" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" +checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "petgraph" @@ -4710,9 +4869,9 @@ dependencies = [ [[package]] name = "regalloc2" -version = "0.1.3" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "904196c12c9f55d3aea578613219f493ced8e05b3d0c6a42d11cb4142d8b4879" +checksum = "300d4fbfb40c1c66a78ba3ddd41c1110247cf52f97b87d0f2fc9209bd49b030c" dependencies = [ "fxhash", "log", @@ -4746,18 +4905,6 @@ version = "0.6.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" -[[package]] -name = "region" -version = "2.2.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "877e54ea2adcd70d80e9179344c97f93ef0dffd6b03e1f4529e6e83ab2fa9ae0" -dependencies = [ - "bitflags", - "libc", - "mach", - "winapi", -] - [[package]] name = "region" version = "3.0.0" @@ -4817,6 +4964,7 @@ dependencies = [ "serde_urlencoded", "tokio", "tokio-native-tls", + "tokio-util 0.6.10", "url", "wasm-bindgen", "wasm-bindgen-futures", @@ -4854,7 +5002,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "517a3034eb2b1499714e9d1e49b2367ad567e07639b69776d35e259d9c27cca6" dependencies = [ "bytecheck", - "hashbrown 0.12.1", + "hashbrown 0.12.3", "ptr_meta", "rend", "rkyv_derive", @@ -4979,6 +5127,48 @@ dependencies = [ "smallvec", ] +[[package]] +name = "rust-ini" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6d5f2436026b4f6e79dc829837d467cc7e9a55ee40e750d716713540715a2df" +dependencies = [ + "cfg-if 1.0.0", + "ordered-multimap", +] + +[[package]] +name = "rust-s3" +version = "0.32.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6009d9d4cf910505534d62d380a0aa305805a2af0b5c3ad59a3024a0715b847" +dependencies = [ + "async-trait", + "aws-creds", + "aws-region", + "base64", + "block_on_proc", + "cfg-if 1.0.0", + "hex", + "hmac", + "http", + "log", + "maybe-async", + "md5", + "minidom", + "percent-encoding", + "reqwest", + "serde", + "serde-xml-rs", + "serde_derive", + "sha2 0.10.2", + "thiserror", + "time 0.3.9", + "tokio", + "tokio-stream", + "url", +] + [[package]] name = "rustc-demangle" version = "0.1.21" @@ -5017,16 +5207,16 @@ dependencies = [ [[package]] name = "rustix" -version = "0.33.7" +version = "0.36.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "938a344304321a9da4973b9ff4f9f8db9caf4597dfd9dda6a60b523340a0fff0" +checksum = "4feacf7db682c6c329c4ede12649cd36ecab0f3be5b7d74e6a20304725db4549" dependencies = [ "bitflags", "errno", 
"io-lifetimes", "libc", "linux-raw-sys", - "winapi", + "windows-sys 0.42.0", ] [[package]] @@ -5035,6 +5225,25 @@ version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f2cc38e8fa666e2de3c4aba7edeb5ffc5246c1c2ed0e3d17e560aeeba736b23f" +[[package]] +name = "rxml" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a071866b8c681dc2cfffa77184adc32b57b0caad4e620b6292609703bceb804" +dependencies = [ + "bytes", + "pin-project-lite", + "rxml_validation", + "smartstring 0.2.10", + "tokio", +] + +[[package]] +name = "rxml_validation" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53bc79743f9a66c2fb1f951cd83735f275d46bfe466259fbc5897bb60a0d00ee" + [[package]] name = "ryu" version = "1.0.10" @@ -5157,6 +5366,18 @@ dependencies = [ "serde", ] +[[package]] +name = "serde-xml-rs" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65162e9059be2f6a3421ebbb4fef3e74b7d9e7c60c50a0e292c6239f19f1edfa" +dependencies = [ + "log", + "serde", + "thiserror", + "xml-rs", +] + [[package]] name = "serde_bytes" version = "0.11.6" @@ -5233,14 +5454,15 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.8.26" +version = "0.9.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "578a7433b776b56a35785ed5ce9a7e777ac0598aac5a6dd1b4b18a307c7fc71b" +checksum = "92b5b431e8907b50339b51223b97d102db8d987ced36f6e4d03621db9316c834" dependencies = [ "indexmap", + "itoa 1.0.2", "ryu", "serde", - "yaml-rust", + "unsafe-libyaml", ] [[package]] @@ -5384,6 +5606,15 @@ dependencies = [ "syn", ] +[[package]] +name = "smartstring" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e714dff2b33f2321fdcd475b71cec79781a692d846f37f415fb395a1d2bcd48e" +dependencies = [ + "static_assertions", +] + [[package]] name = "smartstring" 
version = "1.0.1" @@ -5458,10 +5689,12 @@ dependencies = [ "rand 0.8.5", "rayon", "redis", + "rust-s3", "serde", "serde_json", "tempfile", "testlib", + "thiserror", "tracing", ] @@ -5619,16 +5852,6 @@ dependencies = [ "winapi-util", ] -[[package]] -name = "terminal_size" -version = "0.1.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "633c1a546cee861a1a6d0dc69ebeca693bf4296661ba7852b9d21d159e0506df" -dependencies = [ - "libc", - "winapi", -] - [[package]] name = "testlib" version = "0.0.0" @@ -5737,6 +5960,7 @@ dependencies = [ "itoa 1.0.2", "libc", "num_threads", + "serde", "time-macros", ] @@ -6088,6 +6312,15 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "unicase" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" +dependencies = [ + "version_check", +] + [[package]] name = "unicode-bidi" version = "0.3.8" @@ -6127,15 +6360,20 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "957e51f3646910546462e67d5f7599b9e4fb8acdd304b087a6494730f9eebf04" +[[package]] +name = "unsafe-libyaml" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc7ed8ba44ca06be78ea1ad2c3682a43349126c8818054231ee6f4748012aed2" + [[package]] name = "url" -version = "2.2.2" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507c383b2d33b5fc35d1861e77e6b383d158b2da5e14fe51b83dfedf6fd578c" +checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" dependencies = [ "form_urlencoded", - "idna", - "matches", + "idna 0.3.0", "percent-encoding", ] @@ -6145,7 +6383,7 @@ version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "841d6937c33ec6039d8071bcf72933146b5bbe378d645d8fa59bdadabfc2a249" dependencies = [ - "idna", + "idna 
0.2.3", "lazy_static", "regex", "serde", @@ -6376,7 +6614,7 @@ dependencies = [ "cfg-if 1.0.0", "enumset", "leb128", - "region 3.0.0", + "region", "rkyv", "thiserror", "wasmer-compiler-near", @@ -6474,7 +6712,7 @@ dependencies = [ "libc", "memoffset", "more-asserts", - "region 3.0.0", + "region", "rkyv", "thiserror", "wasmer-types-near", @@ -6502,6 +6740,16 @@ dependencies = [ "indexmap", ] +[[package]] +name = "wasmparser" +version = "0.95.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2ea896273ea99b15132414be1da01ab0d8836415083298ecaffbe308eaac87a" +dependencies = [ + "indexmap", + "url", +] + [[package]] name = "wasmprinter" version = "0.2.34" @@ -6514,38 +6762,44 @@ dependencies = [ [[package]] name = "wasmtime" -version = "0.37.0" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfdd1101bdfa0414a19018ec0a091951a20b695d4d04f858d49f6c4cc53cd8dd" +checksum = "4abddf11816dd8f5e7310f6ebe5a2503b43f20ab2bf050b7d63f5b1bb96a81d9" dependencies = [ "anyhow", - "backtrace", "bincode", "cfg-if 1.0.0", "indexmap", - "lazy_static", "libc", "log", "object", "once_cell", "paste", "psm", - "region 2.2.0", "serde", "target-lexicon 0.12.3", - "wasmparser 0.84.0", + "wasmparser 0.95.0", "wasmtime-cranelift", "wasmtime-environ", "wasmtime-jit", "wasmtime-runtime", - "winapi", + "windows-sys 0.42.0", +] + +[[package]] +name = "wasmtime-asm-macros" +version = "4.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1f5206486f0467ba86e84d35996c4048b077cec2c9e5b322e7b853bdbe79334" +dependencies = [ + "cfg-if 1.0.0", ] [[package]] name = "wasmtime-cranelift" -version = "0.37.0" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16e78edcfb0daa9a9579ac379d00e2d5a5b2a60c0d653c8c95e8412f2166acb9" +checksum = "9e5bcb1d5ef211726b11e1286fe96cb40c69044c3632e1d6c67805d88a2e1a34" dependencies = [ "anyhow", "cranelift-codegen", 
@@ -6555,39 +6809,37 @@ dependencies = [ "cranelift-wasm", "gimli", "log", - "more-asserts", "object", "target-lexicon 0.12.3", "thiserror", - "wasmparser 0.84.0", + "wasmparser 0.95.0", "wasmtime-environ", ] [[package]] name = "wasmtime-environ" -version = "0.37.0" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4201389132ec467981980549574b33fc70d493b40f2c045c8ce5c7b54fbad97e" +checksum = "dcab3fac5a2ff68ce9857166a7d7c0e5251b554839b9dda7ed3b5528e191936e" dependencies = [ "anyhow", "cranelift-entity", "gimli", "indexmap", "log", - "more-asserts", "object", "serde", "target-lexicon 0.12.3", "thiserror", - "wasmparser 0.84.0", + "wasmparser 0.95.0", "wasmtime-types", ] [[package]] name = "wasmtime-jit" -version = "0.37.0" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1587ca7752d00862faa540d00fd28e5ccf1ac61ba19756449193f1153cb2b127" +checksum = "a7d866e2a84ee164739b7ed7bd7cc9e1f918639d2ec5e2817a31e24c148cab20" dependencies = [ "addr2line", "anyhow", @@ -6597,61 +6849,69 @@ dependencies = [ "gimli", "log", "object", - "region 2.2.0", "rustc-demangle", - "rustix", "serde", "target-lexicon 0.12.3", - "thiserror", "wasmtime-environ", + "wasmtime-jit-icache-coherence", "wasmtime-runtime", - "winapi", + "windows-sys 0.42.0", ] [[package]] name = "wasmtime-jit-debug" -version = "0.37.0" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b27233ab6c8934b23171c64f215f902ef19d18c1712b46a0674286d1ef28d5dd" +checksum = "0104c2b1ce443f2a2806216fcdf6dce09303203ec5797a698d313063b31e5bc8" dependencies = [ - "lazy_static", + "once_cell", +] + +[[package]] +name = "wasmtime-jit-icache-coherence" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22d9c2e92b0fc124d2cad6cb497a4c840580a7dd2414a37109e8c7cfe699c0ea" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "windows-sys 0.42.0", ] 
[[package]] name = "wasmtime-runtime" -version = "0.37.0" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47d3b0b8f13db47db59d616e498fe45295819d04a55f9921af29561827bdb816" +checksum = "0a1f0f99297a94cb20c511d1d4e864d9b54794644016d2530dc797cacfa7224a" dependencies = [ "anyhow", - "backtrace", "cc", "cfg-if 1.0.0", "indexmap", "libc", "log", "mach", + "memfd", "memoffset", - "more-asserts", + "paste", "rand 0.8.5", - "region 2.2.0", "rustix", - "thiserror", + "wasmtime-asm-macros", "wasmtime-environ", "wasmtime-jit-debug", - "winapi", + "windows-sys 0.42.0", ] [[package]] name = "wasmtime-types" -version = "0.37.0" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1630d9dca185299bec7f557a7e73b28742fe5590caf19df001422282a0a98ad1" +checksum = "62f3d8ee409447cae51651fd812437a0047ed8d7f44e94171ee05ce7cb955c96" dependencies = [ "cranelift-entity", "serde", "thiserror", - "wasmparser 0.84.0", + "wasmparser 0.95.0", ] [[package]] @@ -6695,6 +6955,12 @@ dependencies = [ "libc", ] +[[package]] +name = "wildmatch" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee583bdc5ff1cf9db20e9db5bb3ff4c3089a8f6b8b31aff265c9aba85812db86" + [[package]] name = "winapi" version = "0.3.9" @@ -6732,43 +6998,100 @@ version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" dependencies = [ - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_msvc", + "windows_aarch64_msvc 0.36.1", + "windows_i686_gnu 0.36.1", + "windows_i686_msvc 0.36.1", + "windows_x86_64_gnu 0.36.1", + "windows_x86_64_msvc 0.36.1", +] + +[[package]] +name = "windows-sys" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc 0.42.1", + "windows_i686_gnu 0.42.1", + "windows_i686_msvc 0.42.1", + "windows_x86_64_gnu 0.42.1", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc 0.42.1", ] +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c9864e83243fdec7fc9c5444389dcbbfd258f745e7853198f365e3c4968a608" + [[package]] name = "windows_aarch64_msvc" version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" +[[package]] +name = "windows_aarch64_msvc" +version = "0.42.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c8b1b673ffc16c47a9ff48570a9d85e25d265735c503681332589af6253c6c7" + [[package]] name = "windows_i686_gnu" version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" +[[package]] +name = "windows_i686_gnu" +version = "0.42.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de3887528ad530ba7bdbb1faa8275ec7a1155a45ffa57c37993960277145d640" + [[package]] name = "windows_i686_msvc" version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" +[[package]] +name = "windows_i686_msvc" +version = "0.42.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf4d1122317eddd6ff351aa852118a2418ad4214e6613a50e0191f7004372605" + [[package]] name = "windows_x86_64_gnu" version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" +[[package]] +name = 
"windows_x86_64_gnu" +version = "0.42.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1040f221285e17ebccbc2591ffdc2d44ee1f9186324dd3e84e99ac68d699c45" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "628bfdf232daa22b0d64fdb62b09fcc36bb01f05a3939e20ab73aaf9470d0463" + [[package]] name = "windows_x86_64_msvc" version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" +[[package]] +name = "windows_x86_64_msvc" +version = "0.42.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd" + [[package]] name = "winreg" version = "0.10.1" @@ -6787,6 +7110,12 @@ dependencies = [ "libc", ] +[[package]] +name = "xml-rs" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2d7d3948613f75c98fd9328cfdcc45acc4d360655289d0a7d4ec931392200a3" + [[package]] name = "xshell" version = "0.2.1" diff --git a/Cargo.toml b/Cargo.toml index 31836f0424e..9c24dcc1f98 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -74,7 +74,7 @@ actix-rt = "2" actix-web = "4.0.1" ansi_term = "0.12" anyhow = "1.0.62" -arbitrary = { version = "1", features = ["derive"] } +arbitrary = { version = "1.2.3", features = ["derive"] } arc-swap = "1.5" assert_matches = "1.5.0" async-recursion = "0.3.2" @@ -105,7 +105,7 @@ crossbeam-channel = "0.5" csv = "1.1.1" curve25519-dalek = "3" derive_more = "0.99.9" -dirs = "3" +dirs = "4" easy-ext = "0.2" ed25519-dalek = "1" elastic-array = "0.11" @@ -121,7 +121,7 @@ hyper = { version = "0.14", features = ["full"] } hyper-tls = "0.5.0" im = "15" indicatif = { version = "0.15.0", features = ["with_rayon"] } -insta = "1.14.0" +insta = { version = "1.26.0", features = ["json", "yaml"] } itertools = "0.10.0" 
itoa = "1.0" libc = "0.2.81" @@ -141,7 +141,7 @@ openssl-probe = "0.1.4" opentelemetry = { version = "0.17.0", features = ["rt-tokio", "trace"] } opentelemetry-otlp = "0.10.0" opentelemetry-semantic-conventions = "0.9.0" -paperclip = { version = "0.7.0", features = ["actix4"] } +paperclip = { version = "0.8.0", features = ["actix4"] } parity-wasm = { version = "0.42", default-features = false } parity-wasm_41 = { package = "parity-wasm", version = "0.41" } parking_lot = "0.12.1" @@ -163,6 +163,7 @@ reqwest = { version = "0.11.0", features = ["blocking"] } ripemd = "0.1.1" rlimit = "0.7" rocksdb = { version = "0.19.0", default-features = false, features = ["snappy", "lz4", "zstd", "zlib", "jemalloc"] } +rust-s3 = { version = "0.32.3", features = ["blocking"] } rusqlite = {version = "0.27.0", features = ["bundled", "chrono", "functions"] } secp256k1 = { version = "0.24", features = ["recovery", "rand-std"] } semver = "1.0.4" @@ -170,7 +171,7 @@ serde = { version = "1.0.136", features = ["alloc", "derive", "rc"] } serde_ignored = "0.1" serde_json = "1.0.68" serde_repr = "0.1.8" -serde_yaml = "0.8.26" +serde_yaml = "0.9" sha2 = "0.10" sha3 = "0.10" shell-escape = "0.1.5" @@ -199,7 +200,7 @@ wasm-encoder = "0.11.0" wasm-smith = "0.10" wasmparser = "0.78" wasmprinter = "0.2" -wasmtime = { version = "0.37.0", default-features = false, features = ["cranelift", "wasm-backtrace"] } +wasmtime = { version = "4.0.0", default-features = false, features = ["cranelift"] } wat = "1.0.40" xshell = "0.2.1" xz2 = "0.1.6" diff --git a/chain/chain/src/blocks_delay_tracker.rs b/chain/chain/src/blocks_delay_tracker.rs index 99989788166..e0e75c26a2c 100644 --- a/chain/chain/src/blocks_delay_tracker.rs +++ b/chain/chain/src/blocks_delay_tracker.rs @@ -14,7 +14,7 @@ use std::mem; use std::time::Instant; use tracing::error; -use crate::{metrics, Chain, ChainStoreAccess, RuntimeAdapter}; +use crate::{metrics, Chain, ChainStoreAccess, RuntimeWithEpochManagerAdapter}; const 
BLOCK_DELAY_TRACKING_COUNT: u64 = 50; @@ -92,7 +92,7 @@ impl ChunkTrackingStats { fn to_chunk_processing_info( &self, chunk_hash: ChunkHash, - runtime_adapter: &dyn RuntimeAdapter, + runtime_adapter: &dyn RuntimeWithEpochManagerAdapter, ) -> ChunkProcessingInfo { let status = if self.completed_timestamp.is_some() { ChunkProcessingStatus::Completed @@ -353,7 +353,7 @@ impl BlocksDelayTracker { block_height: BlockHeight, block_hash: &CryptoHash, chain: &Chain, - runtime_adapter: &dyn RuntimeAdapter, + runtime_adapter: &dyn RuntimeWithEpochManagerAdapter, ) -> Option { self.blocks.get(block_hash).map(|block_stats| { let chunks_info: Vec<_> = block_stats diff --git a/chain/chain/src/chain.rs b/chain/chain/src/chain.rs index 49c308ff249..acc6d8cecb3 100644 --- a/chain/chain/src/chain.rs +++ b/chain/chain/src/chain.rs @@ -51,8 +51,8 @@ use near_primitives::views::{ LightClientBlockView, SignedTransactionView, }; #[cfg(feature = "protocol_feature_flat_state")] -use near_store::flat_state; -use near_store::{DBCol, ShardTries, StorageError, StoreUpdate, WrappedTrieChanges}; +use near_store::{flat_state, StorageError}; +use near_store::{DBCol, ShardTries, StoreUpdate, WrappedTrieChanges}; use crate::block_processing_utils::{ BlockPreprocessInfo, BlockProcessingArtifact, BlocksInProcessing, DoneApplyChunkCallback, @@ -67,7 +67,7 @@ use crate::store::{ChainStore, ChainStoreAccess, ChainStoreUpdate, GCMode}; use crate::types::{ AcceptedBlock, ApplySplitStateResult, ApplySplitStateResultOrStateChanges, ApplyTransactionResult, Block, BlockEconomicsConfig, BlockHeader, BlockHeaderInfo, BlockStatus, - ChainConfig, ChainGenesis, Provenance, RuntimeAdapter, + ChainConfig, ChainGenesis, Provenance, RuntimeWithEpochManagerAdapter, }; use crate::validate::{ validate_challenge, validate_chunk_proofs, validate_chunk_with_chunk_extra, @@ -86,7 +86,7 @@ use near_primitives::shard_layout::{ use near_primitives::version::PROTOCOL_VERSION; #[cfg(feature = "protocol_feature_flat_state")] use 
near_store::flat_state::{store_helper, FlatStateDelta}; -use near_store::flat_state::{FlatStorageError, FlatStorageStateStatus}; +use near_store::flat_state::{FlatStorageCreationStatus, FlatStorageError}; use once_cell::sync::OnceCell; use rayon::iter::{IntoParallelIterator, ParallelIterator}; @@ -429,7 +429,7 @@ type BlockApplyChunksResult = (CryptoHash, Vec>) /// Provides current view on the state according to the chain state. pub struct Chain { store: ChainStore, - pub runtime_adapter: Arc, + pub runtime_adapter: Arc, orphans: OrphanBlockPool, pub blocks_with_missing_chunks: MissingChunksPool, genesis: Block, @@ -479,7 +479,7 @@ impl Drop for Chain { } impl Chain { pub fn make_genesis_block( - runtime_adapter: &dyn RuntimeAdapter, + runtime_adapter: &dyn RuntimeWithEpochManagerAdapter, chain_genesis: &ChainGenesis, ) -> Result { let (_, state_roots) = runtime_adapter.genesis_state(); @@ -507,7 +507,7 @@ impl Chain { } pub fn new_for_view_client( - runtime_adapter: Arc, + runtime_adapter: Arc, chain_genesis: &ChainGenesis, doomslug_threshold_mode: DoomslugThresholdMode, save_trie_changes: bool, @@ -538,7 +538,7 @@ impl Chain { } pub fn new( - runtime_adapter: Arc, + runtime_adapter: Arc, chain_genesis: &ChainGenesis, doomslug_threshold_mode: DoomslugThresholdMode, chain_config: ChainConfig, @@ -692,7 +692,7 @@ impl Chain { } pub fn compute_bp_hash( - runtime_adapter: &dyn RuntimeAdapter, + runtime_adapter: &dyn RuntimeWithEpochManagerAdapter, epoch_id: EpochId, prev_epoch_id: EpochId, last_known_hash: &CryptoHash, @@ -719,7 +719,7 @@ impl Chain { /// compute the light client block pub fn create_light_client_block( header: &BlockHeader, - runtime_adapter: &dyn RuntimeAdapter, + runtime_adapter: &dyn RuntimeWithEpochManagerAdapter, chain_store: &dyn ChainStoreAccess, ) -> Result { let final_block_header = { @@ -1086,7 +1086,7 @@ impl Chain { } fn validate_block_impl( - runtime_adapter: &dyn RuntimeAdapter, + runtime_adapter: &dyn RuntimeWithEpochManagerAdapter, 
genesis_block: &Block, block: &Block, ) -> Result<(), Error> { @@ -2560,7 +2560,7 @@ impl Chain { /// 2) Shard layout will be the same. In this case, the method returns all shards that `me` will /// track in the next epoch but not this epoch fn get_shards_to_dl_state( - runtime_adapter: Arc, + runtime_adapter: Arc, me: &Option, parent_hash: &CryptoHash, ) -> Result, Error> { @@ -2573,7 +2573,7 @@ impl Chain { } fn should_catch_up_shard( - runtime_adapter: Arc, + runtime_adapter: Arc, me: &Option, parent_hash: &CryptoHash, shard_id: ShardId, @@ -3179,6 +3179,7 @@ impl Chain { // We synced shard state on top of _previous_ block for chunk in shard state header and applied state parts to // flat storage. Now we can set flat head to hash of this block and create flat storage. + // TODO (#7327): ensure that no flat storage work is done for `KeyValueRuntime`. #[cfg(feature = "protocol_feature_flat_state")] { let mut store_update = self.runtime_adapter.store().store_update(); @@ -3186,15 +3187,14 @@ impl Chain { store_update.commit()?; } - match self.runtime_adapter.try_create_flat_storage_state_for_shard( - shard_id, - block_height, - self.store(), - ) { - FlatStorageStateStatus::Ready | FlatStorageStateStatus::DontCreate => {} - status @ _ => { - return Err(Error::StorageError(StorageError::FlatStorageError(format!("Unable to create flat storage during syncing shard {shard_id}, got status {status:?}")))); - } + if self.runtime_adapter.get_flat_storage_creation_status(shard_id) + == FlatStorageCreationStatus::Ready + { + self.runtime_adapter.create_flat_storage_state_for_shard( + shard_id, + block_height, + self.store(), + ); } let mut height = shard_state_header.chunk_height_included(); @@ -3438,7 +3438,6 @@ impl Chain { Ok(self.store.get_outcomes_by_id(id)?.into_iter().map(Into::into).collect()) } - /// Returns all tx results given a tx hash, excluding refund receipts fn get_recursive_transaction_results( &self, id: &CryptoHash, @@ -3447,13 +3446,6 @@ impl Chain { let 
receipt_ids = outcome.outcome.receipt_ids.clone(); let mut results = vec![outcome]; for receipt_id in &receipt_ids { - // don't include refund receipts to speed up tx status query - if let Some(receipt) = self.store.get_receipt(&receipt_id)? { - let is_refund = receipt.predecessor_id.is_system(); - if is_refund { - continue; - } - } results.extend(self.get_recursive_transaction_results(receipt_id)?); } Ok(results) @@ -4412,9 +4404,9 @@ impl Chain { &mut self.store } - /// Returns underlying RuntimeAdapter. + /// Returns underlying RuntimeWithEpochManagerAdapter. #[inline] - pub fn runtime_adapter(&self) -> Arc { + pub fn runtime_adapter(&self) -> Arc { self.runtime_adapter.clone() } @@ -4557,7 +4549,7 @@ impl Chain { /// `get_prev_chunks(runtime_adapter, prev_block)` will return /// `[prev_block.chunks()[0], prev_block.chunks()[0], prev_block.chunks()[1], prev_block.chunks()[1]]` pub fn get_prev_chunk_headers( - runtime_adapter: &dyn RuntimeAdapter, + runtime_adapter: &dyn RuntimeWithEpochManagerAdapter, prev_block: &Block, ) -> Result, Error> { let epoch_id = runtime_adapter.get_epoch_id_from_prev_block(prev_block.hash())?; @@ -4572,7 +4564,7 @@ impl Chain { } pub fn get_prev_chunk_header( - runtime_adapter: &dyn RuntimeAdapter, + runtime_adapter: &dyn RuntimeWithEpochManagerAdapter, prev_block: &Block, shard_id: ShardId, ) -> Result { @@ -4643,7 +4635,7 @@ impl Chain { /// If rejected nothing will be updated in underlying storage. /// Safe to stop process mid way (Ctrl+C or crash). 
pub struct ChainUpdate<'a> { - runtime_adapter: Arc, + runtime_adapter: Arc, chain_store_update: ChainStoreUpdate<'a>, doomslug_threshold_mode: DoomslugThresholdMode, #[allow(unused)] @@ -4678,7 +4670,7 @@ pub enum ApplyChunkResult { impl<'a> ChainUpdate<'a> { pub fn new( store: &'a mut ChainStore, - runtime_adapter: Arc, + runtime_adapter: Arc, doomslug_threshold_mode: DoomslugThresholdMode, transaction_validity_period: BlockHeightDelta, ) -> Self { @@ -4692,7 +4684,7 @@ impl<'a> ChainUpdate<'a> { } fn new_impl( - runtime_adapter: Arc, + runtime_adapter: Arc, doomslug_threshold_mode: DoomslugThresholdMode, transaction_validity_period: BlockHeightDelta, chain_store_update: ChainStoreUpdate<'a>, @@ -4785,7 +4777,7 @@ impl<'a> ChainUpdate<'a> { /// otherwise, this function returns state changes needed to be applied to split /// states. These state changes will be stored in the database by `process_split_state` fn apply_split_state_changes( - runtime_adapter: &dyn RuntimeAdapter, + runtime_adapter: &dyn RuntimeWithEpochManagerAdapter, block_hash: &CryptoHash, prev_block_hash: &CryptoHash, apply_result: &ApplyTransactionResult, @@ -5520,7 +5512,7 @@ pub fn collect_receipts_from_response( #[derive(Message)] #[rtype(result = "()")] pub struct ApplyStatePartsRequest { - pub runtime: Arc, + pub runtime: Arc, pub shard_id: ShardId, pub state_root: StateRoot, pub num_parts: u64, @@ -5556,7 +5548,7 @@ pub struct BlockCatchUpResponse { #[derive(Message)] #[rtype(result = "()")] pub struct StateSplitRequest { - pub runtime: Arc, + pub runtime: Arc, pub sync_hash: CryptoHash, pub shard_id: ShardId, pub shard_uid: ShardUId, diff --git a/chain/chain/src/flat_storage_creator.rs b/chain/chain/src/flat_storage_creator.rs index 0969c8937e0..0b340555627 100644 --- a/chain/chain/src/flat_storage_creator.rs +++ b/chain/chain/src/flat_storage_creator.rs @@ -9,7 +9,7 @@ //! `CatchingUp`: moves flat storage head forward, so it may reach chain final head. //! 
`Ready`: flat storage is created and it is up-to-date. -use crate::{ChainStore, ChainStoreAccess, RuntimeAdapter}; +use crate::{ChainStore, ChainStoreAccess, RuntimeWithEpochManagerAdapter}; #[cfg(feature = "protocol_feature_flat_state")] use assert_matches::assert_matches; use crossbeam_channel::{unbounded, Receiver, Sender}; @@ -19,7 +19,7 @@ use near_primitives::shard_layout::ShardUId; use near_primitives::state::ValueRef; use near_primitives::state_part::PartId; use near_primitives::types::{AccountId, BlockHeight, ShardId, StateRoot}; -use near_store::flat_state::FlatStorageStateStatus; +use near_store::flat_state::FlatStorageCreationStatus; #[cfg(feature = "protocol_feature_flat_state")] use near_store::flat_state::{store_helper, FetchingStateStatus}; #[cfg(feature = "protocol_feature_flat_state")] @@ -56,7 +56,7 @@ struct FlatStorageCreationMetrics { /// If we launched a node with enabled flat storage but it doesn't have flat storage data on disk, we have to create it. /// This struct is responsible for this process for the given shard. -/// See doc comment on [`FlatStorageStateStatus`] for the details of the process. +/// See doc comment on [`FlatStorageCreationStatus`] for the details of the process. pub struct FlatStorageShardCreator { #[allow(unused)] shard_id: ShardId, @@ -64,7 +64,7 @@ pub struct FlatStorageShardCreator { #[allow(unused)] start_height: BlockHeight, #[allow(unused)] - runtime_adapter: Arc, + runtime_adapter: Arc, /// Tracks number of state parts which are not fetched yet during a single step. /// Stores Some(parts) if threads for fetching state were spawned and None otherwise. #[allow(unused)] @@ -87,7 +87,7 @@ impl FlatStorageShardCreator { pub fn new( shard_id: ShardId, start_height: BlockHeight, - runtime_adapter: Arc, + runtime_adapter: Arc, ) -> Self { let (fetched_parts_sender, fetched_parts_receiver) = unbounded(); // `itoa` is much faster for printing shard_id to a string than trivial alternatives. 
@@ -194,11 +194,11 @@ impl FlatStorageShardCreator { thread_pool: &rayon::ThreadPool, ) -> Result { let current_status = - store_helper::get_flat_storage_state_status(chain_store.store(), self.shard_id); + store_helper::get_flat_storage_creation_status(chain_store.store(), self.shard_id); self.metrics.status.set((¤t_status).into()); let shard_id = self.shard_id; match ¤t_status { - FlatStorageStateStatus::SavingDeltas => { + FlatStorageCreationStatus::SavingDeltas => { let final_head = chain_store.final_head()?; let final_height = final_head.height; @@ -254,7 +254,7 @@ impl FlatStorageShardCreator { store_update.commit()?; } } - FlatStorageStateStatus::FetchingState(fetching_state_status) => { + FlatStorageCreationStatus::FetchingState(fetching_state_status) => { let store = self.runtime_adapter.store().clone(); let block_hash = store_helper::get_flat_head(&store, shard_id).unwrap(); let start_part_id = fetching_state_status.part_id; @@ -341,7 +341,7 @@ impl FlatStorageShardCreator { } } } - FlatStorageStateStatus::CatchingUp => { + FlatStorageCreationStatus::CatchingUp => { let store = self.runtime_adapter.store(); let old_flat_head = store_helper::get_flat_head(store, shard_id).unwrap(); let mut flat_head = old_flat_head.clone(); @@ -379,24 +379,23 @@ impl FlatStorageShardCreator { // If we reached chain final head, we can finish catchup and finally create flat storage. 
store_helper::finish_catchup(&mut store_update, shard_id); store_update.commit()?; - let status = self.runtime_adapter.try_create_flat_storage_state_for_shard( + self.runtime_adapter.create_flat_storage_state_for_shard( shard_id, chain_store.head().unwrap().height, chain_store, ); - assert_eq!(status, FlatStorageStateStatus::Ready); info!(target: "chain", %shard_id, %flat_head, %height, "Flat storage creation done"); } else { store_update.commit()?; } } } - FlatStorageStateStatus::Ready => {} - FlatStorageStateStatus::DontCreate => { + FlatStorageCreationStatus::Ready => {} + FlatStorageCreationStatus::DontCreate => { panic!("We initiated flat storage creation for shard {shard_id} but according to flat storage state status in db it cannot be created"); } }; - Ok(current_status == FlatStorageStateStatus::Ready) + Ok(current_status == FlatStorageCreationStatus::Ready) } } @@ -412,31 +411,33 @@ impl FlatStorageCreator { /// or starts migration to flat storage which updates DB in background and creates flat storage afterwards. 
pub fn new( me: Option<&AccountId>, - runtime_adapter: Arc, + runtime_adapter: Arc, chain_store: &ChainStore, num_threads: usize, - ) -> Option { - let chain_head = chain_store.head().unwrap(); - let num_shards = runtime_adapter.num_shards(&chain_head.epoch_id).unwrap(); - let start_height = chain_head.height; + ) -> Result, Error> { + let chain_head = chain_store.head()?; + let num_shards = runtime_adapter.num_shards(&chain_head.epoch_id)?; let mut shard_creators: HashMap = HashMap::new(); let mut creation_needed = false; for shard_id in 0..num_shards { if runtime_adapter.cares_about_shard(me, &chain_head.prev_block_hash, shard_id, true) { - let status = runtime_adapter.try_create_flat_storage_state_for_shard( - shard_id, - chain_store.head().unwrap().height, - chain_store, - ); + let status = runtime_adapter.get_flat_storage_creation_status(shard_id); match status { - FlatStorageStateStatus::Ready | FlatStorageStateStatus::DontCreate => {} + FlatStorageCreationStatus::Ready => { + runtime_adapter.create_flat_storage_state_for_shard( + shard_id, + chain_head.height, + chain_store, + ); + } + FlatStorageCreationStatus::DontCreate => {} _ => { creation_needed = true; shard_creators.insert( shard_id, FlatStorageShardCreator::new( shard_id, - start_height, + chain_head.height, runtime_adapter.clone(), ), ); @@ -445,14 +446,15 @@ impl FlatStorageCreator { } } - if creation_needed { + let flat_storage_creator = if creation_needed { Some(Self { shard_creators, pool: rayon::ThreadPoolBuilder::new().num_threads(num_threads).build().unwrap(), }) } else { None - } + }; + Ok(flat_storage_creator) } /// Updates statuses of underlying flat storage creation processes. 
Returns boolean diff --git a/chain/chain/src/lib.rs b/chain/chain/src/lib.rs index bb68d0a3aba..20754e7b5de 100644 --- a/chain/chain/src/lib.rs +++ b/chain/chain/src/lib.rs @@ -6,7 +6,9 @@ pub use near_chain_primitives::{self, Error}; pub use near_primitives::receipt::ReceiptResult; pub use store::{ChainStore, ChainStoreAccess, ChainStoreUpdate}; pub use store_validator::{ErrorMessage, StoreValidator}; -pub use types::{Block, BlockHeader, BlockStatus, ChainGenesis, Provenance, RuntimeAdapter}; +pub use types::{ + Block, BlockHeader, BlockStatus, ChainGenesis, Provenance, RuntimeWithEpochManagerAdapter, +}; mod block_processing_utils; pub mod blocks_delay_tracker; diff --git a/chain/chain/src/lightclient.rs b/chain/chain/src/lightclient.rs index fea58150949..228fb9f8d00 100644 --- a/chain/chain/src/lightclient.rs +++ b/chain/chain/src/lightclient.rs @@ -5,12 +5,12 @@ use near_primitives::types::EpochId; use near_primitives::views::validator_stake_view::ValidatorStakeView; use near_primitives::views::{BlockHeaderInnerLiteView, LightClientBlockView}; -use crate::{ChainStoreAccess, RuntimeAdapter}; +use crate::{ChainStoreAccess, RuntimeWithEpochManagerAdapter}; pub fn get_epoch_block_producers_view( epoch_id: &EpochId, prev_hash: &CryptoHash, - runtime_adapter: &dyn RuntimeAdapter, + runtime_adapter: &dyn RuntimeWithEpochManagerAdapter, ) -> Result, Error> { Ok(runtime_adapter .get_epoch_block_producers_ordered(epoch_id, prev_hash)? diff --git a/chain/chain/src/migrations.rs b/chain/chain/src/migrations.rs index fa98b377806..27df18931a7 100644 --- a/chain/chain/src/migrations.rs +++ b/chain/chain/src/migrations.rs @@ -1,12 +1,12 @@ use crate::store::ChainStoreAccess; -use crate::types::RuntimeAdapter; +use crate::types::RuntimeWithEpochManagerAdapter; use near_chain_primitives::error::Error; use near_primitives::hash::CryptoHash; use near_primitives::types::ShardId; /// Check that epoch of block with given prev_block_hash is the first one with current protocol version. 
fn is_first_epoch_with_protocol_version( - runtime_adapter: &dyn RuntimeAdapter, + runtime_adapter: &dyn RuntimeWithEpochManagerAdapter, prev_block_hash: &CryptoHash, ) -> Result { let prev_epoch_id = runtime_adapter.get_prev_epoch_id_from_prev_block(prev_block_hash)?; @@ -20,7 +20,7 @@ fn is_first_epoch_with_protocol_version( /// We assume that current block contain the chunk for shard with the given id. pub fn check_if_block_is_first_with_chunk_of_version( chain_store: &dyn ChainStoreAccess, - runtime_adapter: &dyn RuntimeAdapter, + runtime_adapter: &dyn RuntimeWithEpochManagerAdapter, prev_block_hash: &CryptoHash, shard_id: ShardId, ) -> Result { diff --git a/chain/chain/src/store.rs b/chain/chain/src/store.rs index 2b3e64df95d..188b779769d 100644 --- a/chain/chain/src/store.rs +++ b/chain/chain/src/store.rs @@ -45,7 +45,7 @@ use near_store::{ use crate::chunks_store::ReadOnlyChunksStore; use crate::types::{Block, BlockHeader, LatestKnown}; -use crate::{byzantine_assert, RuntimeAdapter}; +use crate::{byzantine_assert, RuntimeWithEpochManagerAdapter}; use near_store::db::StoreStatistics; use near_store::flat_state::{BlockInfo, ChainAccessForFlatStorage}; use std::sync::Arc; @@ -293,7 +293,7 @@ pub trait ChainStoreAccess { /// Get epoch id of the last block with existing chunk for the given shard id. 
fn get_epoch_id_of_last_block_with_chunk( &self, - runtime_adapter: &dyn RuntimeAdapter, + runtime_adapter: &dyn RuntimeWithEpochManagerAdapter, hash: &CryptoHash, shard_id: ShardId, ) -> Result { @@ -468,7 +468,7 @@ impl ChainStore { /// pub fn get_outgoing_receipts_for_shard( &self, - runtime_adapter: &dyn RuntimeAdapter, + runtime_adapter: &dyn RuntimeWithEpochManagerAdapter, prev_block_hash: CryptoHash, shard_id: ShardId, last_included_height: BlockHeight, @@ -2021,7 +2021,7 @@ impl<'a> ChainStoreUpdate<'a> { fn get_shard_uids_to_gc( &mut self, - runtime_adapter: &dyn RuntimeAdapter, + runtime_adapter: &dyn RuntimeWithEpochManagerAdapter, block_hash: &CryptoHash, ) -> Vec { let block_header = self.get_block_header(block_hash).expect("block header must exist"); @@ -2048,7 +2048,7 @@ impl<'a> ChainStoreUpdate<'a> { // Clearing block data of `block_hash.prev`, if on the Canonical Chain. pub fn clear_block_data( &mut self, - runtime_adapter: &dyn RuntimeAdapter, + runtime_adapter: &dyn RuntimeWithEpochManagerAdapter, mut block_hash: CryptoHash, gc_mode: GCMode, ) -> Result<(), Error> { @@ -2444,7 +2444,7 @@ impl<'a> ChainStoreUpdate<'a> { pub fn copy_chain_state_as_of_block( chain_store: &'a mut ChainStore, block_hash: &CryptoHash, - source_runtime: Arc, + source_runtime: Arc, source_store: &ChainStore, ) -> Result, Error> { let mut chain_store_update = ChainStoreUpdate::new(chain_store); @@ -2992,7 +2992,7 @@ mod tests { use crate::store_validator::StoreValidator; use crate::test_utils::{KeyValueRuntime, ValidatorSchedule}; use crate::types::ChainConfig; - use crate::{Chain, ChainGenesis, DoomslugThresholdMode, RuntimeAdapter}; + use crate::{Chain, ChainGenesis, DoomslugThresholdMode, RuntimeWithEpochManagerAdapter}; fn get_chain() -> Chain { get_chain_with_epoch_length(10) @@ -3245,7 +3245,7 @@ mod tests { // Adds block to the chain at given height after prev_block. 
fn add_block( chain: &mut Chain, - runtime_adapter: Arc, + runtime_adapter: Arc, prev_block: &mut Block, blocks: &mut Vec, signer: Arc, diff --git a/chain/chain/src/store_validator.rs b/chain/chain/src/store_validator.rs index 80c650b16b0..2236a280a53 100644 --- a/chain/chain/src/store_validator.rs +++ b/chain/chain/src/store_validator.rs @@ -24,7 +24,7 @@ use near_store::db::refcount; use near_store::{DBCol, Store, TrieChanges}; use validate::StoreValidatorError; -use crate::RuntimeAdapter; +use crate::RuntimeWithEpochManagerAdapter; use near_primitives::shard_layout::get_block_shard_uid_rev; use near_primitives::time::Clock; @@ -69,7 +69,7 @@ pub struct ErrorMessage { pub struct StoreValidator { me: Option, config: GenesisConfig, - runtime_adapter: Arc, + runtime_adapter: Arc, store: Store, inner: StoreValidatorCache, timeout: Option, @@ -84,7 +84,7 @@ impl StoreValidator { pub fn new( me: Option, config: GenesisConfig, - runtime_adapter: Arc, + runtime_adapter: Arc, store: Store, is_archival: bool, ) -> Self { diff --git a/chain/chain/src/store_validator/validate.rs b/chain/chain/src/store_validator/validate.rs index c352e3c6bc1..a81455e0374 100644 --- a/chain/chain/src/store_validator/validate.rs +++ b/chain/chain/src/store_validator/validate.rs @@ -908,15 +908,21 @@ pub(crate) fn receipt_refcount_final(sv: &mut StoreValidator) -> Result<(), Stor } pub(crate) fn block_refcount_final(sv: &mut StoreValidator) -> Result<(), StoreValidatorError> { - if let Some(block_refcount) = sv.inner.block_refcount.iter().next() { + let block_refcount_len = sv.inner.block_refcount.len(); + if block_refcount_len >= 2 { err!( "Found {:?} Blocks that are not counted, e.g. {:?}", - sv.inner.block_refcount.len(), - block_refcount + block_refcount_len, + sv.inner.block_refcount.iter().next() ); } - if let Some(tail_block) = sv.inner.genesis_blocks.first() { - err!("Found {:?} Genesis Blocks, e.g. 
{:?}", sv.inner.genesis_blocks.len(), tail_block); + let genesis_blocks_len = sv.inner.genesis_blocks.len(); + if genesis_blocks_len >= 2 { + err!( + "Found {:?} Genesis Blocks, e.g. {:?}", + genesis_blocks_len, + sv.inner.genesis_blocks.first() + ); } Ok(()) } diff --git a/chain/chain/src/test_utils/kv_runtime.rs b/chain/chain/src/test_utils/kv_runtime.rs index d6761686fcd..aa1dfe8f080 100644 --- a/chain/chain/src/test_utils/kv_runtime.rs +++ b/chain/chain/src/test_utils/kv_runtime.rs @@ -45,13 +45,15 @@ use near_store::{ DBCol, PartialStorage, ShardTries, Store, StoreUpdate, Trie, TrieChanges, WrappedTrieChanges, }; -use crate::types::{ApplySplitStateResult, ApplyTransactionResult, BlockHeaderInfo}; -use crate::{BlockHeader, RuntimeAdapter}; +use crate::types::{ + ApplySplitStateResult, ApplyTransactionResult, BlockHeaderInfo, RuntimeAdapter, +}; +use crate::{BlockHeader, RuntimeWithEpochManagerAdapter}; use near_primitives::epoch_manager::ShardConfig; use near_store::flat_state::ChainAccessForFlatStorage; -use near_store::flat_state::{FlatStorageState, FlatStorageStateStatus}; +use near_store::flat_state::{FlatStorageCreationStatus, FlatStorageState}; use super::ValidatorSchedule; @@ -832,13 +834,17 @@ impl RuntimeAdapter for KeyValueRuntime { None } - fn try_create_flat_storage_state_for_shard( + fn get_flat_storage_creation_status(&self, _shard_id: ShardId) -> FlatStorageCreationStatus { + FlatStorageCreationStatus::DontCreate + } + + fn create_flat_storage_state_for_shard( &self, - _shard_id: ShardId, + shard_id: ShardId, _latest_block_height: BlockHeight, _chain_access: &dyn ChainAccessForFlatStorage, - ) -> FlatStorageStateStatus { - FlatStorageStateStatus::DontCreate + ) { + panic!("Flat storage state can't be created for shard {shard_id} because KeyValueRuntime doesn't support this"); } fn remove_flat_storage_state_for_shard( @@ -1356,3 +1362,5 @@ impl RuntimeAdapter for KeyValueRuntime { Ok(HashMap::new()) } } + +impl RuntimeWithEpochManagerAdapter for 
KeyValueRuntime {} diff --git a/chain/chain/src/tests/simple_chain.rs b/chain/chain/src/tests/simple_chain.rs index 5779f667958..b7a9795440f 100644 --- a/chain/chain/src/tests/simple_chain.rs +++ b/chain/chain/src/tests/simple_chain.rs @@ -45,7 +45,7 @@ fn build_chain() { if cfg!(feature = "nightly") { insta::assert_display_snapshot!(hash, @"96KiRJdbMN8A9cFPXarZdaRQ8U2HvYcrGTGC8a4EgFzM"); } else { - insta::assert_display_snapshot!(hash, @"2iGtRFjF6BcqPF6tDcfLLojRaNax2PKDLxRqRc3RxRn7"); + insta::assert_display_snapshot!(hash, @"7r5VSLXhkxHHEeiAAPQbKPGv3rr877obehGYwPbKZMA7"); } for i in 1..5 { @@ -75,7 +75,7 @@ fn build_chain() { if cfg!(feature = "nightly") { insta::assert_display_snapshot!(hash, @"4eW4jvyu1Ek6WmY3EuUoFFkrascC7svRww5UcZbNMkUf"); } else { - insta::assert_display_snapshot!(hash, @"7BkghFM7ZA8piYHAWYu4vTY6vE1pkTwy14bqQnS138qE"); + insta::assert_display_snapshot!(hash, @"9772sSKzm1eGPV3pRi17YaZkotrcN6dAkJUn226CopTm"); } } diff --git a/chain/chain/src/types.rs b/chain/chain/src/types.rs index 1c9e94168ea..40e3cd0f77f 100644 --- a/chain/chain/src/types.rs +++ b/chain/chain/src/types.rs @@ -32,7 +32,7 @@ use near_primitives::version::{ }; use near_primitives::views::{QueryRequest, QueryResponse}; use near_store::flat_state::ChainAccessForFlatStorage; -use near_store::flat_state::{FlatStorageState, FlatStorageStateStatus}; +use near_store::flat_state::{FlatStorageCreationStatus, FlatStorageState}; use near_store::{PartialStorage, ShardTries, Store, StoreUpdate, Trie, WrappedTrieChanges}; pub use near_epoch_manager::EpochManagerAdapter; @@ -271,7 +271,7 @@ impl ChainGenesis { /// Bridge between the chain and the runtime. /// Main function is to update state given transactions. /// Additionally handles validators. 
-pub trait RuntimeAdapter: EpochManagerAdapter + Send + Sync { +pub trait RuntimeAdapter: Send + Sync { /// Get store and genesis state roots fn genesis_state(&self) -> (Store, Vec); @@ -300,13 +300,18 @@ pub trait RuntimeAdapter: EpochManagerAdapter + Send + Sync { fn get_flat_storage_state_for_shard(&self, shard_id: ShardId) -> Option; - /// Tries to create flat storage state for given shard, returns the status of creation. - fn try_create_flat_storage_state_for_shard( + /// Gets status of flat storage state background creation. + fn get_flat_storage_creation_status(&self, shard_id: ShardId) -> FlatStorageCreationStatus; + + /// Creates flat storage state for given shard, assuming that all flat storage data + /// is already stored in DB. + /// TODO (#7327): consider returning flat storage creation errors here + fn create_flat_storage_state_for_shard( &self, shard_id: ShardId, latest_block_height: BlockHeight, chain_access: &dyn ChainAccessForFlatStorage, - ) -> FlatStorageStateStatus; + ); /// Removes flat storage state for shard, if it exists. /// Used to clear old flat storage data from disk and memory before syncing to newer state. @@ -576,6 +581,8 @@ pub trait RuntimeAdapter: EpochManagerAdapter + Send + Sync { fn get_protocol_config(&self, epoch_id: &EpochId) -> Result; } +pub trait RuntimeWithEpochManagerAdapter: RuntimeAdapter + EpochManagerAdapter {} + /// The last known / checked height and time when we have processed it. /// Required to keep track of skipped blocks and not fallback to produce blocks at lower height. 
#[derive(BorshSerialize, BorshDeserialize, Debug, Clone, Default)] diff --git a/chain/chain/src/validate.rs b/chain/chain/src/validate.rs index 47bad8cb5fb..27f27455aff 100644 --- a/chain/chain/src/validate.rs +++ b/chain/chain/src/validate.rs @@ -17,7 +17,7 @@ use near_primitives::types::chunk_extra::ChunkExtra; use near_primitives::types::{AccountId, BlockHeight, EpochId, Nonce}; use crate::{byzantine_assert, Chain}; -use crate::{ChainStore, Error, RuntimeAdapter}; +use crate::{ChainStore, Error, RuntimeWithEpochManagerAdapter}; /// Gas limit cannot be adjusted for more than 0.1% at a time. const GAS_LIMIT_ADJUSTMENT_FACTOR: u64 = 1000; @@ -25,7 +25,7 @@ const GAS_LIMIT_ADJUSTMENT_FACTOR: u64 = 1000; /// Verifies that chunk's proofs in the header match the body. pub fn validate_chunk_proofs( chunk: &ShardChunk, - runtime_adapter: &dyn RuntimeAdapter, + runtime_adapter: &dyn RuntimeWithEpochManagerAdapter, ) -> Result { let correct_chunk_hash = match chunk { ShardChunk::V1(chunk) => ShardChunkHeaderV1::compute_hash(&chunk.header.inner), @@ -120,7 +120,7 @@ pub fn validate_transactions_order(transactions: &[SignedTransaction]) -> bool { /// Validate that all next chunk information matches previous chunk extra. pub fn validate_chunk_with_chunk_extra( chain_store: &ChainStore, - runtime_adapter: &dyn RuntimeAdapter, + runtime_adapter: &dyn RuntimeWithEpochManagerAdapter, prev_block_hash: &CryptoHash, prev_chunk_extra: &ChunkExtra, prev_chunk_height_included: BlockHeight, @@ -183,7 +183,7 @@ pub fn validate_chunk_with_chunk_extra( /// Validates a double sign challenge. /// Only valid if ancestors of both blocks are present in the chain. 
fn validate_double_sign( - runtime_adapter: &dyn RuntimeAdapter, + runtime_adapter: &dyn RuntimeWithEpochManagerAdapter, block_double_sign: &BlockDoubleSign, ) -> Result<(CryptoHash, Vec), Error> { let left_block_header = BlockHeader::try_from_slice(&block_double_sign.left_block_header)?; @@ -219,7 +219,7 @@ fn validate_double_sign( } fn validate_header_authorship( - runtime_adapter: &dyn RuntimeAdapter, + runtime_adapter: &dyn RuntimeWithEpochManagerAdapter, block_header: &BlockHeader, ) -> Result<(), Error> { if runtime_adapter.verify_header_signature(block_header)? { @@ -230,7 +230,7 @@ fn validate_header_authorship( } fn validate_chunk_authorship( - runtime_adapter: &dyn RuntimeAdapter, + runtime_adapter: &dyn RuntimeWithEpochManagerAdapter, chunk_header: &ShardChunkHeader, ) -> Result { let epoch_id = runtime_adapter.get_epoch_id_from_prev_block(&chunk_header.prev_block_hash())?; @@ -251,7 +251,7 @@ fn validate_chunk_authorship( } fn validate_chunk_proofs_challenge( - runtime_adapter: &dyn RuntimeAdapter, + runtime_adapter: &dyn RuntimeWithEpochManagerAdapter, chunk_proofs: &ChunkProofs, ) -> Result<(CryptoHash, Vec), Error> { let block_header = BlockHeader::try_from_slice(&chunk_proofs.block_header)?; @@ -303,7 +303,7 @@ fn validate_chunk_proofs_challenge( } fn validate_chunk_state_challenge( - _runtime_adapter: &dyn RuntimeAdapter, + _runtime_adapter: &dyn RuntimeWithEpochManagerAdapter, _chunk_state: &ChunkState, ) -> Result<(CryptoHash, Vec), Error> { // TODO (#2445): Enable challenges when they are working correctly. @@ -383,7 +383,7 @@ fn validate_chunk_state_challenge( /// Returns `Some(block_hash, vec![account_id])` of invalid block and who to /// slash if challenge is correct and None if incorrect. 
pub fn validate_challenge( - runtime_adapter: &dyn RuntimeAdapter, + runtime_adapter: &dyn RuntimeWithEpochManagerAdapter, epoch_id: &EpochId, last_block_hash: &CryptoHash, challenge: &Challenge, diff --git a/chain/chunks/src/lib.rs b/chain/chunks/src/lib.rs index 18dec3ca689..50062436938 100644 --- a/chain/chunks/src/lib.rs +++ b/chain/chunks/src/lib.rs @@ -95,7 +95,7 @@ use near_primitives::time::Utc; use rand::seq::{IteratorRandom, SliceRandom}; use tracing::{debug, error, warn}; -use near_chain::{byzantine_assert, RuntimeAdapter}; +use near_chain::{byzantine_assert, RuntimeWithEpochManagerAdapter}; use near_network::types::{NetworkRequests, PeerManagerAdapter, PeerManagerMessageRequest}; use near_primitives::block::Tip; use near_primitives::hash::CryptoHash; @@ -288,7 +288,7 @@ impl Seal<'_> { pub struct SealsManager { me: Option, - runtime_adapter: Arc, + runtime_adapter: Arc, active_demurs: HashMap, past_seals: BTreeMap>, @@ -296,7 +296,10 @@ pub struct SealsManager { } impl SealsManager { - fn new(me: Option, runtime_adapter: Arc) -> Self { + fn new( + me: Option, + runtime_adapter: Arc, + ) -> Self { Self { me, runtime_adapter, @@ -474,7 +477,7 @@ pub struct ShardsManager { me: Option, store: ReadOnlyChunksStore, - runtime_adapter: Arc, + runtime_adapter: Arc, peer_manager_adapter: Arc, client_adapter: Arc, rs: ReedSolomonWrapper, @@ -493,7 +496,7 @@ pub struct ShardsManager { impl ShardsManager { pub fn new( me: Option, - runtime_adapter: Arc, + runtime_adapter: Arc, network_adapter: Arc, client_adapter: Arc, store: ReadOnlyChunksStore, diff --git a/chain/chunks/src/logic.rs b/chain/chunks/src/logic.rs index de3fa1ed4d5..7acba7ac1f6 100644 --- a/chain/chunks/src/logic.rs +++ b/chain/chunks/src/logic.rs @@ -1,4 +1,6 @@ -use near_chain::{validate::validate_chunk_proofs, Chain, ChainStore, RuntimeAdapter}; +use near_chain::{ + validate::validate_chunk_proofs, Chain, ChainStore, RuntimeWithEpochManagerAdapter, +}; use near_chunks_primitives::Error; use 
near_primitives::{ hash::CryptoHash, @@ -16,7 +18,7 @@ pub fn need_receipt( prev_block_hash: &CryptoHash, shard_id: ShardId, me: Option<&AccountId>, - runtime_adapter: &dyn RuntimeAdapter, + runtime_adapter: &dyn RuntimeWithEpochManagerAdapter, ) -> bool { cares_about_shard_this_or_next_epoch(me, prev_block_hash, shard_id, true, runtime_adapter) } @@ -26,7 +28,7 @@ pub fn need_part( prev_block_hash: &CryptoHash, part_ord: u64, me: Option<&AccountId>, - runtime_adapter: &dyn RuntimeAdapter, + runtime_adapter: &dyn RuntimeWithEpochManagerAdapter, ) -> Result { let epoch_id = runtime_adapter.get_epoch_id_from_prev_block(prev_block_hash)?; Ok(Some(&runtime_adapter.get_part_owner(&epoch_id, part_ord)?) == me) @@ -37,7 +39,7 @@ pub fn cares_about_shard_this_or_next_epoch( parent_hash: &CryptoHash, shard_id: ShardId, is_me: bool, - runtime_adapter: &dyn RuntimeAdapter, + runtime_adapter: &dyn RuntimeWithEpochManagerAdapter, ) -> bool { runtime_adapter.cares_about_shard(account_id, parent_hash, shard_id, is_me) || runtime_adapter.will_care_about_shard(account_id, parent_hash, shard_id, is_me) @@ -48,7 +50,7 @@ pub fn cares_about_shard_this_or_next_epoch( pub fn make_outgoing_receipts_proofs( chunk_header: &ShardChunkHeader, outgoing_receipts: &[Receipt], - runtime_adapter: &dyn RuntimeAdapter, + runtime_adapter: &dyn RuntimeWithEpochManagerAdapter, ) -> Result, near_chunks_primitives::Error> { let shard_id = chunk_header.shard_id(); let shard_layout = @@ -75,7 +77,7 @@ pub fn make_partial_encoded_chunk_from_owned_parts_and_needed_receipts<'a>( parts: impl Iterator, receipts: impl Iterator, me: Option<&AccountId>, - runtime_adapter: &dyn RuntimeAdapter, + runtime_adapter: &dyn RuntimeWithEpochManagerAdapter, ) -> PartialEncodedChunk { let prev_block_hash = header.prev_block_hash(); let cares_about_shard = cares_about_shard_this_or_next_epoch( @@ -111,7 +113,7 @@ pub fn decode_encoded_chunk( encoded_chunk: &EncodedShardChunk, merkle_paths: Vec, me: Option<&AccountId>, - 
runtime_adapter: &dyn RuntimeAdapter, + runtime_adapter: &dyn RuntimeWithEpochManagerAdapter, ) -> Result<(ShardChunk, PartialEncodedChunk), Error> { let chunk_hash = encoded_chunk.chunk_hash(); @@ -148,7 +150,7 @@ fn create_partial_chunk( merkle_paths: Vec, outgoing_receipts: Vec, me: Option<&AccountId>, - runtime_adapter: &dyn RuntimeAdapter, + runtime_adapter: &dyn RuntimeWithEpochManagerAdapter, ) -> Result { let header = encoded_chunk.cloned_header(); let receipts = diff --git a/chain/client-primitives/src/types.rs b/chain/client-primitives/src/types.rs index f5023d8e2f8..cce91e6c227 100644 --- a/chain/client-primitives/src/types.rs +++ b/chain/client-primitives/src/types.rs @@ -7,7 +7,7 @@ use actix::Message; use chrono::DateTime; use near_primitives::time::Utc; -use near_chain_configs::ProtocolConfigView; +use near_chain_configs::{ClientConfig, ProtocolConfigView}; use near_primitives::hash::CryptoHash; use near_primitives::merkle::{MerklePath, PartialMerkleTree}; use near_primitives::network::PeerId; @@ -940,6 +940,33 @@ impl From for GetMaintenanceWindowsError { } } +pub struct GetClientConfig {} + +impl Message for GetClientConfig { + type Result = Result; +} + +#[derive(thiserror::Error, Debug)] +pub enum GetClientConfigError { + #[error("IO Error: {0}")] + IOError(String), + // NOTE: Currently, the underlying errors are too broad, and while we tried to handle + // expected cases, we cannot statically guarantee that no other errors will be returned + // in the future. + // TODO #3851: Remove this variant once we can exhaustively match all the underlying errors + #[error("It is a bug if you receive this error type, please, report this incident: https://github.com/near/nearcore/issues/new/choose. 
Details: {0}")] + Unreachable(String), +} + +impl From for GetClientConfigError { + fn from(error: near_chain_primitives::Error) -> Self { + match error { + near_chain_primitives::Error::IOErr(error) => Self::IOError(error.to_string()), + _ => Self::Unreachable(error.to_string()), + } + } +} + #[cfg(feature = "sandbox")] #[derive(Debug)] pub enum SandboxMessage { diff --git a/chain/client/src/client.rs b/chain/client/src/client.rs index fe77842cc8c..cda859d7736 100644 --- a/chain/client/src/client.rs +++ b/chain/client/src/client.rs @@ -13,6 +13,7 @@ use near_chunks::logic::{ }; use near_client_primitives::debug::ChunkProduction; use near_primitives::time::Clock; +use near_store::metadata::DbKind; use tracing::{debug, error, info, trace, warn}; use near_chain::chain::{ @@ -24,9 +25,10 @@ use near_chain::test_utils::format_hash; use near_chain::types::{ChainConfig, LatestKnown}; use near_chain::{ BlockProcessingArtifact, BlockStatus, Chain, ChainGenesis, ChainStoreAccess, - DoneApplyChunkCallback, Doomslug, DoomslugThresholdMode, Provenance, RuntimeAdapter, + DoneApplyChunkCallback, Doomslug, DoomslugThresholdMode, Provenance, + RuntimeWithEpochManagerAdapter, }; -use near_chain_configs::ClientConfig; +use near_chain_configs::{ClientConfig, UpdateableClientConfig}; use near_chunks::ShardsManager; use near_network::types::{ HighestHeightPeerInfo, NetworkRequests, PeerManagerAdapter, ReasonForBan, @@ -99,7 +101,7 @@ pub struct Client { pub sync_status: SyncStatus, pub chain: Chain, pub doomslug: Doomslug, - pub runtime_adapter: Arc, + pub runtime_adapter: Arc, pub shards_mgr: ShardsManager, pub sharded_tx_pool: ShardedTransactionPool, prev_block_to_chunk_headers_ready_for_inclusion: LruCache< @@ -149,6 +151,12 @@ pub struct Client { flat_storage_creator: Option, } +impl Client { + pub(crate) fn update_client_config(&self, update_client_config: UpdateableClientConfig) { + self.config.expected_shutdown.update(update_client_config.expected_shutdown); + } +} + // Debug 
information about the upcoming block. #[derive(Default)] pub struct BlockDebugStatus { @@ -177,7 +185,7 @@ impl Client { pub fn new( config: ClientConfig, chain_genesis: ChainGenesis, - runtime_adapter: Arc, + runtime_adapter: Arc, network_adapter: Arc, client_adapter: Arc, validator_signer: Option>, @@ -206,7 +214,7 @@ impl Client { runtime_adapter.clone(), chain.store(), chain_config.background_migration_threads, - ); + )?; let shards_mgr = ShardsManager::new( me.clone(), runtime_adapter.clone(), @@ -1072,7 +1080,12 @@ impl Client { Ok(()) } Err(e) if e.is_bad_data() => { - self.ban_peer(peer_id.clone(), ReasonForBan::BadBlockHeader); + // We don't ban a peer if the block timestamp is too much in the future since it's possible + // that a block is considered valid in one machine and invalid in another machine when their + // clocks are not synced. + if !matches!(e, near_chain::Error::InvalidBlockFutureTime(_)) { + self.ban_peer(peer_id.clone(), ReasonForBan::BadBlockHeader); + } Err(e) } Err(_) => { @@ -1443,13 +1456,7 @@ impl Client { height = block.header().height()) .entered(); let _gc_timer = metrics::GC_TIME.start_timer(); - - let result = if self.config.archive { - self.chain.clear_archive_data(self.config.gc.gc_blocks_limit) - } else { - let tries = self.runtime_adapter.get_tries(); - self.chain.clear_data(tries, &self.config.gc) - }; + let result = self.clear_data(); log_assert!(result.is_ok(), "Can't clear old data, {:?}", result); } @@ -1683,7 +1690,10 @@ impl Client { error: near_chain::Error, ) { let is_validator = - |epoch_id, block_hash, account_id, runtime_adapter: &Arc| { + |epoch_id, + block_hash, + account_id, + runtime_adapter: &Arc| { match runtime_adapter.get_validator_by_account_id(epoch_id, block_hash, account_id) { Ok((_, is_slashed)) => !is_slashed, @@ -2219,6 +2229,29 @@ impl Client { }; Ok(result) } + + fn clear_data(&mut self) -> Result<(), near_chain::Error> { + // A RPC node should do regular garbage collection. 
+ if !self.config.archive { + let tries = self.runtime_adapter.get_tries(); + return self.chain.clear_data(tries, &self.config.gc); + } + + // An archival node with split storage should perform garbage collection + // on the hot storage. In order to determine if split storage is enabled + // *and* that the migration to split storage is finished we can check + // the store kind. It's only set to hot after the migration is finished. + let store = self.chain.store().store(); + let kind = store.get_db_kind()?; + if kind == Some(DbKind::Hot) { + let tries = self.runtime_adapter.get_tries(); + return self.chain.clear_data(tries, &self.config.gc); + } + + // An archival node with legacy storage or in the midst of migration to split + // storage should do the legacy clear_archive_data. + self.chain.clear_archive_data(self.config.gc.gc_blocks_limit) + } } /* implements functions used to communicate with network */ @@ -2298,7 +2331,7 @@ impl Client { // are cheaper than block processing (and that they will work with both this and // the next epoch). The caching on top of that (in tier1_accounts_cache field) is just // a defence in depth, based on the previous experience with expensive - // RuntimeAdapter::get_validators_info call. + // RuntimeWithEpochManagerAdapter::get_validators_info call. for cp in self.runtime_adapter.get_epoch_chunk_producers(epoch_id)? 
{ account_keys .entry(cp.account_id().clone()) diff --git a/chain/client/src/client_actor.rs b/chain/client/src/client_actor.rs index cacf4d42350..66160174d6d 100644 --- a/chain/client/src/client_actor.rs +++ b/chain/client/src/client_actor.rs @@ -11,6 +11,7 @@ use crate::adapter::{ RecvPartialEncodedChunkRequest, RecvPartialEncodedChunkResponse, SetNetworkInfo, StateResponse, }; use crate::client::{Client, EPOCH_START_INFO_BLOCKS}; +use crate::config_updater::ConfigUpdater; use crate::debug::new_network_info_view; use crate::info::{display_sync_status, InfoHelper}; use crate::metrics::PARTIAL_ENCODED_CHUNK_RESPONSE_DELAY; @@ -30,15 +31,15 @@ use near_chain::test_utils::format_hash; use near_chain::ChainStoreAccess; use near_chain::{ byzantine_assert, near_chain_primitives, Block, BlockHeader, BlockProcessingArtifact, - ChainGenesis, DoneApplyChunkCallback, Provenance, RuntimeAdapter, + ChainGenesis, DoneApplyChunkCallback, Provenance, RuntimeWithEpochManagerAdapter, }; use near_chain_configs::ClientConfig; use near_chunks::client::ShardsManagerResponse; use near_chunks::logic::cares_about_shard_this_or_next_epoch; use near_client_primitives::types::{ - Error, GetNetworkInfo, NetworkInfoResponse, Status, StatusError, StatusSyncInfo, SyncStatus, + Error, GetClientConfig, GetClientConfigError, GetNetworkInfo, NetworkInfoResponse, Status, + StatusError, StatusSyncInfo, SyncStatus, }; -use near_dyn_configs::EXPECTED_SHUTDOWN_AT; #[cfg(feature = "test_features")] use near_network::types::NetworkAdversarialMessage; use near_network::types::ReasonForBan; @@ -69,7 +70,7 @@ use std::collections::HashMap; use std::sync::Arc; use std::thread; use std::time::{Duration, Instant}; -use tokio::sync::oneshot; +use tokio::sync::broadcast; use tracing::{debug, error, info, trace, warn}; /// Multiplier on `max_block_time` to wait until deciding that chain stalled. @@ -116,7 +117,10 @@ pub struct ClientActor { /// Synchronization measure to allow graceful shutdown. 
/// Informs the system when a ClientActor gets dropped. - shutdown_signal: Option>, + shutdown_signal: Option>, + + /// Manages updating the config. + config_updater: Option, } /// Blocks the program until given genesis time arrives. @@ -144,7 +148,7 @@ impl ClientActor { address: Addr, config: ClientConfig, chain_genesis: ChainGenesis, - runtime_adapter: Arc, + runtime_adapter: Arc, node_id: PeerId, network_adapter: Arc, validator_signer: Option>, @@ -152,8 +156,9 @@ impl ClientActor { enable_doomslug: bool, rng_seed: RngSeed, ctx: &Context, - shutdown_signal: Option>, + shutdown_signal: Option>, adv: crate::adversarial::Controls, + config_updater: Option, ) -> Result { let state_parts_arbiter = Arbiter::new(); let self_addr = ctx.address(); @@ -222,7 +227,8 @@ impl ClientActor { #[cfg(feature = "sandbox")] fastforward_delta: 0, - shutdown_signal: shutdown_signal, + shutdown_signal, + config_updater, }) } } @@ -1129,14 +1135,20 @@ impl ClientActor { /// Returns the delay before the next time `check_triggers` should be called, which is /// min(time until the closest trigger, 1 second). 
fn check_triggers(&mut self, ctx: &mut Context) -> Duration { + if let Some(config_updater) = &mut self.config_updater { + config_updater.try_update(&|updateable_client_config| { + self.client.update_client_config(updateable_client_config) + }); + } + // Check block height to trigger expected shutdown if let Ok(head) = self.client.chain.head() { - let block_height_to_shutdown = - EXPECTED_SHUTDOWN_AT.load(std::sync::atomic::Ordering::Relaxed); - if block_height_to_shutdown > 0 && head.height >= block_height_to_shutdown { - info!(target: "client", "Expected shutdown triggered: head block({}) >= ({})", head.height, block_height_to_shutdown); - if let Some(tx) = self.shutdown_signal.take() { - let _ = tx.send(()); // Ignore send signal fail, it will send again in next trigger + if let Some(block_height_to_shutdown) = self.client.config.expected_shutdown.get() { + if head.height >= block_height_to_shutdown { + info!(target: "client", "Expected shutdown triggered: head block({}) >= ({:?})", head.height, block_height_to_shutdown); + if let Some(tx) = self.shutdown_signal.take() { + let _ = tx.send(()); // Ignore send signal fail, it will send again in next trigger + } } } } @@ -1755,7 +1767,12 @@ impl ClientActor { fn log_summary(&mut self) { let _span = tracing::debug_span!(target: "client", "log_summary").entered(); let _d = delay_detector::DelayDetector::new(|| "client log summary".into()); - self.info_helper.log_summary(&self.client, &self.node_id, &self.network_info) + self.info_helper.log_summary( + &self.client, + &self.node_id, + &self.network_info, + &self.config_updater, + ) } } @@ -1954,6 +1971,21 @@ impl Handler> for ClientActor { } } +impl Handler> for ClientActor { + type Result = Result; + + fn handle( + &mut self, + msg: WithSpanContext, + _: &mut Context, + ) -> Self::Result { + let (_span, _msg) = handler_debug_span!(target: "client", msg); + let _d = delay_detector::DelayDetector::new(|| "client get client config".into()); + + 
Ok(self.client.config.clone()) + } +} + /// Returns random seed sampled from the current thread pub fn random_seed_from_thread() -> RngSeed { let mut rng_seed: RngSeed = [0; 32]; @@ -1965,13 +1997,14 @@ pub fn random_seed_from_thread() -> RngSeed { pub fn start_client( client_config: ClientConfig, chain_genesis: ChainGenesis, - runtime_adapter: Arc, + runtime_adapter: Arc, node_id: PeerId, network_adapter: Arc, validator_signer: Option>, telemetry_actor: Addr, - sender: Option>, + sender: Option>, adv: crate::adversarial::Controls, + config_updater: Option, ) -> (Addr, ArbiterHandle) { let client_arbiter = Arbiter::new(); let client_arbiter_handle = client_arbiter.handle(); @@ -1990,6 +2023,7 @@ pub fn start_client( ctx, sender, adv, + config_updater, ) .unwrap() }); diff --git a/chain/client/src/config_updater.rs b/chain/client/src/config_updater.rs new file mode 100644 index 00000000000..8f2389823cb --- /dev/null +++ b/chain/client/src/config_updater.rs @@ -0,0 +1,53 @@ +use near_chain_configs::UpdateableClientConfig; +use near_dyn_configs::{UpdateableConfigLoaderError, UpdateableConfigs}; +use std::sync::Arc; +use tokio::sync::broadcast::Receiver; + +#[derive(Debug)] +pub enum ClientConfigUpdateError {} + +/// Manages updating the config encapsulating. +pub struct ConfigUpdater { + /// Receives config updates while the node is running. + rx_config_update: Receiver>>, + + /// Represents the latest Error of reading the dynamically reloadable configs. + updateable_configs_error: Option>, +} + +impl ConfigUpdater { + pub fn new( + rx_config_update: Receiver>>, + ) -> Self { + Self { rx_config_update, updateable_configs_error: None } + } + + /// Check if any of the configs were updated. + /// If they did, the receiver (rx_config_update) will contain a clone of the new configs. 
+ pub fn try_update(&mut self, update_client_config_fn: &dyn Fn(UpdateableClientConfig)) { + while let Ok(maybe_updateable_configs) = self.rx_config_update.try_recv() { + match maybe_updateable_configs { + Ok(updateable_configs) => { + if let Some(client_config) = updateable_configs.client_config { + update_client_config_fn(client_config); + tracing::info!(target: "config", "Updated ClientConfig"); + } + self.updateable_configs_error = None; + } + Err(err) => { + self.updateable_configs_error = Some(err.clone()); + } + } + } + } + + /// Prints an error if it's present. + pub fn report_status(&self) { + if let Some(updateable_configs_error) = &self.updateable_configs_error { + tracing::warn!( + target: "stats", + "Dynamically updateable configs are not valid. Please fix this ASAP otherwise the node will probably crash after restart: {}", + *updateable_configs_error); + } + } +} diff --git a/chain/client/src/debug.rs b/chain/client/src/debug.rs index ca046633276..943e4a66a72 100644 --- a/chain/client/src/debug.rs +++ b/chain/client/src/debug.rs @@ -5,7 +5,7 @@ use actix::{Context, Handler}; use borsh::BorshSerialize; use itertools::Itertools; use near_chain::crypto_hash_timer::CryptoHashTimer; -use near_chain::{near_chain_primitives, Chain, ChainStoreAccess, RuntimeAdapter}; +use near_chain::{near_chain_primitives, Chain, ChainStoreAccess, RuntimeWithEpochManagerAdapter}; use near_client_primitives::debug::{ ApprovalAtHeightStatus, BlockProduction, ChunkCollection, DebugBlockStatusData, DebugStatus, DebugStatusResponse, MissedHeightInfo, ProductionAtHeight, ValidatorStatus, @@ -119,7 +119,7 @@ impl BlockProductionTracker { epoch_id: &EpochId, num_shards: ShardId, new_chunks: &HashMap, AccountId)>, - runtime_adapter: &dyn RuntimeAdapter, + runtime_adapter: &dyn RuntimeWithEpochManagerAdapter, ) -> Result, Error> { let mut chunk_collection_info = vec![]; for shard_id in 0..num_shards { diff --git a/chain/client/src/info.rs b/chain/client/src/info.rs index 
d53d5bdfc8a..2251e99d840 100644 --- a/chain/client/src/info.rs +++ b/chain/client/src/info.rs @@ -1,3 +1,4 @@ +use crate::config_updater::ConfigUpdater; use crate::{metrics, rocksdb_metrics, SyncStatus}; use actix::Addr; use itertools::Itertools; @@ -125,6 +126,7 @@ impl InfoHelper { client: &crate::client::Client, node_id: &PeerId, network_info: &NetworkInfo, + config_updater: &Option, ) { let is_syncing = client.sync_status.is_syncing(); let head = unwrap_or_return!(client.chain.head()); @@ -190,6 +192,7 @@ impl InfoHelper { .unwrap_or(0), statistics, &client.config, + config_updater, ); self.log_chain_processing_info(client, &head.epoch_id); } @@ -206,6 +209,7 @@ impl InfoHelper { protocol_upgrade_block_height: BlockHeight, statistics: Option, client_config: &ClientConfig, + config_updater: &Option, ) { let use_colour = matches!(self.log_summary_style, LogSummaryStyle::Colored); let paint = |colour: ansi_term::Colour, text: Option| match text { @@ -268,12 +272,14 @@ impl InfoHelper { paint(ansi_term::Colour::Blue, machine_info_log), ); if catchup_status_log != "" { - info!(target:"stats", "Catchups\n{}", catchup_status_log); + info!(target: "stats", "Catchups\n{}", catchup_status_log); } if let Some(statistics) = statistics { rocksdb_metrics::export_stats_as_metrics(statistics); } - + if let Some(config_updater) = &config_updater { + config_updater.report_status(); + } let (cpu_usage, memory_usage) = proc_info.unwrap_or_default(); let is_validator = validator_info.map(|v| v.is_validator).unwrap_or_default(); (metrics::IS_VALIDATOR.set(is_validator as i64)); diff --git a/chain/client/src/lib.rs b/chain/client/src/lib.rs index ee8aaa224ad..48d1c0417b2 100644 --- a/chain/client/src/lib.rs +++ b/chain/client/src/lib.rs @@ -1,8 +1,9 @@ pub use near_client_primitives::types::{ Error, GetBlock, GetBlockProof, GetBlockProofResponse, GetBlockWithMerkleTree, GetChunk, - GetExecutionOutcome, GetExecutionOutcomeResponse, GetExecutionOutcomesForBlock, GetGasPrice, - 
GetMaintenanceWindows, GetNetworkInfo, GetNextLightClientBlock, GetProtocolConfig, GetReceipt, - GetStateChanges, GetStateChangesInBlock, GetStateChangesWithCauseInBlock, + GetClientConfig, GetExecutionOutcome, GetExecutionOutcomeResponse, + GetExecutionOutcomesForBlock, GetGasPrice, GetMaintenanceWindows, GetNetworkInfo, + GetNextLightClientBlock, GetProtocolConfig, GetReceipt, GetStateChanges, + GetStateChangesInBlock, GetStateChangesWithCauseInBlock, GetStateChangesWithCauseInBlockForTrackedShards, GetValidatorInfo, GetValidatorOrdered, Query, QueryError, Status, StatusResponse, SyncStatus, TxStatus, TxStatusError, }; @@ -14,12 +15,14 @@ pub use crate::adapter::{ }; pub use crate::client::Client; pub use crate::client_actor::{start_client, ClientActor}; +pub use crate::config_updater::ConfigUpdater; pub use crate::view_client::{start_view_client, ViewClientActor}; pub mod adapter; pub mod adversarial; mod client; mod client_actor; +mod config_updater; pub mod debug; mod info; mod metrics; diff --git a/chain/client/src/sync/state.rs b/chain/client/src/sync/state.rs index 68af141955e..fe332a214a1 100644 --- a/chain/client/src/sync/state.rs +++ b/chain/client/src/sync/state.rs @@ -35,7 +35,7 @@ use rand::seq::SliceRandom; use rand::{thread_rng, Rng}; use tracing::{debug, error, info, warn}; -use near_chain::{Chain, RuntimeAdapter}; +use near_chain::{Chain, RuntimeWithEpochManagerAdapter}; use near_network::types::{ HighestHeightPeerInfo, NetworkRequests, NetworkResponses, PeerManagerAdapter, }; @@ -170,7 +170,7 @@ impl StateSync { sync_hash: CryptoHash, new_shard_sync: &mut HashMap, chain: &mut Chain, - runtime_adapter: &Arc, + runtime_adapter: &Arc, highest_height_peers: &[HighestHeightPeerInfo], tracking_shards: Vec, now: DateTime, @@ -517,7 +517,7 @@ impl StateSync { me: &Option, shard_id: ShardId, chain: &Chain, - runtime_adapter: &Arc, + runtime_adapter: &Arc, sync_hash: CryptoHash, highest_height_peers: &[HighestHeightPeerInfo], ) -> Result, Error> { @@ 
-572,7 +572,7 @@ impl StateSync { me: &Option, shard_id: ShardId, chain: &Chain, - runtime_adapter: &Arc, + runtime_adapter: &Arc, sync_hash: CryptoHash, shard_sync_download: ShardSyncDownload, highest_height_peers: &[HighestHeightPeerInfo], @@ -693,7 +693,7 @@ impl StateSync { sync_hash: CryptoHash, new_shard_sync: &mut HashMap, chain: &mut Chain, - runtime_adapter: &Arc, + runtime_adapter: &Arc, highest_height_peers: &[HighestHeightPeerInfo], // Shards to sync. tracking_shards: Vec, @@ -941,7 +941,7 @@ mod test { *request_hash, &mut new_shard_sync, &mut chain, - &(kv as Arc), + &(kv as Arc), &[], vec![0], &apply_parts_fn, diff --git a/chain/client/src/test_utils.rs b/chain/client/src/test_utils.rs index eddb0f0b8dc..9871b6f3be4 100644 --- a/chain/client/src/test_utils.rs +++ b/chain/client/src/test_utils.rs @@ -22,7 +22,8 @@ use near_chain::test_utils::{ }; use near_chain::types::ChainConfig; use near_chain::{ - Chain, ChainGenesis, ChainStoreAccess, DoomslugThresholdMode, Provenance, RuntimeAdapter, + Chain, ChainGenesis, ChainStoreAccess, DoomslugThresholdMode, Provenance, + RuntimeWithEpochManagerAdapter, }; use near_chain_configs::ClientConfig; use near_chunks::client::{ClientAdapterForShardsManager, ShardsManagerResponse}; @@ -265,6 +266,7 @@ pub fn setup( ctx, None, adv, + None, ) .unwrap(); (genesis_block, client, view_client_addr) @@ -1090,7 +1092,7 @@ pub fn setup_client_with_runtime( network_adapter: Arc, client_adapter: Arc, chain_genesis: ChainGenesis, - runtime_adapter: Arc, + runtime_adapter: Arc, rng_seed: RngSeed, archive: bool, save_trie_changes: bool, @@ -1166,7 +1168,7 @@ pub struct TestEnvBuilder { chain_genesis: ChainGenesis, clients: Vec, validators: Vec, - runtime_adapters: Option>>, + runtime_adapters: Option>>, network_adapters: Option>>, // random seed to be inject in each client according to AccountId // if not set, a default constant TEST_SEED will be injected @@ -1238,7 +1240,10 @@ impl TestEnvBuilder { /// The vector must have the 
same number of elements as they are clients /// (one by default). If that does not hold, [`Self::build`] method will /// panic. - pub fn runtime_adapters(mut self, adapters: Vec>) -> Self { + pub fn runtime_adapters( + mut self, + adapters: Vec>, + ) -> Self { self.runtime_adapters = Some(adapters); self } diff --git a/chain/client/src/view_client.rs b/chain/client/src/view_client.rs index 7e743dcf5fd..4477441cfb7 100644 --- a/chain/client/src/view_client.rs +++ b/chain/client/src/view_client.rs @@ -14,7 +14,7 @@ use tracing::{debug, error, info, trace, warn}; use near_chain::{ get_epoch_block_producers_view, Chain, ChainGenesis, ChainStoreAccess, DoomslugThresholdMode, - RuntimeAdapter, + RuntimeWithEpochManagerAdapter, }; use near_chain_configs::{ClientConfig, ProtocolConfigView}; use near_client_primitives::types::{ @@ -93,7 +93,7 @@ pub struct ViewClientActor { /// Validator account (if present). validator_account_id: Option, chain: Chain, - runtime_adapter: Arc, + runtime_adapter: Arc, network_adapter: Arc, pub config: ClientConfig, request_manager: Arc>, @@ -119,7 +119,7 @@ impl ViewClientActor { pub fn new( validator_account_id: Option, chain_genesis: &ChainGenesis, - runtime_adapter: Arc, + runtime_adapter: Arc, network_adapter: Arc, config: ClientConfig, request_manager: Arc>, @@ -1426,7 +1426,7 @@ impl Handler> for ViewClientActor { pub fn start_view_client( validator_account_id: Option, chain_genesis: ChainGenesis, - runtime_adapter: Arc, + runtime_adapter: Arc, network_adapter: Arc, config: ClientConfig, adv: crate::adversarial::Controls, diff --git a/chain/epoch-manager/src/adapter.rs b/chain/epoch-manager/src/adapter.rs index 0a42fa53f2b..c0b687a350d 100644 --- a/chain/epoch-manager/src/adapter.rs +++ b/chain/epoch-manager/src/adapter.rs @@ -24,7 +24,7 @@ use std::sync::{Arc, RwLockReadGuard, RwLockWriteGuard}; /// A trait that abstracts the interface of the EpochManager. 
/// /// It is intended to be an intermediate state in a refactor: we want to remove -/// epoch manager stuff from RuntimeAdapter's interface, and, as a first step, +/// epoch manager stuff from RuntimeWithEpochManagerAdapter's interface, and, as a first step, /// we move it to a new trait. The end goal is for the code to use the concrete /// epoch manager type directly. Though, we might want to still keep this trait /// in, to allow for easy overriding of epoch manager in tests. @@ -348,7 +348,7 @@ pub trait EpochManagerAdapter: Send + Sync { /// A technical plumbing trait to conveniently implement [`EpochManagerAdapter`] /// for `NightshadeRuntime` without too much copy-paste. /// -/// Once we remove `RuntimeAdapter: EpochManagerAdapter` bound, we could get rid +/// Once we remove `RuntimeWithEpochManagerAdapter: EpochManagerAdapter` bound, we could get rid /// of this trait and instead add inherent methods directly to /// `EpochManagerHandle`. pub trait HasEpochMangerHandle { diff --git a/chain/jsonrpc-primitives/src/types/client_config.rs b/chain/jsonrpc-primitives/src/types/client_config.rs new file mode 100644 index 00000000000..f1af15989d2 --- /dev/null +++ b/chain/jsonrpc-primitives/src/types/client_config.rs @@ -0,0 +1,38 @@ +use serde::{Deserialize, Serialize}; +use serde_json::Value; + +#[derive(Serialize, Deserialize, Debug)] +pub struct RpcClientConfigRequest {} + +#[derive(Serialize, Deserialize, Debug)] +pub struct RpcClientConfigResponse { + #[serde(flatten)] + pub client_config: near_chain_configs::ClientConfig, +} + +#[derive(thiserror::Error, Debug, Serialize, Deserialize)] +#[serde(tag = "name", content = "info", rename_all = "SCREAMING_SNAKE_CASE")] +pub enum RpcClientConfigError { + #[error("The node reached its limits. Try again later. 
More details: {error_message}")] + InternalError { error_message: String }, +} + +impl From for crate::errors::RpcError { + fn from(error: RpcClientConfigError) -> Self { + let error_data = match &error { + RpcClientConfigError::InternalError { .. } => Some(Value::String(error.to_string())), + }; + + let error_data_value = match serde_json::to_value(error) { + Ok(value) => value, + Err(err) => { + return Self::new_internal_error( + None, + format!("Failed to serialize RpcClientConfigError: {:?}", err), + ) + } + }; + + Self::new_internal_or_handler_error(error_data, error_data_value) + } +} diff --git a/chain/jsonrpc-primitives/src/types/mod.rs b/chain/jsonrpc-primitives/src/types/mod.rs index 33604f6ac74..ae10aa18dd8 100644 --- a/chain/jsonrpc-primitives/src/types/mod.rs +++ b/chain/jsonrpc-primitives/src/types/mod.rs @@ -1,6 +1,7 @@ pub mod blocks; pub mod changes; pub mod chunks; +pub mod client_config; pub mod config; pub mod gas_price; pub mod light_client; diff --git a/chain/jsonrpc/jsonrpc-tests/res/genesis_config.json b/chain/jsonrpc/jsonrpc-tests/res/genesis_config.json index 4a81f4eccd2..03b9a0c4bbd 100644 --- a/chain/jsonrpc/jsonrpc-tests/res/genesis_config.json +++ b/chain/jsonrpc/jsonrpc-tests/res/genesis_config.json @@ -1,5 +1,5 @@ { - "protocol_version": 57, + "protocol_version": 58, "genesis_time": "1970-01-01T00:00:00.000000000Z", "chain_id": "sample", "genesis_height": 0, diff --git a/chain/jsonrpc/jsonrpc-tests/tests/rpc_transactions.rs b/chain/jsonrpc/jsonrpc-tests/tests/rpc_transactions.rs index 8ac13d25ca1..20415f8cb9f 100644 --- a/chain/jsonrpc/jsonrpc-tests/tests/rpc_transactions.rs +++ b/chain/jsonrpc/jsonrpc-tests/tests/rpc_transactions.rs @@ -1,10 +1,8 @@ use std::sync::{Arc, Mutex}; -use std::{thread, time}; use actix::{Actor, System}; use borsh::BorshSerialize; use futures::{future, FutureExt, TryFutureExt}; -use serde_json::json; use near_actix_test_utils::run_actix; use near_crypto::{InMemorySigner, KeyType}; @@ -99,50 +97,6 @@ fn 
test_send_tx_commit() { }); } -/// Test get_recursive_transaction_results (called by get_final_transaction_result) -/// only returns non-refund receipts -#[test] -fn test_refunds_not_in_receipts() { - test_with_client!(test_utils::NodeType::Validator, client, async move { - let block_hash = client.block(BlockReference::latest()).await.unwrap().header.hash; - let signer = InMemorySigner::from_seed("test1".parse().unwrap(), KeyType::ED25519, "test1"); - let tx = SignedTransaction::send_money( - 1, - "test1".parse().unwrap(), - "test2".parse().unwrap(), - &signer, - 100, - block_hash, - ); - let bytes = tx.try_to_vec().unwrap(); - client.broadcast_tx_commit(to_base64(&bytes)).await.unwrap(); - let mut tx_status = json!(client.EXPERIMENTAL_tx_status(to_base64(&bytes)).await.unwrap()); - for _ in 1..10 { - // poll every 10 milliseconds for updated tx status - thread::sleep(time::Duration::from_millis(10)); - tx_status = json!(client.EXPERIMENTAL_tx_status(to_base64(&bytes)).await.unwrap()); - let receipts = tx_status.get("receipts"); - if receipts.is_some() { - if !receipts.unwrap().as_array().unwrap().is_empty() { - break; - } - } - } - if let Some(receipt) = tx_status.get("receipts") { - if !receipt.as_array().unwrap().is_empty() { - let receipt_predecessor_id = receipt.get("predecessor_id"); - if receipt_predecessor_id.is_some() { - let is_refund = receipt["predecessor_id"].get("is_system").unwrap().as_bool(); - if is_refund.is_some() { - assert!(!is_refund.unwrap()); - } - } - } - } - assert!(tx_status.get("receipts").unwrap().as_array().is_some()); - }); -} - /// Test that expired transaction should be rejected #[test] fn test_expired_tx() { diff --git a/chain/jsonrpc/res/debug.html b/chain/jsonrpc/res/debug.html index 79708b09b56..48963b1883a 100644 --- a/chain/jsonrpc/res/debug.html +++ b/chain/jsonrpc/res/debug.html @@ -66,6 +66,7 @@

Epoch info

Chain & Chunk info

Sync info

Validator info

+

Client Config

diff --git a/chain/jsonrpc/res/rpc_errors_schema.json b/chain/jsonrpc/res/rpc_errors_schema.json index 4401321ff69..4606910cb67 100644 --- a/chain/jsonrpc/res/rpc_errors_schema.json +++ b/chain/jsonrpc/res/rpc_errors_schema.json @@ -460,7 +460,13 @@ "FunctionCallError", "NewReceiptValidationError", "OnlyImplicitAccountCreationAllowed", - "DeleteAccountWithLargeState" + "DeleteAccountWithLargeState", + "DelegateActionInvalidSignature", + "DelegateActionSenderDoesNotMatchTxReceiver", + "DelegateActionExpired", + "DelegateActionAccessKeyError", + "DelegateActionInvalidNonce", + "DelegateActionNonceTooLarge" ], "props": { "index": "" @@ -480,7 +486,9 @@ "FunctionCallMethodNameLengthExceeded", "FunctionCallArgumentsLengthExceeded", "UnsuitableStakingKey", - "FunctionCallZeroAttachedGas" + "FunctionCallZeroAttachedGas", + "DelegateActionCantContainNestedOne", + "DelegateActionMustBeOnlyOne" ], "props": {} }, @@ -556,6 +564,50 @@ "registrar_account_id": "" } }, + "DelegateActionCantContainNestedOne": { + "name": "DelegateActionCantContainNestedOne", + "subtypes": [], + "props": {} + }, + "DelegateActionExpired": { + "name": "DelegateActionExpired", + "subtypes": [], + "props": {} + }, + "DelegateActionInvalidNonce": { + "name": "DelegateActionInvalidNonce", + "subtypes": [], + "props": { + "ak_nonce": "", + "delegate_nonce": "" + } + }, + "DelegateActionInvalidSignature": { + "name": "DelegateActionInvalidSignature", + "subtypes": [], + "props": {} + }, + "DelegateActionMustBeOnlyOne": { + "name": "DelegateActionMustBeOnlyOne", + "subtypes": [], + "props": {} + }, + "DelegateActionNonceTooLarge": { + "name": "DelegateActionNonceTooLarge", + "subtypes": [], + "props": { + "delegate_nonce": "", + "upper_bound": "" + } + }, + "DelegateActionSenderDoesNotMatchTxReceiver": { + "name": "DelegateActionSenderDoesNotMatchTxReceiver", + "subtypes": [], + "props": { + "receiver_id": "", + "sender_id": "" + } + }, "DeleteAccountStaking": { "name": "DeleteAccountStaking", "subtypes": 
[], diff --git a/chain/jsonrpc/src/api/client_config.rs b/chain/jsonrpc/src/api/client_config.rs new file mode 100644 index 00000000000..b5e55363fcf --- /dev/null +++ b/chain/jsonrpc/src/api/client_config.rs @@ -0,0 +1,25 @@ +use near_client_primitives::types::GetClientConfigError; +use near_jsonrpc_primitives::types::client_config::RpcClientConfigError; + +use super::RpcFrom; + +impl RpcFrom for RpcClientConfigError { + fn rpc_from(error: actix::MailboxError) -> Self { + Self::InternalError { error_message: error.to_string() } + } +} + +impl RpcFrom for RpcClientConfigError { + fn rpc_from(error: GetClientConfigError) -> Self { + match error { + GetClientConfigError::IOError(error_message) => Self::InternalError { error_message }, + GetClientConfigError::Unreachable(ref error_message) => { + tracing::warn!(target: "jsonrpc", "Unreachable error occurred: {}", error_message); + crate::metrics::RPC_UNREACHABLE_ERROR_COUNT + .with_label_values(&["RpcClientConfigError"]) + .inc(); + Self::InternalError { error_message: error.to_string() } + } + } + } +} diff --git a/chain/jsonrpc/src/api/mod.rs b/chain/jsonrpc/src/api/mod.rs index 60bdd368363..d426b836ae2 100644 --- a/chain/jsonrpc/src/api/mod.rs +++ b/chain/jsonrpc/src/api/mod.rs @@ -8,6 +8,7 @@ use near_primitives::borsh::BorshDeserialize; mod blocks; mod changes; mod chunks; +mod client_config; mod config; mod gas_price; mod light_client; diff --git a/chain/jsonrpc/src/lib.rs b/chain/jsonrpc/src/lib.rs index 41f46e0afc8..885213e612c 100644 --- a/chain/jsonrpc/src/lib.rs +++ b/chain/jsonrpc/src/lib.rs @@ -18,10 +18,11 @@ use tracing::info; use near_chain_configs::GenesisConfig; use near_client::{ - ClientActor, DebugStatus, GetBlock, GetBlockProof, GetChunk, GetExecutionOutcome, GetGasPrice, - GetMaintenanceWindows, GetNetworkInfo, GetNextLightClientBlock, GetProtocolConfig, GetReceipt, - GetStateChanges, GetStateChangesInBlock, GetValidatorInfo, GetValidatorOrdered, - ProcessTxRequest, ProcessTxResponse, Query, 
Status, TxStatus, ViewClientActor, + ClientActor, DebugStatus, GetBlock, GetBlockProof, GetChunk, GetClientConfig, + GetExecutionOutcome, GetGasPrice, GetMaintenanceWindows, GetNetworkInfo, + GetNextLightClientBlock, GetProtocolConfig, GetReceipt, GetStateChanges, + GetStateChangesInBlock, GetValidatorInfo, GetValidatorOrdered, ProcessTxRequest, + ProcessTxResponse, Query, Status, TxStatus, ViewClientActor, }; pub use near_jsonrpc_client as client; use near_jsonrpc_primitives::errors::RpcError; @@ -313,6 +314,9 @@ impl JsonRpcHandler { process_method_call(request, |params| self.tx_status_common(params, false)).await } "validators" => process_method_call(request, |params| self.validators(params)).await, + "client_config" => { + process_method_call(request, |_params: ()| self.client_config()).await + } "EXPERIMENTAL_broadcast_tx_sync" => { process_method_call(request, |params| self.send_tx_sync(params)).await } @@ -1089,6 +1093,16 @@ impl JsonRpcHandler { let windows = self.view_client_send(GetMaintenanceWindows { account_id }).await?; Ok(windows.iter().map(|r| (r.start, r.end)).collect()) } + + async fn client_config( + &self, + ) -> Result< + near_jsonrpc_primitives::types::client_config::RpcClientConfigResponse, + near_jsonrpc_primitives::types::client_config::RpcClientConfigError, + > { + let client_config = self.client_send(GetClientConfig {}).await?; + Ok(near_jsonrpc_primitives::types::client_config::RpcClientConfigResponse { client_config }) + } } #[cfg(feature = "sandbox")] @@ -1409,6 +1423,18 @@ pub async fn prometheus_handler() -> Result { } } +fn client_config_handler( + handler: web::Data, +) -> impl Future> { + let response = async move { + match handler.client_config().await { + Ok(value) => Ok(HttpResponse::Ok().json(&value)), + Err(_) => Ok(HttpResponse::ServiceUnavailable().finish()), + } + }; + response.boxed() +} + fn get_cors(cors_allowed_origins: &[String]) -> Cors { let mut cors = Cors::permissive(); if cors_allowed_origins != ["*".to_string()] 
{ @@ -1533,6 +1559,9 @@ pub fn start_http( web::resource("/debug/api/block_status/{starting_height}") .route(web::get().to(debug_block_status_handler)), ) + .service( + web::resource("/debug/client_config").route(web::get().to(client_config_handler)), + ) .service(debug_html) .service(display_debug_html) }) diff --git a/chain/network/src/network_protocol/mod.rs b/chain/network/src/network_protocol/mod.rs index 26ba0aff8a3..e24e1917d07 100644 --- a/chain/network/src/network_protocol/mod.rs +++ b/chain/network/src/network_protocol/mod.rs @@ -411,6 +411,10 @@ pub enum RoutedMessageBody { StateRequestHeader(ShardId, CryptoHash), StateRequestPart(ShardId, CryptoHash, u64), + /// StateResponse in not produced since protocol version 58. + /// We can remove the support for it in protocol version 60. + /// It has been obsoleted by VersionedStateResponse which + /// is a superset of StateResponse values. StateResponse(StateResponseInfoV1), PartialEncodedChunkRequest(PartialEncodedChunkRequestMsg), PartialEncodedChunkResponse(PartialEncodedChunkResponseMsg), diff --git a/chain/network/src/peer/peer_actor.rs b/chain/network/src/peer/peer_actor.rs index 539f8fd376c..1f27bb56655 100644 --- a/chain/network/src/peer/peer_actor.rs +++ b/chain/network/src/peer/peer_actor.rs @@ -4,7 +4,7 @@ use crate::concurrency::demux; use crate::network_protocol::{ Edge, EdgeState, Encoding, OwnedAccount, ParsePeerMessageError, PartialEdgeInfo, PeerChainInfoV2, PeerIdOrHash, PeerInfo, RawRoutedMessage, RoutedMessageBody, RoutedMessageV2, - RoutingTableUpdate, SyncAccountsData, + RoutingTableUpdate, StateResponseInfo, SyncAccountsData, }; use crate::peer::stream; use crate::peer::tracker::Tracker; @@ -896,6 +896,10 @@ impl PeerActor { network_state.client.state_response(info).await; None } + RoutedMessageBody::StateResponse(info) => { + network_state.client.state_response(StateResponseInfo::V1(info)).await; + None + } RoutedMessageBody::BlockApproval(approval) => { 
network_state.client.block_approval(approval, peer_id).await; None @@ -1403,7 +1407,9 @@ impl actix::Handler for PeerActor { // Connection has been closed. io::ErrorKind::UnexpectedEof | io::ErrorKind::ConnectionReset - | io::ErrorKind::BrokenPipe => true, + | io::ErrorKind::BrokenPipe + // libc::ETIIMEDOUT = 110, translates to io::ErrorKind::TimedOut. + | io::ErrorKind::TimedOut => true, // When stopping tokio runtime, an "IO driver has terminated" is sometimes // returned. io::ErrorKind::Other => true, diff --git a/chain/network/src/peer_manager/network_state/mod.rs b/chain/network/src/peer_manager/network_state/mod.rs index ad68ecc5bb7..0617ead91b9 100644 --- a/chain/network/src/peer_manager/network_state/mod.rs +++ b/chain/network/src/peer_manager/network_state/mod.rs @@ -474,11 +474,11 @@ impl NetworkState { msg: RoutedMessageBody, ) -> bool { let mut success = false; - let accounts_data = self.accounts_data.load(); // All TIER1 messages are being sent over both TIER1 and TIER2 connections for now, // so that we can actually observe the latency/reliability improvements in practice: // for each message we track over which network tier it arrived faster? if tcp::Tier::T1.is_allowed_routed(&msg) { + let accounts_data = self.accounts_data.load(); for key in accounts_data.keys_by_id.get(account_id).iter().flat_map(|keys| keys.iter()) { let data = match accounts_data.data.get(key) { @@ -503,34 +503,19 @@ impl NetworkState { } } - let peer_id_from_account_data = accounts_data - .keys_by_id - .get(account_id) - .iter() - .flat_map(|keys| keys.iter()) - .flat_map(|key| accounts_data.data.get(key)) - .next() - .map(|data| data.peer_id.clone()); - // Find the target peer_id: - // - first look it up in self.accounts_data - // - if missing, fall back to lookup in self.graph.routing_table - // We want to deprecate self.graph.routing_table.account_owner in the next release. 
- let target = if let Some(peer_id) = peer_id_from_account_data { - metrics::ACCOUNT_TO_PEER_LOOKUPS.with_label_values(&["AccountData"]).inc(); - peer_id - } else if let Some(peer_id) = self.graph.routing_table.account_owner(account_id) { - metrics::ACCOUNT_TO_PEER_LOOKUPS.with_label_values(&["AnnounceAccount"]).inc(); - peer_id - } else { - // TODO(MarX, #1369): Message is dropped here. Define policy for this case. - metrics::MessageDropped::UnknownAccount.inc(&msg); - tracing::debug!(target: "network", - account_id = ?self.config.validator.as_ref().map(|v|v.account_id()), - to = ?account_id, - ?msg,"Drop message: unknown account", - ); - tracing::trace!(target: "network", known_peers = ?self.graph.routing_table.get_accounts_keys(), "Known peers"); - return false; + let target = match self.graph.routing_table.account_owner(account_id) { + Some(peer_id) => peer_id, + None => { + // TODO(MarX, #1369): Message is dropped here. Define policy for this case. + metrics::MessageDropped::UnknownAccount.inc(&msg); + tracing::debug!(target: "network", + account_id = ?self.config.validator.as_ref().map(|v|v.account_id()), + to = ?account_id, + ?msg,"Drop message: unknown account", + ); + tracing::trace!(target: "network", known_peers = ?self.graph.routing_table.get_accounts_keys(), "Known peers"); + return false; + } }; let msg = RawRoutedMessage { target: PeerIdOrHash::PeerId(target), body: msg }; diff --git a/chain/network/src/peer_manager/peer_manager_actor.rs b/chain/network/src/peer_manager/peer_manager_actor.rs index dffe001a964..2af96c1e8f7 100644 --- a/chain/network/src/peer_manager/peer_manager_actor.rs +++ b/chain/network/src/peer_manager/peer_manager_actor.rs @@ -3,7 +3,7 @@ use crate::config; use crate::debug::{DebugStatus, GetDebugStatus}; use crate::network_protocol::{ AccountOrPeerIdOrHash, Edge, PeerIdOrHash, PeerMessage, Ping, Pong, RawRoutedMessage, - RoutedMessageBody, SignedAccountData, StateResponseInfo, + RoutedMessageBody, SignedAccountData, }; use 
crate::peer::peer_actor::PeerActor; use crate::peer_manager::connection; @@ -724,12 +724,7 @@ impl PeerManagerActor { } } NetworkRequests::StateResponse { route_back, response } => { - let body = match response { - StateResponseInfo::V1(response) => RoutedMessageBody::StateResponse(response), - response @ StateResponseInfo::V2(_) => { - RoutedMessageBody::VersionedStateResponse(response) - } - }; + let body = RoutedMessageBody::VersionedStateResponse(response); if self.state.send_message_to_peer( &self.clock, tcp::Tier::T2, diff --git a/chain/network/src/peer_manager/tests/routing.rs b/chain/network/src/peer_manager/tests/routing.rs index 5026f0893c3..66a4b738a40 100644 --- a/chain/network/src/peer_manager/tests/routing.rs +++ b/chain/network/src/peer_manager/tests/routing.rs @@ -874,7 +874,7 @@ async fn max_num_peers_limit() { drop(pm3); } -/// Test that TTL is handled properly. +// Test that TTL is handled properly. #[tokio::test] async fn ttl() { init_test_logger(); @@ -928,8 +928,8 @@ async fn ttl() { } } -/// After the initial exchange, all subsequent SyncRoutingTable messages are -/// expected to contain only the diff of the known data. +// After the initial exchange, all subsequent SyncRoutingTable messages are +// expected to contain only the diff of the known data. 
#[tokio::test] async fn repeated_data_in_sync_routing_table() { init_test_logger(); diff --git a/chain/network/src/peer_manager/tests/tier1.rs b/chain/network/src/peer_manager/tests/tier1.rs index ffae873b378..8e0ac5260e6 100644 --- a/chain/network/src/peer_manager/tests/tier1.rs +++ b/chain/network/src/peer_manager/tests/tier1.rs @@ -8,8 +8,10 @@ use crate::peer_manager::testonly::Event; use crate::tcp; use crate::testonly::{make_rng, Rng}; use crate::time; +use crate::types::{NetworkRequests, NetworkResponses, PeerManagerMessageRequest}; use near_o11y::testonly::init_test_logger; -use near_primitives::block_header::{Approval, ApprovalInner}; +use near_o11y::WithSpanContextExt; +use near_primitives::block_header::{Approval, ApprovalInner, ApprovalMessage}; use near_primitives::validator_signer::ValidatorSigner; use near_store::db::TestDB; use rand::Rng as _; @@ -46,66 +48,46 @@ async fn establish_connections(clock: &time::Clock, pms: &[&peer_manager::teston } } -// Sends a routed TIER1 message from `from` to `to`. -// Returns the message body that was sent, or None if the routing information was missing. async fn send_tier1_message( rng: &mut Rng, - clock: &time::Clock, from: &peer_manager::testonly::ActorHandler, to: &peer_manager::testonly::ActorHandler, -) -> Option { +) { let from_signer = from.cfg.validator.as_ref().unwrap().signer.clone(); let to_signer = to.cfg.validator.as_ref().unwrap().signer.clone(); let target = to_signer.validator_id().clone(); - let want = RoutedMessageBody::BlockApproval(make_block_approval(rng, from_signer.as_ref())); - let clock = clock.clone(); - from.with_state(move |s| async move { - if s.send_message_to_account(&clock, &target, want.clone()) { - Some(want) - } else { - None - } - }) - .await -} - -// Sends a routed TIER1 message from `from` to `to`, then waits until `to` receives it. -// `recv_tier` specifies over which network the message is expected to be actually delivered. 
-async fn send_and_recv_tier1_message( - rng: &mut Rng, - clock: &time::Clock, - from: &peer_manager::testonly::ActorHandler, - to: &peer_manager::testonly::ActorHandler, - recv_tier: tcp::Tier, -) { + let want = make_block_approval(rng, from_signer.as_ref()); + let req = NetworkRequests::Approval { + approval_message: ApprovalMessage { approval: want.clone(), target }, + }; let mut events = to.events.from_now(); - let want = send_tier1_message(rng, clock, from, to).await.expect("routing info not available"); + let resp = from + .actix + .addr + .send(PeerManagerMessageRequest::NetworkRequests(req).with_span_context()) + .await + .unwrap(); + assert_eq!(NetworkResponses::NoResponse, resp.as_network_response()); let got = events .recv_until(|ev| match ev { - Event::PeerManager(PME::MessageProcessed(tier, PeerMessage::Routed(got))) - if tier == recv_tier => - { + Event::PeerManager(PME::MessageProcessed(tcp::Tier::T1, PeerMessage::Routed(got))) => { Some(got) } _ => None, }) .await; assert_eq!(from.cfg.node_id(), got.author); - assert_eq!(want, got.body); + assert_eq!(RoutedMessageBody::BlockApproval(want), got.body); } /// Send a message over each connection. 
-async fn test_clique( - rng: &mut Rng, - clock: &time::Clock, - pms: &[&peer_manager::testonly::ActorHandler], -) { +async fn test_clique(rng: &mut Rng, pms: &[&peer_manager::testonly::ActorHandler]) { for from in pms { for to in pms { if from.cfg.node_id() == to.cfg.node_id() { continue; } - send_and_recv_tier1_message(rng, clock, from, to, tcp::Tier::T1).await; + send_tier1_message(rng, from, to).await; } } } @@ -119,7 +101,7 @@ async fn first_proxy_advertisement() { let rng = &mut rng; let mut clock = time::FakeClock::default(); let chain = Arc::new(data::Chain::make(&mut clock, rng, 10)); - let pm = start_pm( + let pm = peer_manager::testonly::start( clock.clock(), near_store::db::TestDB::new(), chain.make_config(rng), @@ -151,7 +133,7 @@ async fn direct_connections() { let mut pms = vec![]; for _ in 0..5 { pms.push( - start_pm( + peer_manager::testonly::start( clock.clock(), near_store::db::TestDB::new(), chain.make_config(rng), @@ -178,7 +160,7 @@ async fn direct_connections() { tracing::info!(target:"test", "Establish connections."); establish_connections(&clock.clock(), &pms[..]).await; tracing::info!(target:"test", "Test clique."); - test_clique(rng, &clock.clock(), &pms[..]).await; + test_clique(rng, &pms[..]).await; } /// Test which spawns N validators, each with 1 proxy. 
@@ -197,7 +179,7 @@ async fn proxy_connections() { let mut proxies = vec![]; for _ in 0..N { proxies.push( - start_pm( + peer_manager::testonly::start( clock.clock(), near_store::db::TestDB::new(), chain.make_config(rng), @@ -216,13 +198,20 @@ async fn proxy_connections() { peer_id: proxies[i].cfg.node_id(), addr: proxies[i].cfg.node_addr.unwrap(), }]); - validators - .push(start_pm(clock.clock(), near_store::db::TestDB::new(), cfg, chain.clone()).await); + validators.push( + peer_manager::testonly::start( + clock.clock(), + near_store::db::TestDB::new(), + cfg, + chain.clone(), + ) + .await, + ); } let validators: Vec<_> = validators.iter().collect(); // Connect validators and proxies in a star topology. Any connected graph would do. - let hub = start_pm( + let hub = peer_manager::testonly::start( clock.clock(), near_store::db::TestDB::new(), chain.make_config(rng), @@ -249,7 +238,7 @@ async fn proxy_connections() { pm.set_chain_info(chain_info.clone()).await; } establish_connections(&clock.clock(), &all[..]).await; - test_clique(rng, &clock.clock(), &validators[..]).await; + test_clique(rng, &validators[..]).await; } #[tokio::test] @@ -274,7 +263,7 @@ async fn account_keys_change() { pm.set_chain_info(chain_info.clone()).await; } establish_connections(&clock.clock(), &[&v0, &v1, &v2, &hub]).await; - test_clique(rng, &clock.clock(), &[&v0, &v1]).await; + test_clique(rng, &[&v0, &v1]).await; // TIER1 nodes in 2nd epoch are {v0,v2}. 
let chain_info = peer_manager::testonly::make_chain_info(&chain, &[&v0.cfg, &v2.cfg]); @@ -282,7 +271,7 @@ async fn account_keys_change() { pm.set_chain_info(chain_info.clone()).await; } establish_connections(&clock.clock(), &[&v0, &v1, &v2, &hub]).await; - test_clique(rng, &clock.clock(), &[&v0, &v2]).await; + test_clique(rng, &[&v0, &v2]).await; drop(v0); drop(v1); @@ -324,6 +313,8 @@ async fn proxy_change() { hub.connect_to(&p1.peer_info(), tcp::Tier::T2).await; hub.connect_to(&v0.peer_info(), tcp::Tier::T2).await; hub.connect_to(&v1.peer_info(), tcp::Tier::T2).await; + tracing::info!(target:"dupa","p0 = {}",p0cfg.node_id()); + tracing::info!(target:"dupa","hub = {}",hub.cfg.node_id()); tracing::info!(target:"test", "p0 goes down"); drop(p0); @@ -335,7 +326,7 @@ async fn proxy_change() { tracing::info!(target:"test", "TIER1 connections get established: v0 -> p1 <- v1."); establish_connections(&clock.clock(), &[&v0, &v1, &p1, &hub]).await; tracing::info!(target:"test", "Send message v1 -> v0 over TIER1."); - send_and_recv_tier1_message(rng, &clock.clock(), &v1, &v0, tcp::Tier::T1).await; + send_tier1_message(rng, &v1, &v0).await; // Advance time, so that the new AccountsData has newer timestamp. 
clock.advance(time::Duration::hours(1)); @@ -349,42 +340,10 @@ async fn proxy_change() { tracing::info!(target:"test", "TIER1 connections get established: v0 -> p0 <- v1."); establish_connections(&clock.clock(), &[&v0, &v1, &p0, &hub]).await; tracing::info!(target:"test", "Send message v1 -> v0 over TIER1."); - send_and_recv_tier1_message(rng, &clock.clock(), &v1, &v0, tcp::Tier::T1).await; + send_tier1_message(rng, &v1, &v0).await; drop(hub); drop(v0); drop(v1); drop(p0); } - -#[tokio::test] -async fn tier2_routing_using_accounts_data() { - init_test_logger(); - let mut rng = make_rng(921853233); - let rng = &mut rng; - let mut clock = time::FakeClock::default(); - let chain = Arc::new(data::Chain::make(&mut clock, rng, 10)); - - tracing::info!(target:"test", "start 2 nodes and connect them"); - let pm0 = start_pm(clock.clock(), TestDB::new(), chain.make_config(rng), chain.clone()).await; - let pm1 = start_pm(clock.clock(), TestDB::new(), chain.make_config(rng), chain.clone()).await; - pm0.connect_to(&pm1.peer_info(), tcp::Tier::T2).await; - - tracing::info!(target:"test", "Try to send a routed message pm0 -> pm1 over TIER2"); - // It should fail due to missing routing information: neither AccountData or AnnounceAccount is - // broadcasted by default in tests. - // TODO(gprusak): send_tier1_message sends an Approval message, which is not a valid message to - // be sent from a non-TIER1 node. Make it more realistic by sending a Transaction message. 
- assert!(send_tier1_message(rng, &clock.clock(), &pm0, &pm1).await.is_none()); - - tracing::info!(target:"test", "propagate AccountsData"); - let chain_info = peer_manager::testonly::make_chain_info(&chain, &[&pm1.cfg]); - for pm in [&pm0, &pm1] { - pm.set_chain_info(chain_info.clone()).await; - } - let data: HashSet<_> = pm1.tier1_advertise_proxies(&clock.clock()).await.into_iter().collect(); - pm0.wait_for_accounts_data(&data).await; - - tracing::info!(target:"test", "Send a routed message pm0 -> pm1 over TIER2."); - send_and_recv_tier1_message(rng, &clock.clock(), &pm0, &pm1, tcp::Tier::T2).await; -} diff --git a/chain/network/src/routing/graph/mod.rs b/chain/network/src/routing/graph/mod.rs index 663af27cdad..b2120afa0f4 100644 --- a/chain/network/src/routing/graph/mod.rs +++ b/chain/network/src/routing/graph/mod.rs @@ -220,7 +220,10 @@ impl Inner { for peer in next_hops.keys() { self.peer_reachable_at.insert(peer.clone(), now); } - self.prune_unreachable_peers(now - self.config.prune_unreachable_peers_after); + if let Some(unreachable_since) = now.checked_sub(self.config.prune_unreachable_peers_after) + { + self.prune_unreachable_peers(unreachable_since); + } let mut local_edges = HashMap::new(); for e in self.edges.clone().values() { if let Some(other) = e.other(&self.config.node_id) { diff --git a/chain/network/src/stats/metrics.rs b/chain/network/src/stats/metrics.rs index c325ee0dd95..c54842b3764 100644 --- a/chain/network/src/stats/metrics.rs +++ b/chain/network/src/stats/metrics.rs @@ -349,19 +349,6 @@ pub(crate) static ALREADY_CONNECTED_ACCOUNT: Lazy = Lazy::new(|| { .unwrap() }); -pub(crate) static ACCOUNT_TO_PEER_LOOKUPS: Lazy = Lazy::new(|| { - try_create_int_counter_vec( - "near_account_to_peer_lookups", - "number of lookups of peer_id by account_id (for routed messages)", - // Source is either "AnnounceAccount" or "AccountData". - // We want to deprecate AnnounceAccount, so eventually we want all - // lookups to be done via AccountData. 
For now AnnounceAccount is - // used as a fallback. - &["source"], - ) - .unwrap() -}); - /// Updated the prometheus metrics about the received routed message `msg`. /// `tier` indicates the network over which the message was transmitted. /// `fastest` indicates whether this message is the first copy of `msg` received - diff --git a/chain/rosetta-rpc/Cargo.toml b/chain/rosetta-rpc/Cargo.toml index 7876f388876..69f1129082f 100644 --- a/chain/rosetta-rpc/Cargo.toml +++ b/chain/rosetta-rpc/Cargo.toml @@ -36,3 +36,11 @@ near-o11y = { path = "../../core/o11y" } [dev-dependencies] insta = "1" near-actix-test-utils = { path = "../../test-utils/actix-test-utils" } + +[features] +protocol_feature_nep366_delegate_action = [ + "near-primitives/protocol_feature_nep366_delegate_action" +] +nightly = [ + "protocol_feature_nep366_delegate_action" +] \ No newline at end of file diff --git a/chain/rosetta-rpc/src/adapters/mod.rs b/chain/rosetta-rpc/src/adapters/mod.rs index e93dbec7fef..09716d7ec5a 100644 --- a/chain/rosetta-rpc/src/adapters/mod.rs +++ b/chain/rosetta-rpc/src/adapters/mod.rs @@ -419,6 +419,8 @@ impl From for Vec { ); operations.push(deploy_contract_operation); } + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + near_primitives::transaction::Action::Delegate(_) => todo!(), } } operations diff --git a/core/chain-configs/Cargo.toml b/core/chain-configs/Cargo.toml index dcc9fb90835..59347e863d0 100644 --- a/core/chain-configs/Cargo.toml +++ b/core/chain-configs/Cargo.toml @@ -15,6 +15,7 @@ anyhow.workspace = true chrono.workspace = true derive_more.workspace = true num-rational.workspace = true +once_cell.workspace = true serde.workspace = true serde_json.workspace = true sha2.workspace = true @@ -22,6 +23,7 @@ smart-default.workspace = true tracing.workspace = true near-crypto = { path = "../crypto" } +near-o11y = { path = "../o11y" } near-primitives = { path = "../primitives" } [features] diff --git a/core/chain-configs/src/client_config.rs 
b/core/chain-configs/src/client_config.rs index ad25750d3a5..6b583138f4e 100644 --- a/core/chain-configs/src/client_config.rs +++ b/core/chain-configs/src/client_config.rs @@ -5,7 +5,10 @@ use std::time::Duration; use serde::{Deserialize, Serialize}; -use near_primitives::types::{AccountId, BlockHeightDelta, Gas, NumBlocks, NumSeats, ShardId}; +use crate::MutableConfigValue; +use near_primitives::types::{ + AccountId, BlockHeight, BlockHeightDelta, Gas, NumBlocks, NumSeats, ShardId, +}; use near_primitives::version::Version; pub const TEST_STATE_SYNC_TIMEOUT: u64 = 5; @@ -70,7 +73,8 @@ impl GCConfig { } } -#[derive(Clone, Serialize, Deserialize)] +/// ClientConfig where some fields can be updated at runtime. +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct ClientConfig { /// Version of the binary. pub version: Version, @@ -78,6 +82,8 @@ pub struct ClientConfig { pub chain_id: String, /// Listening rpc port for status. pub rpc_addr: Option, + /// Graceful shutdown at expected block height. + pub expected_shutdown: MutableConfigValue>, /// Duration to check for producing / skipping block. pub block_production_tracking_delay: Duration, /// Minimum duration before producing block. @@ -182,10 +188,11 @@ impl ClientConfig { because non-archival nodes must save trie changes in order to do do garbage collection." 
); - ClientConfig { + Self { version: Default::default(), chain_id: "unittest".to_string(), rpc_addr: Some("0.0.0.0:3030".to_string()), + expected_shutdown: MutableConfigValue::new(None, "expected_shutdown"), block_production_tracking_delay: Duration::from_millis(std::cmp::max( 10, min_block_prod_time / 5, diff --git a/core/chain-configs/src/lib.rs b/core/chain-configs/src/lib.rs index ab2fcb53ca7..77925f77c37 100644 --- a/core/chain-configs/src/lib.rs +++ b/core/chain-configs/src/lib.rs @@ -1,6 +1,8 @@ mod client_config; mod genesis_config; pub mod genesis_validate; +mod metrics; +mod updateable_config; pub use client_config::{ ClientConfig, GCConfig, LogSummaryStyle, DEFAULT_GC_NUM_EPOCHS_TO_KEEP, @@ -10,3 +12,4 @@ pub use genesis_config::{ get_initial_supply, stream_records_from_file, Genesis, GenesisChangeConfig, GenesisConfig, GenesisRecords, GenesisValidationMode, ProtocolConfig, ProtocolConfigView, }; +pub use updateable_config::{MutableConfigValue, UpdateableClientConfig}; diff --git a/core/chain-configs/src/metrics.rs b/core/chain-configs/src/metrics.rs new file mode 100644 index 00000000000..926a893dc12 --- /dev/null +++ b/core/chain-configs/src/metrics.rs @@ -0,0 +1,11 @@ +use near_o11y::metrics::{try_create_int_gauge_vec, IntGaugeVec}; +use once_cell::sync::Lazy; + +pub static CONFIG_MUTABLE_FIELD: Lazy = Lazy::new(|| { + try_create_int_gauge_vec( + "near_config_mutable_field", + "Timestamp and value of a mutable config field", + &["field_name", "timestamp", "value"], + ) + .unwrap() +}); diff --git a/core/chain-configs/src/updateable_config.rs b/core/chain-configs/src/updateable_config.rs new file mode 100644 index 00000000000..ca140beae74 --- /dev/null +++ b/core/chain-configs/src/updateable_config.rs @@ -0,0 +1,76 @@ +use crate::metrics; +use chrono::{DateTime, Utc}; +use near_primitives::time::Clock; +use near_primitives::types::BlockHeight; +use serde::{Deserialize, Serialize}; +use std::fmt::Debug; +use std::sync::{Arc, Mutex}; + +/// A wrapper 
for a config value that can be updated while the node is running. +/// When initializing sub-objects (e.g. `ShardsManager`), please make sure to +/// pass this wrapper instead of passing a value from a single moment in time. +/// See `expected_shutdown` for an example how to use it. +/// TODO: custom implementation for Serialize and Deserialize s.t. only value is necessary(JIRA:ND-283) +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct MutableConfigValue { + value: Arc>, + // For metrics. + // Mutable config values are exported to prometheus with labels [field_name][last_update][value]. + field_name: String, + // For metrics. + // Mutable config values are exported to prometheus with labels [field_name][last_update][value]. + last_update: DateTime, +} + +impl MutableConfigValue { + /// Initializes a value. + /// `field_name` is needed to export the config value as a prometheus metric. + pub fn new(val: T, field_name: &str) -> Self { + let res = Self { + value: Arc::new(Mutex::new(val)), + field_name: field_name.to_string(), + last_update: Clock::utc(), + }; + res.set_metric_value(val, 1); + res + } + + pub fn get(&self) -> T { + *self.value.lock().unwrap() + } + + pub fn update(&self, val: T) { + let mut lock = self.value.lock().unwrap(); + if *lock != val { + tracing::info!(target: "config", "Updated config field '{}' from {:?} to {:?}", self.field_name, *lock, val); + self.set_metric_value(*lock, 0); + *lock = val; + self.set_metric_value(val, 1); + } else { + tracing::info!(target: "config", "Mutable config field '{}' remains the same: {:?}", self.field_name, val); + } + } + + fn set_metric_value(&self, value: T, metric_value: i64) { + // Use field_name as a label to tell different mutable config values apart. + // Use timestamp as a label to give some idea to the node operator (or + // people helping them debug their node) when exactly and what values + // exactly were part of the config. 
+ // Use the config value as a label to make this work with config values + // of any type: int, float, string or even a composite object. + metrics::CONFIG_MUTABLE_FIELD + .with_label_values(&[ + &self.field_name, + &self.last_update.timestamp().to_string(), + &format!("{:?}", value), + ]) + .set(metric_value); + } +} + +#[derive(Default, Clone, Serialize, Deserialize)] +/// A subset of Config that can be updated white the node is running. +pub struct UpdateableClientConfig { + /// Graceful shutdown at expected block height. + pub expected_shutdown: Option, +} diff --git a/core/dyn-configs/Cargo.toml b/core/dyn-configs/Cargo.toml index 715ff42478b..cc0876aa9cc 100644 --- a/core/dyn-configs/Cargo.toml +++ b/core/dyn-configs/Cargo.toml @@ -11,6 +11,15 @@ repository = "https://github.com/near/nearcore" description = "Dynamic configure helpers for the near codebase" [dependencies] +anyhow.workspace = true once_cell.workspace = true prometheus.workspace = true +serde.workspace = true +serde_json.workspace = true +thiserror.workspace = true +tokio.workspace = true +tracing.workspace = true + +near-chain-configs = { path = "../chain-configs" } near-o11y = { path = "../o11y" } +near-primitives = { path = "../primitives" } diff --git a/core/dyn-configs/README.md b/core/dyn-configs/README.md index b6ba607094f..b996d7441e9 100644 --- a/core/dyn-configs/README.md +++ b/core/dyn-configs/README.md @@ -1,5 +1,26 @@ -Dynamic config helpers for the NEAR codebase. +Dynamic config helpers for the NEAR codebase. -This crate contains all utilities to dynamic control neard. +This crate contains utilities that allow to reconfigure the node while it is running. -- `EXPECTED_SHUTDOWN_AT`: the specified block height neard will gracefully shutdown at. +## How to: + +### Logging and tracing + +Make changes to `log_config.json` and send `SIGHUP` signal to the `neard` process. + +### Other config values + +Makes changes to `config.json` and send `SIGHUP` signal to the `neard` process. 
+ +#### Fields of config that can be changed while the node is running: + +- `expected_shutdown`: the specified block height neard will gracefully shutdown at. + +#### Changing other fields of `config.json` + +The changes to other fields of `config.json` will be silently ignored as long as +`config.json` remains a valid json object and passes internal validation. + +Please be careful about making changes to `config.json` because when a node +starts (or restarts), it checks the validity of the config files and crashes if +detects any issues. diff --git a/core/dyn-configs/src/lib.rs b/core/dyn-configs/src/lib.rs index 4ad7cc75e0e..d0126380cc3 100644 --- a/core/dyn-configs/src/lib.rs +++ b/core/dyn-configs/src/lib.rs @@ -1,28 +1,73 @@ #![doc = include_str!("../README.md")] -use near_o11y::metrics::{try_create_int_counter, IntCounter}; -use once_cell::sync::Lazy; -use std::sync::atomic::{AtomicU64, Ordering}; - -/// An indicator for dynamic config changes -pub static DYN_CONFIG_CHANGE: Lazy = Lazy::new(|| { - try_create_int_counter( - "near_dynamic_config_changes", - "Total number of dynamic configuration changes", - ) - .unwrap() -}); - -// NOTE: AtomicU64 is the same unit as BlockHeight, and use to store the expected blockheight to -// shutdown -pub static EXPECTED_SHUTDOWN_AT: AtomicU64 = AtomicU64::new(0); - -/// Reload the dynamic config, and increase the counting metric near_dynamic_config_changes -pub fn reload(expected_shutdown: Option) { - if let Some(expected_shutdown) = expected_shutdown { - EXPECTED_SHUTDOWN_AT.store(expected_shutdown, Ordering::Relaxed); - } else { - EXPECTED_SHUTDOWN_AT.store(0, Ordering::Relaxed); +use near_chain_configs::UpdateableClientConfig; +use near_o11y::log_config::LogConfig; +use near_primitives::time::Clock; +use serde::{Deserialize, Serialize}; +use std::path::PathBuf; +use std::sync::Arc; +use tokio::sync::broadcast::Sender; + +mod metrics; + +#[derive(Serialize, Deserialize, Clone, Default)] +/// Contains the latest state of 
configs which can be updated at runtime. +pub struct UpdateableConfigs { + /// Contents of the file LOG_CONFIG_FILENAME. + pub log_config: Option, + /// Contents of the `config.json` corresponding to the mutable fields of `ClientConfig`. + pub client_config: Option, +} + +/// Pushes the updates to listeners. +#[derive(Default)] +pub struct UpdateableConfigLoader { + /// Notifies receivers about the new config values available. + tx: Option>>>, +} + +#[derive(thiserror::Error, Debug)] +#[non_exhaustive] +pub enum UpdateableConfigLoaderError { + #[error("Failed to parse a dynamic config file {file:?}: {err:?}")] + Parse { file: PathBuf, err: serde_json::Error }, + #[error("Can't open or read a dynamic config file {file:?}: {err:?}")] + OpenAndRead { file: PathBuf, err: std::io::Error }, + #[error("Can't open or read the config file {file:?}: {err:?}")] + ConfigFileError { file: PathBuf, err: anyhow::Error }, + #[error("One or multiple dynamic config files reload errors {0:?}")] + Errors(Vec), + #[error("No home dir set")] + NoHomeDir(), +} + +impl UpdateableConfigLoader { + pub fn new( + updateable_configs: UpdateableConfigs, + tx: Sender>>, + ) -> Self { + let mut result = Self { tx: Some(tx) }; + result.reload(Ok(updateable_configs)); + result + } + + pub fn reload( + &mut self, + updateable_configs: Result, + ) { + match updateable_configs { + Ok(updateable_configs) => { + self.tx.as_ref().map(|tx| tx.send(Ok(updateable_configs.clone()))); + Self::update_metrics(); + } + Err(err) => { + self.tx.as_ref().map(|tx| tx.send(Err(Arc::new(err)))); + } + } + } + + fn update_metrics() { + metrics::CONFIG_RELOAD_TIMESTAMP.set(Clock::utc().timestamp()); + metrics::CONFIG_RELOADS.inc(); } - DYN_CONFIG_CHANGE.inc(); } diff --git a/core/dyn-configs/src/metrics.rs b/core/dyn-configs/src/metrics.rs new file mode 100644 index 00000000000..55442c3d230 --- /dev/null +++ b/core/dyn-configs/src/metrics.rs @@ -0,0 +1,18 @@ +use near_o11y::metrics::{try_create_int_counter, 
try_create_int_gauge, IntCounter, IntGauge}; +use once_cell::sync::Lazy; + +pub static CONFIG_RELOADS: Lazy = Lazy::new(|| { + try_create_int_counter( + "near_config_reloads_total", + "Number of times the configs were reloaded during the current run of the process", + ) + .unwrap() +}); + +pub static CONFIG_RELOAD_TIMESTAMP: Lazy = Lazy::new(|| { + try_create_int_gauge( + "near_config_reload_timestamp_seconds", + "Timestamp of the last reload of the config", + ) + .unwrap() +}); diff --git a/core/o11y/src/lib.rs b/core/o11y/src/lib.rs index 7674237de0d..a678a3314c4 100644 --- a/core/o11y/src/lib.rs +++ b/core/o11y/src/lib.rs @@ -27,6 +27,7 @@ use tracing_subscriber::{fmt, reload, EnvFilter, Layer, Registry}; /// Custom tracing subscriber implementation that produces IO traces. pub mod context; mod io_tracer; +pub mod log_config; pub mod macros; pub mod metrics; pub mod pretty; @@ -80,6 +81,7 @@ static DEFAULT_OTLP_LEVEL: OnceCell = OnceCell::new(); /// The default value for the `RUST_LOG` environment variable if one isn't specified otherwise. pub const DEFAULT_RUST_LOG: &str = "tokio_reactor=info,\ + config=info,\ near=info,\ recompress=info,\ stats=info,\ @@ -358,6 +360,12 @@ pub fn default_subscriber( } } +pub fn set_default_otlp_level(options: &Options) { + // Record the initial tracing level specified as a command-line flag. Use this recorded value to + // reset opentelemetry filter when the LogConfig file gets deleted. + DEFAULT_OTLP_LEVEL.set(options.opentelemetry).unwrap(); +} + /// Constructs a subscriber set to the option appropriate for the NEAR code. /// /// The subscriber enables logging, tracing and io tracing. @@ -378,9 +386,7 @@ pub async fn default_subscriber_with_opentelemetry( let subscriber = tracing_subscriber::registry(); - // Record the initial tracing level specified as a command-line flag. Use this recorded value to - // reset opentelemetry filter when the LogConfig file gets deleted. 
- DEFAULT_OTLP_LEVEL.set(options.opentelemetry).unwrap(); + set_default_otlp_level(options); let (subscriber, handle) = add_non_blocking_log_layer( env_filter, @@ -440,6 +446,20 @@ pub enum ReloadError { Parse(#[source] BuildEnvFilterError), } +pub fn reload_log_config(config: Option<&log_config::LogConfig>) -> Result<(), Vec> { + if let Some(config) = config { + reload( + config.rust_log.as_ref().map(|s| s.as_str()), + config.verbose_module.as_ref().map(|s| s.as_str()), + config.opentelemetry_level, + ) + } else { + // When the LOG_CONFIG_FILENAME is not available, reset to the tracing and logging config + // when the node was started. + reload(None, None, None) + } +} + /// Constructs new filters for the logging and opentelemetry layers. /// /// Attempts to reload all available errors. Returns errors for each layer that failed to reload. @@ -457,8 +477,10 @@ pub fn reload( let log_reload_result = LOG_LAYER_RELOAD_HANDLE.get().map_or( Err(ReloadError::NoLogReloadHandle), |reload_handle| { - let mut builder = - rust_log.map_or_else(EnvFilterBuilder::from_env, EnvFilterBuilder::new); + let mut builder = rust_log.map_or_else( + || EnvFilterBuilder::from_env(), + |rust_log| EnvFilterBuilder::new(rust_log), + ); if let Some(module) = verbose_module { builder = builder.verbose(Some(module)); } diff --git a/core/o11y/src/log_config.rs b/core/o11y/src/log_config.rs new file mode 100644 index 00000000000..46845bffe63 --- /dev/null +++ b/core/o11y/src/log_config.rs @@ -0,0 +1,13 @@ +use serde::{Deserialize, Serialize}; + +/// Configures logging. +#[derive(Default, Serialize, Deserialize, Clone, Debug)] +pub struct LogConfig { + /// Comma-separated list of EnvFitler directives. + pub rust_log: Option, + /// Some("") enables global debug logging. + /// Some("module") enables debug logging for "module". + pub verbose_module: Option, + /// Verbosity level of collected traces. 
+ pub opentelemetry_level: Option, +} diff --git a/core/primitives-core/Cargo.toml b/core/primitives-core/Cargo.toml index 617391b1602..52e6e197bf1 100644 --- a/core/primitives-core/Cargo.toml +++ b/core/primitives-core/Cargo.toml @@ -35,4 +35,5 @@ insta.workspace = true [features] default = [] protocol_feature_ed25519_verify = [] -nightly = ["protocol_feature_ed25519_verify"] +protocol_feature_nep366_delegate_action = [] +nightly = ["protocol_feature_ed25519_verify", "protocol_feature_nep366_delegate_action"] diff --git a/core/primitives-core/src/config.rs b/core/primitives-core/src/config.rs index 7e230769592..0190b89b24a 100644 --- a/core/primitives-core/src/config.rs +++ b/core/primitives-core/src/config.rs @@ -473,6 +473,8 @@ pub enum ActionCosts { new_action_receipt = 12, new_data_receipt_base = 13, new_data_receipt_byte = 14, + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + delegate = 15, } impl ExtCosts { diff --git a/core/primitives-core/src/parameter.rs b/core/primitives-core/src/parameter.rs index 547dbe4900b..0bb7117c10b 100644 --- a/core/primitives-core/src/parameter.rs +++ b/core/primitives-core/src/parameter.rs @@ -18,10 +18,8 @@ use crate::config::ActionCosts; #[strum(serialize_all = "snake_case")] pub enum Parameter { // Gas economics config - BurntGasRewardNumerator, - BurntGasRewardDenominator, - PessimisticGasPriceInflationNumerator, - PessimisticGasPriceInflationDenominator, + BurntGasReward, + PessimisticGasPriceInflation, // Account creation config MinAllowedTopLevelAccountLength, @@ -81,6 +79,9 @@ pub enum Parameter { ActionDeleteKeySendSir, ActionDeleteKeySendNotSir, ActionDeleteKeyExecution, + ActionDelegateSendSir, + ActionDelegateSendNotSir, + ActionDelegateExecution, // Smart contract dynamic gas costs WasmRegularOpCost, @@ -207,6 +208,7 @@ pub enum FeeParameter { ActionAddFunctionCallKey, ActionAddFunctionCallKeyPerByte, ActionDeleteKey, + ActionDelegate, } impl Parameter { @@ -252,6 +254,8 @@ impl From for FeeParameter 
{ match other { ActionCosts::create_account => Self::ActionCreateAccount, ActionCosts::delete_account => Self::ActionDeleteAccount, + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + ActionCosts::delegate => Self::ActionDelegate, ActionCosts::deploy_contract_base => Self::ActionDeployContract, ActionCosts::deploy_contract_byte => Self::ActionDeployContractPerByte, ActionCosts::function_call_base => Self::ActionFunctionCall, diff --git a/core/primitives-core/src/runtime/fees.rs b/core/primitives-core/src/runtime/fees.rs index 29acd6b3182..ee80be01e4e 100644 --- a/core/primitives-core/src/runtime/fees.rs +++ b/core/primitives-core/src/runtime/fees.rs @@ -7,7 +7,7 @@ use enum_map::EnumMap; use serde::{Deserialize, Serialize}; use crate::config::ActionCosts; -use crate::num_rational::Rational; +use crate::num_rational::Rational32; use crate::types::{Balance, Gas}; /// Costs associated with an object that can only be sent over the network (and executed @@ -54,10 +54,10 @@ pub struct RuntimeFeesConfig { pub storage_usage_config: StorageUsageConfig, /// Fraction of the burnt gas to reward to the contract account for execution. - pub burnt_gas_reward: Rational, + pub burnt_gas_reward: Rational32, /// Pessimistic gas price inflation ratio. - pub pessimistic_gas_price_inflation_ratio: Rational, + pub pessimistic_gas_price_inflation_ratio: Rational32, } /// Describes the cost of creating a data receipt, `DataReceipt`. @@ -108,6 +108,9 @@ pub struct ActionCreationConfig { /// Base cost of deleting an account. pub delete_account_cost: Fee, + + /// Base cost of a delegate action + pub delegate_cost: Fee, } /// Describes the cost of creating an access key. 
@@ -142,8 +145,8 @@ impl RuntimeFeesConfig { pub fn test() -> Self { Self { storage_usage_config: StorageUsageConfig::test(), - burnt_gas_reward: Rational::new(3, 10), - pessimistic_gas_price_inflation_ratio: Rational::new(103, 100), + burnt_gas_reward: Rational32::new(3, 10), + pessimistic_gas_price_inflation_ratio: Rational32::new(103, 100), action_fees: enum_map::enum_map! { ActionCosts::create_account => Fee { send_sir: 99607375000, @@ -220,6 +223,12 @@ impl RuntimeFeesConfig { send_not_sir: 59357464, execution: 59357464, }, + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + ActionCosts::delegate => Fee { + send_sir: 2319861500000, + send_not_sir: 2319861500000, + execution: 2319861500000, + }, }, } } @@ -230,8 +239,8 @@ impl RuntimeFeesConfig { _ => Fee { send_sir: 0, send_not_sir: 0, execution: 0 } }, storage_usage_config: StorageUsageConfig::free(), - burnt_gas_reward: Rational::from_integer(0), - pessimistic_gas_price_inflation_ratio: Rational::from_integer(0), + burnt_gas_reward: Rational32::from_integer(0), + pessimistic_gas_price_inflation_ratio: Rational32::from_integer(0), } } diff --git a/core/primitives/Cargo.toml b/core/primitives/Cargo.toml index ce1227eda89..484a2e21c69 100644 --- a/core/primitives/Cargo.toml +++ b/core/primitives/Cargo.toml @@ -52,12 +52,16 @@ protocol_feature_ed25519_verify = [ "near-primitives-core/protocol_feature_ed25519_verify" ] protocol_feature_zero_balance_account = [] +protocol_feature_nep366_delegate_action = [ + "near-primitives-core/protocol_feature_nep366_delegate_action" +] nightly = [ "nightly_protocol", "protocol_feature_fix_staking_threshold", "protocol_feature_fix_contract_loading_cost", "protocol_feature_reject_blocks_with_outdated_protocol_version", "protocol_feature_ed25519_verify", + "protocol_feature_nep366_delegate_action", "protocol_feature_zero_balance_account" ] diff --git a/core/primitives/res/runtime_configs/parameters.yaml b/core/primitives/res/runtime_configs/parameters.yaml index 
f6224056621..139c8dbeb42 100644 --- a/core/primitives/res/runtime_configs/parameters.yaml +++ b/core/primitives/res/runtime_configs/parameters.yaml @@ -3,10 +3,14 @@ # The diffs are stored in files named `NN.txt`, where `NN` is the version. # Gas economics config -burnt_gas_reward_numerator: 3 -burnt_gas_reward_denominator: 10 -pessimistic_gas_price_inflation_numerator: 103 -pessimistic_gas_price_inflation_denominator: 100 +burnt_gas_reward: { + numerator: 3, + denominator: 10, +} +pessimistic_gas_price_inflation: { + numerator: 103, + denominator: 100, +} # Account creation config min_allowed_top_level_account_length: 32 @@ -66,6 +70,10 @@ action_add_function_call_key_per_byte_execution: 1_925_331 action_delete_key_send_sir: 94_946_625_000 action_delete_key_send_not_sir: 94_946_625_000 action_delete_key_execution: 94_946_625_000 +# TODO: place-holder values, needs estimation, tracked in #8114 +action_delegate_send_sir: 2_319_861_500_000 +action_delegate_send_not_sir: 2_319_861_500_000 +action_delegate_execution: 2_319_861_500_000 # Smart contract dynamic gas costs wasm_regular_op_cost: 3_856_371 diff --git a/core/primitives/res/runtime_configs/parameters_testnet.yaml b/core/primitives/res/runtime_configs/parameters_testnet.yaml index 84c72591272..489af03f731 100644 --- a/core/primitives/res/runtime_configs/parameters_testnet.yaml +++ b/core/primitives/res/runtime_configs/parameters_testnet.yaml @@ -1,8 +1,12 @@ # Gas economics config -burnt_gas_reward_numerator: 3 -burnt_gas_reward_denominator: 10 -pessimistic_gas_price_inflation_numerator: 103 -pessimistic_gas_price_inflation_denominator: 100 +burnt_gas_reward: { + numerator: 3, + denominator: 10, +} +pessimistic_gas_price_inflation: { + numerator: 103, + denominator: 100, +} # Account creation config min_allowed_top_level_account_length: 0 @@ -62,6 +66,10 @@ action_add_function_call_key_per_byte_execution: 1_925_331 action_delete_key_send_sir: 94_946_625_000 action_delete_key_send_not_sir: 94_946_625_000 
action_delete_key_execution: 94_946_625_000 +# TODO: place-holder values, needs estimation, tracked in #8114 +action_delegate_send_sir: 2_319_861_500_000 +action_delegate_send_not_sir: 2_319_861_500_000 +action_delegate_execution: 2_319_861_500_000 # Smart contract dynamic gas costs wasm_regular_op_cost: 3_856_371 diff --git a/core/primitives/src/errors.rs b/core/primitives/src/errors.rs index 8354409b1f7..d5a98b8cb3f 100644 --- a/core/primitives/src/errors.rs +++ b/core/primitives/src/errors.rs @@ -198,6 +198,10 @@ pub enum ActionsValidationError { UnsuitableStakingKey { public_key: PublicKey }, /// The attached amount of gas in a FunctionCall action has to be a positive number. FunctionCallZeroAttachedGas, + /// DelegateAction actions contain another DelegateAction. This is not allowed. + DelegateActionCantContainNestedOne, + /// There should be the only one DelegateAction + DelegateActionMustBeOnlyOne, } /// Describes the error for validating a receipt. @@ -314,6 +318,14 @@ impl Display for ActionsValidationError { f, "The attached amount of gas in a FunctionCall action has to be a positive number", ), + ActionsValidationError::DelegateActionCantContainNestedOne => write!( + f, + "DelegateAction must not contain another DelegateAction" + ), + ActionsValidationError::DelegateActionMustBeOnlyOne => write!( + f, + "The actions can contain the ony one DelegateAction" + ) } } } @@ -397,6 +409,18 @@ pub enum ActionErrorKind { OnlyImplicitAccountCreationAllowed { account_id: AccountId }, /// Delete account whose state is large is temporarily banned. DeleteAccountWithLargeState { account_id: AccountId }, + /// Signature does not match the provided actions and given signer public key. + DelegateActionInvalidSignature, + /// Receiver of the transaction doesn't match Sender of the delegate action + DelegateActionSenderDoesNotMatchTxReceiver { sender_id: AccountId, receiver_id: AccountId }, + /// Delegate action has expired. 
`max_block_height` is less than actual block height. + DelegateActionExpired, + /// The given public key doesn't exist for Sender account + DelegateActionAccessKeyError(InvalidAccessKeyError), + /// DelegateAction nonce must be greater sender[public_key].nonce + DelegateActionInvalidNonce { delegate_nonce: Nonce, ak_nonce: Nonce }, + /// DelegateAction nonce is larger than the upper bound given by the block height + DelegateActionNonceTooLarge { delegate_nonce: Nonce, upper_bound: Nonce }, } impl From for ActionError { @@ -707,6 +731,12 @@ impl Display for ActionErrorKind { ActionErrorKind::InsufficientStake { account_id, stake, minimum_stake } => write!(f, "Account {} tries to stake {} but minimum required stake is {}", account_id, stake, minimum_stake), ActionErrorKind::OnlyImplicitAccountCreationAllowed { account_id } => write!(f, "CreateAccount action is called on hex-characters account of length 64 {}", account_id), ActionErrorKind::DeleteAccountWithLargeState { account_id } => write!(f, "The state of account {} is too large and therefore cannot be deleted", account_id), + ActionErrorKind::DelegateActionInvalidSignature => write!(f, "DelegateAction is not signed with the given public key"), + ActionErrorKind::DelegateActionSenderDoesNotMatchTxReceiver { sender_id, receiver_id } => write!(f, "Transaction receiver {} doesn't match DelegateAction sender {}", receiver_id, sender_id), + ActionErrorKind::DelegateActionExpired => write!(f, "DelegateAction has expired"), + ActionErrorKind::DelegateActionAccessKeyError(access_key_error) => Display::fmt(&access_key_error, f), + ActionErrorKind::DelegateActionInvalidNonce { delegate_nonce, ak_nonce } => write!(f, "DelegateAction nonce {} must be larger than nonce of the used access key {}", delegate_nonce, ak_nonce), + ActionErrorKind::DelegateActionNonceTooLarge { delegate_nonce, upper_bound } => write!(f, "DelegateAction nonce {} must be smaller than the access key nonce upper bound {}", delegate_nonce, upper_bound), } 
} } diff --git a/core/primitives/src/runtime/parameter_table.rs b/core/primitives/src/runtime/parameter_table.rs index a61ba17dd1d..899093e29a0 100644 --- a/core/primitives/src/runtime/parameter_table.rs +++ b/core/primitives/src/runtime/parameter_table.rs @@ -1,57 +1,95 @@ use super::config::{AccountCreationConfig, RuntimeConfig}; +use near_primitives_core::account::id::ParseAccountError; use near_primitives_core::config::{ExtCostsConfig, VMConfig}; use near_primitives_core::parameter::{FeeParameter, Parameter}; use near_primitives_core::runtime::fees::{RuntimeFeesConfig, StorageUsageConfig}; -use num_rational::Rational; -use serde::de::DeserializeOwned; -use serde_json::json; -use std::any::Any; +use near_primitives_core::types::AccountId; +use num_rational::Rational32; use std::collections::BTreeMap; +/// Represents values supported by parameter config. +#[derive(serde::Serialize, serde::Deserialize, Clone, Debug, PartialEq)] +#[serde(untagged)] +pub(crate) enum ParameterValue { + U64(u64), + Rational { numerator: i32, denominator: i32 }, + String(String), +} + +impl ParameterValue { + fn as_u64(&self) -> Option { + match self { + ParameterValue::U64(v) => Some(*v), + _ => None, + } + } + + fn as_rational(&self) -> Option { + match self { + ParameterValue::Rational { numerator, denominator } => { + Some(Rational32::new(*numerator, *denominator)) + } + _ => None, + } + } + + fn as_u128(&self) -> Option { + match self { + ParameterValue::U64(v) => Some(u128::from(*v)), + // TODO(akashin): Refactor this to use `TryFrom` and properly propagate an error. + ParameterValue::String(s) => s.parse().ok(), + _ => None, + } + } + + fn as_str(&self) -> Option<&str> { + match self { + ParameterValue::String(v) => Some(v), + _ => None, + } + } +} + pub(crate) struct ParameterTable { - parameters: BTreeMap, + parameters: BTreeMap, } /// Changes made to parameters between versions. 
pub(crate) struct ParameterTableDiff { - parameters: BTreeMap, + parameters: BTreeMap, Option)>, } -/// Error returned by ParameterTable::from_txt() that parses a runtime -/// configuration TXT file. +/// Error returned by ParameterTable::from_str() that parses a runtime configuration YAML file. #[derive(thiserror::Error, Debug)] pub(crate) enum InvalidConfigError { #[error("could not parse `{1}` as a parameter")] UnknownParameter(#[source] strum::ParseError, String), #[error("could not parse `{1}` as a value")] - ValueParseError(#[source] serde_json::Error, String), - #[error("intermediate JSON created by parser does not match `RuntimeConfig`")] - WrongStructure(#[source] serde_json::Error), + ValueParseError(#[source] serde_yaml::Error, String), #[error("could not parse YAML that defines the structure of the config")] InvalidYaml(#[source] serde_yaml::Error), - #[error("config diff expected to contain old value `{1}` for parameter `{0}`")] - OldValueExists(Parameter, String), + #[error("config diff expected to contain old value `{1:?}` for parameter `{0}`")] + OldValueExists(Parameter, ParameterValue), #[error( - "unexpected old value `{1}` for parameter `{0}` in config diff, previous version does not have such a value" + "unexpected old value `{1:?}` for parameter `{0}` in config diff, previous version does not have such a value" )] - NoOldValueExists(Parameter, String), - #[error("expected old value `{1}` but found `{2}` for parameter `{0}` in config diff")] - WrongOldValue(Parameter, String, String), + NoOldValueExists(Parameter, ParameterValue), + #[error("expected old value `{1:?}` but found `{2:?}` for parameter `{0}` in config diff")] + WrongOldValue(Parameter, ParameterValue, ParameterValue), #[error("expected a value for `{0}` but found none")] MissingParameter(Parameter), - #[error("expected a value of type `{2}` for `{1}` but could not parse it from `{3}`")] - WrongValueType(#[source] serde_json::Error, Parameter, &'static str, String), + 
#[error("expected a value of type `{1}` for `{0}` but could not parse it from `{2:?}`")] + WrongValueType(Parameter, &'static str, ParameterValue), + #[error("expected an integer of type `{2}` for `{1}` but could not parse it from `{3}`")] + WrongIntegerType(#[source] std::num::TryFromIntError, Parameter, &'static str, u64), + #[error("expected an account id for `{1}` but could not parse it from `{2}`")] + WrongAccountId(#[source] ParseAccountError, Parameter, String), } impl std::str::FromStr for ParameterTable { type Err = InvalidConfigError; fn from_str(arg: &str) -> Result { - // TODO(#8320): Remove this after migration to `serde_yaml` 0.9 that supports empty strings. - if arg.is_empty() { - return Ok(ParameterTable { parameters: BTreeMap::new() }); - } - - let yaml_map: BTreeMap = + let yaml_map: BTreeMap = serde_yaml::from_str(arg).map_err(|err| InvalidConfigError::InvalidYaml(err))?; let parameters = yaml_map @@ -60,7 +98,7 @@ impl std::str::FromStr for ParameterTable { let typed_key: Parameter = key .parse() .map_err(|err| InvalidConfigError::UnknownParameter(err, key.to_owned()))?; - Ok((typed_key, parse_parameter_txt_value(value)?)) + Ok((typed_key, parse_parameter_value(value)?)) }) .collect::, _>>()?; @@ -75,38 +113,33 @@ impl TryFrom<&ParameterTable> for RuntimeConfig { Ok(RuntimeConfig { fees: RuntimeFeesConfig { action_fees: enum_map::enum_map! { - action_cost => params.fee(action_cost) + action_cost => params.get_fee(action_cost)? 
}, - burnt_gas_reward: Rational::new( - params.get_parsed(Parameter::BurntGasRewardNumerator)?, - params.get_parsed(Parameter::BurntGasRewardDenominator)?, - ), - pessimistic_gas_price_inflation_ratio: Rational::new( - params.get_parsed(Parameter::PessimisticGasPriceInflationNumerator)?, - params.get_parsed(Parameter::PessimisticGasPriceInflationDenominator)?, - ), + burnt_gas_reward: params.get_rational(Parameter::BurntGasReward)?, + pessimistic_gas_price_inflation_ratio: params + .get_rational(Parameter::PessimisticGasPriceInflation)?, storage_usage_config: StorageUsageConfig { storage_amount_per_byte: params.get_u128(Parameter::StorageAmountPerByte)?, - num_bytes_account: params.get_parsed(Parameter::StorageNumBytesAccount)?, + num_bytes_account: params.get_number(Parameter::StorageNumBytesAccount)?, num_extra_bytes_record: params - .get_parsed(Parameter::StorageNumExtraBytesRecord)?, + .get_number(Parameter::StorageNumExtraBytesRecord)?, }, }, wasm_config: VMConfig { ext_costs: ExtCostsConfig { costs: enum_map::enum_map! { - cost => params.get_parsed(cost.param())? + cost => params.get_number(cost.param())? 
}, }, - grow_mem_cost: params.get_parsed(Parameter::WasmGrowMemCost)?, - regular_op_cost: params.get_parsed(Parameter::WasmRegularOpCost)?, - limit_config: serde_json::from_value(params.json_map(Parameter::vm_limits(), "")) - .map_err(InvalidConfigError::WrongStructure)?, + grow_mem_cost: params.get_number(Parameter::WasmGrowMemCost)?, + regular_op_cost: params.get_number(Parameter::WasmRegularOpCost)?, + limit_config: serde_yaml::from_value(params.yaml_map(Parameter::vm_limits(), "")) + .map_err(InvalidConfigError::InvalidYaml)?, }, account_creation_config: AccountCreationConfig { min_allowed_top_level_account_length: params - .get_parsed(Parameter::MinAllowedTopLevelAccountLength)?, - registrar_account_id: params.get_parsed(Parameter::RegistrarAccountId)?, + .get_number(Parameter::MinAllowedTopLevelAccountLength)?, + registrar_account_id: params.get_account_id(Parameter::RegistrarAccountId)?, }, }) } @@ -118,117 +151,143 @@ impl ParameterTable { diff: ParameterTableDiff, ) -> Result<(), InvalidConfigError> { for (key, (before, after)) in diff.parameters { - if before.is_null() { - match self.parameters.get(&key) { - Some(serde_json::Value::Null) | None => { - self.parameters.insert(key, after); - } - Some(old_value) => { - return Err(InvalidConfigError::OldValueExists(key, old_value.to_string())) - } + let old_value = self.parameters.get(&key); + if old_value != before.as_ref() { + if old_value.is_none() { + return Err(InvalidConfigError::NoOldValueExists(key, before.unwrap().clone())); } - } else { - match self.parameters.get(&key) { - Some(serde_json::Value::Null) | None => { - return Err(InvalidConfigError::NoOldValueExists(key, before.to_string())) - } - Some(old_value) => { - if *old_value != before { - return Err(InvalidConfigError::WrongOldValue( - key, - old_value.to_string(), - before.to_string(), - )); - } else { - self.parameters.insert(key, after); - } - } + if before.is_none() { + return Err(InvalidConfigError::OldValueExists( + key, + 
old_value.unwrap().clone(), + )); } + return Err(InvalidConfigError::WrongOldValue( + key, + old_value.unwrap().clone(), + before.unwrap().clone(), + )); + } + + if let Some(new_value) = after { + self.parameters.insert(key, new_value); + } else { + self.parameters.remove(&key); } } Ok(()) } - fn json_map( + fn yaml_map( &self, params: impl Iterator, remove_prefix: &'static str, - ) -> serde_json::Value { - let mut json = serde_json::Map::new(); + ) -> serde_yaml::Value { + let mut yaml = serde_yaml::Mapping::new(); for param in params { let mut key: &'static str = param.into(); key = key.strip_prefix(remove_prefix).unwrap_or(key); if let Some(value) = self.get(*param) { - json.insert(key.to_owned(), value.clone()); + yaml.insert( + key.into(), + // All parameter values can be serialized as YAML, so we don't ever expect this + // to fail. + serde_yaml::to_value(value.clone()) + .expect("failed to convert parameter value to YAML"), + ); } } - json.into() + yaml.into() } - fn get(&self, key: Parameter) -> Option<&serde_json::Value> { + fn get(&self, key: Parameter) -> Option<&ParameterValue> { self.parameters.get(&key) } /// Access action fee by `ActionCosts`. - fn fee( + fn get_fee( &self, cost: near_primitives_core::config::ActionCosts, - ) -> near_primitives_core::runtime::fees::Fee { - let json = self.fee_json(FeeParameter::from(cost)); - serde_json::from_value::(json) - .expect("just constructed a Fee JSON") + ) -> Result { + let key = FeeParameter::from(cost); + Ok(near_primitives_core::runtime::fees::Fee { + send_sir: self.get_number(format!("{key}_send_sir").parse().unwrap())?, + send_not_sir: self.get_number(format!("{key}_send_not_sir").parse().unwrap())?, + execution: self.get_number(format!("{key}_execution").parse().unwrap())?, + }) } - /// Read and parse a parameter from the `ParameterTable`. - fn get_parsed( - &self, - key: Parameter, - ) -> Result { + /// Read and parse a number parameter from the `ParameterTable`. 
+ fn get_number(&self, key: Parameter) -> Result + where + T: TryFrom, + T::Error: Into, + { let value = self.parameters.get(&key).ok_or(InvalidConfigError::MissingParameter(key))?; - serde_json::from_value(value.clone()).map_err(|parse_err| { - InvalidConfigError::WrongValueType( - parse_err, + let value_u64 = value.as_u64().ok_or(InvalidConfigError::WrongValueType( + key, + std::any::type_name::(), + value.clone(), + ))?; + T::try_from(value_u64).map_err(|err| { + InvalidConfigError::WrongIntegerType( + err.into(), key, std::any::type_name::(), - value.to_string(), + value_u64, ) }) } - /// Read and parse a parameter from the `ParameterTable`. + /// Read and parse a u128 parameter from the `ParameterTable`. fn get_u128(&self, key: Parameter) -> Result { let value = self.parameters.get(&key).ok_or(InvalidConfigError::MissingParameter(key))?; + value.as_u128().ok_or(InvalidConfigError::WrongValueType( + key, + std::any::type_name::(), + value.clone(), + )) + } - near_primitives_core::serialize::dec_format::deserialize(value).map_err(|parse_err| { - InvalidConfigError::WrongValueType( - parse_err, - key, - std::any::type_name::(), - value.to_string(), + /// Read and parse a string parameter from the `ParameterTable`. 
+ fn get_account_id(&self, key: Parameter) -> Result { + let value = self.parameters.get(&key).ok_or(InvalidConfigError::MissingParameter(key))?; + let value_string = value.as_str().ok_or(InvalidConfigError::WrongValueType( + key, + std::any::type_name::(), + value.clone(), + ))?; + value_string.parse().map_err(|err| { + InvalidConfigError::WrongAccountId( + err, + Parameter::RegistrarAccountId, + value_string.to_string(), ) }) } - fn fee_json(&self, key: FeeParameter) -> serde_json::Value { - json!( { - "send_sir": self.get(format!("{key}_send_sir").parse().unwrap()), - "send_not_sir": self.get(format!("{key}_send_not_sir").parse().unwrap()), - "execution": self.get(format!("{key}_execution").parse().unwrap()), - }) + /// Read and parse a rational parameter from the `ParameterTable`. + fn get_rational(&self, key: Parameter) -> Result { + let value = self.parameters.get(&key).ok_or(InvalidConfigError::MissingParameter(key))?; + value.as_rational().ok_or(InvalidConfigError::WrongValueType( + key, + std::any::type_name::(), + value.clone(), + )) } } -/// Represents YAML values supported by parameter diff config. +/// Represents values supported by parameter diff config. #[derive(serde::Deserialize, Clone, Debug)] -struct ParameterDiffValue { - old: Option, - new: Option, +struct ParameterDiffConfigValue { + old: Option, + new: Option, } impl std::str::FromStr for ParameterTableDiff { type Err = InvalidConfigError; fn from_str(arg: &str) -> Result { - let yaml_map: BTreeMap = + let yaml_map: BTreeMap = serde_yaml::from_str(arg).map_err(|err| InvalidConfigError::InvalidYaml(err))?; let parameters = yaml_map @@ -238,17 +297,11 @@ impl std::str::FromStr for ParameterTableDiff { .parse() .map_err(|err| InvalidConfigError::UnknownParameter(err, key.to_owned()))?; - let old_value = if let Some(s) = &value.old { - parse_parameter_txt_value(s)? - } else { - serde_json::Value::Null - }; + let old_value = + if let Some(s) = &value.old { Some(parse_parameter_value(s)?) 
} else { None }; - let new_value = if let Some(s) = &value.new { - parse_parameter_txt_value(s)? - } else { - serde_json::Value::Null - }; + let new_value = + if let Some(s) = &value.new { Some(parse_parameter_value(s)?) } else { None }; Ok((typed_key, (old_value, new_value))) }) @@ -257,13 +310,40 @@ impl std::str::FromStr for ParameterTableDiff { } } +/// Parses a value from YAML to a more restricted type of parameter values. +fn parse_parameter_value(value: &serde_yaml::Value) -> Result { + Ok(serde_yaml::from_value(canonicalize_yaml_value(value)?) + .map_err(|err| InvalidConfigError::InvalidYaml(err))?) +} + +/// Recursively canonicalizes values inside of the YAML structure. +fn canonicalize_yaml_value( + value: &serde_yaml::Value, +) -> Result { + Ok(match value { + serde_yaml::Value::String(s) => canonicalize_yaml_string(s)?, + serde_yaml::Value::Mapping(m) => serde_yaml::Value::Mapping( + m.iter() + .map(|(key, value)| { + let canonical_value = canonicalize_yaml_value(value)?; + Ok((key.clone(), canonical_value)) + }) + .collect::>()?, + ), + _ => value.clone(), + }) +} + /// Parses a value from the custom format for runtime parameter definitions. /// -/// A value can be a positive integer or a string, both written without quotes. +/// A value can be a positive integer or a string, with or without quotes. /// Integers can use underlines as separators (for readability). -fn parse_parameter_txt_value(value: &str) -> Result { +/// +/// The main purpose of this function is to add support for integers with underscore digit +/// separators which we use in the config but are not supported in YAML. 
+fn canonicalize_yaml_string(value: &str) -> Result { if value.is_empty() { - return Ok(serde_json::Value::Null); + return Ok(serde_yaml::Value::Null); } if value.bytes().all(|c| c.is_ascii_digit() || c == '_' as u8) { let mut raw_number = value.to_owned(); @@ -272,22 +352,22 @@ fn parse_parameter_txt_value(value: &str) -> Result { - assert_eq!(expected, "3200000000"); - assert_eq!(found, "3200000"); + assert_eq!(expected, ParameterValue::U64(3200000000)); + assert_eq!(found, ParameterValue::U64(3200000)); } ); } @@ -547,7 +642,7 @@ max_memory_pages: { new: 512 } &["min_allowed_top_level_account_length: { new: 1_600_000 }"] ), InvalidConfigError::OldValueExists(Parameter::MinAllowedTopLevelAccountLength, expected) => { - assert_eq!(expected, "3200000000"); + assert_eq!(expected, ParameterValue::U64(3200000000)); } ); } @@ -560,8 +655,36 @@ max_memory_pages: { new: 512 } &["wasm_regular_op_cost: { old: 3_200_000, new: 1_600_000 }"] ), InvalidConfigError::NoOldValueExists(Parameter::WasmRegularOpCost, found) => { - assert_eq!(found, "3200000"); + assert_eq!(found, ParameterValue::U64(3200000)); } ); } + + #[test] + fn test_parameter_table_yaml_map() { + let params: ParameterTable = BASE_0.parse().unwrap(); + let yaml = params.yaml_map( + [ + Parameter::RegistrarAccountId, + Parameter::MinAllowedTopLevelAccountLength, + Parameter::StorageAmountPerByte, + Parameter::StorageNumBytesAccount, + Parameter::StorageNumExtraBytesRecord, + Parameter::BurntGasReward, + ] + .iter(), + "", + ); + assert_eq!( + yaml, + serde_yaml::to_value( + params + .parameters + .iter() + .map(|(key, value)| (key.to_string(), value)) + .collect::>() + ) + .unwrap() + ); + } } diff --git a/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__0.json.snap b/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__0.json.snap index 27967578168..e2b56d1e207 100644 --- 
a/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__0.json.snap +++ b/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__0.json.snap @@ -1,6 +1,6 @@ --- source: core/primitives/src/runtime/config_store.rs -expression: store.get_config(*version) +expression: config_view --- { "storage_amount_per_byte": "100000000000000000000", diff --git a/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__42.json.snap b/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__42.json.snap index cdd3a59e144..c55199a66e3 100644 --- a/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__42.json.snap +++ b/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__42.json.snap @@ -1,6 +1,6 @@ --- source: core/primitives/src/runtime/config_store.rs -expression: store.get_config(*version) +expression: config_view --- { "storage_amount_per_byte": "10000000000000000000", diff --git a/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__48.json.snap b/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__48.json.snap index 9cfea46f7ce..f5f5c3c9f9c 100644 --- a/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__48.json.snap +++ b/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__48.json.snap @@ -1,6 +1,6 @@ --- source: core/primitives/src/runtime/config_store.rs -expression: store.get_config(*version) +expression: config_view --- { "storage_amount_per_byte": "10000000000000000000", diff --git a/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__49.json.snap b/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__49.json.snap index c3c4c0abf98..cf2cc240f9f 100644 --- 
a/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__49.json.snap +++ b/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__49.json.snap @@ -1,6 +1,6 @@ --- source: core/primitives/src/runtime/config_store.rs -expression: store.get_config(*version) +expression: config_view --- { "storage_amount_per_byte": "10000000000000000000", diff --git a/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__50.json.snap b/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__50.json.snap index 07bb9527bde..2ec6212599c 100644 --- a/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__50.json.snap +++ b/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__50.json.snap @@ -1,6 +1,6 @@ --- source: core/primitives/src/runtime/config_store.rs -expression: store.get_config(*version) +expression: config_view --- { "storage_amount_per_byte": "10000000000000000000", diff --git a/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__52.json.snap b/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__52.json.snap index 080e25788d1..7db6d711dc7 100644 --- a/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__52.json.snap +++ b/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__52.json.snap @@ -1,6 +1,6 @@ --- source: core/primitives/src/runtime/config_store.rs -expression: store.get_config(*version) +expression: config_view --- { "storage_amount_per_byte": "10000000000000000000", diff --git a/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__53.json.snap b/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__53.json.snap index 541b8e67187..a499aa7e8bc 100644 --- 
a/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__53.json.snap +++ b/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__53.json.snap @@ -1,6 +1,6 @@ --- source: core/primitives/src/runtime/config_store.rs -expression: store.get_config(*version) +expression: config_view --- { "storage_amount_per_byte": "10000000000000000000", diff --git a/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__57.json.snap b/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__57.json.snap index 529a8593a87..a96cd4b18af 100644 --- a/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__57.json.snap +++ b/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__57.json.snap @@ -1,6 +1,6 @@ --- source: core/primitives/src/runtime/config_store.rs -expression: store.get_config(*version) +expression: config_view --- { "storage_amount_per_byte": "10000000000000000000", diff --git a/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__testnet_0.json.snap b/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__testnet_0.json.snap index 27967578168..e2b56d1e207 100644 --- a/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__testnet_0.json.snap +++ b/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__testnet_0.json.snap @@ -1,6 +1,6 @@ --- source: core/primitives/src/runtime/config_store.rs -expression: store.get_config(*version) +expression: config_view --- { "storage_amount_per_byte": "100000000000000000000", diff --git a/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__testnet_42.json.snap b/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__testnet_42.json.snap index cdd3a59e144..c55199a66e3 100644 --- 
a/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__testnet_42.json.snap +++ b/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__testnet_42.json.snap @@ -1,6 +1,6 @@ --- source: core/primitives/src/runtime/config_store.rs -expression: store.get_config(*version) +expression: config_view --- { "storage_amount_per_byte": "10000000000000000000", diff --git a/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__testnet_48.json.snap b/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__testnet_48.json.snap index 9cfea46f7ce..f5f5c3c9f9c 100644 --- a/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__testnet_48.json.snap +++ b/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__testnet_48.json.snap @@ -1,6 +1,6 @@ --- source: core/primitives/src/runtime/config_store.rs -expression: store.get_config(*version) +expression: config_view --- { "storage_amount_per_byte": "10000000000000000000", diff --git a/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__testnet_49.json.snap b/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__testnet_49.json.snap index c3c4c0abf98..cf2cc240f9f 100644 --- a/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__testnet_49.json.snap +++ b/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__testnet_49.json.snap @@ -1,6 +1,6 @@ --- source: core/primitives/src/runtime/config_store.rs -expression: store.get_config(*version) +expression: config_view --- { "storage_amount_per_byte": "10000000000000000000", diff --git a/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__testnet_50.json.snap 
b/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__testnet_50.json.snap index 07bb9527bde..2ec6212599c 100644 --- a/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__testnet_50.json.snap +++ b/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__testnet_50.json.snap @@ -1,6 +1,6 @@ --- source: core/primitives/src/runtime/config_store.rs -expression: store.get_config(*version) +expression: config_view --- { "storage_amount_per_byte": "10000000000000000000", diff --git a/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__testnet_52.json.snap b/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__testnet_52.json.snap index 080e25788d1..7db6d711dc7 100644 --- a/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__testnet_52.json.snap +++ b/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__testnet_52.json.snap @@ -1,6 +1,6 @@ --- source: core/primitives/src/runtime/config_store.rs -expression: store.get_config(*version) +expression: config_view --- { "storage_amount_per_byte": "10000000000000000000", diff --git a/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__testnet_53.json.snap b/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__testnet_53.json.snap index 541b8e67187..a499aa7e8bc 100644 --- a/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__testnet_53.json.snap +++ b/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__testnet_53.json.snap @@ -1,6 +1,6 @@ --- source: core/primitives/src/runtime/config_store.rs -expression: store.get_config(*version) +expression: config_view --- { "storage_amount_per_byte": "10000000000000000000", diff --git 
a/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__testnet_57.json.snap b/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__testnet_57.json.snap index 529a8593a87..a96cd4b18af 100644 --- a/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__testnet_57.json.snap +++ b/core/primitives/src/runtime/snapshots/near_primitives__runtime__config_store__tests__testnet_57.json.snap @@ -1,6 +1,6 @@ --- source: core/primitives/src/runtime/config_store.rs -expression: store.get_config(*version) +expression: config_view --- { "storage_amount_per_byte": "10000000000000000000", diff --git a/core/primitives/src/transaction.rs b/core/primitives/src/transaction.rs index 40f32a32d4d..fa6e6112568 100644 --- a/core/primitives/src/transaction.rs +++ b/core/primitives/src/transaction.rs @@ -1,8 +1,10 @@ use std::borrow::Borrow; use std::fmt; use std::hash::{Hash, Hasher}; +use std::io::{Error, ErrorKind}; use borsh::{BorshDeserialize, BorshSerialize}; +use near_primitives_core::types::BlockHeight; use serde::{Deserialize, Serialize}; use near_crypto::{PublicKey, Signature}; @@ -18,6 +20,9 @@ use crate::types::{AccountId, Balance, Gas, Nonce}; pub type LogEntry = String; +// This is an index number of Action::Delegate in Action enumeration +const ACTION_DELEGATE_NUMBER: u8 = 8; + #[derive(BorshSerialize, BorshDeserialize, Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] pub struct Transaction { /// An account on which behalf transaction is signed @@ -68,6 +73,8 @@ pub enum Action { AddKey(AddKeyAction), DeleteKey(DeleteKeyAction), DeleteAccount(DeleteAccountAction), + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + Delegate(SignedDelegateAction), } impl Action { @@ -210,6 +217,95 @@ impl From for Action { } } +/// This is Action which mustn't contain DelegateAction. +// This struct is needed to avoid the recursion when Action/DelegateAction is deserialized. 
+#[derive(Serialize, BorshSerialize, Deserialize, PartialEq, Eq, Clone, Debug)] +pub struct NonDelegateAction(pub Action); + +impl From<NonDelegateAction> for Action { + fn from(action: NonDelegateAction) -> Self { + action.0 + } +} + +impl borsh::de::BorshDeserialize for NonDelegateAction { + fn deserialize(buf: &mut &[u8]) -> ::core::result::Result<Self, Error> { + if buf.is_empty() { + return Err(Error::new( + ErrorKind::InvalidInput, + "Failed to deserialize DelegateAction", + )); + } + match buf[0] { + ACTION_DELEGATE_NUMBER => Err(Error::new( + ErrorKind::InvalidInput, + "DelegateAction mustn't contain a nested one", + )), + _ => Ok(Self(borsh::BorshDeserialize::deserialize(buf)?)), + } + } +} + +/// This action allows to execute the inner actions on behalf of the defined sender. +#[derive(BorshSerialize, BorshDeserialize, Serialize, Deserialize, PartialEq, Eq, Clone, Debug)] +pub struct DelegateAction { + /// Signer of the delegated actions + pub sender_id: AccountId, + /// Receiver of the delegated actions. + pub receiver_id: AccountId, + /// List of actions to be executed. + pub actions: Vec<NonDelegateAction>, + /// Nonce to ensure that the same delegate action is not sent twice by a relayer and should match for given account's `public_key`. + /// After this action is processed it will increment. + pub nonce: Nonce, + /// The maximal height of the block in the blockchain below which the given DelegateAction is valid. + pub max_block_height: BlockHeight, + /// Public key that is used to sign this delegated action. 
+ pub public_key: PublicKey, +} + +#[cfg_attr(feature = "protocol_feature_nep366_delegate_action", derive(BorshDeserialize))] +#[derive(BorshSerialize, Serialize, Deserialize, PartialEq, Eq, Clone, Debug)] +pub struct SignedDelegateAction { + pub delegate_action: DelegateAction, + pub signature: Signature, +} + +#[cfg(not(feature = "protocol_feature_nep366_delegate_action"))] +impl borsh::de::BorshDeserialize for SignedDelegateAction { + fn deserialize(_buf: &mut &[u8]) -> ::core::result::Result { + return Err(Error::new(ErrorKind::InvalidInput, "Delegate action isn't supported")); + } +} + +impl SignedDelegateAction { + pub fn verify(&self) -> bool { + let delegate_action = &self.delegate_action; + let hash = delegate_action.get_hash(); + let public_key = &delegate_action.public_key; + + self.signature.verify(hash.as_ref(), public_key) + } +} + +#[cfg(feature = "protocol_feature_nep366_delegate_action")] +impl From for Action { + fn from(delegate_action: SignedDelegateAction) -> Self { + Self::Delegate(delegate_action) + } +} + +impl DelegateAction { + pub fn get_actions(&self) -> Vec { + self.actions.iter().map(|a| a.clone().into()).collect() + } + + pub fn get_hash(&self) -> CryptoHash { + let bytes = self.try_to_vec().expect("Failed to deserialize"); + hash(&bytes) + } +} + #[derive(BorshSerialize, BorshDeserialize, Serialize, Deserialize, Eq, Debug, Clone)] #[borsh_init(init)] pub struct SignedTransaction { @@ -446,13 +542,23 @@ pub struct ExecutionOutcomeWithProof { } #[cfg(test)] mod tests { + use super::*; + use crate::account::{AccessKeyPermission, FunctionCallPermission}; use borsh::BorshDeserialize; - use near_crypto::{InMemorySigner, KeyType, Signature, Signer}; - use crate::account::{AccessKeyPermission, FunctionCallPermission}; - - use super::*; + /// A serialized `Action::Delegate(SignedDelegateAction)` for testing. + /// + /// We want this to be parseable and accepted by protocol versions with meta + /// transactions enabled. 
But it should fail either in parsing or in + /// validation when this is included in a receipt for a block of an earlier + /// version. For now, it just fails to parse, as a test below checks. + const DELEGATE_ACTION_HEX: &str = concat!( + "0803000000616161030000006262620100000000010000000000000002000000000000", + "0000000000000000000000000000000000000000000000000000000000000000000000", + "0000000000000000000000000000000000000000000000000000000000000000000000", + "0000000000000000000000000000000000000000000000000000000000" + ); #[test] fn test_verify_transaction() { @@ -550,4 +656,92 @@ mod tests { outcome.to_hashes() ); } + + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + fn create_delegate_action(actions: Vec) -> Action { + Action::Delegate(SignedDelegateAction { + delegate_action: DelegateAction { + sender_id: "aaa".parse().unwrap(), + receiver_id: "bbb".parse().unwrap(), + actions: actions.iter().map(|a| NonDelegateAction(a.clone())).collect(), + nonce: 1, + max_block_height: 2, + public_key: PublicKey::empty(KeyType::ED25519), + }, + signature: Signature::empty(KeyType::ED25519), + }) + } + + #[test] + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + fn test_delegate_action_deserialization() { + // Expected an error. 
Buffer is empty + assert_eq!( + NonDelegateAction::try_from_slice(Vec::new().as_ref()).map_err(|e| e.kind()), + Err(ErrorKind::InvalidInput) + ); + + let delegate_action = create_delegate_action(Vec::::new()); + let serialized_non_delegate_action = + create_delegate_action(vec![delegate_action]).try_to_vec().expect("Expect ok"); + + // Expected Action::Delegate has not been moved in enum Action + assert_eq!(serialized_non_delegate_action[0], ACTION_DELEGATE_NUMBER); + + // Expected a nested DelegateAction error + assert_eq!( + NonDelegateAction::try_from_slice(&serialized_non_delegate_action) + .map_err(|e| e.kind()), + Err(ErrorKind::InvalidInput) + ); + + let delegate_action = + create_delegate_action(vec![Action::CreateAccount(CreateAccountAction {})]); + let serialized_delegate_action = delegate_action.try_to_vec().expect("Expect ok"); + + // Valid action + assert_eq!( + Action::try_from_slice(&serialized_delegate_action).expect("Expect ok"), + delegate_action + ); + } + + /// Check that we will not accept delegate actions with the feature + /// disabled. + /// + /// This test is to ensure that while working on meta transactions, we don't + /// accientally start accepting delegate actions in receipts. Otherwise, a + /// malicious validator could create receipts that include delegate actions + /// and other nodes will accept such a receipt. + /// + /// TODO: Before stabilizing "protocol_feature_nep366_delegate_action" we + /// have to replace this rest with a test that checks that we discard + /// delegate actions for earlier versions somewhere in validation. 
+ #[test] + #[cfg(not(feature = "protocol_feature_nep366_delegate_action"))] + fn test_delegate_action_deserialization() { + let serialized_delegate_action = hex::decode(DELEGATE_ACTION_HEX).expect("invalid hex"); + + // DelegateAction isn't supported + assert_eq!( + Action::try_from_slice(&serialized_delegate_action).map_err(|e| e.kind()), + Err(ErrorKind::InvalidInput) + ); + } + + /// Check that the hard-coded delegate action is valid. + #[test] + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + fn test_delegate_action_deserialization_hard_coded() { + let serialized_delegate_action = hex::decode(DELEGATE_ACTION_HEX).expect("invalid hex"); + // The hex data is the same as the one we create below. + let delegate_action = + create_delegate_action(vec![Action::CreateAccount(CreateAccountAction {})]); + + // Valid action + assert_eq!( + Action::try_from_slice(&serialized_delegate_action).expect("Expect ok"), + delegate_action + ); + } } diff --git a/core/primitives/src/version.rs b/core/primitives/src/version.rs index fa5d7a5feaf..7ecc76c4ce0 100644 --- a/core/primitives/src/version.rs +++ b/core/primitives/src/version.rs @@ -147,6 +147,8 @@ pub enum ProtocolFeature { Ed25519Verify, #[cfg(feature = "protocol_feature_reject_blocks_with_outdated_protocol_version")] RejectBlocksWithOutdatedProtocolVersions, + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + DelegateAction, #[cfg(feature = "protocol_feature_zero_balance_account")] /// NEP 448: https://github.com/near/NEPs/pull/448 ZeroBalanceAccount, @@ -159,12 +161,12 @@ pub const PEER_MIN_ALLOWED_PROTOCOL_VERSION: ProtocolVersion = STABLE_PROTOCOL_V /// Current protocol version used on the mainnet. /// Some features (e. g. FixStorageUsage) require that there is at least one epoch with exactly /// the corresponding version -const STABLE_PROTOCOL_VERSION: ProtocolVersion = 57; +const STABLE_PROTOCOL_VERSION: ProtocolVersion = 58; /// Largest protocol version supported by the current binary. 
pub const PROTOCOL_VERSION: ProtocolVersion = if cfg!(feature = "nightly_protocol") { // On nightly, pick big enough version to support all features. - 133 + 134 } else { // Enable all stable features. STABLE_PROTOCOL_VERSION @@ -237,8 +239,10 @@ impl ProtocolFeature { ProtocolFeature::Ed25519Verify => 131, #[cfg(feature = "protocol_feature_reject_blocks_with_outdated_protocol_version")] ProtocolFeature::RejectBlocksWithOutdatedProtocolVersions => 132, + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + ProtocolFeature::DelegateAction => 133, #[cfg(feature = "protocol_feature_zero_balance_account")] - ProtocolFeature::ZeroBalanceAccount => 133, + ProtocolFeature::ZeroBalanceAccount => 134, } } } diff --git a/core/primitives/src/views.rs b/core/primitives/src/views.rs index ccac1d4f5c7..def9c35993c 100644 --- a/core/primitives/src/views.rs +++ b/core/primitives/src/views.rs @@ -12,7 +12,7 @@ use borsh::{BorshDeserialize, BorshSerialize}; use chrono::DateTime; use near_primitives_core::config::{ActionCosts, ExtCosts, VMConfig}; use near_primitives_core::runtime::fees::Fee; -use num_rational::Rational; +use num_rational::Rational32; use serde::{Deserialize, Serialize}; use near_crypto::{PublicKey, Signature}; @@ -53,6 +53,9 @@ use crate::types::{ use crate::version::{ProtocolVersion, Version}; use validator_stake_view::ValidatorStakeView; +#[cfg(feature = "protocol_feature_nep366_delegate_action")] +use crate::transaction::{DelegateAction, SignedDelegateAction}; + /// A view of the account #[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone)] pub struct AccountView { @@ -1073,6 +1076,11 @@ pub enum ActionView { DeleteAccount { beneficiary_id: AccountId, }, + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + Delegate { + delegate_action: DelegateAction, + signature: Signature, + }, } impl From for ActionView { @@ -1101,6 +1109,11 @@ impl From for ActionView { Action::DeleteAccount(action) => { ActionView::DeleteAccount { beneficiary_id: 
action.beneficiary_id } } + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + Action::Delegate(action) => ActionView::Delegate { + delegate_action: action.delegate_action, + signature: action.signature, + }, } } } @@ -1130,6 +1143,13 @@ impl TryFrom for Action { ActionView::DeleteAccount { beneficiary_id } => { Action::DeleteAccount(DeleteAccountAction { beneficiary_id }) } + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + ActionView::Delegate { delegate_action, signature } => { + Action::Delegate(SignedDelegateAction { + delegate_action: delegate_action, + signature, + }) + } }) } } @@ -2046,10 +2066,10 @@ pub struct RuntimeFeesConfigView { pub storage_usage_config: StorageUsageConfigView, /// Fraction of the burnt gas to reward to the contract account for execution. - pub burnt_gas_reward: Rational, + pub burnt_gas_reward: Rational32, /// Pessimistic gas price inflation ratio. - pub pessimistic_gas_price_inflation_ratio: Rational, + pub pessimistic_gas_price_inflation_ratio: Rational32, } /// The structure describes configuration for creation of new accounts. @@ -2109,6 +2129,12 @@ pub struct ActionCreationConfigView { /// Base cost of deleting an account. pub delete_account_cost: Fee, + + /// Base cost for processing a delegate action. + /// + /// This is on top of the costs for the actions inside the delegate action. + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + pub delegate_cost: Fee, } /// Describes the cost of creating an access key. 
@@ -2174,6 +2200,8 @@ impl From for RuntimeConfigView { }, delete_key_cost: config.fees.fee(ActionCosts::delete_key).clone(), delete_account_cost: config.fees.fee(ActionCosts::delete_account).clone(), + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + delegate_cost: config.fees.fee(ActionCosts::delegate).clone(), }, storage_usage_config: StorageUsageConfigView { num_bytes_account: config.fees.storage_usage_config.num_bytes_account, @@ -2218,6 +2246,8 @@ impl From for RuntimeConfig { action_fees: enum_map::enum_map! { ActionCosts::create_account => config.transaction_costs.action_creation_config.create_account_cost.clone(), ActionCosts::delete_account => config.transaction_costs.action_creation_config.delete_account_cost.clone(), + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + ActionCosts::delegate => config.transaction_costs.action_creation_config.delegate_cost.clone(), ActionCosts::deploy_contract_base => config.transaction_costs.action_creation_config.deploy_contract_cost.clone(), ActionCosts::deploy_contract_byte => config.transaction_costs.action_creation_config.deploy_contract_cost_per_byte.clone(), ActionCosts::function_call_base => config.transaction_costs.action_creation_config.function_call_cost.clone(), diff --git a/core/store/Cargo.toml b/core/store/Cargo.toml index dd8f4c3f4f9..f65f0c862e2 100644 --- a/core/store/Cargo.toml +++ b/core/store/Cargo.toml @@ -56,7 +56,6 @@ no_cache = [] single_thread_rocksdb = [] # Deactivate RocksDB IO background threads test_features = [] protocol_feature_flat_state = [] -cold_store = [] nightly_protocol = [] nightly = [ diff --git a/core/store/src/cold_storage.rs b/core/store/src/cold_storage.rs index 82e3e68b748..4c4d144e3d0 100644 --- a/core/store/src/cold_storage.rs +++ b/core/store/src/cold_storage.rs @@ -1,7 +1,7 @@ use crate::columns::DBKeyType; -use crate::db::ColdDB; +use crate::db::{ColdDB, COLD_HEAD_KEY, HEAD_KEY}; use crate::trie::TrieRefcountChange; -use crate::{DBCol, 
DBTransaction, Database, Store, TrieChanges, HEAD_KEY}; +use crate::{DBCol, DBTransaction, Database, Store, TrieChanges}; use borsh::{BorshDeserialize, BorshSerialize}; use near_primitives::block::{Block, BlockHeader, Tip}; @@ -97,7 +97,9 @@ fn copy_from_store( return Ok(()); } -/// This function sets HEAD key in BlockMisc column to the Tip that reflect provided height. +/// This function sets the cold head to the Tip that reflect provided height in two places: +/// - In cold storage in HEAD key in BlockMisc column. +/// - In hot storage in COLD_HEAD key in BlockMisc column. /// This function should be used after all of the blocks from genesis to `height` inclusive had been copied. /// /// This method relies on the fact that BlockHeight and BlockHeader are not garbage collectable. @@ -114,17 +116,23 @@ pub fn update_cold_head( let height_key = height.to_le_bytes(); let block_hash_key = store.get_or_err(DBCol::BlockHeight, &height_key)?.as_slice().to_vec(); + let tip_header = &store.get_ser_or_err::(DBCol::BlockHeader, &block_hash_key)?; + let tip = Tip::from_header(tip_header); + + // Write HEAD to the cold db. + { + let mut transaction = DBTransaction::new(); + transaction.set(DBCol::BlockMisc, HEAD_KEY.to_vec(), tip.try_to_vec()?); + cold_db.write(transaction)?; + } + + // Write COLD_HEAD to the hot db. 
+ { + let mut transaction = DBTransaction::new(); + transaction.set(DBCol::BlockMisc, COLD_HEAD_KEY.to_vec(), tip.try_to_vec()?); + hot_store.storage.write(transaction)?; + } - let mut transaction = DBTransaction::new(); - transaction.set( - DBCol::BlockMisc, - HEAD_KEY.to_vec(), - Tip::from_header( - &store.get_ser_or_err::(DBCol::BlockHeader, &block_hash_key)?, - ) - .try_to_vec()?, - ); - cold_db.write(transaction)?; return Ok(()); } diff --git a/core/store/src/columns.rs b/core/store/src/columns.rs index 5111d2cca08..3749e9b58f6 100644 --- a/core/store/src/columns.rs +++ b/core/store/src/columns.rs @@ -35,7 +35,9 @@ pub enum DBCol { /// - *Rows*: block hash (CryptoHash) /// - *Content type*: [near_primitives::block_header::BlockHeader] BlockHeader, - /// Column that stores mapping from block height to block hash. + /// Column that stores mapping from block height to block hash on the current canonical chain. + /// (if you want to see all the blocks that we got for a given height, for example due to double signing etc, + /// look at BlockPerHeight column). /// - *Rows*: height (u64) /// - *Content type*: block hash (CryptoHash) BlockHeight, @@ -105,6 +107,8 @@ pub enum DBCol { /// - *Content type*: BlockExtra BlockExtra, /// Store hash of all block per each height, to detect double signs. + /// In most cases, it is better to get the value from BlockHeight column instead (which + /// keeps the hash of the block from canonical chain) /// - *Rows*: int (height of the block) /// - *Content type*: Map: EpochId -> Set of BlockHash(CryptoHash) BlockPerHeight, @@ -180,7 +184,12 @@ pub enum DBCol { /// - *Rows*: BlockHash || TrieKey (TrieKey is written via custom to_vec) /// - *Column type*: TrieKey, new value and reason for change (RawStateChangesWithTrieKey) StateChanges, - /// Mapping from Block to its refcount. (Refcounts are used in handling chain forks) + /// Mapping from Block to its refcount (number of blocks that use this block as a parent). 
(Refcounts are used in handling chain forks). + /// In following example: + /// 1 -> 2 -> 3 -> 5 + /// \ --> 4 + /// The block '2' will have a refcount equal to 2. + /// /// - *Rows*: BlockHash (CryptoHash) /// - *Column type*: refcount (u64) BlockRefCount, @@ -197,6 +206,8 @@ pub enum DBCol { /// - *Column type*: Vec ChunkHashesByHeight, /// Mapping from block ordinal number (number of the block in the chain) to the BlockHash. + /// Note: that it can be different than BlockHeight - if we have skipped some heights when creating the blocks. + /// for example in chain 1->3, the second block has height 3, but ordinal 2. /// - *Rows*: ordinal (u64) /// - *Column type*: BlockHash (CryptoHash) BlockOrdinal, @@ -397,7 +408,6 @@ impl DBCol { } /// Whether this column exists in cold storage. - #[cfg(feature = "cold_store")] pub(crate) const fn is_in_colddb(&self) -> bool { matches!(*self, DBCol::DbVersion | DBCol::BlockMisc) || self.is_cold() } diff --git a/core/store/src/db.rs b/core/store/src/db.rs index 9742886e16a..066bde58634 100644 --- a/core/store/src/db.rs +++ b/core/store/src/db.rs @@ -2,14 +2,12 @@ use std::io; use crate::DBCol; -#[cfg(feature = "cold_store")] mod colddb; pub mod refcount; pub(crate) mod rocksdb; mod slice; mod testdb; -#[cfg(feature = "cold_store")] pub use self::colddb::ColdDB; pub use self::rocksdb::RocksDB; pub use self::slice::DBSlice; @@ -25,6 +23,7 @@ pub const LATEST_KNOWN_KEY: &[u8; 12] = b"LATEST_KNOWN"; pub const LARGEST_TARGET_HEIGHT_KEY: &[u8; 21] = b"LARGEST_TARGET_HEIGHT"; pub const GENESIS_JSON_HASH_KEY: &[u8; 17] = b"GENESIS_JSON_HASH"; pub const GENESIS_STATE_ROOTS_KEY: &[u8; 19] = b"GENESIS_STATE_ROOTS"; +pub const COLD_HEAD_KEY: &[u8; 9] = b"COLD_HEAD"; #[derive(Default)] pub struct DBTransaction { diff --git a/core/store/src/flat_state.rs b/core/store/src/flat_state.rs index 2db94ca758b..2625922ffc6 100644 --- a/core/store/src/flat_state.rs +++ b/core/store/src/flat_state.rs @@ -519,7 +519,7 @@ pub const NUM_PARTS_IN_ONE_STEP: 
u64 = 20; pub const STATE_PART_MEMORY_LIMIT: bytesize::ByteSize = bytesize::ByteSize(10 * bytesize::MIB); /// Current step of fetching state to fill flat storage. -#[derive(BorshSerialize, BorshDeserialize, Clone, Debug, PartialEq, Eq)] +#[derive(BorshSerialize, BorshDeserialize, Copy, Clone, Debug, PartialEq, Eq)] pub struct FetchingStateStatus { /// Number of the first state part to be fetched in this step. pub part_id: u64, @@ -533,8 +533,8 @@ pub struct FetchingStateStatus { /// Because this is a heavy work requiring ~5h for testnet rpc node and ~10h for testnet archival node, we do it on /// background during regular block processing. /// This struct reveals what is the current status of creating flat storage data on disk. -#[derive(Clone, Debug, PartialEq, Eq)] -pub enum FlatStorageStateStatus { +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum FlatStorageCreationStatus { /// Flat storage state does not exist. We are saving `FlatStorageDelta`s to disk. /// During this step, we save current chain head, start saving all deltas for blocks after chain head and wait until /// final chain head moves after saved chain head. @@ -558,16 +558,16 @@ pub enum FlatStorageStateStatus { DontCreate, } -impl Into for &FlatStorageStateStatus { +impl Into for &FlatStorageCreationStatus { /// Converts status to integer to export to prometheus later. /// Cast inside enum does not work because it is not fieldless. 
fn into(self) -> i64 { match self { - FlatStorageStateStatus::Ready => 0, - FlatStorageStateStatus::SavingDeltas => 1, - FlatStorageStateStatus::FetchingState(_) => 2, - FlatStorageStateStatus::CatchingUp => 3, - FlatStorageStateStatus::DontCreate => 4, + FlatStorageCreationStatus::SavingDeltas => 0, + FlatStorageCreationStatus::FetchingState(_) => 1, + FlatStorageCreationStatus::CatchingUp => 2, + FlatStorageCreationStatus::Ready => 3, + FlatStorageCreationStatus::DontCreate => 4, } } } @@ -575,7 +575,7 @@ impl Into for &FlatStorageStateStatus { #[cfg(feature = "protocol_feature_flat_state")] pub mod store_helper { use crate::flat_state::{ - FetchingStateStatus, FlatStorageError, FlatStorageStateStatus, KeyForFlatStateDelta, + FetchingStateStatus, FlatStorageCreationStatus, FlatStorageError, KeyForFlatStateDelta, }; use crate::{FlatStateDelta, Store, StoreUpdate}; use borsh::BorshSerialize; @@ -730,19 +730,19 @@ pub mod store_helper { store_update.delete(crate::DBCol::FlatStateMisc, &catchup_status_key(shard_id)); } - pub fn get_flat_storage_state_status( + pub fn get_flat_storage_creation_status( store: &Store, shard_id: ShardId, - ) -> FlatStorageStateStatus { + ) -> FlatStorageCreationStatus { match get_flat_head(store, shard_id) { - None => FlatStorageStateStatus::SavingDeltas, + None => FlatStorageCreationStatus::SavingDeltas, Some(_) => { if let Some(fetching_state_status) = get_fetching_state_status(store, shard_id) { - FlatStorageStateStatus::FetchingState(fetching_state_status) + FlatStorageCreationStatus::FetchingState(fetching_state_status) } else if get_catchup_status(store, shard_id) { - FlatStorageStateStatus::CatchingUp + FlatStorageCreationStatus::CatchingUp } else { - FlatStorageStateStatus::Ready + FlatStorageCreationStatus::Ready } } } @@ -751,7 +751,7 @@ pub mod store_helper { #[cfg(not(feature = "protocol_feature_flat_state"))] pub mod store_helper { - use crate::flat_state::{FlatStateDelta, FlatStorageError, FlatStorageStateStatus}; + use 
crate::flat_state::{FlatStateDelta, FlatStorageCreationStatus, FlatStorageError}; use crate::Store; use near_primitives::hash::CryptoHash; use near_primitives::types::ShardId; @@ -769,11 +769,11 @@ pub mod store_helper { Err(FlatStorageError::StorageInternalError) } - pub fn get_flat_storage_state_status( + pub fn get_flat_storage_creation_status( _store: &Store, _shard_id: ShardId, - ) -> FlatStorageStateStatus { - FlatStorageStateStatus::DontCreate + ) -> FlatStorageCreationStatus { + FlatStorageCreationStatus::DontCreate } } diff --git a/core/store/src/lib.rs b/core/store/src/lib.rs index 70e304c78e1..69a8976766a 100644 --- a/core/store/src/lib.rs +++ b/core/store/src/lib.rs @@ -8,11 +8,12 @@ use std::{fmt, io}; use borsh::{BorshDeserialize, BorshSerialize}; use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; +use metadata::{DbKind, DbVersion, KIND_KEY, VERSION_KEY}; use once_cell::sync::Lazy; pub use columns::DBCol; pub use db::{ - CHUNK_TAIL_KEY, FINAL_HEAD_KEY, FORK_TAIL_KEY, HEADER_HEAD_KEY, HEAD_KEY, + CHUNK_TAIL_KEY, COLD_HEAD_KEY, FINAL_HEAD_KEY, FORK_TAIL_KEY, HEADER_HEAD_KEY, HEAD_KEY, LARGEST_TARGET_HEIGHT_KEY, LATEST_KNOWN_KEY, TAIL_KEY, }; use near_crypto::PublicKey; @@ -40,7 +41,6 @@ pub use crate::trie::{ }; pub use flat_state::FlatStateDelta; -#[cfg(feature = "cold_store")] pub mod cold_storage; mod columns; pub mod config; @@ -67,7 +67,6 @@ pub use crate::opener::{StoreMigrator, StoreOpener, StoreOpenerError}; #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub enum Temperature { Hot, - #[cfg(feature = "cold_store")] Cold, } @@ -78,7 +77,6 @@ impl FromStr for Temperature { let s = s.to_lowercase(); match s.as_str() { "hot" => Ok(Temperature::Hot), - #[cfg(feature = "cold_store")] "cold" => Ok(Temperature::Cold), _ => Err(String::from(format!("invalid temperature string {s}"))), } @@ -93,17 +91,14 @@ impl FromStr for Temperature { /// the storage. 
pub struct NodeStorage { hot_storage: Arc, - #[cfg(feature = "cold_store")] cold_storage: Option>>, - #[cfg(not(feature = "cold_store"))] - cold_storage: Option, _phantom: PhantomData, } /// Node’s single storage source. /// /// Currently, this is somewhat equivalent to [`NodeStorage`] in that for given -/// note storage you can get only a single [`Store`] object. This will change +/// node storage you can get only a single [`Store`] object. This will change /// as we implement cold storage in which case this structure will provide an /// interface to access either hot or cold data. At that point, [`NodeStorage`] /// will map to one of two [`Store`] objects depending on the temperature of the @@ -113,41 +108,25 @@ pub struct Store { storage: Arc, } -// Those are temporary. While cold_store feature is stabilised, remove those -// type aliases and just use the type directly. -#[cfg(feature = "cold_store")] -pub type ColdConfig<'a> = Option<&'a StoreConfig>; -#[cfg(not(feature = "cold_store"))] -pub type ColdConfig<'a> = Option; - impl NodeStorage { /// Initialises a new opener with given home directory and hot and cold /// store config. pub fn opener<'a>( home_dir: &std::path::Path, config: &'a StoreConfig, - #[allow(unused_variables)] cold_config: ColdConfig<'a>, + cold_config: Option<&'a StoreConfig>, ) -> StoreOpener<'a> { - StoreOpener::new( - home_dir, - config, - #[cfg(feature = "cold_store")] - cold_config, - ) + StoreOpener::new(home_dir, config, cold_config) } /// Constructs new object backed by given database. 
fn from_rocksdb( hot_storage: crate::db::RocksDB, - #[cfg(feature = "cold_store")] cold_storage: Option, - #[cfg(not(feature = "cold_store"))] cold_storage: Option, + cold_storage: Option, ) -> Self { let hot_storage = Arc::new(hot_storage); - #[cfg(feature = "cold_store")] let cold_storage = cold_storage .map(|cold_db| Arc::new(crate::db::ColdDB::new(hot_storage.clone(), cold_db))); - #[cfg(not(feature = "cold_store"))] - let cold_storage = cold_storage.map(|_| unreachable!()); Self { hot_storage, cold_storage, _phantom: PhantomData {} } } @@ -162,12 +141,7 @@ impl NodeStorage { pub fn test_opener() -> (tempfile::TempDir, StoreOpener<'static>) { static CONFIG: Lazy = Lazy::new(StoreConfig::test_config); let dir = tempfile::tempdir().unwrap(); - let opener = StoreOpener::new( - dir.path(), - &CONFIG, - #[cfg(feature = "cold_store")] - None, - ); + let opener = StoreOpener::new(dir.path(), &CONFIG, None); (dir, opener) } @@ -198,19 +172,31 @@ impl NodeStorage { /// data is, simplifying slightly, determined based on height of the block. /// Anything above the tail of hot storage is hot and everything else is /// cold. + /// + /// This method panics if trying to access cold store but it wasn't configured. + /// Please consider using the get_hot_store and get_cold_store methods to avoid panics. pub fn get_store(&self, temp: Temperature) -> Store { match temp { - Temperature::Hot => Store { storage: self.hot_storage.clone() }, - #[cfg(feature = "cold_store")] - Temperature::Cold => Store { storage: self.cold_storage.as_ref().unwrap().clone() }, + Temperature::Hot => self.get_hot_store(), + Temperature::Cold => self.get_cold_store().unwrap(), + } + } + + pub fn get_hot_store(&self) -> Store { + Store { storage: self.hot_storage.clone() } + } + + pub fn get_cold_store(&self) -> Option { + match &self.cold_storage { + Some(cold_storage) => Some(Store { storage: cold_storage.clone() }), + None => None, } } /// Returns underlying database for given temperature. 
/// - /// With (currently unimplemented) cold storage, this allows accessing - /// underlying hot and cold databases directly bypassing any abstractions - /// offered by [`NodeStorage`] or [`Store`] interfaces. + /// This allows accessing underlying hot and cold databases directly + /// bypassing any abstractions offered by [`NodeStorage`] or [`Store`] interfaces. /// /// This is useful for certain data which only lives in hot storage and /// interfaces which deal with it. For example, peer store uses hot @@ -221,25 +207,22 @@ impl NodeStorage { /// well. For example, garbage collection only ever touches hot storage but /// it should go through [`Store`] interface since data it manipulates /// (e.g. blocks) are live in both databases. - pub fn _get_inner(&self, temp: Temperature) -> &Arc { - match temp { - Temperature::Hot => &self.hot_storage, - #[cfg(feature = "cold_store")] - Temperature::Cold => todo!(), - } - } - - /// Returns underlying database for given temperature. /// - /// This is like [`Self::get_inner`] but consumes `self` thus avoiding - /// `Arc::clone`. + /// This method panics if trying to access cold store but it wasn't configured. 
pub fn into_inner(self, temp: Temperature) -> Arc { match temp { Temperature::Hot => self.hot_storage, - #[cfg(feature = "cold_store")] Temperature::Cold => self.cold_storage.unwrap(), } } + + pub fn set_version(&self, version: DbVersion) -> std::io::Result<()> { + self.get_hot_store().set_db_version(version)?; + if let Some(cold_store) = self.get_cold_store() { + cold_store.set_db_version(version)?; + } + Ok(()) + } } impl NodeStorage { @@ -256,12 +239,10 @@ impl NodeStorage { Ok(match metadata::DbMetadata::read(self.hot_storage.as_ref())?.kind.unwrap() { metadata::DbKind::RPC => false, metadata::DbKind::Archive => true, - #[cfg(feature = "cold_store")] - metadata::DbKind::Hot | metadata::DbKind::Cold => unreachable!(), + metadata::DbKind::Hot | metadata::DbKind::Cold => todo!(), }) } - #[cfg(feature = "cold_store")] pub fn new_with_cold(hot: Arc, cold: D) -> Self { Self { hot_storage: hot.clone(), @@ -270,7 +251,6 @@ impl NodeStorage { } } - #[cfg(feature = "cold_store")] pub fn cold_db(&self) -> io::Result<&Arc>> { self.cold_storage .as_ref() @@ -391,6 +371,30 @@ impl Store { } } +impl Store { + pub fn get_db_version(&self) -> io::Result { + let metadata = metadata::DbMetadata::read(self.storage.as_ref())?; + Ok(metadata.version) + } + + pub fn set_db_version(&self, version: DbVersion) -> io::Result<()> { + let mut store_update = self.store_update(); + store_update.set(DBCol::DbVersion, VERSION_KEY, version.to_string().as_bytes()); + store_update.commit() + } + + pub fn get_db_kind(&self) -> io::Result> { + let metadata = metadata::DbMetadata::read(self.storage.as_ref())?; + Ok(metadata.kind) + } + + pub fn set_db_kind(&self, kind: DbKind) -> io::Result<()> { + let mut store_update = self.store_update(); + store_update.set(DBCol::DbVersion, KIND_KEY, <&str>::from(kind).as_bytes()); + store_update.commit() + } +} + /// Keeps track of current changes to the database and can commit all of them to the database. 
pub struct StoreUpdate { transaction: DBTransaction, diff --git a/core/store/src/metadata.rs b/core/store/src/metadata.rs index 620717739c8..14e16fd45ac 100644 --- a/core/store/src/metadata.rs +++ b/core/store/src/metadata.rs @@ -36,11 +36,9 @@ pub enum DbKind { /// The database is an archive database meaning that it is not garbage /// collected and stores all chain data. Archive, - #[cfg(feature = "cold_store")] /// The database is Hot meaning that the node runs in archival mode with /// a paired Cold database. Hot, - #[cfg(feature = "cold_store")] /// The database is Cold meaning that the node runs in archival mode with /// a paired Hot database. Cold, @@ -58,14 +56,12 @@ fn set_db_metadata( let mut store_update = storage.get_store(temp).store_update(); store_update.set(DBCol::DbVersion, VERSION_KEY, metadata.version.to_string().as_bytes()); if metadata.version >= DB_VERSION_WITH_KIND { - #[allow(unused_mut)] let mut kind = metadata.kind; - #[cfg(feature = "cold_store")] - if matches!(temp, Temperature::Cold) || storage.has_cold() { - kind = Some(if matches!(temp, Temperature::Hot) { DbKind::Hot } else { DbKind::Cold }); + if temp == Temperature::Cold { + kind = Some(DbKind::Cold); } - if let Some(kind) = kind.map(|kind| <&str>::from(kind).as_bytes()) { - store_update.set(DBCol::DbVersion, KIND_KEY, kind); + if let Some(kind) = kind { + store_update.set(DBCol::DbVersion, KIND_KEY, <&str>::from(kind).as_bytes()); } } store_update.commit() @@ -76,7 +72,6 @@ pub(super) fn set_store_metadata( metadata: DbMetadata, ) -> std::io::Result<()> { set_db_metadata(storage, Temperature::Hot, metadata)?; - #[cfg(feature = "cold_store")] if storage.has_cold() { set_db_metadata(storage, Temperature::Cold, metadata)?; } diff --git a/core/store/src/metrics.rs b/core/store/src/metrics.rs index db7f249d7ca..992fdda34e4 100644 --- a/core/store/src/metrics.rs +++ b/core/store/src/metrics.rs @@ -213,7 +213,6 @@ pub static PREFETCH_STAGED_SLOTS: Lazy = Lazy::new(|| { ) .unwrap() }); 
-#[cfg(feature = "cold_store")] pub static COLD_MIGRATION_READS: Lazy = Lazy::new(|| { try_create_int_counter_vec( "near_cold_migration_reads", diff --git a/core/store/src/opener.rs b/core/store/src/opener.rs index 40b71e2a07a..df22a1225dc 100644 --- a/core/store/src/opener.rs +++ b/core/store/src/opener.rs @@ -131,7 +131,7 @@ pub struct StoreOpener<'a> { hot: DBOpener<'a>, /// Opener for an instance of Cold RocksDB store if one was configured. - cold: Option>, + cold: Option>, /// What kind of database we should expect; if `None`, the kind of the /// database is not checked. @@ -166,14 +166,11 @@ impl<'a> StoreOpener<'a> { pub(crate) fn new( home_dir: &std::path::Path, config: &'a StoreConfig, - #[cfg(feature = "cold_store")] cold_config: super::ColdConfig<'a>, + cold_config: Option<&'a StoreConfig>, ) -> Self { Self { hot: DBOpener::new(home_dir, config, Temperature::Hot), - #[cfg(feature = "cold_store")] - cold: cold_config.map(|config| ColdDBOpener::new(home_dir, config, Temperature::Cold)), - #[cfg(not(feature = "cold_store"))] - cold: None, + cold: cold_config.map(|config| DBOpener::new(home_dir, config, Temperature::Cold)), expected_kind: None, migrator: None, } @@ -232,25 +229,22 @@ impl<'a> StoreOpener<'a> { if let Some(hot_meta) = hot_meta { if let Some(Some(cold_meta)) = cold_meta { - assert!(cfg!(feature = "cold_store")); // If cold database exists, hot and cold databases must have the - // same version and to be Hot and Cold kinds respectively. + // same version and to be Hot and Cold or Archive and Cold kinds respectively. 
if hot_meta.version != cold_meta.version { return Err(StoreOpenerError::HotColdVersionMismatch { hot_version: hot_meta.version, cold_version: cold_meta.version, }); } - #[cfg(feature = "cold_store")] - if hot_meta.kind != Some(DbKind::Hot) { + if !matches!(hot_meta.kind, Some(DbKind::Hot) | Some(DbKind::Archive)) { return Err(StoreOpenerError::DbKindMismatch { which: "Hot", got: hot_meta.kind, want: DbKind::Hot, }); } - #[cfg(feature = "cold_store")] - if cold_meta.kind != Some(DbKind::Cold) { + if !matches!(cold_meta.kind, Some(DbKind::Cold)) { return Err(StoreOpenerError::DbKindMismatch { which: "Cold", got: cold_meta.kind, @@ -260,7 +254,6 @@ impl<'a> StoreOpener<'a> { } else if cold_meta.is_some() { // If cold database is configured and hot database exists, // cold database must exist as well. - assert!(cfg!(feature = "cold_store")); return Err(StoreOpenerError::HotColdExistenceMismatch); } else if !matches!(hot_meta.kind, None | Some(DbKind::RPC | DbKind::Archive)) { // If cold database is not configured, hot database must be @@ -291,13 +284,9 @@ impl<'a> StoreOpener<'a> { tracing::info!(target: "near", path=%self.path().display(), "Opening an existing RocksDB database"); let (storage, hot_meta, cold_meta) = self.open_storage(mode, DB_VERSION)?; - if let Some(_cold_meta) = cold_meta { - assert!(cfg!(feature = "cold_store")); - // open_storage has verified this. - #[cfg(feature = "cold_store")] - assert_eq!(Some(DbKind::Hot), hot_meta.kind); - #[cfg(feature = "cold_store")] - assert_eq!(Some(DbKind::Cold), _cold_meta.kind); + if let Some(cold_meta) = cold_meta { + assert!(matches!(hot_meta.kind, Some(DbKind::Hot) | Some(DbKind::Archive))); + assert!(matches!(cold_meta.kind, Some(DbKind::Cold))); } else { self.ensure_kind(&storage, hot_meta)?; } @@ -427,17 +416,14 @@ impl<'a> StoreOpener<'a> { // Those are mostly sanity checks. 
If any of those conditions fails // than either there’s bug in code or someone does something weird on // the file system and tries to switch databases under us. - if let Some(_cold_meta) = cold_meta { - #[cfg(feature = "cold_store")] - if hot_meta.kind != Some(DbKind::Hot) { + if let Some(cold_meta) = cold_meta { + if !matches!(hot_meta.kind, Some(DbKind::Hot) | Some(DbKind::Archive)) { Err((hot_meta.kind, "Hot")) - } else if _cold_meta.kind != Some(DbKind::Cold) { - Err((_cold_meta.kind, "Cold")) + } else if !matches!(cold_meta.kind, Some(DbKind::Cold)) { + Err((cold_meta.kind, "Cold")) } else { Ok(()) } - #[cfg(not(feature = "cold_store"))] - Ok(()) } else if matches!(hot_meta.kind, None | Some(DbKind::RPC | DbKind::Archive)) { Ok(()) } else { @@ -539,43 +525,3 @@ pub trait StoreMigrator { /// equal to [`DB_VERSION`]. fn migrate(&self, storage: &NodeStorage, version: DbVersion) -> anyhow::Result<()>; } - -// This is only here to make conditional compilation simpler. Once cold_store -// feature is stabilised, get rid of it and use DBOpener directly. 
-use cold_db_opener::ColdDBOpener; - -#[cfg(feature = "cold_store")] -mod cold_db_opener { - pub(super) type ColdDBOpener<'a> = super::DBOpener<'a>; -} - -#[cfg(not(feature = "cold_store"))] -mod cold_db_opener { - use super::*; - - pub(super) enum OpenerImpl {} - - impl OpenerImpl { - pub(super) fn get_metadata(&self) -> std::io::Result> { - unreachable!() - } - - pub(super) fn open( - &self, - _mode: Mode, - _want_version: DbVersion, - ) -> std::io::Result<(std::convert::Infallible, DbMetadata)> { - unreachable!() - } - - pub(super) fn create(&self) -> std::io::Result { - unreachable!() - } - - pub(super) fn snapshot(&self) -> Result { - Ok(Snapshot::none()) - } - } - - pub(super) type ColdDBOpener<'a> = OpenerImpl; -} diff --git a/core/store/src/test_utils.rs b/core/store/src/test_utils.rs index cc4ae321dc3..eb5e7fd0450 100644 --- a/core/store/src/test_utils.rs +++ b/core/store/src/test_utils.rs @@ -4,7 +4,8 @@ use rand::seq::SliceRandom; use rand::Rng; use crate::db::TestDB; -use crate::{NodeStorage, ShardTries, Store}; +use crate::metadata::{DbKind, DbVersion, DB_VERSION}; +use crate::{NodeStorage, ShardTries, Store, Temperature}; use near_primitives::account::id::AccountId; use near_primitives::hash::CryptoHash; use near_primitives::receipt::{DataReceipt, Receipt, ReceiptEnum}; @@ -15,19 +16,42 @@ use std::str::from_utf8; /// Creates an in-memory node storage. /// /// In tests you’ll often want to use [`create_test_store`] instead. -pub fn create_test_node_storage() -> NodeStorage { - NodeStorage::new(TestDB::new()) +pub fn create_test_node_storage(version: DbVersion, hot_kind: DbKind) -> NodeStorage { + let storage = NodeStorage::new(TestDB::new()); + + storage.get_store(Temperature::Hot).set_db_version(version).unwrap(); + storage.get_store(Temperature::Hot).set_db_kind(hot_kind).unwrap(); + + storage +} + +/// Creates an in-memory node storage. +/// +/// In tests you’ll often want to use [`create_test_store`] instead. 
+/// It initializes the db version and db kind to sensible defaults - +/// the current version and rpc kind. +pub fn create_test_node_storage_default() -> NodeStorage { + create_test_node_storage(DB_VERSION, DbKind::RPC) } /// Creates an in-memory node storage with ColdDB -#[cfg(feature = "cold_store")] -pub fn create_test_node_storage_with_cold() -> NodeStorage { - NodeStorage::new_with_cold(TestDB::new(), TestDB::default()) +pub fn create_test_node_storage_with_cold( + version: DbVersion, + hot_kind: DbKind, +) -> NodeStorage { + let storage = NodeStorage::new_with_cold(TestDB::new(), TestDB::default()); + + storage.get_store(Temperature::Hot).set_db_version(version).unwrap(); + storage.get_store(Temperature::Hot).set_db_kind(hot_kind).unwrap(); + storage.get_store(Temperature::Cold).set_db_version(version).unwrap(); + storage.get_store(Temperature::Cold).set_db_kind(DbKind::Cold).unwrap(); + + storage } /// Creates an in-memory database. pub fn create_test_store() -> Store { - create_test_node_storage().get_store(crate::Temperature::Hot) + create_test_node_storage(DB_VERSION, DbKind::RPC).get_store(crate::Temperature::Hot) } /// Creates a Trie using an in-memory database. 
diff --git a/core/store/src/trie/mod.rs b/core/store/src/trie/mod.rs index abd2e5ef353..75596c3bbf0 100644 --- a/core/store/src/trie/mod.rs +++ b/core/store/src/trie/mod.rs @@ -322,8 +322,11 @@ impl std::fmt::Debug for TrieNode { #[derive(Debug, Eq, PartialEq)] #[allow(clippy::large_enum_variant)] pub enum RawTrieNode { + /// Leaf(key, value_length, value_hash) Leaf(Vec, u32, CryptoHash), + /// Branch(children, (value_length, value_hash)) Branch([Option; 16], Option<(u32, CryptoHash)>), + /// Extension(key, child) Extension(Vec, CryptoHash), } @@ -1112,13 +1115,14 @@ mod tests { fn test_encode_decode() { fn test(node: RawTrieNode, encoded: &[u8]) { let mut buf = Vec::new(); + let node = RawTrieNodeWithSize { node, memory_usage: 42 }; node.encode_into(&mut buf); assert_eq!(encoded, buf.as_slice()); - assert_eq!(node, RawTrieNode::decode(&buf).unwrap()); + assert_eq!(node, RawTrieNodeWithSize::decode(&buf).unwrap()); // Test that adding garbage at the end fails decoding. buf.push(b'!'); - let got = RawTrieNode::decode(&buf); + let got = RawTrieNodeWithSize::decode(&buf); assert!(got.is_err(), "got: {got:?}"); } @@ -1128,7 +1132,16 @@ mod tests { let encoded = [ 0, 3, 0, 0, 0, 1, 2, 3, 3, 0, 0, 0, 194, 40, 8, 24, 64, 219, 69, 132, 86, 52, 110, 175, 57, 198, 165, 200, 83, 237, 211, 11, 194, 83, 251, 33, 145, 138, 234, 226, 7, 242, 186, - 73, + 73, 42, 0, 0, 0, 0, 0, 0, 0, + ]; + test(node, &encoded); + + let mut children: [Option; 16] = Default::default(); + children[3] = Some(Trie::EMPTY_ROOT); + let node = RawTrieNode::Branch(children, None); + let encoded = [ + 1, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 42, 0, 0, 0, 0, 0, 0, 0, ]; test(node, &encoded); @@ -1139,13 +1152,14 @@ mod tests { 2, 3, 0, 0, 0, 194, 40, 8, 24, 64, 219, 69, 132, 86, 52, 110, 175, 57, 198, 165, 200, 83, 237, 211, 11, 194, 83, 251, 33, 145, 138, 234, 226, 7, 242, 186, 73, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 42, 0, 0, 0, 0, 0, 0, 0, ]; test(node, &encoded); let node = RawTrieNode::Extension(vec![123, 245, 255], Trie::EMPTY_ROOT); let encoded = [ 3, 3, 0, 0, 0, 123, 245, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 42, 0, 0, 0, 0, 0, 0, 0, ]; test(node, &encoded); } diff --git a/deny.toml b/deny.toml index 7581c21a25d..92c8d3f6e2d 100644 --- a/deny.toml +++ b/deny.toml @@ -17,11 +17,6 @@ deny = [ ] skip = [ - # See https://github.com/camshaft/bolero/issues/85 - { name = "crossbeam-channel", version = "=0.4.4" }, - { name = "crossbeam-utils", version = "=0.7.2" }, - { name = "strsim", version = "=0.8.0" }, - { name = "clap", version = "=2.34.0" }, # criterion uses clap=2.34.0 which relies on an older textwrap { name = "textwrap", version = "=0.11.0" }, @@ -41,8 +36,6 @@ skip = [ # near-vm-runner and wasmer-compiler-near use 0.78.2 { name = "wasmparser", version = "=0.78.2" }, - # Wasmer 2.0 uses both region 2.2.0 and 3.0.0 via dependencies - { name = "region", version = "=2.2.0" }, # Need this specific version of pwasm-utils for backwards-compatible # stack limiting. 
{ name = "pwasm-utils", version = "=0.12.0" }, @@ -50,6 +43,7 @@ skip = [ # wasmer and wasmtime { name = "target-lexicon", version = "=0.10.0" }, + { name = "wasmparser", version = "=0.84.0" }, # chain and param estimator { name = "num-rational", version = "=0.3.2" }, @@ -60,6 +54,10 @@ skip = [ { name = "lock_api", version = "=0.3.4" }, { name = "digest", version = "=0.8.1" }, + # old version of tokio, parking_lot + { name = "windows-sys", version = "=0.36.1" }, + { name = "windows_x86_64_msvc", version = "=0.36.1" }, + # chrono uses old time crate { name = "time", version = "=0.1.44" }, @@ -100,4 +98,10 @@ skip = [ # redis we’re using uses ancient sha { name = "sha1", version = "=0.6.1" }, + + # rust-s3 is using an old version of smartstring + { name = "smartstring", version = "=0.2.10" }, + + # validator 0.12 ~ 0.16 is still using an old version of idna + { name = "idna", version = "=0.2.3" }, ] diff --git a/docs/architecture/README.md b/docs/architecture/README.md index 06a697064aa..8149612c99c 100644 --- a/docs/architecture/README.md +++ b/docs/architecture/README.md @@ -141,7 +141,7 @@ This crate contains most of the chain logic (consensus, block processing, etc). happens. **Architecture Invariant**: interface between chain and runtime is defined by -`RuntimeAdapter`. All invocations of runtime go through `RuntimeAdapter` +`RuntimeWithEpochManagerAdapter`. All invocations of runtime go through `RuntimeWithEpochManagerAdapter` **State update** @@ -244,8 +244,8 @@ contracts on NEAR. As mentioned before, `neard` is the crate that contains that main entry points. All the actors are spawned in `start_with_config`. It is also worth noting that -`NightshadeRuntime` is the struct that implements `RuntimeAdapter`. - +`NightshadeRuntime` is the struct that implements `RuntimeWithEpochManagerAdapter`. 
+ ### `core/store/src/db.rs` diff --git a/docs/practices/testing/README.md b/docs/practices/testing/README.md index 532b0309379..5a221e3bf7f 100644 --- a/docs/practices/testing/README.md +++ b/docs/practices/testing/README.md @@ -59,7 +59,7 @@ available to test them. ## Client -Client is separated from the runtime via a `RuntimeAdapter` trait. In production +Client is separated from the runtime via a `RuntimeWithEpochManagerAdapter` trait. In production it uses `NightshadeRuntime` that uses real runtime and epoch managers. To test client without instantiating runtime and epoch manager, we have a mock runtime `KeyValueRuntime`. diff --git a/genesis-tools/genesis-populate/src/lib.rs b/genesis-tools/genesis-populate/src/lib.rs index 4de8a535600..a364c28e9e1 100644 --- a/genesis-tools/genesis-populate/src/lib.rs +++ b/genesis-tools/genesis-populate/src/lib.rs @@ -4,8 +4,8 @@ pub mod state_dump; use crate::state_dump::StateDump; use indicatif::{ProgressBar, ProgressStyle}; -use near_chain::types::BlockHeaderInfo; -use near_chain::{Block, Chain, ChainStore, RuntimeAdapter}; +use near_chain::types::{BlockHeaderInfo, RuntimeAdapter}; +use near_chain::{Block, Chain, ChainStore}; use near_chain_configs::Genesis; use near_crypto::{InMemorySigner, KeyType}; use near_primitives::account::{AccessKey, Account}; diff --git a/integration-tests/Cargo.toml b/integration-tests/Cargo.toml index 38fc129ad90..15884e0a4fc 100644 --- a/integration-tests/Cargo.toml +++ b/integration-tests/Cargo.toml @@ -92,6 +92,3 @@ sandbox = [ "near-client/sandbox", ] no_cache = ["nearcore/no_cache"] -cold_store = [ - "near-store/cold_store" -] diff --git a/integration-tests/src/tests/client/chunks_management.rs b/integration-tests/src/tests/client/chunks_management.rs index 7632ea7b837..90609b29282 100644 --- a/integration-tests/src/tests/client/chunks_management.rs +++ b/integration-tests/src/tests/client/chunks_management.rs @@ -362,18 +362,20 @@ fn 
chunks_produced_and_distributed_one_val_per_shard_should_succeed_even_without /// Note that due to #7385 (which sends chunk forwarding messages irrespective of shard assignment), /// we disable chunk forwarding messages for the following tests, so we can focus on chunk /// requesting behavior. -#[test] -#[cfg_attr(not(feature = "expensive_tests"), ignore)] -fn chunks_recovered_from_others() { - Test { - validator_groups: 2, - chunk_only_producers: false, - drop_to_4_from: &["test1"], - drop_all_chunk_forward_msgs: true, - block_timeout: 4 * CHUNK_REQUEST_SWITCH_TO_OTHERS_MS, - } - .run() -} +/// TODO: this test is broken due to (#8395) - with fix in #8211 + +//#[test] +//#[cfg_attr(not(feature = "expensive_tests"), ignore)] +//fn chunks_recovered_from_others() { +// Test { +// validator_groups: 2, +// chunk_only_producers: false, +// drop_to_4_from: &["test1"], +// drop_all_chunk_forward_msgs: true, +// block_timeout: 4 * CHUNK_REQUEST_SWITCH_TO_OTHERS_MS, +// } +// .run() +//} /// Same test as above, but the number of validator groups is four, therefore test2 doesn't have the /// part test4 needs. 
The only way test4 can recover the part is by reconstructing the whole chunk, diff --git a/integration-tests/src/tests/client/cold_storage.rs b/integration-tests/src/tests/client/cold_storage.rs index fbadf2f3704..e287c825c66 100644 --- a/integration-tests/src/tests/client/cold_storage.rs +++ b/integration-tests/src/tests/client/cold_storage.rs @@ -1,3 +1,4 @@ +use crate::tests::client::process_blocks::create_nightshade_runtime_with_store; use crate::tests::client::process_blocks::create_nightshade_runtimes; use borsh::BorshDeserialize; use near_chain::{ChainGenesis, Provenance}; @@ -13,8 +14,10 @@ use near_primitives::transaction::{ use near_store::cold_storage::{ test_cold_genesis_update, test_get_store_reads, update_cold_db, update_cold_head, }; +use near_store::metadata::DbKind; +use near_store::metadata::DB_VERSION; use near_store::test_utils::create_test_node_storage_with_cold; -use near_store::{DBCol, Store, Temperature, HEAD_KEY}; +use near_store::{DBCol, Store, Temperature, COLD_HEAD_KEY, HEAD_KEY}; use nearcore::config::GenesisExt; use strum::IntoEnumIterator; @@ -71,8 +74,7 @@ fn test_storage_after_commit_of_cold_update() { .runtime_adapters(create_nightshade_runtimes(&genesis, 1)) .build(); - // TODO construct cold_db with appropriate hot storage - let store = create_test_node_storage_with_cold(); + let store = create_test_node_storage_with_cold(DB_VERSION, DbKind::Hot); let mut last_hash = *env.clients[0].chain.genesis().hash(); @@ -188,7 +190,7 @@ fn test_storage_after_commit_of_cold_update() { } /// Producing 10 * 5 blocks and updating HEAD of cold storage after each one. -/// After every update checking that HEAD of cold db and FINAL_HEAD of hot store are equal. +/// After every update checking that HEAD in cold db, COLD_HEAD in hot db and HEAD in hot store are equal. 
#[test] fn test_cold_db_head_update() { init_test_logger(); @@ -201,24 +203,26 @@ fn test_cold_db_head_update() { genesis.config.epoch_length = epoch_length; let mut chain_genesis = ChainGenesis::test(); chain_genesis.epoch_length = epoch_length; - let mut env = TestEnv::builder(chain_genesis) - .runtime_adapters(create_nightshade_runtimes(&genesis, 1)) - .build(); - - let store = create_test_node_storage_with_cold(); + let store = create_test_node_storage_with_cold(DB_VERSION, DbKind::Hot); + let hot_store = &store.get_store(Temperature::Hot); + let cold_store = &store.get_store(Temperature::Cold); + let runtime_adapter = create_nightshade_runtime_with_store(&genesis, &hot_store); + let mut env = TestEnv::builder(chain_genesis).runtime_adapters(vec![runtime_adapter]).build(); for h in 1..max_height { env.produce_block(0, h); update_cold_head(&*store.cold_db().unwrap(), &env.clients[0].runtime_adapter.store(), &h) .unwrap(); - assert_eq!( - &store.get_store(Temperature::Cold).get_ser::(DBCol::BlockMisc, HEAD_KEY).unwrap(), - &env.clients[0] - .runtime_adapter - .store() - .get_ser::(DBCol::BlockMisc, HEAD_KEY) - .unwrap() - ); + let head = &env.clients[0] + .runtime_adapter + .store() + .get_ser::(DBCol::BlockMisc, HEAD_KEY) + .unwrap(); + let cold_head_in_hot = hot_store.get_ser::(DBCol::BlockMisc, COLD_HEAD_KEY).unwrap(); + let cold_head_in_cold = cold_store.get_ser::(DBCol::BlockMisc, HEAD_KEY).unwrap(); + + assert_eq!(head, &cold_head_in_cold); + assert_eq!(head, &cold_head_in_hot); } } diff --git a/integration-tests/src/tests/client/features/access_key_nonce_for_implicit_accounts.rs b/integration-tests/src/tests/client/features/access_key_nonce_for_implicit_accounts.rs index f9070223eec..6a86b445328 100644 --- a/integration-tests/src/tests/client/features/access_key_nonce_for_implicit_accounts.rs +++ b/integration-tests/src/tests/client/features/access_key_nonce_for_implicit_accounts.rs @@ -3,7 +3,7 @@ use crate::tests::client::process_blocks::{ }; use 
assert_matches::assert_matches; use near_chain::chain::NUM_ORPHAN_ANCESTORS_CHECK; -use near_chain::{ChainGenesis, Error, Provenance, RuntimeAdapter}; +use near_chain::{ChainGenesis, Error, Provenance, RuntimeWithEpochManagerAdapter}; use near_chain_configs::Genesis; use near_client::test_utils::{create_chunk_with_transactions, TestEnv}; use near_client::ProcessTxResponse; @@ -317,7 +317,7 @@ fn test_request_chunks_for_orphan() { genesis.config.num_block_producer_seats_per_shard = vec![num_validators, num_validators, num_validators, num_validators]; let chain_genesis = ChainGenesis::new(&genesis); - let runtimes: Vec> = (0..2) + let runtimes: Vec> = (0..2) .map(|_| { Arc::new(nearcore::NightshadeRuntime::test_with_runtime_config_store( Path::new("."), @@ -325,7 +325,7 @@ fn test_request_chunks_for_orphan() { &genesis, TrackedConfig::AllShards, RuntimeConfigStore::test(), - )) as Arc + )) as Arc }) .collect(); let mut env = TestEnv::builder(chain_genesis) @@ -464,7 +464,7 @@ fn test_processing_chunks_sanity() { genesis.config.num_block_producer_seats_per_shard = vec![num_validators, num_validators, num_validators, num_validators]; let chain_genesis = ChainGenesis::new(&genesis); - let runtimes: Vec> = (0..2) + let runtimes: Vec> = (0..2) .map(|_| { Arc::new(nearcore::NightshadeRuntime::test_with_runtime_config_store( Path::new("."), @@ -472,7 +472,7 @@ fn test_processing_chunks_sanity() { &genesis, TrackedConfig::AllShards, RuntimeConfigStore::test(), - )) as Arc + )) as Arc }) .collect(); let mut env = TestEnv::builder(chain_genesis) @@ -574,7 +574,7 @@ impl ChunkForwardingOptimizationTestData { config.num_block_producer_seats = num_block_producers as u64; } let chain_genesis = ChainGenesis::new(&genesis); - let runtimes: Vec> = (0..num_clients) + let runtimes: Vec> = (0..num_clients) .map(|_| { Arc::new(nearcore::NightshadeRuntime::test_with_runtime_config_store( Path::new("."), @@ -582,7 +582,7 @@ impl ChunkForwardingOptimizationTestData { &genesis, 
TrackedConfig::AllShards, RuntimeConfigStore::test(), - )) as Arc + )) as Arc }) .collect(); let env = TestEnv::builder(chain_genesis) @@ -813,7 +813,7 @@ fn test_processing_blocks_async() { genesis.config.num_block_producer_seats_per_shard = vec![num_validators, num_validators, num_validators, num_validators]; let chain_genesis = ChainGenesis::new(&genesis); - let runtimes: Vec> = (0..2) + let runtimes: Vec> = (0..2) .map(|_| { Arc::new(nearcore::NightshadeRuntime::test_with_runtime_config_store( Path::new("."), @@ -821,7 +821,7 @@ fn test_processing_blocks_async() { &genesis, TrackedConfig::AllShards, RuntimeConfigStore::test(), - )) as Arc + )) as Arc }) .collect(); let mut env = TestEnv::builder(chain_genesis) diff --git a/integration-tests/src/tests/client/features/account_id_in_function_call_permission.rs b/integration-tests/src/tests/client/features/account_id_in_function_call_permission.rs index 7af8b9546fa..ff6fbf8cdd6 100644 --- a/integration-tests/src/tests/client/features/account_id_in_function_call_permission.rs +++ b/integration-tests/src/tests/client/features/account_id_in_function_call_permission.rs @@ -1,4 +1,4 @@ -use near_chain::{ChainGenesis, RuntimeAdapter}; +use near_chain::{ChainGenesis, RuntimeWithEpochManagerAdapter}; use near_chain_configs::Genesis; use near_client::test_utils::TestEnv; use near_client::ProcessTxResponse; @@ -39,7 +39,7 @@ fn test_account_id_in_function_call_permission_upgrade() { TrackedConfig::new_empty(), RuntimeConfigStore::new(None), ), - ) as Arc]) + ) as Arc]) .build() }; @@ -106,7 +106,7 @@ fn test_very_long_account_id() { TrackedConfig::new_empty(), RuntimeConfigStore::new(None), ), - ) as Arc]) + ) as Arc]) .build() }; diff --git a/integration-tests/src/tests/client/features/adversarial_behaviors.rs b/integration-tests/src/tests/client/features/adversarial_behaviors.rs index 9b9cf58656e..91f4aa323fe 100644 --- a/integration-tests/src/tests/client/features/adversarial_behaviors.rs +++ 
b/integration-tests/src/tests/client/features/adversarial_behaviors.rs @@ -1,6 +1,6 @@ use std::{collections::HashSet, path::Path, sync::Arc}; -use near_chain::{ChainGenesis, Provenance, RuntimeAdapter}; +use near_chain::{ChainGenesis, Provenance, RuntimeWithEpochManagerAdapter}; use near_chain_configs::Genesis; use near_client::test_utils::TestEnv; use near_network::types::{NetworkRequests, PeerManagerMessageRequest}; @@ -48,7 +48,7 @@ impl AdversarialBehaviorTestData { config.chunk_producer_kickout_threshold = 50; } let chain_genesis = ChainGenesis::new(&genesis); - let runtimes: Vec> = (0..num_clients) + let runtimes: Vec> = (0..num_clients) .map(|_| { Arc::new(nearcore::NightshadeRuntime::test_with_runtime_config_store( Path::new("."), @@ -56,7 +56,7 @@ impl AdversarialBehaviorTestData { &genesis, TrackedConfig::AllShards, RuntimeConfigStore::test(), - )) as Arc + )) as Arc }) .collect(); let env = TestEnv::builder(chain_genesis) diff --git a/integration-tests/src/tests/client/features/chunk_nodes_cache.rs b/integration-tests/src/tests/client/features/chunk_nodes_cache.rs index b3ddd536019..bf0f2daf464 100644 --- a/integration-tests/src/tests/client/features/chunk_nodes_cache.rs +++ b/integration-tests/src/tests/client/features/chunk_nodes_cache.rs @@ -1,6 +1,6 @@ use crate::tests::client::process_blocks::{deploy_test_contract, set_block_protocol_version}; use assert_matches::assert_matches; -use near_chain::{ChainGenesis, Provenance, RuntimeAdapter}; +use near_chain::{ChainGenesis, Provenance, RuntimeWithEpochManagerAdapter}; use near_chain_configs::Genesis; use near_client::test_utils::TestEnv; use near_crypto::{InMemorySigner, KeyType, Signer}; @@ -93,7 +93,7 @@ fn compare_node_counts() { genesis.config.epoch_length = epoch_length; genesis.config.protocol_version = old_protocol_version; let chain_genesis = ChainGenesis::new(&genesis); - let runtimes: Vec> = + let runtimes: Vec> = vec![Arc::new(nearcore::NightshadeRuntime::test_with_runtime_config_store( 
Path::new("../../../.."), create_test_store(), diff --git a/integration-tests/src/tests/client/features/increase_deployment_cost.rs b/integration-tests/src/tests/client/features/increase_deployment_cost.rs index ab17ff7fc93..a7c47d03a3c 100644 --- a/integration-tests/src/tests/client/features/increase_deployment_cost.rs +++ b/integration-tests/src/tests/client/features/increase_deployment_cost.rs @@ -1,5 +1,5 @@ use assert_matches::assert_matches; -use near_chain::{ChainGenesis, RuntimeAdapter}; +use near_chain::{ChainGenesis, RuntimeWithEpochManagerAdapter}; use near_chain_configs::Genesis; use near_client::test_utils::TestEnv; use near_crypto::{InMemorySigner, KeyType}; @@ -32,7 +32,7 @@ fn test_deploy_cost_increased() { genesis.config.epoch_length = epoch_length; genesis.config.protocol_version = old_protocol_version; let chain_genesis = ChainGenesis::new(&genesis); - let runtimes: Vec> = + let runtimes: Vec> = vec![Arc::new(nearcore::NightshadeRuntime::test_with_runtime_config_store( Path::new("../../../.."), create_test_store(), diff --git a/integration-tests/src/tests/client/features/limit_contract_functions_number.rs b/integration-tests/src/tests/client/features/limit_contract_functions_number.rs index de6a1a56f3b..5cc7f9566ce 100644 --- a/integration-tests/src/tests/client/features/limit_contract_functions_number.rs +++ b/integration-tests/src/tests/client/features/limit_contract_functions_number.rs @@ -1,6 +1,6 @@ use crate::tests::client::process_blocks::deploy_test_contract; use assert_matches::assert_matches; -use near_chain::{ChainGenesis, RuntimeAdapter}; +use near_chain::{ChainGenesis, RuntimeWithEpochManagerAdapter}; use near_chain_configs::Genesis; use near_client::test_utils::TestEnv; use near_primitives::errors::{ActionErrorKind, TxExecutionError}; @@ -31,7 +31,7 @@ fn verify_contract_limits_upgrade( genesis.config.epoch_length = epoch_length; genesis.config.protocol_version = old_protocol_version; let chain_genesis = ChainGenesis::new(&genesis); 
- let runtimes: Vec> = + let runtimes: Vec> = vec![Arc::new(nearcore::NightshadeRuntime::test_with_runtime_config_store( Path::new("../../../.."), create_test_store(), diff --git a/integration-tests/src/tests/client/features/lower_storage_key_limit.rs b/integration-tests/src/tests/client/features/lower_storage_key_limit.rs index be72f1d5318..1c120d52267 100644 --- a/integration-tests/src/tests/client/features/lower_storage_key_limit.rs +++ b/integration-tests/src/tests/client/features/lower_storage_key_limit.rs @@ -1,5 +1,5 @@ use assert_matches::assert_matches; -use near_chain::{ChainGenesis, Provenance, RuntimeAdapter}; +use near_chain::{ChainGenesis, Provenance, RuntimeWithEpochManagerAdapter}; use near_chain_configs::Genesis; use near_client::test_utils::TestEnv; use near_crypto::{InMemorySigner, KeyType, Signer}; @@ -42,14 +42,14 @@ fn protocol_upgrade() { genesis.config.epoch_length = epoch_length; genesis.config.protocol_version = old_protocol_version; let chain_genesis = ChainGenesis::new(&genesis); - let runtimes: Vec> = + let runtimes: Vec> = vec![Arc::new(nearcore::NightshadeRuntime::test_with_runtime_config_store( Path::new("."), create_test_store(), &genesis, TrackedConfig::AllShards, RuntimeConfigStore::new(None), - )) as Arc]; + )) as Arc]; let mut env = TestEnv::builder(chain_genesis).runtime_adapters(runtimes).build(); deploy_test_contract_with_protocol_version( diff --git a/integration-tests/src/tests/client/features/restore_receipts_after_fix_apply_chunks.rs b/integration-tests/src/tests/client/features/restore_receipts_after_fix_apply_chunks.rs index 474fc8f65a6..4d7c7763e65 100644 --- a/integration-tests/src/tests/client/features/restore_receipts_after_fix_apply_chunks.rs +++ b/integration-tests/src/tests/client/features/restore_receipts_after_fix_apply_chunks.rs @@ -1,5 +1,5 @@ use crate::tests::client::process_blocks::set_block_protocol_version; -use near_chain::{ChainGenesis, Provenance, RuntimeAdapter}; +use near_chain::{ChainGenesis, 
Provenance, RuntimeWithEpochManagerAdapter}; use near_chain_configs::Genesis; use near_client::test_utils::TestEnv; use near_o11y::testonly::init_test_logger; @@ -38,7 +38,7 @@ fn run_test( let migration_data = load_migration_data(&genesis.config.chain_id); let mut env = TestEnv::builder(chain_genesis) - .runtime_adapters(vec![Arc::new(runtime) as Arc]) + .runtime_adapters(vec![Arc::new(runtime) as Arc]) .build(); let get_restored_receipt_hashes = |migration_data: &MigrationData| -> HashSet { diff --git a/integration-tests/src/tests/client/features/wasmer2.rs b/integration-tests/src/tests/client/features/wasmer2.rs index 33efef68be3..935a4267994 100644 --- a/integration-tests/src/tests/client/features/wasmer2.rs +++ b/integration-tests/src/tests/client/features/wasmer2.rs @@ -7,6 +7,8 @@ use near_primitives::hash::CryptoHash; use near_primitives::transaction::{Action, FunctionCallAction, Transaction}; use nearcore::config::GenesisExt; +// This test fails on aarch because wasmer0 and wasmer2 are not available. +#[cfg_attr(all(target_arch = "aarch64", target_vendor = "apple"), ignore)] #[test] fn test_wasmer2_upgrade() { let mut capture = near_o11y::testonly::TracingCapture::enable(); diff --git a/integration-tests/src/tests/client/flat_storage.rs b/integration-tests/src/tests/client/flat_storage.rs index d1e5d176eb4..a63151733cc 100644 --- a/integration-tests/src/tests/client/flat_storage.rs +++ b/integration-tests/src/tests/client/flat_storage.rs @@ -1,94 +1,156 @@ +/// Tests which check correctness of background flat storage creation. 
use assert_matches::assert_matches; -use near_chain::{ChainGenesis, RuntimeAdapter}; +use near_chain::{ChainGenesis, RuntimeWithEpochManagerAdapter}; use near_chain_configs::Genesis; use near_client::test_utils::TestEnv; use near_o11y::testonly::init_test_logger; -use near_primitives_core::types::BlockHeight; +use near_primitives::shard_layout::{ShardLayout, ShardUId}; +use near_primitives::types::AccountId; +use near_primitives_core::types::{BlockHeight, NumShards}; use near_store::flat_state::{ - store_helper, FetchingStateStatus, FlatStorageStateStatus, NUM_PARTS_IN_ONE_STEP, + store_helper, FetchingStateStatus, FlatStorageCreationStatus, NUM_PARTS_IN_ONE_STEP, }; use near_store::test_utils::create_test_store; +#[cfg(feature = "protocol_feature_flat_state")] +use near_store::DBCol; +use near_store::{Store, TrieTraversalItem}; use nearcore::config::GenesisExt; use std::path::Path; +use std::str::FromStr; use std::sync::Arc; use std::thread; use std::time::Duration; +/// Height on which we start flat storage background creation. +const START_HEIGHT: BlockHeight = 4; + +/// Number of steps which should be enough to create flat storage. +const CREATION_TIMEOUT: BlockHeight = 30; + +/// Setup environment with one Near client for testing. +fn setup_env(genesis: &Genesis, store: Store) -> TestEnv { + let chain_genesis = ChainGenesis::new(genesis); + let runtimes: Vec> = + vec![Arc::new(nearcore::NightshadeRuntime::test(Path::new("../../../.."), store, genesis))]; + TestEnv::builder(chain_genesis.clone()).runtime_adapters(runtimes).build() +} + +/// Waits for flat storage creation on shard 0 for `CREATION_TIMEOUT` blocks. +/// We have a pause after processing each block because state data is being fetched in rayon threads, +/// but we expect it to finish in <30s because state is small and there is only one state part. +/// Returns next block height available to produce. 
+fn wait_for_flat_storage_creation(env: &mut TestEnv, start_height: BlockHeight) -> BlockHeight { + let store = env.clients[0].runtime_adapter.store().clone(); + let mut next_height = start_height; + let mut prev_status = store_helper::get_flat_storage_creation_status(&store, 0); + while next_height < start_height + CREATION_TIMEOUT { + env.produce_block(0, next_height); + env.clients[0].run_flat_storage_creation_step().unwrap(); + + let status = store_helper::get_flat_storage_creation_status(&store, 0); + // Check validity of state transition for flat storage creation. + match &prev_status { + FlatStorageCreationStatus::SavingDeltas => assert_matches!( + status, + FlatStorageCreationStatus::SavingDeltas + | FlatStorageCreationStatus::FetchingState(_) + ), + FlatStorageCreationStatus::FetchingState(_) => assert_matches!( + status, + FlatStorageCreationStatus::FetchingState(_) | FlatStorageCreationStatus::CatchingUp + ), + FlatStorageCreationStatus::CatchingUp => assert_matches!( + status, + FlatStorageCreationStatus::CatchingUp | FlatStorageCreationStatus::Ready + ), + _ => { + panic!("Invalid status {prev_status:?} observed during flat storage creation for height {next_height}"); + } + } + + prev_status = status; + next_height += 1; + if prev_status == FlatStorageCreationStatus::Ready { + break; + } + + thread::sleep(Duration::from_secs(1)); + } + let status = store_helper::get_flat_storage_creation_status(&store, 0); + assert_eq!( + status, + FlatStorageCreationStatus::Ready, + "Client couldn't create flat storage until block {next_height}, status: {status:?}" + ); + assert!(env.clients[0].runtime_adapter.get_flat_storage_state_for_shard(0).is_some()); + next_height +} + /// Check correctness of flat storage creation. 
#[test] fn test_flat_storage_creation() { init_test_logger(); let genesis = Genesis::test(vec!["test0".parse().unwrap()], 1); - let chain_genesis = ChainGenesis::new(&genesis); let store = create_test_store(); - // Process some blocks with flat storage. + // Process some blocks with flat storage. Then remove flat storage data from disk. { - let runtimes: Vec> = vec![Arc::new( - nearcore::NightshadeRuntime::test(Path::new("../../../.."), store.clone(), &genesis), - )]; - let mut env = - TestEnv::builder(chain_genesis.clone()).runtime_adapters(runtimes.clone()).build(); - for i in 1..4 { - env.produce_block(0, i); + let mut env = setup_env(&genesis, store.clone()); + for height in 1..START_HEIGHT { + env.produce_block(0, height); } if cfg!(feature = "protocol_feature_flat_state") { // If chain was initialized from scratch, flat storage state should be created. During block processing, flat - // storage head should be moved to block 1. + // storage head should be moved to block `START_HEIGHT - 3`. assert_eq!( - store_helper::get_flat_storage_state_status(&store, 0), - FlatStorageStateStatus::Ready + store_helper::get_flat_storage_creation_status(&store, 0), + FlatStorageCreationStatus::Ready ); let expected_flat_storage_head = - env.clients[0].chain.get_block_hash_by_height(1).unwrap(); + env.clients[0].chain.get_block_hash_by_height(START_HEIGHT - 3).unwrap(); assert_eq!(store_helper::get_flat_head(&store, 0), Some(expected_flat_storage_head)); - // Deltas for blocks 0 and 1 should not exist. - for i in 0..2 { - let block_hash = env.clients[0].chain.get_block_hash_by_height(i).unwrap(); + // Deltas for blocks until `START_HEIGHT - 2` should not exist. + for height in 0..START_HEIGHT - 2 { + let block_hash = env.clients[0].chain.get_block_hash_by_height(height).unwrap(); assert_eq!(store_helper::get_delta(&store, 0, block_hash), Ok(None)); } - // Deltas for blocks 2 and 3 should still exist, because they come after flat storage head. 
- for i in 2..4 { - let block_hash = env.clients[0].chain.get_block_hash_by_height(i).unwrap(); + // Deltas for blocks until `START_HEIGHT` should still exist, + // because they come after flat storage head. + for height in START_HEIGHT - 2..START_HEIGHT { + let block_hash = env.clients[0].chain.get_block_hash_by_height(height).unwrap(); assert_matches!(store_helper::get_delta(&store, 0, block_hash), Ok(Some(_))); } } else { assert_eq!( - store_helper::get_flat_storage_state_status(&store, 0), - FlatStorageStateStatus::DontCreate + store_helper::get_flat_storage_creation_status(&store, 0), + FlatStorageCreationStatus::DontCreate ); assert_eq!(store_helper::get_flat_head(&store, 0), None); } - } - // Remove flat storage head using low-level disk operation. Flat storage is implemented in such way that its - // existence is determined by existence of flat storage head. - #[cfg(feature = "protocol_feature_flat_state")] - { - let mut store_update = store.store_update(); - store_helper::remove_flat_head(&mut store_update, 0); - store_update.commit().unwrap(); + let block_hash = env.clients[0].chain.get_block_hash_by_height(START_HEIGHT - 1).unwrap(); + let epoch_id = env.clients[0].chain.runtime_adapter.get_epoch_id(&block_hash).unwrap(); + env.clients[0] + .chain + .runtime_adapter + .remove_flat_storage_state_for_shard(0, &epoch_id) + .unwrap(); } // Create new chain and runtime using the same store. It should produce next blocks normally, but now it should // think that flat storage does not exist and background creation should be initiated. 
- let runtimes: Vec> = vec![Arc::new(nearcore::NightshadeRuntime::test( - Path::new("../../../.."), - store.clone(), - &genesis, - ))]; - let mut env = TestEnv::builder(chain_genesis).runtime_adapters(runtimes.clone()).build(); - for i in 4..6 { - env.produce_block(0, i); + let mut env = setup_env(&genesis, store.clone()); + for height in START_HEIGHT..START_HEIGHT + 2 { + env.produce_block(0, height); } assert!(env.clients[0].runtime_adapter.get_flat_storage_state_for_shard(0).is_none()); if !cfg!(feature = "protocol_feature_flat_state") { assert_eq!( - store_helper::get_flat_storage_state_status(&store, 0), - FlatStorageStateStatus::DontCreate + store_helper::get_flat_storage_creation_status(&store, 0), + FlatStorageCreationStatus::DontCreate ); assert_eq!(store_helper::get_flat_head(&store, 0), None); // Stop the test here. @@ -98,71 +160,198 @@ fn test_flat_storage_creation() { // At first, flat storage state should start saving deltas. Deltas for all newly processed blocks should be saved to // disk. assert_eq!( - store_helper::get_flat_storage_state_status(&store, 0), - FlatStorageStateStatus::SavingDeltas + store_helper::get_flat_storage_creation_status(&store, 0), + FlatStorageCreationStatus::SavingDeltas ); - for i in 4..6 { - let block_hash = env.clients[0].chain.get_block_hash_by_height(i).unwrap(); + for height in START_HEIGHT..START_HEIGHT + 2 { + let block_hash = env.clients[0].chain.get_block_hash_by_height(height).unwrap(); assert_matches!(store_helper::get_delta(&store, 0, block_hash), Ok(Some(_))); } // Produce new block and run flat storage creation step. - // We started the node from height 3, and now final head should move to height 4. + // We started the node from height `START_HEIGHT - 1`, and now final head should move to height `START_HEIGHT`. // Because final head height became greater than height on which node started, // we must start fetching the state. 
- env.produce_block(0, 6); + env.produce_block(0, START_HEIGHT + 2); assert!(!env.clients[0].run_flat_storage_creation_step().unwrap()); - let final_block_hash = env.clients[0].chain.get_block_hash_by_height(4).unwrap(); + let final_block_hash = env.clients[0].chain.get_block_hash_by_height(START_HEIGHT).unwrap(); assert_eq!(store_helper::get_flat_head(&store, 0), Some(final_block_hash)); assert_eq!( - store_helper::get_flat_storage_state_status(&store, 0), - FlatStorageStateStatus::FetchingState(FetchingStateStatus { + store_helper::get_flat_storage_creation_status(&store, 0), + FlatStorageCreationStatus::FetchingState(FetchingStateStatus { part_id: 0, num_parts_in_step: NUM_PARTS_IN_ONE_STEP, num_parts: 1, }) ); - // Run chain for a couple of blocks and check that statuses switch to `CatchingUp` and then to `Ready`. - // State is being fetched in rayon threads, but we expect it to finish in <30s because state is small and there is - // only one state part. - const BLOCKS_TIMEOUT: BlockHeight = 30; - let start_height = 8; - let mut next_height = start_height; - let mut was_catching_up = false; - while next_height < start_height + BLOCKS_TIMEOUT { - env.produce_block(0, next_height); - env.clients[0].run_flat_storage_creation_step().unwrap(); - next_height += 1; - match store_helper::get_flat_storage_state_status(&store, 0) { - FlatStorageStateStatus::FetchingState(..) => { - assert!(!was_catching_up, "Flat storage state status inconsistency: it was catching up before fetching state"); - } - FlatStorageStateStatus::CatchingUp => { - was_catching_up = true; - } - FlatStorageStateStatus::Ready => { - assert!( - was_catching_up, - "Flat storage state is ready but there was no flat storage catchup observed" + wait_for_flat_storage_creation(&mut env, START_HEIGHT + 3); +} + +/// Check that client can create flat storage on some shard while it already exists on another shard. 
+#[test] +fn test_flat_storage_creation_two_shards() { + init_test_logger(); + let num_shards: NumShards = 2; + let genesis = Genesis::test_sharded_new_version( + vec!["test0".parse().unwrap()], + 1, + vec![1; num_shards as usize], + ); + let store = create_test_store(); + + // Process some blocks with flat storages for two shards. Then remove flat storage data from disk for shard 0. + { + let mut env = setup_env(&genesis, store.clone()); + for height in 1..START_HEIGHT { + env.produce_block(0, height); + } + + for shard_id in 0..num_shards { + if cfg!(feature = "protocol_feature_flat_state") { + assert_eq!( + store_helper::get_flat_storage_creation_status(&store, shard_id), + FlatStorageCreationStatus::Ready ); - break; - } - status @ _ => { - panic!( - "Unexpected flat storage state status for height {next_height}: {:?}", - status + } else { + assert_eq!( + store_helper::get_flat_storage_creation_status(&store, shard_id), + FlatStorageCreationStatus::DontCreate ); } } - thread::sleep(Duration::from_secs(1)); + + let block_hash = env.clients[0].chain.get_block_hash_by_height(START_HEIGHT - 1).unwrap(); + let epoch_id = env.clients[0].chain.runtime_adapter.get_epoch_id(&block_hash).unwrap(); + env.clients[0] + .chain + .runtime_adapter + .remove_flat_storage_state_for_shard(0, &epoch_id) + .unwrap(); } - if next_height == start_height + BLOCKS_TIMEOUT { - let status = store_helper::get_flat_storage_state_status(&store, 0); - panic!("Apparently, node didn't fetch the whole state in {BLOCKS_TIMEOUT} blocks. Current status: {:?}", status); + + if !cfg!(feature = "protocol_feature_flat_state") { + return; } - // Finally, check that flat storage state was created. - assert!(env.clients[0].run_flat_storage_creation_step().unwrap()); - assert!(env.clients[0].runtime_adapter.get_flat_storage_state_for_shard(0).is_some()); + // Check that flat storage is not ready for shard 0 but ready for shard 1. 
+ let mut env = setup_env(&genesis, store.clone()); + assert!(env.clients[0].runtime_adapter.get_flat_storage_state_for_shard(0).is_none()); + assert_eq!( + store_helper::get_flat_storage_creation_status(&store, 0), + FlatStorageCreationStatus::SavingDeltas + ); + assert!(env.clients[0].runtime_adapter.get_flat_storage_state_for_shard(1).is_some()); + assert_eq!( + store_helper::get_flat_storage_creation_status(&store, 1), + FlatStorageCreationStatus::Ready + ); + + wait_for_flat_storage_creation(&mut env, START_HEIGHT); +} + +/// Check that flat storage creation can be started from intermediate state where one +/// of state parts is already fetched. +#[test] +fn test_flat_storage_creation_start_from_state_part() { + init_test_logger(); + // Create several accounts to ensure that state is non-trivial. + let accounts = + (0..4).map(|i| AccountId::from_str(&format!("test{}", i)).unwrap()).collect::>(); + let genesis = Genesis::test(accounts, 1); + let store = create_test_store(); + let shard_layout = ShardLayout::v0_single_shard(); + + // Process some blocks with flat storage. + // Split state into two parts and return trie keys corresponding to each part. 
+ const NUM_PARTS: u64 = 2; + let trie_keys: Vec<_> = { + let mut env = setup_env(&genesis, store.clone()); + for height in 1..START_HEIGHT { + env.produce_block(0, height); + } + + if cfg!(feature = "protocol_feature_flat_state") { + assert_eq!( + store_helper::get_flat_storage_creation_status(&store, 0), + FlatStorageCreationStatus::Ready + ); + } else { + assert_eq!( + store_helper::get_flat_storage_creation_status(&store, 0), + FlatStorageCreationStatus::DontCreate + ); + return; + } + + let block_hash = env.clients[0].chain.get_block_hash_by_height(START_HEIGHT - 1).unwrap(); + let state_root = *env.clients[0] + .chain + .get_chunk_extra(&block_hash, &ShardUId::from_shard_id_and_layout(0, &shard_layout)) + .unwrap() + .state_root(); + let trie = env.clients[0] + .chain + .runtime_adapter + .get_trie_for_shard(0, &block_hash, state_root, true) + .unwrap(); + (0..NUM_PARTS) + .map(|part_id| { + let path_begin = trie.find_path_for_part_boundary(part_id, NUM_PARTS).unwrap(); + let path_end = trie.find_path_for_part_boundary(part_id + 1, NUM_PARTS).unwrap(); + let mut trie_iter = trie.iter().unwrap(); + let mut keys = vec![]; + for item in trie_iter.visit_nodes_interval(&path_begin, &path_end).unwrap() { + if let TrieTraversalItem { key: Some(trie_key), .. } = item { + keys.push(trie_key); + } + } + keys + }) + .collect() + }; + assert!(!trie_keys[0].is_empty()); + assert!(!trie_keys[1].is_empty()); + + #[cfg(feature = "protocol_feature_flat_state")] + { + // Remove keys of part 1 from the flat state. + // Manually set flat storage creation status to the step when it should start from fetching part 1. 
+ let mut store_update = store.store_update(); + for key in trie_keys[1].iter() { + store_update.delete(DBCol::FlatState, key); + } + store_helper::set_fetching_state_status( + &mut store_update, + 0, + FetchingStateStatus { part_id: 1, num_parts_in_step: 1, num_parts: NUM_PARTS }, + ); + store_update.commit().unwrap(); + + // Re-create runtime, check that flat storage is not created yet. + let mut env = setup_env(&genesis, store.clone()); + assert!(env.clients[0].runtime_adapter.get_flat_storage_state_for_shard(0).is_none()); + + // Run chain for a couple of blocks and check that flat storage for shard 0 is eventually created. + let next_height = wait_for_flat_storage_creation(&mut env, START_HEIGHT); + + // Check that all the keys are present in flat storage. + let block_hash = env.clients[0].chain.get_block_hash_by_height(next_height - 1).unwrap(); + let state_root = *env.clients[0] + .chain + .get_chunk_extra(&block_hash, &ShardUId::from_shard_id_and_layout(0, &shard_layout)) + .unwrap() + .state_root(); + let trie = env.clients[0] + .chain + .runtime_adapter + .get_trie_for_shard(0, &block_hash, state_root, true) + .unwrap(); + let flat_state = trie.flat_state.unwrap(); + for part_trie_keys in trie_keys.iter() { + for trie_key in part_trie_keys.iter() { + assert_matches!(flat_state.get_ref(trie_key), Ok(Some(_))); + } + } + } } diff --git a/integration-tests/src/tests/client/mod.rs b/integration-tests/src/tests/client/mod.rs index 5bf997639d0..a4732b73829 100644 --- a/integration-tests/src/tests/client/mod.rs +++ b/integration-tests/src/tests/client/mod.rs @@ -1,7 +1,6 @@ mod benchmarks; mod challenges; mod chunks_management; -#[cfg(feature = "cold_store")] mod cold_storage; mod features; mod flat_storage; diff --git a/integration-tests/src/tests/client/process_blocks.rs b/integration-tests/src/tests/client/process_blocks.rs index 4b62ce03fbb..06c8250a2ec 100644 --- a/integration-tests/src/tests/client/process_blocks.rs +++ 
b/integration-tests/src/tests/client/process_blocks.rs @@ -18,7 +18,7 @@ use near_chain::types::LatestKnown; use near_chain::validate::validate_chunk_with_chunk_extra; use near_chain::{ Block, BlockProcessingArtifact, ChainGenesis, ChainStore, ChainStoreAccess, Error, Provenance, - RuntimeAdapter, + RuntimeWithEpochManagerAdapter, }; use near_chain_configs::{ClientConfig, Genesis, DEFAULT_GC_NUM_EPOCHS_TO_KEEP}; use near_chunks::{ChunkStatus, ShardsManager}; @@ -71,8 +71,14 @@ use near_primitives::version::PROTOCOL_VERSION; use near_primitives::views::{ BlockHeaderView, FinalExecutionStatus, QueryRequest, QueryResponseKind, }; +use near_store::cold_storage::{update_cold_db, update_cold_head}; +use near_store::db::TestDB; +use near_store::metadata::DbKind; +use near_store::metadata::DB_VERSION; +use near_store::test_utils::create_test_node_storage_with_cold; use near_store::test_utils::create_test_store; -use near_store::{get, DBCol, TrieChanges}; +use near_store::{get, DBCol, Store, TrieChanges}; +use near_store::{NodeStorage, Temperature}; use nearcore::config::{GenesisExt, TESTING_INIT_BALANCE, TESTING_INIT_STAKE}; use nearcore::NEAR_BASE; use rand::prelude::StdRng; @@ -89,16 +95,19 @@ pub fn set_block_protocol_version( block.mut_header().resign(&validator_signer); } -pub fn create_nightshade_runtimes(genesis: &Genesis, n: usize) -> Vec> { - (0..n) - .map(|_| { - Arc::new(nearcore::NightshadeRuntime::test( - Path::new("../../../.."), - create_test_store(), - genesis, - )) as Arc - }) - .collect() +pub fn create_nightshade_runtimes( + genesis: &Genesis, + n: usize, +) -> Vec> { + (0..n).map(|_| create_nightshade_runtime_with_store(genesis, &create_test_store())).collect() +} + +pub fn create_nightshade_runtime_with_store( + genesis: &Genesis, + store: &Store, +) -> Arc { + Arc::new(nearcore::NightshadeRuntime::test(Path::new("../../../.."), store.clone(), genesis)) + as Arc } /// Produce `blocks_number` block in the given environment, starting from the given 
height. @@ -1487,6 +1496,12 @@ fn test_gc_with_epoch_length() { } } +/// When an epoch is very long there should not be anything garbage collected unexpectedly +#[test] +fn test_gc_long_epoch() { + test_gc_with_epoch_length_common(200); +} + /// Test that producing blocks works in archival mode with save_trie_changes enabled. /// In that case garbage collection should not happen but trie changes should be saved to the store. #[test] @@ -1502,6 +1517,9 @@ fn test_archival_save_trie_changes() { .archive(true) .save_trie_changes(true) .build(); + + env.clients[0].chain.store().store().set_db_kind(DbKind::Archive).unwrap(); + let mut blocks = vec![]; let genesis_block = env.clients[0].chain.get_block_by_height(0).unwrap(); blocks.push(genesis_block); @@ -1549,10 +1567,119 @@ fn test_archival_save_trie_changes() { } } -/// When an epoch is very long there should not be anything garbage collected unexpectedly +fn test_archival_gc_common( + storage: NodeStorage, + epoch_length: u64, + max_height: BlockHeight, + max_cold_head_height: BlockHeight, + legacy: bool, +) { + let mut genesis = Genesis::test(vec!["test0".parse().unwrap(), "test1".parse().unwrap()], 1); + genesis.config.epoch_length = epoch_length; + let mut chain_genesis = ChainGenesis::test(); + chain_genesis.epoch_length = epoch_length; + + let hot_store = &storage.get_store(Temperature::Hot); + + let runtime_adapter = create_nightshade_runtime_with_store(&genesis, &hot_store); + let mut env = TestEnv::builder(chain_genesis) + .runtime_adapters(vec![runtime_adapter]) + .archive(true) + .save_trie_changes(true) + .build(); + + let mut blocks = vec![]; + let genesis_block = env.clients[0].chain.get_block_by_height(0).unwrap(); + blocks.push(genesis_block); + + for i in 1..=max_height { + let block = env.clients[0].produce_block(i).unwrap().unwrap(); + env.process_block(0, block.clone(), Provenance::PRODUCED); + + let header = block.header(); + let epoch_id = header.epoch_id(); + let runtime_adapter = 
&env.clients[0].runtime_adapter; + let shard_layout = runtime_adapter.get_shard_layout(epoch_id).unwrap(); + + blocks.push(block); + + if i <= max_cold_head_height { + update_cold_db(storage.cold_db().unwrap(), hot_store, &shard_layout, &i).unwrap(); + update_cold_head(storage.cold_db().unwrap(), &hot_store, &i).unwrap(); + } + } + + // All blocks up until max_gc_height, exclusively, should be garbage collected. + // In the '_current' test this will be max_height - 5 epochs + // In the '_behind' test this will be the cold head height. + // In the '_migration' test this will be 0. + let mut max_gc_height = 0; + if !legacy { + max_gc_height = std::cmp::min( + max_height - epoch_length * DEFAULT_GC_NUM_EPOCHS_TO_KEEP, + max_cold_head_height, + ); + }; + + for i in 0..=max_height { + let client = &env.clients[0]; + let chain = &client.chain; + let block = &blocks[i as usize]; + + if i < max_gc_height { + assert!(chain.get_block(block.hash()).is_err()); + assert!(chain.get_block_by_height(i).is_err()); + } else { + assert!(chain.get_block(block.hash()).is_ok()); + assert!(chain.get_block_by_height(i).is_ok()); + assert!(chain.store().get_all_block_hashes_by_height(i as BlockHeight).is_ok()); + } + } +} + +/// This test verifies that archival node in split storage mode that is up to +/// date on the hot -> cold block copying is correctly garbage collecting +/// blocks older than 5 epochs. #[test] -fn test_gc_long_epoch() { - test_gc_with_epoch_length_common(200); +fn test_archival_gc_migration() { + // Split storage in the middle of migration has hot store kind set to archive. 
+ let storage = create_test_node_storage_with_cold(DB_VERSION, DbKind::Archive); + + let epoch_length = 10; + let max_height = epoch_length * (DEFAULT_GC_NUM_EPOCHS_TO_KEEP + 2); + let max_cold_head_height = 5; + + test_archival_gc_common(storage, epoch_length, max_height, max_cold_head_height, true); +} + +/// This test verifies that archival node in split storage mode that is up to +/// date on the hot -> cold block copying is correctly garbage collecting +/// blocks older than 5 epochs. +#[test] +fn test_archival_gc_split_storage_current() { + // Fully migrated split storage has each store configured with kind = temperature. + let storage = create_test_node_storage_with_cold(DB_VERSION, DbKind::Hot); + + let epoch_length = 10; + let max_height = epoch_length * (DEFAULT_GC_NUM_EPOCHS_TO_KEEP + 2); + let max_cold_head_height = max_height - 2 * epoch_length; + + test_archival_gc_common(storage, epoch_length, max_height, max_cold_head_height, false); +} + +/// This test verifies that archival node in split storage mode that is behind +/// on the hot -> cold block copying is correctly garbage collecting blocks +/// older than the cold head. +#[test] +fn test_archival_gc_split_storage_behind() { + // Fully migrated split storage has each store configured with kind = temperature. 
+ let storage = create_test_node_storage_with_cold(DB_VERSION, DbKind::Hot); + + let epoch_length = 10; + let max_height = epoch_length * (DEFAULT_GC_NUM_EPOCHS_TO_KEEP + 2); + let max_cold_head_height = 5; + + test_archival_gc_common(storage, epoch_length, max_height, max_cold_head_height, false); } #[test] @@ -2005,11 +2132,9 @@ fn test_invalid_block_root() { fn test_incorrect_validator_key_produce_block() { let genesis = Genesis::test(vec!["test0".parse().unwrap(), "test1".parse().unwrap()], 2); let chain_genesis = ChainGenesis::new(&genesis); - let runtime_adapter: Arc = Arc::new(nearcore::NightshadeRuntime::test( - Path::new("../../../.."), - create_test_store(), - &genesis, - )); + let runtime_adapter: Arc = Arc::new( + nearcore::NightshadeRuntime::test(Path::new("../../../.."), create_test_store(), &genesis), + ); let signer = Arc::new(InMemoryValidatorSigner::from_seed( "test0".parse().unwrap(), KeyType::ED25519, @@ -2901,7 +3026,7 @@ fn test_query_final_state() { } let query_final_state = |chain: &mut near_chain::Chain, - runtime_adapter: Arc, + runtime_adapter: Arc, account_id: AccountId| { let final_head = chain.store().final_head().unwrap(); let last_final_block = chain.get_block(&final_head.last_block_hash).unwrap(); @@ -3349,6 +3474,7 @@ fn test_catchup_no_sharding_change() { } } +/// These tests fail on aarch because the WasmtimeVM::precompile method doesn't populate the cache. 
mod contract_precompilation_tests { use super::*; use near_primitives::contract::ContractCode; @@ -3387,6 +3513,7 @@ mod contract_precompilation_tests { } #[test] + #[cfg_attr(all(target_arch = "aarch64", target_vendor = "apple"), ignore)] fn test_sync_and_call_cached_contract() { let num_clients = 2; let stores: Vec = (0..num_clients).map(|_| create_test_store()).collect(); @@ -3400,7 +3527,7 @@ mod contract_precompilation_tests { Path::new("../../../.."), store.clone(), &genesis, - )) as Arc + )) as Arc }) .collect(); @@ -3489,6 +3616,7 @@ mod contract_precompilation_tests { } #[test] + #[cfg_attr(all(target_arch = "aarch64", target_vendor = "apple"), ignore)] fn test_two_deployments() { let num_clients = 2; let stores: Vec = (0..num_clients).map(|_| create_test_store()).collect(); @@ -3502,7 +3630,7 @@ mod contract_precompilation_tests { Path::new("../../../.."), store.clone(), &genesis, - )) as Arc + )) as Arc }) .collect(); @@ -3570,6 +3698,7 @@ mod contract_precompilation_tests { } #[test] + #[cfg_attr(all(target_arch = "aarch64", target_vendor = "apple"), ignore)] fn test_sync_after_delete_account() { let num_clients = 3; let stores: Vec = (0..num_clients).map(|_| create_test_store()).collect(); @@ -3585,7 +3714,7 @@ mod contract_precompilation_tests { Path::new("../../../.."), store.clone(), &genesis, - )) as Arc + )) as Arc }) .collect(); diff --git a/integration-tests/src/tests/client/runtimes.rs b/integration-tests/src/tests/client/runtimes.rs index 2e485d9bbef..366c5ce6d29 100644 --- a/integration-tests/src/tests/client/runtimes.rs +++ b/integration-tests/src/tests/client/runtimes.rs @@ -2,7 +2,7 @@ //! This client works completely synchronously and must be operated by some async actor outside. 
use assert_matches::assert_matches; -use near_chain::{ChainGenesis, RuntimeAdapter}; +use near_chain::{ChainGenesis, RuntimeWithEpochManagerAdapter}; use near_chain_configs::Genesis; use near_chunks::test_utils::ChunkTestFixture; use near_chunks::ProcessPartialEncodedChunkResult; @@ -25,19 +25,22 @@ use std::collections::HashMap; use std::path::Path; use std::sync::Arc; -pub fn create_nightshade_runtimes(genesis: &Genesis, n: usize) -> Vec> { +pub fn create_nightshade_runtimes( + genesis: &Genesis, + n: usize, +) -> Vec> { (0..n) .map(|_| { Arc::new(nearcore::NightshadeRuntime::test( Path::new("../../../.."), create_test_store(), genesis, - )) as Arc + )) as Arc }) .collect() } -fn create_runtimes(n: usize) -> Vec> { +fn create_runtimes(n: usize) -> Vec> { let genesis = Genesis::test(vec!["test0".parse().unwrap(), "test1".parse().unwrap()], 1); create_nightshade_runtimes(&genesis, n) } diff --git a/integration-tests/src/tests/client/sandbox.rs b/integration-tests/src/tests/client/sandbox.rs index efcbe8a1b25..7f800c50282 100644 --- a/integration-tests/src/tests/client/sandbox.rs +++ b/integration-tests/src/tests/client/sandbox.rs @@ -1,7 +1,7 @@ use std::path::Path; use std::sync::Arc; -use near_chain::{ChainGenesis, Provenance, RuntimeAdapter}; +use near_chain::{ChainGenesis, Provenance, RuntimeWithEpochManagerAdapter}; use near_chain_configs::Genesis; use near_client::test_utils::TestEnv; use near_crypto::{InMemorySigner, KeyType}; @@ -24,7 +24,7 @@ fn test_setup() -> (TestEnv, InMemorySigner) { Path::new("../../../.."), create_test_store(), &genesis, - )) as Arc]) + )) as Arc]) .build(); let signer = InMemorySigner::from_seed("test0".parse().unwrap(), KeyType::ED25519, "test0"); send_tx( diff --git a/integration-tests/src/tests/network/runner.rs b/integration-tests/src/tests/network/runner.rs index 622948edcff..d9c78cbce5b 100644 --- a/integration-tests/src/tests/network/runner.rs +++ b/integration-tests/src/tests/network/runner.rs @@ -42,7 +42,7 @@ fn 
setup_network_node( chain_genesis: ChainGenesis, config: config::NetworkConfig, ) -> Addr { - let store = near_store::test_utils::create_test_node_storage(); + let store = near_store::test_utils::create_test_node_storage_default(); let num_validators = validators.len() as ValidatorId; @@ -76,6 +76,7 @@ fn setup_network_node( telemetry_actor, None, adv.clone(), + None, ) .0; let view_client_actor = start_view_client( diff --git a/integration-tests/src/tests/runtime/deployment.rs b/integration-tests/src/tests/runtime/deployment.rs index f8f9ef87a31..ccc0b6d5283 100644 --- a/integration-tests/src/tests/runtime/deployment.rs +++ b/integration-tests/src/tests/runtime/deployment.rs @@ -51,7 +51,7 @@ fn test_deploy_max_size_contract() { ) .unwrap(); assert_eq!(transaction_result.status, FinalExecutionStatus::SuccessValue(Vec::new())); - assert_eq!(transaction_result.receipts_outcome.len(), 1); + assert_eq!(transaction_result.receipts_outcome.len(), 2); // Deploy contract let wasm_binary = near_test_contracts::sized_contract(contract_size as usize); diff --git a/integration-tests/src/tests/runtime/sanity_checks.rs b/integration-tests/src/tests/runtime/sanity_checks.rs index 3e2956a248f..2eaa7a30933 100644 --- a/integration-tests/src/tests/runtime/sanity_checks.rs +++ b/integration-tests/src/tests/runtime/sanity_checks.rs @@ -63,7 +63,7 @@ fn setup_runtime_node_with_contract(wasm_binary: &[u8]) -> RuntimeNode { ) .unwrap(); assert_eq!(tx_result.status, FinalExecutionStatus::SuccessValue(Vec::new())); - assert_eq!(tx_result.receipts_outcome.len(), 1); + assert_eq!(tx_result.receipts_outcome.len(), 2); let tx_result = node_user.deploy_contract(test_contract_account(), wasm_binary.to_vec()).unwrap(); diff --git a/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_gas_profile.snap b/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_gas_profile.snap index 
23dc32f5334..0dcf589ac3f 100644 --- a/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_gas_profile.snap +++ b/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_gas_profile.snap @@ -327,6 +327,7 @@ expression: receipts_gas_profile gas_used: 0, }, ], + [], [ CostGasUsed { cost_category: "WASM_HOST_COST", @@ -369,6 +370,7 @@ expression: receipts_gas_profile gas_used: 0, }, ], + [], [ CostGasUsed { cost_category: "WASM_HOST_COST", @@ -381,6 +383,7 @@ expression: receipts_gas_profile gas_used: 0, }, ], + [], [ CostGasUsed { cost_category: "WASM_HOST_COST", @@ -393,6 +396,7 @@ expression: receipts_gas_profile gas_used: 0, }, ], + [], [ CostGasUsed { cost_category: "WASM_HOST_COST", @@ -407,6 +411,9 @@ expression: receipts_gas_profile ], [], [], + [], + [], + [], [ CostGasUsed { cost_category: "WASM_HOST_COST", @@ -420,6 +427,9 @@ expression: receipts_gas_profile }, ], [], + [], + [], + [], [ CostGasUsed { cost_category: "WASM_HOST_COST", @@ -432,6 +442,7 @@ expression: receipts_gas_profile gas_used: 0, }, ], + [], [ CostGasUsed { cost_category: "WASM_HOST_COST", @@ -459,4 +470,6 @@ expression: receipts_gas_profile gas_used: 2865522486, }, ], + [], + [], ] diff --git a/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_gas_profile_nightly.snap b/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_gas_profile_nightly.snap index 23dc32f5334..0dcf589ac3f 100644 --- a/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_gas_profile_nightly.snap +++ b/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_gas_profile_nightly.snap @@ -327,6 +327,7 @@ expression: receipts_gas_profile gas_used: 0, }, ], + [], [ CostGasUsed { cost_category: "WASM_HOST_COST", @@ 
-369,6 +370,7 @@ expression: receipts_gas_profile gas_used: 0, }, ], + [], [ CostGasUsed { cost_category: "WASM_HOST_COST", @@ -381,6 +383,7 @@ expression: receipts_gas_profile gas_used: 0, }, ], + [], [ CostGasUsed { cost_category: "WASM_HOST_COST", @@ -393,6 +396,7 @@ expression: receipts_gas_profile gas_used: 0, }, ], + [], [ CostGasUsed { cost_category: "WASM_HOST_COST", @@ -407,6 +411,9 @@ expression: receipts_gas_profile ], [], [], + [], + [], + [], [ CostGasUsed { cost_category: "WASM_HOST_COST", @@ -420,6 +427,9 @@ expression: receipts_gas_profile }, ], [], + [], + [], + [], [ CostGasUsed { cost_category: "WASM_HOST_COST", @@ -432,6 +442,7 @@ expression: receipts_gas_profile gas_used: 0, }, ], + [], [ CostGasUsed { cost_category: "WASM_HOST_COST", @@ -459,4 +470,6 @@ expression: receipts_gas_profile gas_used: 2865522486, }, ], + [], + [], ] diff --git a/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_gas_profile_nondeterministic.snap b/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_gas_profile_nondeterministic.snap index 958a4ca8bef..b9616f30a16 100644 --- a/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_gas_profile_nondeterministic.snap +++ b/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_gas_profile_nondeterministic.snap @@ -20,4 +20,5 @@ expression: receipts_gas_profile gas_used: 1645512, }, ], + [], ] diff --git a/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_status.snap b/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_status.snap index 0b96ef9da43..906eafd23d7 100644 --- a/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_status.snap +++ 
b/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_status.snap @@ -1,6 +1,7 @@ --- source: integration-tests/src/tests/runtime/sanity_checks.rs expression: receipts_status + --- - SuccessReceiptId: "11111111111111111111111111111111" - Failure: @@ -9,6 +10,7 @@ expression: receipts_status kind: FunctionCallError: ExecutionError: "Smart contract panicked: explicit guest panic" +- SuccessValue: "" - Failure: ActionError: index: 0 @@ -24,4 +26,15 @@ expression: receipts_status - SuccessValue: "" - SuccessValue: "" - SuccessValue: "" - +- SuccessValue: "" +- SuccessValue: "" +- SuccessValue: "" +- SuccessValue: "" +- SuccessValue: "" +- SuccessValue: "" +- SuccessValue: "" +- SuccessValue: "" +- SuccessValue: "" +- SuccessValue: "" +- SuccessValue: "" +- SuccessValue: "" diff --git a/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_status_nondeterministic.snap b/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_status_nondeterministic.snap index 349ba5b68f8..b0d1e43f5f5 100644 --- a/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_status_nondeterministic.snap +++ b/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_status_nondeterministic.snap @@ -1,6 +1,8 @@ --- source: integration-tests/src/tests/runtime/sanity_checks.rs expression: receipts_status + --- - SuccessValue: "" +- SuccessValue: "" diff --git a/integration-tests/src/tests/runtime/test_evil_contracts.rs b/integration-tests/src/tests/runtime/test_evil_contracts.rs index f8e77e8878f..2c9862a5153 100644 --- a/integration-tests/src/tests/runtime/test_evil_contracts.rs +++ b/integration-tests/src/tests/runtime/test_evil_contracts.rs @@ -28,7 +28,7 @@ fn setup_test_contract(wasm_binary: &[u8]) -> RuntimeNode { ) .unwrap(); 
assert_eq!(transaction_result.status, FinalExecutionStatus::SuccessValue(Vec::new())); - assert_eq!(transaction_result.receipts_outcome.len(), 1); + assert_eq!(transaction_result.receipts_outcome.len(), 2); let transaction_result = node_user.deploy_contract("test_contract".parse().unwrap(), wasm_binary.to_vec()).unwrap(); diff --git a/integration-tests/src/tests/standard_cases/mod.rs b/integration-tests/src/tests/standard_cases/mod.rs index c38b25e1fe0..3d409b2ae6b 100644 --- a/integration-tests/src/tests/standard_cases/mod.rs +++ b/integration-tests/src/tests/standard_cases/mod.rs @@ -67,7 +67,7 @@ pub fn test_smart_contract_simple(node: impl Node) { transaction_result.status, FinalExecutionStatus::SuccessValue(10i32.to_le_bytes().to_vec()) ); - assert_eq!(transaction_result.receipts_outcome.len(), 1); + assert_eq!(transaction_result.receipts_outcome.len(), 2); let new_root = node_user.get_state_root(); assert_ne!(root, new_root); } @@ -96,7 +96,7 @@ pub fn test_smart_contract_panic(node: impl Node) { .into() ) ); - assert_eq!(transaction_result.receipts_outcome.len(), 1); + assert_eq!(transaction_result.receipts_outcome.len(), 2); } pub fn test_smart_contract_self_call(node: impl Node) { @@ -110,7 +110,7 @@ pub fn test_smart_contract_self_call(node: impl Node) { transaction_result.status, FinalExecutionStatus::SuccessValue(10i32.to_le_bytes().to_vec()) ); - assert_eq!(transaction_result.receipts_outcome.len(), 1); + assert_eq!(transaction_result.receipts_outcome.len(), 2); let new_root = node_user.get_state_root(); assert_ne!(root, new_root); } @@ -134,7 +134,7 @@ pub fn test_smart_contract_bad_method_name(node: impl Node) { .into() ) ); - assert_eq!(transaction_result.receipts_outcome.len(), 1); + assert_eq!(transaction_result.receipts_outcome.len(), 2); let new_root = node_user.get_state_root(); assert_ne!(root, new_root); } @@ -158,7 +158,7 @@ pub fn test_smart_contract_empty_method_name_with_no_tokens(node: impl Node) { .into() ) ); - 
assert_eq!(transaction_result.receipts_outcome.len(), 1); + assert_eq!(transaction_result.receipts_outcome.len(), 2); let new_root = node_user.get_state_root(); assert_ne!(root, new_root); } @@ -182,7 +182,7 @@ pub fn test_smart_contract_empty_method_name_with_tokens(node: impl Node) { .into() ) ); - assert_eq!(transaction_result.receipts_outcome.len(), 1); + assert_eq!(transaction_result.receipts_outcome.len(), 3); let new_root = node_user.get_state_root(); assert_ne!(root, new_root); } @@ -205,7 +205,7 @@ pub fn test_smart_contract_with_args(node: impl Node) { transaction_result.status, FinalExecutionStatus::SuccessValue(5u64.to_le_bytes().to_vec()) ); - assert_eq!(transaction_result.receipts_outcome.len(), 1); + assert_eq!(transaction_result.receipts_outcome.len(), 2); let new_root = node_user.get_state_root(); assert_ne!(root, new_root); } @@ -218,7 +218,7 @@ pub fn test_async_call_with_logs(node: impl Node) { .function_call(account_id.clone(), bob_account(), "log_something", vec![], 10u64.pow(14), 0) .unwrap(); assert_eq!(transaction_result.status, FinalExecutionStatus::SuccessValue(Vec::new())); - assert_eq!(transaction_result.receipts_outcome.len(), 1); + assert_eq!(transaction_result.receipts_outcome.len(), 2); let new_root = node_user.get_state_root(); assert_ne!(root, new_root); assert_eq!(transaction_result.receipts_outcome[0].outcome.logs[0], "hello".to_string()); @@ -261,7 +261,7 @@ pub fn test_upload_contract(node: impl Node) { ) .unwrap(); assert_eq!(transaction_result.status, FinalExecutionStatus::SuccessValue(Vec::new())); - assert_eq!(transaction_result.receipts_outcome.len(), 1); + assert_eq!(transaction_result.receipts_outcome.len(), 2); node_user.view_contract_code(&eve_dot_alice_account()).expect_err( "RpcError { code: -32000, message: \"Server error\", data: Some(String(\"contract code of account eve.alice.near does not exist while viewing\")) }"); @@ -307,7 +307,7 @@ pub fn test_send_money(node: impl Node) { let transaction_result = 
node_user.send_money(account_id.clone(), bob_account(), money_used).unwrap(); assert_eq!(transaction_result.status, FinalExecutionStatus::SuccessValue(Vec::new())); - assert_eq!(transaction_result.receipts_outcome.len(), 1); + assert_eq!(transaction_result.receipts_outcome.len(), 2); let new_root = node_user.get_state_root(); assert_ne!(root, new_root); assert_eq!(node_user.get_access_key_nonce_for_signer(account_id).unwrap(), 1); @@ -340,7 +340,7 @@ pub fn transfer_tokens_implicit_account(node: impl Node) { let transaction_result = node_user.send_money(account_id.clone(), receiver_id.clone(), tokens_used).unwrap(); assert_eq!(transaction_result.status, FinalExecutionStatus::SuccessValue(Vec::new())); - assert_eq!(transaction_result.receipts_outcome.len(), 1); + assert_eq!(transaction_result.receipts_outcome.len(), 2); let new_root = node_user.get_state_root(); assert_ne!(root, new_root); assert_eq!(node_user.get_access_key_nonce_for_signer(account_id).unwrap(), 1); @@ -364,7 +364,7 @@ pub fn transfer_tokens_implicit_account(node: impl Node) { node_user.send_money(account_id.clone(), receiver_id.clone(), tokens_used).unwrap(); assert_eq!(transaction_result.status, FinalExecutionStatus::SuccessValue(Vec::new())); - assert_eq!(transaction_result.receipts_outcome.len(), 1); + assert_eq!(transaction_result.receipts_outcome.len(), 2); let new_root = node_user.get_state_root(); assert_ne!(root, new_root); assert_eq!(node_user.get_access_key_nonce_for_signer(account_id).unwrap(), 2); @@ -423,7 +423,7 @@ pub fn trying_to_create_implicit_account(node: impl Node) { .into() ) ); - assert_eq!(transaction_result.receipts_outcome.len(), 1); + assert_eq!(transaction_result.receipts_outcome.len(), 3); let new_root = node_user.get_state_root(); assert_ne!(root, new_root); assert_eq!(node_user.get_access_key_nonce_for_signer(account_id).unwrap(), 1); @@ -450,7 +450,7 @@ pub fn test_smart_contract_reward(node: impl Node) { transaction_result.status, 
FinalExecutionStatus::SuccessValue(10i32.to_le_bytes().to_vec()) ); - assert_eq!(transaction_result.receipts_outcome.len(), 1); + assert_eq!(transaction_result.receipts_outcome.len(), 2); let new_root = node_user.get_state_root(); assert_ne!(root, new_root); @@ -504,7 +504,7 @@ pub fn test_refund_on_send_money_to_non_existent_account(node: impl Node) { .into() ) ); - assert_eq!(transaction_result.receipts_outcome.len(), 1); + assert_eq!(transaction_result.receipts_outcome.len(), 3); let new_root = node_user.get_state_root(); assert_ne!(root, new_root); let result1 = node_user.view_account(account_id).unwrap(); @@ -535,7 +535,7 @@ pub fn test_create_account(node: impl Node) { let create_account_cost = fee_helper.create_account_transfer_full_key_cost(); assert_eq!(transaction_result.status, FinalExecutionStatus::SuccessValue(Vec::new())); - assert_eq!(transaction_result.receipts_outcome.len(), 1); + assert_eq!(transaction_result.receipts_outcome.len(), 2); let new_root = node_user.get_state_root(); assert_ne!(root, new_root); assert_eq!(node_user.get_access_key_nonce_for_signer(account_id).unwrap(), 1); @@ -568,7 +568,7 @@ pub fn test_create_account_again(node: impl Node) { .unwrap(); assert_eq!(transaction_result.status, FinalExecutionStatus::SuccessValue(Vec::new())); - assert_eq!(transaction_result.receipts_outcome.len(), 1); + assert_eq!(transaction_result.receipts_outcome.len(), 2); let fee_helper = fee_helper(&node); let create_account_cost = fee_helper.create_account_transfer_full_key_cost(); @@ -600,7 +600,7 @@ pub fn test_create_account_again(node: impl Node) { .into() ) ); - assert_eq!(transaction_result.receipts_outcome.len(), 1); + assert_eq!(transaction_result.receipts_outcome.len(), 3); let new_root = node_user.get_state_root(); assert_ne!(root, new_root); assert_eq!(node_user.get_access_key_nonce_for_signer(account_id).unwrap(), 2); @@ -658,7 +658,7 @@ pub fn test_create_account_failure_already_exists(node: impl Node) { .into() ) ); - 
assert_eq!(transaction_result.receipts_outcome.len(), 1); + assert_eq!(transaction_result.receipts_outcome.len(), 3); let new_root = node_user.get_state_root(); assert_ne!(root, new_root); assert_eq!(node_user.get_access_key_nonce_for_signer(account_id).unwrap(), 1); @@ -1001,7 +1001,7 @@ pub fn test_access_key_smart_contract(node: impl Node) { prepaid_gas + exec_gas - transaction_result.receipts_outcome[0].outcome.gas_burnt, ); - assert_eq!(transaction_result.receipts_outcome.len(), 1); + assert_eq!(transaction_result.receipts_outcome.len(), 2); let new_root = node_user.get_state_root(); assert_ne!(root, new_root); @@ -1161,7 +1161,7 @@ pub fn test_unstake_while_not_staked(node: impl Node) { ) .unwrap(); assert_eq!(transaction_result.status, FinalExecutionStatus::SuccessValue(Vec::new())); - assert_eq!(transaction_result.receipts_outcome.len(), 1); + assert_eq!(transaction_result.receipts_outcome.len(), 2); let transaction_result = node_user.stake(eve_dot_alice_account(), node.block_signer().public_key(), 0).unwrap(); assert_eq!( @@ -1259,7 +1259,7 @@ pub fn test_delete_account_fail(node: impl Node) { .into() ) ); - assert_eq!(transaction_result.receipts_outcome.len(), 1); + assert_eq!(transaction_result.receipts_outcome.len(), 2); assert!(node.user().view_account(&bob_account()).is_ok()); assert_eq!( node.user().view_account(&node.account_id().unwrap()).unwrap().amount, @@ -1281,7 +1281,7 @@ pub fn test_delete_account_no_account(node: impl Node) { .into() ) ); - assert_eq!(transaction_result.receipts_outcome.len(), 1); + assert_eq!(transaction_result.receipts_outcome.len(), 2); } pub fn test_delete_account_while_staking(node: impl Node) { @@ -1384,7 +1384,7 @@ pub fn test_contract_write_key_value_cost(node: impl Node) { ) .unwrap(); assert_matches!(transaction_result.status, FinalExecutionStatus::SuccessValue(_)); - assert_eq!(transaction_result.receipts_outcome.len(), 1); + assert_eq!(transaction_result.receipts_outcome.len(), 2); let trie_nodes_count = 
get_trie_nodes_count( &transaction_result.receipts_outcome[0].outcome.metadata, diff --git a/integration-tests/src/user/runtime_user.rs b/integration-tests/src/user/runtime_user.rs index 26019272429..9d95be1a3ca 100644 --- a/integration-tests/src/user/runtime_user.rs +++ b/integration-tests/src/user/runtime_user.rs @@ -170,12 +170,6 @@ impl RuntimeUser { block_hash: Default::default(), }]; for hash in &receipt_ids { - if let Some(receipt) = self.receipts.borrow().get(hash) { - let is_refund = receipt.predecessor_id.is_system(); - if is_refund { - continue; - } - } transactions.extend(self.get_recursive_transaction_results(hash).into_iter()); } transactions diff --git a/nearcore/Cargo.toml b/nearcore/Cargo.toml index 88855e32052..47f3f62eb6d 100644 --- a/nearcore/Cargo.toml +++ b/nearcore/Cargo.toml @@ -105,7 +105,12 @@ protocol_feature_fix_staking_threshold = [ protocol_feature_fix_contract_loading_cost = [ "near-vm-runner/protocol_feature_fix_contract_loading_cost", ] -protocol_feature_flat_state = ["near-client/protocol_feature_flat_state", "near-store/protocol_feature_flat_state", "near-chain/protocol_feature_flat_state", "node-runtime/protocol_feature_flat_state"] +protocol_feature_flat_state = ["near-store/protocol_feature_flat_state", "near-chain/protocol_feature_flat_state", "node-runtime/protocol_feature_flat_state"] +protocol_feature_nep366_delegate_action = [ + "node-runtime/protocol_feature_nep366_delegate_action", + "near-primitives/protocol_feature_nep366_delegate_action", + "near-rosetta-rpc/protocol_feature_nep366_delegate_action", +] protocol_feature_zero_balance_account = ["node-runtime/protocol_feature_zero_balance_account"] nightly = [ @@ -114,9 +119,11 @@ nightly = [ "near-client/nightly", "near-epoch-manager/nightly", "near-store/nightly", + "near-rosetta-rpc/nightly", "protocol_feature_fix_staking_threshold", "protocol_feature_fix_contract_loading_cost", "protocol_feature_flat_state", + "protocol_feature_nep366_delegate_action", ] 
nightly_protocol = [ "near-primitives/nightly_protocol", @@ -135,5 +142,3 @@ sandbox = [ "near-jsonrpc/sandbox", ] io_trace = ["near-vm-runner/io_trace"] - -cold_store = ["near-store/cold_store"] diff --git a/nearcore/benches/store.rs b/nearcore/benches/store.rs index 81e4776f60f..bf7de434d47 100644 --- a/nearcore/benches/store.rs +++ b/nearcore/benches/store.rs @@ -2,7 +2,7 @@ extern crate bencher; use bencher::Bencher; -use near_chain::{ChainStore, ChainStoreAccess, RuntimeAdapter}; +use near_chain::{types::RuntimeAdapter, ChainStore, ChainStoreAccess}; use near_chain_configs::GenesisValidationMode; use near_o11y::testonly::init_integration_logger; use near_primitives::types::StateRoot; diff --git a/nearcore/src/config.rs b/nearcore/src/config.rs index f1a6db1f4f5..0cba387a96c 100644 --- a/nearcore/src/config.rs +++ b/nearcore/src/config.rs @@ -17,7 +17,7 @@ use tracing::{info, warn}; use near_chain_configs::{ get_initial_supply, ClientConfig, GCConfig, Genesis, GenesisConfig, GenesisValidationMode, - LogSummaryStyle, + LogSummaryStyle, MutableConfigValue, }; use near_crypto::{InMemorySigner, KeyFile, KeyType, PublicKey, Signer}; #[cfg(feature = "json_rpc")] @@ -31,8 +31,8 @@ use near_primitives::shard_layout::account_id_to_shard_id; use near_primitives::shard_layout::ShardLayout; use near_primitives::state_record::StateRecord; use near_primitives::types::{ - AccountId, AccountInfo, Balance, BlockHeightDelta, EpochHeight, Gas, NumBlocks, NumSeats, - NumShards, ShardId, + AccountId, AccountInfo, Balance, BlockHeight, BlockHeightDelta, EpochHeight, Gas, NumBlocks, + NumSeats, NumShards, ShardId, }; use near_primitives::utils::{generate_random_string, get_num_seats_per_shard}; use near_primitives::validator_signer::{InMemoryValidatorSigner, ValidatorSigner}; @@ -201,6 +201,12 @@ fn default_trie_viewer_state_size_limit() -> Option { Some(50_000) } +#[derive(thiserror::Error, Debug)] +pub enum ConfigValidationError { + #[error("Configuration with archive = false and 
save_trie_changes = false is not supported because non-archival nodes must save trie changes in order to do do garbage collection.")] + TrieChanges, +} + #[derive(Serialize, Deserialize, Clone, Debug)] pub struct Consensus { /// Minimum number of peers to start syncing. @@ -323,7 +329,7 @@ pub struct Config { /// Different parameters to configure underlying storage. pub store: near_store::StoreConfig, /// Different parameters to configure underlying cold storage. - #[cfg(feature = "cold_store")] + /// This feature is under development, do not use in production. #[serde(default, skip_serializing_if = "Option::is_none")] pub cold_store: Option, @@ -338,6 +344,8 @@ pub struct Config { /// Deprecated; use `store.migration_snapshot` instead. #[serde(skip_serializing_if = "Option::is_none")] pub db_migration_snapshot_path: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub expected_shutdown: Option, } fn is_false(value: &bool) -> bool { @@ -372,8 +380,8 @@ impl Default for Config { db_migration_snapshot_path: None, use_db_migration_snapshot: None, store: near_store::StoreConfig::default(), - #[cfg(feature = "cold_store")] cold_store: None, + expected_shutdown: None, } } } @@ -411,15 +419,23 @@ impl Config { ); } - assert!( - config.archive || config.save_trie_changes, - "Configuration with archive = false and save_trie_changes = false is not supported \ - because non-archival nodes must save trie changes in order to do do garbage collection." - ); - + config.validate()?; Ok(config) } + /// Does semantic config validation. + /// This is the place to check that all config values make sense and fit well together. + /// `validate()` is called every time `config.json` is read. + fn validate(&self) -> Result<(), ConfigValidationError> { + if !self.archive && !self.save_trie_changes { + Err(ConfigValidationError::TrieChanges) + } else { + Ok(()) + } + // TODO: Add more config validation. + // TODO: Validate `ClientConfig` instead. 
+ } + pub fn write_to_file(&self, path: &Path) -> std::io::Result<()> { let mut file = File::create(path)?; let str = serde_json::to_string_pretty(self)?; @@ -571,6 +587,10 @@ impl NearConfig { version: Default::default(), chain_id: genesis.config.chain_id.clone(), rpc_addr: config.rpc_addr().map(|addr| addr.to_owned()), + expected_shutdown: MutableConfigValue::new( + config.expected_shutdown, + "expected_shutdown", + ), block_production_tracking_delay: config.consensus.block_production_tracking_delay, min_block_production_delay: config.consensus.min_block_production_delay, max_block_production_delay: config.consensus.max_block_production_delay, diff --git a/nearcore/src/dyn_config.rs b/nearcore/src/dyn_config.rs new file mode 100644 index 00000000000..e68fd67b3a6 --- /dev/null +++ b/nearcore/src/dyn_config.rs @@ -0,0 +1,77 @@ +use crate::config::Config; +use near_chain_configs::UpdateableClientConfig; +use near_dyn_configs::{UpdateableConfigLoaderError, UpdateableConfigs}; +use near_o11y::log_config::LogConfig; +use serde::Deserialize; +use std::path::{Path, PathBuf}; + +const LOG_CONFIG_FILENAME: &str = "log_config.json"; + +/// This function gets called at the startup and each time a config needs to be reloaded. 
+pub fn read_updateable_configs( + home_dir: &Path, +) -> Result { + let mut errs = vec![]; + let log_config = match read_log_config(home_dir) { + Ok(config) => config, + Err(err) => { + errs.push(err); + None + } + }; + let updateable_client_config = + match Config::from_file(&home_dir.join(crate::config::CONFIG_FILENAME)) + .map(get_updateable_client_config) + { + Ok(config) => Some(config), + Err(err) => { + errs.push(UpdateableConfigLoaderError::ConfigFileError { + file: PathBuf::from(crate::config::CONFIG_FILENAME), + err, + }); + None + } + }; + if errs.is_empty() { + crate::metrics::CONFIG_CORRECT.set(1); + Ok(UpdateableConfigs { log_config, client_config: updateable_client_config }) + } else { + tracing::warn!(target: "neard", "Dynamically updateable configs are not valid. Please fix this ASAP otherwise the node will be unable to restart: {:?}", &errs); + crate::metrics::CONFIG_CORRECT.set(0); + Err(UpdateableConfigLoaderError::Errors(errs)) + } +} + +pub fn get_updateable_client_config(config: Config) -> UpdateableClientConfig { + // All fields that can be updated while the node is running should be explicitly set here. + // Keep this list in-sync with `core/dyn-configs/README.md`. 
+ UpdateableClientConfig { expected_shutdown: config.expected_shutdown } +} + +fn read_log_config(home_dir: &Path) -> Result, UpdateableConfigLoaderError> { + read_json_config::(&home_dir.join(LOG_CONFIG_FILENAME)) +} + +fn read_json_config( + path: &Path, +) -> Result, UpdateableConfigLoaderError> +where + for<'a> T: Deserialize<'a>, +{ + match std::fs::read_to_string(path) { + Ok(config_str) => match serde_json::from_str::(&config_str) { + Ok(config) => { + tracing::info!(target: "neard", config=?config, "Changing the config {path:?}."); + return Ok(Some(config)); + } + Err(err) => Err(UpdateableConfigLoaderError::Parse { file: path.to_path_buf(), err }), + }, + Err(err) => match err.kind() { + std::io::ErrorKind::NotFound => { + tracing::info!(target: "neard", ?err, "Reset the config {path:?} because the config file doesn't exist."); + return Ok(None); + } + _ => Err(UpdateableConfigLoaderError::OpenAndRead { file: path.to_path_buf(), err }), + }, + } +} diff --git a/nearcore/src/lib.rs b/nearcore/src/lib.rs index 3ce9d2f0da3..c452623de74 100644 --- a/nearcore/src/lib.rs +++ b/nearcore/src/lib.rs @@ -6,7 +6,7 @@ use actix_rt::ArbiterHandle; use actix_web; use anyhow::Context; use near_chain::{Chain, ChainGenesis}; -use near_client::{start_client, start_view_client, ClientActor, ViewClientActor}; +use near_client::{start_client, start_view_client, ClientActor, ConfigUpdater, ViewClientActor}; use near_network::time; use near_network::types::NetworkRecipient; use near_network::PeerManagerActor; @@ -15,12 +15,13 @@ use near_store::{DBCol, Mode, NodeStorage, StoreOpenerError, Temperature}; use near_telemetry::TelemetryActor; use std::path::{Path, PathBuf}; use std::sync::Arc; -use tokio::sync::oneshot; +use tokio::sync::broadcast; use tracing::{info, trace}; pub mod append_only_map; pub mod config; mod download_file; +pub mod dyn_config; mod metrics; pub mod migrations; mod runtime; @@ -55,10 +56,7 @@ fn open_storage(home_dir: &Path, near_config: &mut NearConfig) -> 
anyhow::Result let opener = NodeStorage::opener( home_dir, &near_config.config.store, - #[cfg(feature = "cold_store")] near_config.config.cold_store.as_ref(), - #[cfg(not(feature = "cold_store"))] - None, ) .with_migrator(&migrator) .expect_archive(near_config.client_config.archive); @@ -153,7 +151,7 @@ pub struct NearNode { } pub fn start_with_config(home_dir: &Path, config: NearConfig) -> anyhow::Result { - start_with_config_and_synchronization(home_dir, config, None) + start_with_config_and_synchronization(home_dir, config, None, None) } pub fn start_with_config_and_synchronization( @@ -161,7 +159,8 @@ pub fn start_with_config_and_synchronization( mut config: NearConfig, // 'shutdown_signal' will notify the corresponding `oneshot::Receiver` when an instance of // `ClientActor` gets dropped. - shutdown_signal: Option>, + shutdown_signal: Option>, + config_updater: Option, ) -> anyhow::Result { let store = open_storage(home_dir, &mut config)?; @@ -199,8 +198,9 @@ pub fn start_with_config_and_synchronization( network_adapter.clone(), config.validator_signer, telemetry, - shutdown_signal, + shutdown_signal.clone(), adv, + config_updater, ); #[allow(unused_mut)] diff --git a/nearcore/src/metrics.rs b/nearcore/src/metrics.rs index 4304119b0e4..002b2bdf2b1 100644 --- a/nearcore/src/metrics.rs +++ b/nearcore/src/metrics.rs @@ -1,4 +1,6 @@ -use near_o11y::metrics::{linear_buckets, try_create_histogram_vec, HistogramVec}; +use near_o11y::metrics::{ + linear_buckets, try_create_histogram_vec, try_create_int_gauge, HistogramVec, IntGauge, +}; use once_cell::sync::Lazy; pub static APPLY_CHUNK_DELAY: Lazy = Lazy::new(|| { @@ -8,7 +10,7 @@ pub static APPLY_CHUNK_DELAY: Lazy = Lazy::new(|| { &["tgas_ceiling"], Some(linear_buckets(0.0, 0.05, 50).unwrap()), ) - .unwrap() + .unwrap() }); pub static SECONDS_PER_PETAGAS: Lazy = Lazy::new(|| { @@ -24,3 +26,11 @@ pub static SECONDS_PER_PETAGAS: Lazy = Lazy::new(|| { ) .unwrap() }); + +pub(crate) static CONFIG_CORRECT: Lazy = 
Lazy::new(|| { + try_create_int_gauge( + "near_config_correct", + "Are the current dynamically loadable configs correct", + ) + .unwrap() +}); diff --git a/nearcore/src/runtime/mod.rs b/nearcore/src/runtime/mod.rs index fa2dbfca884..f47e0a40e2e 100644 --- a/nearcore/src/runtime/mod.rs +++ b/nearcore/src/runtime/mod.rs @@ -5,8 +5,10 @@ use crate::NearConfig; use borsh::ser::BorshSerialize; use borsh::BorshDeserialize; use errors::FromStateViewerErrors; -use near_chain::types::{ApplySplitStateResult, ApplyTransactionResult, BlockHeaderInfo}; -use near_chain::{Error, RuntimeAdapter}; +use near_chain::types::{ + ApplySplitStateResult, ApplyTransactionResult, BlockHeaderInfo, RuntimeAdapter, Tip, +}; +use near_chain::{Error, RuntimeWithEpochManagerAdapter}; use near_chain_configs::{ Genesis, GenesisConfig, ProtocolConfig, DEFAULT_GC_NUM_EPOCHS_TO_KEEP, MIN_GC_NUM_EPOCHS_TO_KEEP, @@ -48,13 +50,14 @@ use near_primitives::views::{ }; use near_store::flat_state::ChainAccessForFlatStorage; use near_store::flat_state::{ - store_helper, FlatStateFactory, FlatStorageState, FlatStorageStateStatus, + store_helper, FlatStateFactory, FlatStorageCreationStatus, FlatStorageState, }; +use near_store::metadata::DbKind; use near_store::split_state::get_delayed_receipts; use near_store::{ get_genesis_hash, get_genesis_state_roots, set_genesis_hash, set_genesis_state_roots, ApplyStatePartResult, DBCol, PartialStorage, ShardTries, Store, StoreCompiledContractCache, - StoreUpdate, Trie, TrieConfig, WrappedTrieChanges, + StoreUpdate, Trie, TrieConfig, WrappedTrieChanges, COLD_HEAD_KEY, }; use near_vm_runner::precompile_contract; use node_runtime::adapter::ViewRuntimeAdapter; @@ -613,6 +616,37 @@ impl NightshadeRuntime { }); Ok(()) } + + fn get_gc_stop_height_impl(&self, block_hash: &CryptoHash) -> Result { + let epoch_manager = self.epoch_manager.read(); + // an epoch must have a first block. 
+ let epoch_first_block = *epoch_manager.get_block_info(block_hash)?.epoch_first_block(); + let epoch_first_block_info = epoch_manager.get_block_info(&epoch_first_block)?; + // maintain pointers to avoid cloning. + let mut last_block_in_prev_epoch = *epoch_first_block_info.prev_hash(); + let mut epoch_start_height = epoch_first_block_info.height(); + for _ in 0..self.gc_num_epochs_to_keep - 1 { + let epoch_first_block = + *epoch_manager.get_block_info(&last_block_in_prev_epoch)?.epoch_first_block(); + let epoch_first_block_info = epoch_manager.get_block_info(&epoch_first_block)?; + epoch_start_height = epoch_first_block_info.height(); + last_block_in_prev_epoch = *epoch_first_block_info.prev_hash(); + } + + // An archival node with split storage should perform garbage collection + // on the hot storage but not beyond the COLD_HEAD. In order to determine + // if split storage is enabled *and* that the migration to split storage + // is finished we can check the store kind. It's only set to hot after the + // migration is finished. 
+ let kind = self.store.get_db_kind()?; + let cold_head = self.store.get_ser::(DBCol::BlockMisc, COLD_HEAD_KEY)?; + + if let (Some(DbKind::Hot), Some(cold_head)) = (kind, cold_head) { + let cold_head_height = cold_head.height; + return Ok(std::cmp::min(epoch_start_height, cold_head_height + 1)); + } + Ok(epoch_start_height) + } } fn format_total_gas_burnt(gas: Gas) -> String { @@ -704,28 +738,20 @@ impl RuntimeAdapter for NightshadeRuntime { self.flat_state_factory.get_flat_storage_state_for_shard(shard_id) } - fn try_create_flat_storage_state_for_shard( + fn get_flat_storage_creation_status(&self, shard_id: ShardId) -> FlatStorageCreationStatus { + store_helper::get_flat_storage_creation_status(&self.store, shard_id) + } + + // TODO (#7327): consider passing flat storage errors here to handle them gracefully + fn create_flat_storage_state_for_shard( &self, shard_id: ShardId, latest_block_height: BlockHeight, chain_access: &dyn ChainAccessForFlatStorage, - ) -> FlatStorageStateStatus { - let status = store_helper::get_flat_storage_state_status(&self.store, shard_id); - match &status { - FlatStorageStateStatus::Ready => { - let flat_storage_state = FlatStorageState::new( - self.store.clone(), - shard_id, - latest_block_height, - chain_access, - ); - self.flat_state_factory - .add_flat_storage_state_for_shard(shard_id, flat_storage_state); - } - _ => {} - } - info!(target: "chain", %shard_id, "Flat storage creation status: {status:?}"); - status + ) { + let flat_storage_state = + FlatStorageState::new(self.store.clone(), shard_id, latest_block_height, chain_access); + self.flat_state_factory.add_flat_storage_state_for_shard(shard_id, flat_storage_state); } fn remove_flat_storage_state_for_shard( @@ -909,24 +935,14 @@ impl RuntimeAdapter for NightshadeRuntime { } fn get_gc_stop_height(&self, block_hash: &CryptoHash) -> BlockHeight { - (|| -> Result { - let epoch_manager = self.epoch_manager.read(); - // an epoch must have a first block. 
- let epoch_first_block = *epoch_manager.get_block_info(block_hash)?.epoch_first_block(); - let epoch_first_block_info = epoch_manager.get_block_info(&epoch_first_block)?; - // maintain pointers to avoid cloning. - let mut last_block_in_prev_epoch = *epoch_first_block_info.prev_hash(); - let mut epoch_start_height = epoch_first_block_info.height(); - for _ in 0..self.gc_num_epochs_to_keep - 1 { - let epoch_first_block = - *epoch_manager.get_block_info(&last_block_in_prev_epoch)?.epoch_first_block(); - let epoch_first_block_info = epoch_manager.get_block_info(&epoch_first_block)?; - epoch_start_height = epoch_first_block_info.height(); - last_block_in_prev_epoch = *epoch_first_block_info.prev_hash(); + let result = self.get_gc_stop_height_impl(block_hash); + match result { + Ok(gc_stop_height) => gc_stop_height, + Err(error) => { + error!(target: "runtime", "Error when getting the gc stop height. Error: {}", error); + self.genesis_config.genesis_height } - Ok(epoch_start_height) - }()) - .unwrap_or(self.genesis_config.genesis_height) + } } fn add_validator_proposals( @@ -1443,6 +1459,8 @@ impl RuntimeAdapter for NightshadeRuntime { } } +impl RuntimeWithEpochManagerAdapter for NightshadeRuntime {} + impl node_runtime::adapter::ViewRuntimeAdapter for NightshadeRuntime { fn view_account( &self, @@ -1796,15 +1814,12 @@ mod test { store_update.commit().unwrap(); let mock_chain = MockChainForFlatStorage::new(0, genesis_hash); for shard_id in 0..runtime.num_shards(&EpochId::default()).unwrap() { - let status = runtime.try_create_flat_storage_state_for_shard( - shard_id as ShardId, - 0, - &mock_chain, - ); + let status = runtime.get_flat_storage_creation_status(shard_id); if cfg!(feature = "protocol_feature_flat_state") { - assert_eq!(status, FlatStorageStateStatus::Ready); + assert_eq!(status, FlatStorageCreationStatus::Ready); + runtime.create_flat_storage_state_for_shard(shard_id, 0, &mock_chain); } else { - assert_eq!(status, FlatStorageStateStatus::DontCreate); + 
assert_eq!(status, FlatStorageCreationStatus::DontCreate); } } diff --git a/nearcore/tests/economics.rs b/nearcore/tests/economics.rs index 79d5adad625..8b0427cae52 100644 --- a/nearcore/tests/economics.rs +++ b/nearcore/tests/economics.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use num_rational::Ratio; -use near_chain::{ChainGenesis, RuntimeAdapter}; +use near_chain::{ChainGenesis, RuntimeWithEpochManagerAdapter}; use near_chain_configs::Genesis; use near_client::test_utils::TestEnv; use near_crypto::{InMemorySigner, KeyType}; @@ -37,7 +37,7 @@ fn setup_env(genesis: &Genesis) -> TestEnv { Path::new("."), store1, genesis, - )) as Arc]) + )) as Arc]) .build() } diff --git a/neard/Cargo.toml b/neard/Cargo.toml index bc22bf428e7..50d449c3c47 100644 --- a/neard/Cargo.toml +++ b/neard/Cargo.toml @@ -32,6 +32,7 @@ tracing.workspace = true nearcore = { path = "../nearcore" } near-amend-genesis = { path = "../tools/amend-genesis" } near-chain-configs = { path = "../core/chain-configs" } +near-client = { path = "../chain/client" } near-cold-store-tool = { path = "../tools/cold-store", package = "cold-store-tool" } near-dyn-configs = { path = "../core/dyn-configs" } near-jsonrpc-primitives = { path = "../chain/jsonrpc-primitives" } @@ -62,7 +63,7 @@ rosetta_rpc = ["nearcore/rosetta_rpc"] json_rpc = ["nearcore/json_rpc"] protocol_feature_fix_staking_threshold = ["nearcore/protocol_feature_fix_staking_threshold"] protocol_feature_flat_state = ["nearcore/protocol_feature_flat_state"] -cold_store = ["nearcore/cold_store", "near-store/cold_store", "near-state-viewer/cold_store", "near-cold-store-tool/cold_store"] +protocol_feature_nep366_delegate_action = ["nearcore/protocol_feature_nep366_delegate_action"] nightly = [ "nightly_protocol", diff --git a/neard/src/cli.rs b/neard/src/cli.rs index e38753ed117..37a64eb0068 100644 --- a/neard/src/cli.rs +++ b/neard/src/cli.rs @@ -1,20 +1,17 @@ #[cfg(unix)] -use crate::watchers::Watcher; -use crate::watchers::{ - 
dyn_config_watcher::DynConfig, log_config_watcher::LogConfig, UpdateBehavior, -}; use anyhow::Context; use clap::{Args, Parser}; use near_amend_genesis::AmendGenesisCommand; use near_chain_configs::GenesisValidationMode; -#[cfg(feature = "cold_store")] +use near_client::ConfigUpdater; use near_cold_store_tool::ColdStoreCommand; +use near_dyn_configs::{UpdateableConfigLoader, UpdateableConfigLoaderError, UpdateableConfigs}; use near_jsonrpc_primitives::types::light_client::RpcLightClientExecutionProofResponse; use near_mirror::MirrorCommand; use near_o11y::tracing_subscriber::EnvFilter; use near_o11y::{ default_subscriber, default_subscriber_with_opentelemetry, BuildEnvFilterError, - EnvFilterBuilder, OpenTelemetryLevel, + EnvFilterBuilder, }; use near_ping::PingCommand; use near_primitives::hash::CryptoHash; @@ -29,8 +26,9 @@ use std::fs::File; use std::io::BufReader; use std::net::SocketAddr; use std::path::{Path, PathBuf}; -use tokio::sync::oneshot; -use tokio::sync::oneshot::Receiver; +use std::sync::Arc; +use tokio::sync::broadcast; +use tokio::sync::broadcast::Receiver; use tracing::{debug, error, info, warn}; /// NEAR Protocol Node @@ -112,7 +110,6 @@ impl NeardCmd { NeardSubCommand::AmendGenesis(cmd) => { cmd.run()?; } - #[cfg(feature = "cold_store")] NeardSubCommand::ColdStore(cmd) => { cmd.run(&home_dir); } @@ -223,7 +220,6 @@ pub(super) enum NeardSubCommand { /// Amend a genesis/records file created by `dump-state`. AmendGenesis(AmendGenesisCommand), - #[cfg(feature = "cold_store")] /// Testing tool for cold storage ColdStore(ColdStoreCommand), @@ -457,7 +453,9 @@ impl RunCmd { } } - let (tx, rx) = oneshot::channel::<()>(); + let (tx_crash, mut rx_crash) = broadcast::channel::<()>(16); + let (tx_config_update, rx_config_update) = + broadcast::channel::>>(16); let sys = actix::System::new(); sys.block_on(async move { @@ -476,11 +474,31 @@ impl RunCmd { .await .global(); - let nearcore::NearNode { rpc_servers, .. 
} = - nearcore::start_with_config_and_synchronization(home_dir, near_config, Some(tx)) - .expect("start_with_config"); + let updateable_configs = nearcore::dyn_config::read_updateable_configs(home_dir) + .unwrap_or_else(|e| panic!("Error reading dynamic configs: {:#}", e)); + let mut updateable_config_loader = + UpdateableConfigLoader::new(updateable_configs.clone(), tx_config_update); + let config_updater = ConfigUpdater::new(rx_config_update); - let sig = wait_for_interrupt_signal(home_dir, rx).await; + let nearcore::NearNode { rpc_servers, .. } = + nearcore::start_with_config_and_synchronization( + home_dir, + near_config, + Some(tx_crash), + Some(config_updater), + ) + .expect("start_with_config"); + + let sig = loop { + let sig = wait_for_interrupt_signal(home_dir, &mut rx_crash).await; + if sig == "SIGHUP" { + let maybe_updateable_configs = + nearcore::dyn_config::read_updateable_configs(home_dir); + updateable_config_loader.reload(maybe_updateable_configs); + } else { + break sig; + } + }; warn!(target: "neard", "{}, stopping... this may take a few minutes.", sig); futures::future::join_all(rpc_servers.iter().map(|(name, server)| async move { server.stop(true).await; @@ -488,9 +506,9 @@ impl RunCmd { })) .await; actix::System::current().stop(); - // Disable the subscriber to properly shutdown the tracer. - near_o11y::reload(Some("error"), None, Some(OpenTelemetryLevel::OFF)).unwrap(); + near_o11y::reload(Some("error"), None, Some(near_o11y::OpenTelemetryLevel::OFF)) + .unwrap(); }); sys.run().unwrap(); info!(target: "neard", "Waiting for RocksDB to gracefully shutdown"); @@ -499,38 +517,26 @@ impl RunCmd { } #[cfg(not(unix))] -async fn wait_for_interrupt_signal(_home_dir: &Path, mut _rx_crash: Receiver<()>) -> &str { +async fn wait_for_interrupt_signal(_home_dir: &Path, mut _rx_crash: &Receiver<()>) -> &str { // TODO(#6372): Support graceful shutdown on windows. 
tokio::signal::ctrl_c().await.unwrap(); "Ctrl+C" } #[cfg(unix)] -fn update_watchers(home_dir: &Path, behavior: UpdateBehavior) { - LogConfig::update(home_dir.join("log_config.json"), &behavior); - DynConfig::update(home_dir.join("dyn_config.json"), &behavior); -} - -#[cfg(unix)] -async fn wait_for_interrupt_signal(home_dir: &Path, mut rx_crash: Receiver<()>) -> &str { - // Apply all watcher config file if it exists. - update_watchers(&home_dir, UpdateBehavior::UpdateOnlyIfExists); - +async fn wait_for_interrupt_signal(_home_dir: &Path, rx_crash: &mut Receiver<()>) -> &'static str { use tokio::signal::unix::{signal, SignalKind}; let mut sigint = signal(SignalKind::interrupt()).unwrap(); let mut sigterm = signal(SignalKind::terminate()).unwrap(); let mut sighup = signal(SignalKind::hangup()).unwrap(); - loop { - break tokio::select! { - _ = sigint.recv() => "SIGINT", - _ = sigterm.recv() => "SIGTERM", - _ = sighup.recv() => { - update_watchers(&home_dir, UpdateBehavior::UpdateOrReset); - continue; - }, - _ = &mut rx_crash => "ClientActor died", - }; + tokio::select! { + _ = sigint.recv() => "SIGINT", + _ = sigterm.recv() => "SIGTERM", + _ = sighup.recv() => { + "SIGHUP" + }, + _ = rx_crash.recv() => "ClientActor died", } } @@ -795,7 +801,7 @@ mod tests { ) ); - // Proof with a wroing outcome (as user specified wrong shard). + // Proof with a wrong outcome (as user specified wrong shard). 
assert_eq!( VerifyProofSubCommand::verify_json( serde_json::from_slice(include_bytes!("../res/invalid_proof.json")).unwrap() diff --git a/neard/src/main.rs b/neard/src/main.rs index ab11ffac7c0..d322b6066a6 100644 --- a/neard/src/main.rs +++ b/neard/src/main.rs @@ -1,5 +1,4 @@ mod cli; -mod watchers; use self::cli::NeardCmd; use anyhow::Context; diff --git a/neard/src/watchers/dyn_config_watcher.rs b/neard/src/watchers/dyn_config_watcher.rs deleted file mode 100644 index 81ebeba1fb4..00000000000 --- a/neard/src/watchers/dyn_config_watcher.rs +++ /dev/null @@ -1,22 +0,0 @@ -use crate::watchers::{WatchConfigError, Watcher}; -use near_dyn_configs::reload; -use serde::{Deserialize, Serialize}; - -/// Configures logging. -#[derive(Serialize, Deserialize, Clone, Debug)] -pub(crate) struct DynConfig { - /// Graceful shutdown at expected blockheight - pub expected_shutdown: Option, -} - -impl Watcher for DynConfig { - fn reload(config: Option) -> Result<(), WatchConfigError> { - if let Some(config) = config { - reload(config.expected_shutdown); - Ok(()) - } else { - reload(None); - Ok(()) - } - } -} diff --git a/neard/src/watchers/log_config_watcher.rs b/neard/src/watchers/log_config_watcher.rs deleted file mode 100644 index 9824b98a17b..00000000000 --- a/neard/src/watchers/log_config_watcher.rs +++ /dev/null @@ -1,31 +0,0 @@ -use crate::watchers::{WatchConfigError, Watcher}; -use near_o11y::{reload, OpenTelemetryLevel, ReloadError}; -use serde::{Deserialize, Serialize}; - -/// Configures logging. -#[derive(Default, Serialize, Deserialize, Clone, Debug)] -pub(crate) struct LogConfig { - /// Comma-separated list of EnvFitler directives. - pub rust_log: Option, - /// Some("") enables global debug logging. - /// Some("module") enables debug logging for "module". - pub verbose_module: Option, - /// Verbosity level of collected traces. 
- pub opentelemetry_level: Option, -} - -impl Watcher for LogConfig { - fn reload(instance: Option) -> Result<(), WatchConfigError> { - if let Some(LogConfig { rust_log, verbose_module, opentelemetry_level }) = instance { - Ok(reload(rust_log.as_deref(), verbose_module.as_deref(), opentelemetry_level) - .map_err(|e| into_config_err(e))?) - } else { - Ok(reload(None, None, None).map_err(|e| into_config_err(e))?) - } - } -} - -fn into_config_err(reload_errs: Vec) -> WatchConfigError { - let error_msgs: Vec = reload_errs.iter().map(|e| e.to_string()).collect(); - WatchConfigError::Reload(error_msgs.join("")) -} diff --git a/neard/src/watchers/mod.rs b/neard/src/watchers/mod.rs deleted file mode 100644 index 2b3aba6805c..00000000000 --- a/neard/src/watchers/mod.rs +++ /dev/null @@ -1,62 +0,0 @@ -pub mod dyn_config_watcher; -pub mod log_config_watcher; - -use serde::{Deserialize, Serialize}; -use std::fmt::Debug; -use std::io; -use std::io::ErrorKind; -use std::path::PathBuf; -use tracing::{error, info}; - -pub(crate) enum UpdateBehavior { - UpdateOrReset, - UpdateOnlyIfExists, -} - -#[derive(thiserror::Error, Debug)] -#[non_exhaustive] -pub(crate) enum WatchConfigError { - #[error("Failed to reload the watcher config")] - Reload(String), - #[error("Failed to reload the logging config")] - Parse(#[source] serde_json::Error), - #[error("Can't open or read the logging config file")] - OpenAndRead(#[source] io::Error), -} - -/// Watcher helps to `reload` the change of config -/// main thread will use `update` method to trigger config watchers to reload the config they watch -pub(crate) trait Watcher -where - Self: Debug + for<'a> Deserialize<'a> + Serialize, -{ - fn reload(instance: Option) -> Result<(), WatchConfigError>; - - fn do_update(path: &PathBuf, update_behavior: &UpdateBehavior) -> Result<(), WatchConfigError> { - match std::fs::read_to_string(path) { - Ok(config_str) => match serde_json::from_str::(&config_str) { - Ok(config) => { - info!(target: "neard", 
config=?config, "Changing the config {path:?}."); - return Self::reload(Some(config)); - } - Err(e) => Err(WatchConfigError::Parse(e)), - }, - Err(err) => match err.kind() { - ErrorKind::NotFound => { - if let UpdateBehavior::UpdateOrReset = update_behavior { - info!(target: "neard", ?err, "Reset the config {path:?} because the logging config file doesn't exist."); - return Self::reload(None); - } - Ok(()) - } - _ => Err(err).map_err(WatchConfigError::OpenAndRead), - }, - } - } - - fn update(path: PathBuf, update_behavior: &UpdateBehavior) { - if let Err(err) = Self::do_update(&path, update_behavior) { - error!(target: "neard", "Failed to update {path:?}: {err:?}."); - } - } -} diff --git a/nightly/pytest-sanity.txt b/nightly/pytest-sanity.txt index 7cd9c8f1f93..7481efa806d 100644 --- a/nightly/pytest-sanity.txt +++ b/nightly/pytest-sanity.txt @@ -41,12 +41,14 @@ pytest --timeout=600 sanity/state_sync_routed.py manytx 115 --features nightly #pytest --timeout=300 sanity/state_sync_late.py notx --features nightly pytest --timeout=3600 sanity/state_sync_massive.py -pytest --timeout=3600 sanity/state_sync_massive.py --features nightly pytest --timeout=3600 sanity/state_sync_massive_validator.py -pytest --timeout=3600 sanity/state_sync_massive_validator.py --features nightly -pytest sanity/sync_chunks_from_archival.py -pytest sanity/sync_chunks_from_archival.py --features nightly +# TODO(#8322) - re-enable these tests when flat storage starts supporting loading from state dump +# pytest --timeout=3600 sanity/state_sync_massive.py --features nightly +# pytest --timeout=3600 sanity/state_sync_massive_validator.py --features nightly +# TODO(#8211) - tests broken due to bad behavior in chunk fetching - re-enable when that PR is submitted. 
+# pytest sanity/sync_chunks_from_archival.py +# pytest sanity/sync_chunks_from_archival.py --features nightly pytest sanity/rpc_tx_forwarding.py pytest sanity/rpc_tx_forwarding.py --features nightly pytest --timeout=240 sanity/skip_epoch.py @@ -139,3 +141,6 @@ pytest --skip-build --timeout=1h sanity/docker.py pytest sanity/recompress_storage.py pytest sanity/recompress_storage.py --features nightly + +# This is the test for meta transactions. +pytest sanity/meta_tx.py --features nightly \ No newline at end of file diff --git a/pytest/lib/messages/tx.py b/pytest/lib/messages/tx.py index 419efa7e2cf..214e4fedeab 100644 --- a/pytest/lib/messages/tx.py +++ b/pytest/lib/messages/tx.py @@ -45,6 +45,14 @@ class DeleteAccount: pass +class SignedDelegate: + pass + + +class DelegateAction: + pass + + class Receipt: pass @@ -96,6 +104,7 @@ class DataReceiver: ['addKey', AddKey], ['deleteKey', DeleteKey], ['deleteAccount', DeleteAccount], + ['delegate', SignedDelegate], ] } ], @@ -115,6 +124,23 @@ class DataReceiver: ['gas', 'u64'], ['deposit', 'u128']] } ], + [ + SignedDelegate, { + 'kind': + 'struct', + 'fields': [['delegateAction', DelegateAction], + ['signature', Signature]] + } + ], + [ + DelegateAction, { + 'kind': + 'struct', + 'fields': [['senderId', 'string'], ['receiverId', 'string'], + ['actions', [Action]], ['nonce', 'u64'], + ['maxBlockHeight', 'u64'], ['publicKey', PublicKey]] + } + ], [Transfer, { 'kind': 'struct', 'fields': [['deposit', 'u128']] diff --git a/pytest/lib/serializer.py b/pytest/lib/serializer.py index 692f47dae07..60808ba0cb1 100644 --- a/pytest/lib/serializer.py +++ b/pytest/lib/serializer.py @@ -115,14 +115,21 @@ def serialize_struct(self, obj): structSchema = self.schema[type(obj)] if structSchema['kind'] == 'struct': for fieldName, fieldType in structSchema['fields']: - self.serialize_field(getattr(obj, fieldName), fieldType) + try: + self.serialize_field(getattr(obj, fieldName), fieldType) + except AssertionError as exc: + raise 
AssertionError(f"Error in field {fieldName}") from exc elif structSchema['kind'] == 'enum': name = getattr(obj, structSchema['field']) for idx, (fieldName, fieldType) in enumerate(structSchema['values']): if fieldName == name: self.serialize_num(idx, 1) - self.serialize_field(getattr(obj, fieldName), fieldType) + try: + self.serialize_field(getattr(obj, fieldName), fieldType) + except AssertionError as exc: + raise AssertionError( + f"Error in field {fieldName}") from exc break else: assert False, name diff --git a/pytest/lib/transaction.py b/pytest/lib/transaction.py index ec3de0b7865..023ee8c220c 100644 --- a/pytest/lib/transaction.py +++ b/pytest/lib/transaction.py @@ -43,6 +43,40 @@ def sign_and_serialize_transaction(receiverId, nonce, actions, blockHash, return BinarySerializer(schema).serialize(signedTx) +def compute_delegated_action_hash(senderId, receiverId, actions, nonce, + maxBlockHeight, publicKey): + delegateAction = DelegateAction() + delegateAction.senderId = senderId + delegateAction.receiverId = receiverId + delegateAction.actions = actions + delegateAction.nonce = nonce + delegateAction.maxBlockHeight = maxBlockHeight + delegateAction.publicKey = PublicKey() + delegateAction.publicKey.keyType = 0 + delegateAction.publicKey.data = publicKey + msg = BinarySerializer(schema).serialize(delegateAction) + hash_ = hashlib.sha256(msg).digest() + + return delegateAction, hash_ + + +# Used by meta-transactions. +# Creates a SignedDelegate that is later put into the DelegateAction by relayer. 
+def create_signed_delegated_action(senderId, receiverId, actions, nonce, + maxBlockHeight, publicKey, sk): + delegated_action, hash_ = compute_delegated_action_hash( + senderId, receiverId, actions, nonce, maxBlockHeight, publicKey) + + signature = Signature() + signature.keyType = 0 + signature.data = SigningKey(sk).sign(hash_) + + signedDA = SignedDelegate() + signedDA.delegateAction = delegated_action + signedDA.signature = signature + return signedDA + + def create_create_account_action(): createAccount = CreateAccount() action = Action() @@ -133,6 +167,22 @@ def create_delete_account_action(beneficiary): return action +def create_delegate_action(signedDelegate): + action = Action() + action.enum = 'delegate' + action.delegate = signedDelegate + return action + + +def sign_delegate_action(signedDelegate, signer_key, contract_id, nonce, + blockHash): + action = create_delegate_action(signedDelegate) + return sign_and_serialize_transaction(contract_id, nonce, [action], + blockHash, signer_key.account_id, + signer_key.decoded_pk(), + signer_key.decoded_sk()) + + def sign_create_account_tx(creator_key, new_account_id, nonce, block_hash): action = create_create_account_action() return sign_and_serialize_transaction(new_account_id, nonce, [action], diff --git a/pytest/lib/utils.py b/pytest/lib/utils.py index 45d132c2a65..8e6e1fcff70 100644 --- a/pytest/lib/utils.py +++ b/pytest/lib/utils.py @@ -12,7 +12,8 @@ import tempfile import time import typing - +import requests +from prometheus_client.parser import text_string_to_metric_families from retrying import retry from rc import gcloud @@ -72,6 +73,10 @@ class LogTracker: """Opens up a log file, scrolls to the end and allows to check for patterns. The tracker works only on local nodes. + + PLEASE AVOID USING THE TRACKER IN NEW TESTS. + As depending on the exact log wording is making tests very fragile. + Try depending on a metric instead. 
""" def __init__(self, node: cluster.BaseNode) -> None: @@ -117,6 +122,61 @@ def count(self, pattern: str) -> int: return self._read_file().count(pattern) +class MetricsTracker: + """Helper class to collect prometheus metrics from the node. + + Usage: + tracker = MetricsTracker(node) + assert tracker.get_int_metric_value("near-connections") == 2 + """ + + def __init__(self, node: cluster.BaseNode) -> None: + if not isinstance(node, cluster.LocalNode): + raise NotImplementedError() + host, port = node.rpc_addr() + + self.addr = f"http://{host}:{port}/metrics" + + def get_all_metrics(self) -> str: + response = requests.get(self.addr) + if not response.ok: + raise RuntimeError( + f"Could not fetch metrics from {self.addr}: {response}") + return response.content.decode('utf-8') + + def get_metric_value( + self, + metric_name: str, + labels: typing.Optional[typing.Dict[str, str]] = None + ) -> typing.Optional[str]: + for family in text_string_to_metric_families(self.get_all_metrics()): + if family.name == metric_name: + all_samples = [sample for sample in family.samples] + if not labels: + if len(all_samples) > 1: + raise AssertionError( + f"Too many metric values ({len(all_samples)}) for {metric_name} - please specify a label" + ) + if not all_samples: + return None + return all_samples[0].value + for sample in all_samples: + if sample.labels == labels: + return sample.value + return None + + def get_int_metric_value( + self, + metric_name: str, + labels: typing.Optional[typing.Dict[str, str]] = None + ) -> typing.Optional[int]: + """Helper function to return the integer value of the metric (as function above returns strings).""" + value = self.get_metric_value(metric_name, labels) + if not value: + return None + return round(float(value)) + + def chain_query(node, block_handler, *, block_hash=None, max_blocks=-1): """ Query chain block approvals and chunks preceding of block of block_hash. 
diff --git a/pytest/tests/adversarial/malicious_chain.py b/pytest/tests/adversarial/malicious_chain.py index e0389906844..47b4a5ea07a 100755 --- a/pytest/tests/adversarial/malicious_chain.py +++ b/pytest/tests/adversarial/malicious_chain.py @@ -1,4 +1,7 @@ #!/usr/bin/env python3 +# starts two validators and one extra RPC node, and directs one of the validators +# to produce invalid blocks. Then we check that the other two nodes have banned this peer. + import sys, time import pathlib @@ -25,9 +28,15 @@ height, _ = utils.wait_for_blocks(nodes[1], target=BLOCKS) logger.info(f'Got to {height} blocks, getting to fun stuff') +# first check that nodes 0 and 2 have two peers each, before we check later that +# they've dropped to just one peer +network_info0 = nodes[0].json_rpc('network_info', {})['result'] +network_info2 = nodes[2].json_rpc('network_info', {})['result'] +assert network_info0['num_active_peers'] == 2, network_info0['num_active_peers'] +assert network_info2['num_active_peers'] == 2, network_info2['num_active_peers'] + nodes[1].get_status(verbose=True) -tracker0 = utils.LogTracker(nodes[0]) res = nodes[1].json_rpc('adv_produce_blocks', [MALICIOUS_BLOCKS, valid_blocks_only]) assert 'result' in res, res @@ -39,6 +48,14 @@ assert height < 40 -assert tracker0.check("Banned(BadBlockHeader)") +network_info0 = nodes[0].json_rpc('network_info', {})['result'] +network_info2 = nodes[2].json_rpc('network_info', {})['result'] + +# Since we have 3 nodes, they should all have started with 2 peers. 
After the above +# invalid blocks sent by node1, the other two should have banned it, leaving them +# with one active peer + +assert network_info0['num_active_peers'] == 1, network_info0['num_active_peers'] +assert network_info2['num_active_peers'] == 1, network_info2['num_active_peers'] logger.info("Epic") diff --git a/pytest/tests/sanity/meta_tx.py b/pytest/tests/sanity/meta_tx.py new file mode 100644 index 00000000000..3e62af43a10 --- /dev/null +++ b/pytest/tests/sanity/meta_tx.py @@ -0,0 +1,94 @@ +#!/usr/bin/env python3 +# Tests the meta transaction flow. +# Creates a new account (candidate.test0) with a fixed amount of tokens. +# Afterwards, creates the meta transaction that adds a new key to this account, but the gas is paid by someone else (test0) account. +# At the end, verifies that key has been added succesfully and that the amount of tokens in candidate didn't change. + +import base58 +import pathlib +import sys +import typing + +import unittest + +sys.path.append(str(pathlib.Path(__file__).resolve().parents[2] / 'lib')) + +from cluster import start_cluster, LocalNode +import utils +import transaction +import key + + +class Nonce: + """ Helper class to manage nonces (automatically increase them when they are used. 
""" + + def __init__(self, current_nonce: int = 0): + self.nonce = current_nonce + + def use_nonce(self) -> int: + self.nonce += 1 + return self.nonce + + +def create_nonce_from_node(node: LocalNode, account_id: str, pk: str) -> Nonce: + nn = node.get_nonce_for_pk(account_id, pk) + assert nn, "Nonce missing for the candidate account" + return Nonce(nn) + + +# Returns the number of keys and current amount for a given account +def check_account_status(node: LocalNode, + account_id: str) -> typing.Tuple[int, int]: + current_keys = node.get_access_key_list(account_id)['result']['keys'] + account_state = node.get_account(account_id)['result'] + return (len(current_keys), int(account_state['amount'])) + + +class TestMetaTransactions(unittest.TestCase): + + def test_meta_tx(self): + nodes: list[LocalNode] = start_cluster(2, 0, 1, None, [], {}) + _, hash_ = utils.wait_for_blocks(nodes[0], target=10) + + node0_nonce = Nonce() + + CANDIDATE_ACCOUNT = "candidate.test0" + CANDIDATE_STARTING_AMOUNT = 123 * (10**24) + + # create new account + candidate_key = key.Key.from_random(CANDIDATE_ACCOUNT) + + tx = transaction.sign_create_account_with_full_access_key_and_balance_tx( + nodes[0].signer_key, candidate_key.account_id, candidate_key, + CANDIDATE_STARTING_AMOUNT, node0_nonce.use_nonce(), + base58.b58decode(hash_.encode('utf8'))) + nodes[0].send_tx_and_wait(tx, 100) + + self.assertEqual(check_account_status(nodes[0], CANDIDATE_ACCOUNT), + (1, CANDIDATE_STARTING_AMOUNT)) + + candidate_nonce = create_nonce_from_node(nodes[0], + candidate_key.account_id, + candidate_key.pk) + + # Now let's prepare the meta transaction. 
+ new_key = key.Key.from_random("new_key") + add_new_key_tx = transaction.create_full_access_key_action( + new_key.decoded_pk()) + signed_meta_tx = transaction.create_signed_delegated_action( + CANDIDATE_ACCOUNT, CANDIDATE_ACCOUNT, [add_new_key_tx], + candidate_nonce.use_nonce(), 1000, candidate_key.decoded_pk(), + candidate_key.decoded_sk()) + + meta_tx = transaction.sign_delegate_action( + signed_meta_tx, nodes[0].signer_key, CANDIDATE_ACCOUNT, + node0_nonce.use_nonce(), base58.b58decode(hash_.encode('utf8'))) + + nodes[0].send_tx_and_wait(meta_tx, 100) + + self.assertEqual(check_account_status(nodes[0], CANDIDATE_ACCOUNT), + (2, CANDIDATE_STARTING_AMOUNT)) + + +if __name__ == '__main__': + unittest.main() diff --git a/pytest/tests/sanity/rpc_finality.py b/pytest/tests/sanity/rpc_finality.py old mode 100755 new mode 100644 index a88a16f8ace..1430708b493 --- a/pytest/tests/sanity/rpc_finality.py +++ b/pytest/tests/sanity/rpc_finality.py @@ -1,19 +1,18 @@ #!/usr/bin/env python3 -# The test launches two validating node out of three validators. -# Transfer some tokens between two accounts (thus changing state). -# Query for no finality, doomslug finality -# Nov 2021 - the test was fixed, but it now check for both finalities. -# We might want to update it in the future in order to be able to 'exactly' find -# the moment when doomslug is there, but finality is not. - -import sys, time, base58 +# The test does the token transfer between the accounts, and tries to +# stop the network just in the right moment (so that the block with the refund receipt +# is not finalized). +# This way, we can verify that our json RPC returns correct values for different finality requests. 
+ +import sys import pathlib import unittest +from typing import List sys.path.append(str(pathlib.Path(__file__).resolve().parents[2] / 'lib')) - -from cluster import start_cluster +import utils +from cluster import start_cluster, LocalNode from configured_logger import logger from transaction import sign_payment_tx @@ -21,46 +20,79 @@ class TestRpcFinality(unittest.TestCase): def test_finality(self): - nodes = start_cluster(4, 1, 1, None, - [["min_gas_price", 0], ["epoch_length", 100]], {}) + # set higher block delay to make test more reliable. + min_block_delay = 3 - time.sleep(3) - # kill one validating node so that no block can be finalized - nodes[2].kill() - time.sleep(1) + consensus = { + "consensus": { + "min_block_production_delay": { + "secs": min_block_delay, + "nanos": 0, + }, + "max_block_production_delay": { + "secs": min_block_delay * 2, + "nanos": 0, + }, + "max_block_wait_delay": { + "secs": min_block_delay * 3, + "nanos": 0, + } + } + } + + config = {node_id: {"consensus": consensus} for node_id in range(3)} + + nodes: List[LocalNode] = start_cluster(3, 0, 1, None, [ + ["min_gas_price", 0], + ["epoch_length", 100], + ], config) + + utils.wait_for_blocks(nodes[0], target=3) - acc0_balance = int(nodes[0].get_account('test0')['result']['amount']) - acc1_balance = int(nodes[0].get_account('test1')['result']['amount']) + balances = { + account: int(nodes[0].get_account('test0')['result']['amount']) + for account in ['test0', 'test1'] + } token_transfer = 10 latest_block_hash = nodes[0].get_latest_block().hash_bytes tx = sign_payment_tx(nodes[0].signer_key, 'test1', token_transfer, 1, latest_block_hash) logger.info("About to send payment") - logger.info(nodes[0].send_tx_and_wait(tx, timeout=200)) + # this transaction will be added to the block (probably around block 5) + # and the the receipts & transfers will happen in the next block (block 6). + # This function should return as soon as block 6 arrives in node0. 
+ logger.info(nodes[0].send_tx_and_wait(tx, timeout=10)) logger.info("Done") - # wait for doomslug finality - time.sleep(5) - for i in range(2): - acc_id = 'test0' if i == 0 else 'test1' - acc_no_finality = nodes[0].get_account(acc_id) - acc_doomslug_finality = nodes[0].get_account(acc_id, "near-final") - acc_nfg_finality = nodes[0].get_account(acc_id, "final") - if i == 0: - self.assertEqual(int(acc_no_finality['result']['amount']), - acc0_balance - token_transfer) - self.assertEqual(int(acc_doomslug_finality['result']['amount']), - acc0_balance - token_transfer) - self.assertEqual(int(acc_nfg_finality['result']['amount']), - acc0_balance - token_transfer) + # kill one validating node so that block cannot be finalized. + nodes[2].kill() + + print( + f"Block height is {nodes[0].get_latest_block().height} (should be 6)" + ) + + # So now the situation is following: + # Block 6 (head) - has the final receipt (that adds state to test1) + # Block 5 (doomslug) - has the transaction (so this is the moment when state is removed from test0) + # Block 4 (final) - has no information about the transaction. 
+ + # So with optimistic finality: test0 = -10, test1 = +10 + # with doomslug (state as of block 5): test0 = -10, test1 = 0 + # with final (state as of block 4): test0 = 0, test1 = 0 + + for acc_id in ['test0', 'test1']: + amounts = [ + int(nodes[0].get_account(acc_id, finality)['result']['amount']) + - balances[acc_id] + for finality in ["optimistic", "near-final", "final"] + ] + print(f"Account amounts: {acc_id}: {amounts}") + + if acc_id == 'test0': + self.assertEqual([-10, -10, 0], amounts) else: - self.assertEqual(int(acc_no_finality['result']['amount']), - acc1_balance + token_transfer) - self.assertEqual(int(acc_doomslug_finality['result']['amount']), - acc1_balance + token_transfer) - self.assertEqual(int(acc_nfg_finality['result']['amount']), - acc1_balance + token_transfer) + self.assertEqual([10, 0, 0], amounts) if __name__ == '__main__': diff --git a/pytest/tests/sanity/state_sync_routed.py b/pytest/tests/sanity/state_sync_routed.py index 304c7618e3b..75d633de9f6 100755 --- a/pytest/tests/sanity/state_sync_routed.py +++ b/pytest/tests/sanity/state_sync_routed.py @@ -17,6 +17,9 @@ import pathlib sys.path.append(str(pathlib.Path(__file__).resolve().parents[2] / 'lib')) +from configured_logger import logger +from cluster import init_cluster, spin_up_node, load_config +import utils if len(sys.argv) < 3: logger.info("python state_sync.py [notx, onetx, manytx] ") @@ -25,10 +28,6 @@ mode = sys.argv[1] assert mode in ['notx', 'onetx', 'manytx'] -from cluster import init_cluster, spin_up_node, load_config -from configured_logger import logger -import utils - START_AT_BLOCK = int(sys.argv[2]) TIMEOUT = 150 + START_AT_BLOCK * 10 @@ -105,7 +104,8 @@ 4, boot_node=boot_node, blacklist=[0, 1]) -tracker4 = utils.LogTracker(node4) + +metrics4 = utils.MetricsTracker(node4) time.sleep(3) catch_up_height = 0 @@ -134,25 +134,13 @@ assert catch_up_height in boot_heights, "%s not in %s" % (catch_up_height, boot_heights) -tracker4.reset( -) # the transition might have happened 
before we initialized the tracker -if catch_up_height >= 100: - assert tracker4.check("transition to State Sync") -elif catch_up_height <= 30: - assert not tracker4.check("transition to State Sync") - while True: assert time.time( ) - started < TIMEOUT, "Waiting for node 4 to connect to two peers" - tracker4.reset() - if tracker4.count("Consolidated connection") == 2: + if metrics4.get_int_metric_value("near_peer_connections_total") == 2: break time.sleep(0.1) -tracker4.reset() -# Check that no message is dropped because a peer is disconnected -assert tracker4.count("Reason Disconnected") == 0 - if mode == 'manytx': while ctx.get_balances() != ctx.expected_balances: assert time.time() - started < TIMEOUT diff --git a/runtime/near-test-contracts/Cargo.toml b/runtime/near-test-contracts/Cargo.toml index 35f665712c2..203df213533 100644 --- a/runtime/near-test-contracts/Cargo.toml +++ b/runtime/near-test-contracts/Cargo.toml @@ -12,7 +12,7 @@ wat = "1.0.40" wasm-encoder = "0.11.0" wasm-smith = "0.10.0" rand = { version = "0.8.5", features = ["small_rng"] } -arbitrary = "1.1.0" +arbitrary = "1.2.3" [features] nightly = [] diff --git a/runtime/near-vm-logic/Cargo.toml b/runtime/near-vm-logic/Cargo.toml index eace29d00d0..c3c4a49c31b 100644 --- a/runtime/near-vm-logic/Cargo.toml +++ b/runtime/near-vm-logic/Cargo.toml @@ -17,7 +17,6 @@ This crate implements the specification of the interface that Near blockchain ex [dependencies] bn.workspace = true borsh.workspace = true -byteorder.workspace = true ed25519-dalek.workspace = true ripemd.workspace = true serde.workspace = true diff --git a/runtime/near-vm-logic/src/logic.rs b/runtime/near-vm-logic/src/logic.rs index ad1c42c3c6b..beb442c2471 100644 --- a/runtime/near-vm-logic/src/logic.rs +++ b/runtime/near-vm-logic/src/logic.rs @@ -5,7 +5,6 @@ use crate::receipt_manager::ReceiptManager; use crate::types::{PromiseIndex, PromiseResult, ReceiptIndex, ReturnData}; use crate::utils::split_method_names; use 
crate::{ReceiptMetadata, StorageGetMode, ValuePtr}; -use byteorder::ByteOrder; use near_crypto::Secp256K1Signature; use near_primitives::checked_feature; use near_primitives::config::ViewConfig; @@ -307,11 +306,7 @@ impl<'a> VMLogic<'a> { self.config.limit_config.max_total_log_length.saturating_sub(self.total_log_length); if len != u64::MAX { if len > max_len { - return Err(HostError::TotalLogLengthExceeded { - length: self.total_log_length.saturating_add(len), - limit: self.config.limit_config.max_total_log_length, - } - .into()); + return self.total_log_length_exceeded(len); } buf = self.memory.view(&mut self.gas_counter, MemSlice { ptr, len })?.into_owned(); } else { @@ -323,11 +318,7 @@ impl<'a> VMLogic<'a> { break; } if i == max_len { - return Err(HostError::TotalLogLengthExceeded { - length: self.total_log_length.saturating_add(max_len).saturating_add(1), - limit: self.config.limit_config.max_total_log_length, - } - .into()); + return self.total_log_length_exceeded(max_len.saturating_add(1)); } buf.push(el); } @@ -361,52 +352,46 @@ impl<'a> VMLogic<'a> { /// /// For nul-terminated string: /// `read_memory_base * num_bytes / 2 + read_memory_byte * num_bytes + utf16_decoding_base + utf16_decoding_byte * num_bytes` - fn get_utf16_string(&mut self, len: u64, ptr: u64) -> Result { + fn get_utf16_string(&mut self, mut len: u64, ptr: u64) -> Result { self.gas_counter.pay_base(utf16_decoding_base)?; - let mut u16_buffer; let max_len = self.config.limit_config.max_total_log_length.saturating_sub(self.total_log_length); - if len != u64::MAX { - let input = self.memory.view(&mut self.gas_counter, MemSlice { ptr, len })?; - if len % 2 != 0 { - return Err(HostError::BadUTF16.into()); - } - if len > max_len { - return Err(HostError::TotalLogLengthExceeded { - length: self.total_log_length.saturating_add(len), - limit: self.config.limit_config.max_total_log_length, - } - .into()); - } - u16_buffer = vec![0u16; len as usize / 2]; - 
byteorder::LittleEndian::read_u16_into(&input, &mut u16_buffer); + + let mem_view = if len == u64::MAX { + len = self.get_nul_terminated_utf16_len(ptr, max_len)?; + self.memory.view_for_free(MemSlice { ptr, len }) } else { - u16_buffer = vec![]; - let limit = max_len / size_of::() as u64; - // Takes 2 bytes each iter - for i in 0..=limit { - // self.memory_get_u16 will check for u64 overflow on the first iteration (i == 0) - let start = ptr + i * size_of::() as u64; - let el = self.memory.get_u16(&mut self.gas_counter, start)?; - if el == 0 { - break; - } - if i == limit { - return Err(HostError::TotalLogLengthExceeded { - length: self - .total_log_length - .saturating_add(i * size_of::() as u64) - .saturating_add(size_of::() as u64), - limit: self.config.limit_config.max_total_log_length, - } - .into()); - } - u16_buffer.push(el); + self.memory.view(&mut self.gas_counter, MemSlice { ptr, len }) + }?; + + let input = stdx::as_chunks_exact(&mem_view).map_err(|_| HostError::BadUTF16)?; + if len > max_len { + return self.total_log_length_exceeded(len); + } + + self.gas_counter.pay_per(utf16_decoding_byte, len)?; + char::decode_utf16(input.into_iter().copied().map(u16::from_le_bytes)) + .collect::>() + .map_err(|_| HostError::BadUTF16.into()) + } + + /// Helper function to get length of NUL-terminated UTF-16 formatted string + /// in guest memory. + /// + /// In other words, counts how many bytes are there to first pair of NUL + /// bytes. + fn get_nul_terminated_utf16_len(&mut self, ptr: u64, max_len: u64) -> Result { + let mut len = 0; + loop { + if self.memory.get_u16(&mut self.gas_counter, ptr.saturating_add(len))? 
== 0 { + return Ok(len); } + len = match len.checked_add(2) { + Some(len) if len <= max_len => len, + Some(len) => return self.total_log_length_exceeded(len), + None => return self.total_log_length_exceeded(u64::MAX), + }; } - self.gas_counter - .pay_per(utf16_decoding_byte, u16_buffer.len() as u64 * size_of::() as u64)?; - String::from_utf16(&u16_buffer).map_err(|_| HostError::BadUTF16.into()) } // #################################################### @@ -445,16 +430,20 @@ impl<'a> VMLogic<'a> { // The size of logged data can't be too large. No overflow. self.total_log_length += message.len() as u64; if self.total_log_length > self.config.limit_config.max_total_log_length { - return Err(HostError::TotalLogLengthExceeded { - length: self.total_log_length, - limit: self.config.limit_config.max_total_log_length, - } - .into()); + return self.total_log_length_exceeded(0); } self.logs.push(message); Ok(()) } + fn total_log_length_exceeded(&self, add_len: u64) -> Result { + Err(HostError::TotalLogLengthExceeded { + length: self.total_log_length.saturating_add(add_len), + limit: self.config.limit_config.max_total_log_length, + } + .into()) + } + fn get_public_key(&mut self, ptr: u64, len: u64) -> Result { Ok(PublicKeyBuffer::new(&get_memory_or_register!(self, ptr, len)?)) } diff --git a/runtime/near-vm-logic/src/tests/alt_bn128.rs b/runtime/near-vm-logic/src/tests/alt_bn128.rs index 04a3e096515..88399e70c51 100644 --- a/runtime/near-vm-logic/src/tests/alt_bn128.rs +++ b/runtime/near-vm-logic/src/tests/alt_bn128.rs @@ -1,4 +1,4 @@ -use super::{fixtures::get_context, vm_logic_builder::VMLogicBuilder}; +use super::vm_logic_builder::VMLogicBuilder; use near_vm_errors::{HostError, VMLogicError}; use std::fmt::Write; @@ -89,7 +89,7 @@ fn test_alt_bn128_g1_multiexp() { #[track_caller] fn check(input: &[u8], expected: Result<&[u8], &str>) { let mut logic_builder = VMLogicBuilder::default(); - let mut logic = logic_builder.build(get_context(vec![], false)); + let mut logic = 
logic_builder.build(); let input = logic.internal_mem_write(input); let res = logic.alt_bn128_g1_multiexp(input.len, input.ptr, 0); @@ -153,7 +153,7 @@ fn test_alt_bn128_g1_sum() { #[track_caller] fn check(input: &[u8], expected: Result<&[u8], &str>) { let mut logic_builder = VMLogicBuilder::default(); - let mut logic = logic_builder.build(get_context(vec![], false)); + let mut logic = logic_builder.build(); let input = logic.internal_mem_write(input); let res = logic.alt_bn128_g1_sum(input.len, input.ptr, 0); @@ -217,7 +217,7 @@ fn test_alt_bn128_pairing_check() { #[track_caller] fn check(input: &[u8], expected: Result) { let mut logic_builder = VMLogicBuilder::default(); - let mut logic = logic_builder.build(get_context(vec![], false)); + let mut logic = logic_builder.build(); let input = logic.internal_mem_write(input); let res = logic.alt_bn128_pairing_check(input.len, input.ptr); diff --git a/runtime/near-vm-logic/src/tests/context.rs b/runtime/near-vm-logic/src/tests/context.rs index 5ca0c2d7537..445533a06b8 100644 --- a/runtime/near-vm-logic/src/tests/context.rs +++ b/runtime/near-vm-logic/src/tests/context.rs @@ -1,61 +1,46 @@ use crate::tests::vm_logic_builder::VMLogicBuilder; -use crate::VMContext; - -pub fn create_context() -> VMContext { - VMContext { - current_account_id: "alice".parse().unwrap(), - signer_account_id: "bob".parse().unwrap(), - signer_account_pk: vec![0, 1, 2, 3, 4], - predecessor_account_id: "carol".parse().unwrap(), - input: vec![0, 1, 2, 3, 5], - block_height: 10, - block_timestamp: 42, - epoch_height: 1, - account_balance: 2u128, - account_locked_balance: 1u128, - storage_usage: 12, - attached_deposit: 2u128, - prepaid_gas: 10_u64.pow(14), - random_seed: vec![0, 1, 2], - view_config: None, - output_data_receivers: vec![], - } -} macro_rules! 
decl_test_bytes { - ($testname:ident, $method:ident, $input:expr) => { + ($testname:ident, $method:ident, $ctx:ident, $want:expr) => { #[test] fn $testname() { let mut logic_builder = VMLogicBuilder::default(); - let mut logic = logic_builder.build(create_context()); + let $ctx = &logic_builder.context; + let want = $want.to_vec(); + let mut logic = logic_builder.build(); logic.$method(0).expect("read bytes into register from context should be ok"); - logic.assert_read_register($input, 0); + logic.assert_read_register(&want[..], 0); } }; } macro_rules! decl_test_u64 { - ($testname:ident, $method:ident, $input:expr) => { + ($testname:ident, $method:ident, $ctx:ident, $want:expr) => { #[test] fn $testname() { let mut logic_builder = VMLogicBuilder::default(); - let mut logic = logic_builder.build(create_context()); - let res = logic.$method().expect("read from context should be ok"); - assert_eq!(res, $input); + let $ctx = &logic_builder.context; + let want = $want; + + let mut logic = logic_builder.build(); + let got = logic.$method().expect("read from context should be ok"); + assert_eq!(want, got); } }; } macro_rules! decl_test_u128 { - ($testname:ident, $method:ident, $input:expr) => { + ($testname:ident, $method:ident, $ctx:ident, $want:expr) => { #[test] fn $testname() { let mut logic_builder = VMLogicBuilder::default(); - let mut logic = logic_builder.build(create_context()); + let $ctx = &logic_builder.context; + let want = $want; + let mut logic = logic_builder.build(); logic.$method(0).expect("read from context should be ok"); let got = logic.internal_mem_read(0, 16).try_into().unwrap(); - assert_eq!(u128::from_le_bytes(got), $input); + assert_eq!(u128::from_le_bytes(got), want); } }; } @@ -63,41 +48,42 @@ macro_rules! 
decl_test_u128 { decl_test_bytes!( test_current_account_id, current_account_id, - create_context().current_account_id.as_ref().as_bytes() + ctx, + ctx.current_account_id.as_ref().as_bytes() ); decl_test_bytes!( test_signer_account_id, signer_account_id, - create_context().signer_account_id.as_ref().as_bytes() + ctx, + ctx.signer_account_id.as_ref().as_bytes() ); decl_test_bytes!( test_predecessor_account_id, predecessor_account_id, - create_context().predecessor_account_id.as_ref().as_bytes() -); -decl_test_bytes!( - test_signer_account_pk, - signer_account_pk, - create_context().signer_account_pk.as_slice() + ctx, + ctx.predecessor_account_id.as_ref().as_bytes() ); +decl_test_bytes!(test_signer_account_pk, signer_account_pk, ctx, ctx.signer_account_pk); -decl_test_bytes!(test_random_seed, random_seed, create_context().random_seed.as_slice()); +decl_test_bytes!(test_random_seed, random_seed, ctx, ctx.random_seed); -decl_test_bytes!(test_input, input, create_context().input.as_slice()); +decl_test_bytes!(test_input, input, ctx, ctx.input); -decl_test_u64!(test_block_index, block_index, create_context().block_height); -decl_test_u64!(test_block_timestamp, block_timestamp, create_context().block_timestamp); -decl_test_u64!(test_storage_usage, storage_usage, create_context().storage_usage); -decl_test_u64!(test_prepaid_gas, prepaid_gas, create_context().prepaid_gas); +decl_test_u64!(test_block_index, block_index, ctx, ctx.block_height); +decl_test_u64!(test_block_timestamp, block_timestamp, ctx, ctx.block_timestamp); +decl_test_u64!(test_storage_usage, storage_usage, ctx, ctx.storage_usage); +decl_test_u64!(test_prepaid_gas, prepaid_gas, ctx, ctx.prepaid_gas); decl_test_u128!( test_account_balance, account_balance, - create_context().account_balance + create_context().attached_deposit + ctx, + ctx.account_balance + ctx.attached_deposit ); decl_test_u128!( test_account_locked_balance, account_locked_balance, - create_context().account_locked_balance + ctx, + 
ctx.account_locked_balance ); -decl_test_u128!(test_attached_deposit, attached_deposit, create_context().attached_deposit); +decl_test_u128!(test_attached_deposit, attached_deposit, ctx, ctx.attached_deposit); diff --git a/runtime/near-vm-logic/src/tests/ed25519_verify.rs b/runtime/near-vm-logic/src/tests/ed25519_verify.rs index 2494485808b..4679c4f9dc4 100644 --- a/runtime/near-vm-logic/src/tests/ed25519_verify.rs +++ b/runtime/near-vm-logic/src/tests/ed25519_verify.rs @@ -1,4 +1,3 @@ -use crate::tests::fixtures::get_context; use crate::tests::helpers::*; use crate::tests::vm_logic_builder::VMLogicBuilder; use crate::{map, ExtCosts}; @@ -54,7 +53,7 @@ fn check_ed25519_verify( want_costs: HashMap, ) { let mut logic_builder = VMLogicBuilder::default(); - let mut logic = logic_builder.build(get_context(vec![], false)); + let mut logic = logic_builder.build(); let signature_ptr = if signature_len == u64::MAX { logic.wrapped_internal_write_register(1, &signature).unwrap(); diff --git a/runtime/near-vm-logic/src/tests/fixtures.rs b/runtime/near-vm-logic/src/tests/fixtures.rs deleted file mode 100644 index 14b3df5add8..00000000000 --- a/runtime/near-vm-logic/src/tests/fixtures.rs +++ /dev/null @@ -1,33 +0,0 @@ -use crate::{VMContext, VMLimitConfig}; -use near_primitives_core::config::ViewConfig; -use near_primitives_core::types::Gas; - -pub fn get_context(input: Vec, is_view: bool) -> VMContext { - VMContext { - current_account_id: "alice.near".parse().unwrap(), - signer_account_id: "bob.near".parse().unwrap(), - signer_account_pk: vec![0, 1, 2], - predecessor_account_id: "carol.near".parse().unwrap(), - input, - block_height: 0, - block_timestamp: 0, - epoch_height: 0, - account_balance: 100, - storage_usage: 0, - account_locked_balance: 0, - attached_deposit: 10, - prepaid_gas: 10_u64.pow(14), - random_seed: vec![], - view_config: match is_view { - true => Some(ViewConfig { max_gas_burnt: VMLimitConfig::test().max_gas_burnt }), - false => None, - }, - 
output_data_receivers: vec![], - } -} - -pub fn get_context_with_prepaid_gas(prepaid_gas: Gas) -> VMContext { - let mut context = get_context(vec![], false); - context.prepaid_gas = prepaid_gas; - context -} diff --git a/runtime/near-vm-logic/src/tests/gas_counter.rs b/runtime/near-vm-logic/src/tests/gas_counter.rs index 3d3a838f06e..ebbe997d0f2 100644 --- a/runtime/near-vm-logic/src/tests/gas_counter.rs +++ b/runtime/near-vm-logic/src/tests/gas_counter.rs @@ -1,9 +1,9 @@ use crate::receipt_manager::ReceiptMetadata; -use crate::tests::fixtures::get_context_with_prepaid_gas; use crate::tests::helpers::*; use crate::tests::vm_logic_builder::{TestVMLogic, VMLogicBuilder}; use crate::types::Gas; -use crate::VMConfig; +use crate::{MemSlice, VMConfig}; +use borsh::BorshSerialize; use expect_test::expect; use near_primitives::config::{ActionCosts, ExtCosts}; use near_primitives::runtime::fees::Fee; @@ -14,8 +14,10 @@ use near_vm_errors::{HostError, VMLogicError}; fn test_dont_burn_gas_when_exceeding_attached_gas_limit() { let gas_limit = 10u64.pow(14); - let mut logic_builder = VMLogicBuilder::default().max_gas_burnt(gas_limit * 2); - let mut logic = logic_builder.build(get_context_with_prepaid_gas(gas_limit)); + let mut logic_builder = VMLogicBuilder::default(); + logic_builder.config.limit_config.max_gas_burnt = gas_limit * 2; + logic_builder.context.prepaid_gas = gas_limit; + let mut logic = logic_builder.build(); let index = promise_create(&mut logic, b"rick.test", 0, 0).expect("should create a promise"); promise_batch_action_function_call(&mut logic, index, 0, gas_limit * 2) @@ -32,8 +34,10 @@ fn test_limit_wasm_gas_after_attaching_gas() { let gas_limit = 10u64.pow(14); let op_limit = op_limit(gas_limit); - let mut logic_builder = VMLogicBuilder::default().max_gas_burnt(gas_limit * 2); - let mut logic = logic_builder.build(get_context_with_prepaid_gas(gas_limit)); + let mut logic_builder = VMLogicBuilder::default(); + logic_builder.config.limit_config.max_gas_burnt = 
gas_limit * 2; + logic_builder.context.prepaid_gas = gas_limit; + let mut logic = logic_builder.build(); let index = promise_create(&mut logic, b"rick.test", 0, 0).expect("should create a promise"); promise_batch_action_function_call(&mut logic, index, 0, gas_limit / 2) @@ -51,8 +55,10 @@ fn test_cant_burn_more_than_max_gas_burnt_gas() { let gas_limit = 10u64.pow(14); let op_limit = op_limit(gas_limit); - let mut logic_builder = VMLogicBuilder::default().max_gas_burnt(gas_limit); - let mut logic = logic_builder.build(get_context_with_prepaid_gas(gas_limit * 2)); + let mut logic_builder = VMLogicBuilder::default(); + logic_builder.config.limit_config.max_gas_burnt = gas_limit; + logic_builder.context.prepaid_gas = gas_limit * 2; + let mut logic = logic_builder.build(); logic.gas(op_limit * 3).expect_err("should fail with gas limit"); let outcome = logic.compute_outcome_and_distribute_gas(); @@ -66,8 +72,10 @@ fn test_cant_burn_more_than_prepaid_gas() { let gas_limit = 10u64.pow(14); let op_limit = op_limit(gas_limit); - let mut logic_builder = VMLogicBuilder::default().max_gas_burnt(gas_limit * 2); - let mut logic = logic_builder.build(get_context_with_prepaid_gas(gas_limit)); + let mut logic_builder = VMLogicBuilder::default(); + logic_builder.config.limit_config.max_gas_burnt = gas_limit * 2; + logic_builder.context.prepaid_gas = gas_limit; + let mut logic = logic_builder.build(); logic.gas(op_limit * 3).expect_err("should fail with gas limit"); let outcome = logic.compute_outcome_and_distribute_gas(); @@ -81,8 +89,10 @@ fn test_hit_max_gas_burnt_limit() { let gas_limit = 10u64.pow(14); let op_limit = op_limit(gas_limit); - let mut logic_builder = VMLogicBuilder::default().max_gas_burnt(gas_limit); - let mut logic = logic_builder.build(get_context_with_prepaid_gas(gas_limit * 3)); + let mut logic_builder = VMLogicBuilder::default(); + logic_builder.config.limit_config.max_gas_burnt = gas_limit; + logic_builder.context.prepaid_gas = gas_limit * 3; + let mut logic = 
logic_builder.build(); promise_create(&mut logic, b"rick.test", 0, gas_limit / 2).expect("should create a promise"); logic.gas(op_limit * 2).expect_err("should fail with gas limit"); @@ -97,8 +107,10 @@ fn test_hit_prepaid_gas_limit() { let gas_limit = 10u64.pow(14); let op_limit = op_limit(gas_limit); - let mut logic_builder = VMLogicBuilder::default().max_gas_burnt(gas_limit * 3); - let mut logic = logic_builder.build(get_context_with_prepaid_gas(gas_limit)); + let mut logic_builder = VMLogicBuilder::default(); + logic_builder.config.limit_config.max_gas_burnt = gas_limit * 3; + logic_builder.context.prepaid_gas = gas_limit; + let mut logic = logic_builder.build(); promise_create(&mut logic, b"rick.test", 0, gas_limit / 2).expect("should create a promise"); logic.gas(op_limit * 2).expect_err("should fail with gas limit"); @@ -124,8 +136,10 @@ fn assert_with_gas(receipt: &ReceiptMetadata, expcted_gas: Gas) { fn function_call_weight_check(function_calls: &[(Gas, u64, Gas)]) { let gas_limit = 10_000_000_000; - let mut logic_builder = VMLogicBuilder::free().max_gas_burnt(gas_limit); - let mut logic = logic_builder.build(get_context_with_prepaid_gas(gas_limit)); + let mut logic_builder = VMLogicBuilder::free(); + logic_builder.config.limit_config.max_gas_burnt = gas_limit; + logic_builder.context.prepaid_gas = gas_limit; + let mut logic = logic_builder.build(); let mut ratios = vec![]; @@ -210,8 +224,10 @@ fn function_call_weight_basic_cases_test() { fn function_call_no_weight_refund() { let gas_limit = 10u64.pow(14); - let mut logic_builder = VMLogicBuilder::default().max_gas_burnt(gas_limit); - let mut logic = logic_builder.build(get_context_with_prepaid_gas(gas_limit)); + let mut logic_builder = VMLogicBuilder::default(); + logic_builder.config.limit_config.max_gas_burnt = gas_limit; + logic_builder.context.prepaid_gas = gas_limit; + let mut logic = logic_builder.build(); let index = promise_batch_create(&mut logic, "rick.test").expect("should create a promise"); 
promise_batch_action_function_call_weight(&mut logic, index, 0, 1000, 0) @@ -226,8 +242,10 @@ fn function_call_no_weight_refund() { #[test] fn test_overflowing_burn_gas_with_promises_gas() { let gas_limit = 3 * 10u64.pow(14); - let mut logic_builder = VMLogicBuilder::default().max_gas_burnt(gas_limit); - let mut logic = logic_builder.build(get_context_with_prepaid_gas(gas_limit)); + let mut logic_builder = VMLogicBuilder::default(); + logic_builder.config.limit_config.max_gas_burnt = gas_limit; + logic_builder.context.prepaid_gas = gas_limit; + let mut logic = logic_builder.build(); let account_id = logic.internal_mem_write(b"rick.test"); let args = logic.internal_mem_write(b""); @@ -260,8 +278,10 @@ fn test_overflowing_burn_gas_with_promises_gas() { #[test] fn test_overflowing_burn_gas_with_promises_gas_2() { let gas_limit = 3 * 10u64.pow(14); - let mut logic_builder = VMLogicBuilder::default().max_gas_burnt(gas_limit); - let mut logic = logic_builder.build(get_context_with_prepaid_gas(gas_limit / 2)); + let mut logic_builder = VMLogicBuilder::default(); + logic_builder.config.limit_config.max_gas_burnt = gas_limit; + logic_builder.context.prepaid_gas = gas_limit / 2; + let mut logic = logic_builder.build(); let account_id = logic.internal_mem_write(b"rick.test"); let args = logic.internal_mem_write(b""); @@ -271,7 +291,9 @@ fn test_overflowing_burn_gas_with_promises_gas_2() { logic.promise_batch_action_transfer(index, num_100u128.ptr).unwrap(); logic.promise_batch_then(index, account_id.len, account_id.ptr).unwrap(); let minimum_prepay = logic.gas_counter().used_gas(); - let mut logic = logic_builder.build(get_context_with_prepaid_gas(minimum_prepay)); + let mut logic_builder = logic_builder; + logic_builder.context.prepaid_gas = minimum_prepay; + let mut logic = logic_builder.build(); let index = promise_batch_create(&mut logic, "rick.test").expect("should create a promise"); logic.promise_batch_action_transfer(index, num_100u128.ptr).unwrap(); let call_id = 
logic.promise_batch_then(index, account_id.len, account_id.ptr).unwrap(); @@ -299,6 +321,17 @@ fn test_overflowing_burn_gas_with_promises_gas_2() { /// Increases an action cost to a high value and then watch an execution run out /// of gas. Then make sure the exact result is still the same. This prevents /// accidental protocol changes where gas is deducted in different order. +/// +/// The `exercise_action` function must be a function or closure that operates +/// on a `VMLogic` and triggers gas costs associated with the action parameter +/// under test. +/// +/// `num_action_paid` specifies how often the cost is charged in +/// `exercise_action`. We aim to make it `num_action_paid` = 1 in the typical +/// case but for cots per byte this is usually a higher value. +/// +/// `num_action_paid` is required to calculate by how much exactly gas prices +/// must be increased so that it will just trigger the gas limit. #[track_caller] fn check_action_gas_exceeds_limit( cost: ActionCosts, @@ -313,8 +346,12 @@ fn check_action_gas_exceeds_limit( send_not_sir: gas_limit / num_action_paid + 10, execution: 1, // exec part is `used`, make it small }; - let mut logic_builder = VMLogicBuilder::default().max_gas_burnt(gas_limit).gas_fee(cost, fee); - let mut logic = logic_builder.build(get_context_with_prepaid_gas(gas_attached)); + let mut logic_builder = VMLogicBuilder::default(); + logic_builder.config.limit_config.max_gas_burnt = gas_limit; + logic_builder.fees_config.action_fees[cost] = fee; + logic_builder.context.prepaid_gas = gas_attached; + logic_builder.context.output_data_receivers = vec!["alice.test".parse().unwrap()]; + let mut logic = logic_builder.build(); let result = exercise_action(&mut logic); assert!(result.is_err(), "expected out-of-gas error for {cost:?} but was ok"); @@ -333,14 +370,19 @@ fn check_action_gas_exceeds_limit( ); } -/// Check consistent result when exceeding attached gas on a specific action gas parameter. 
+/// Check consistent result when exceeding attached gas on a specific action gas +/// parameter. /// /// Very similar to `check_action_gas_exceeds_limit` but we hit a different -/// limit and return a different error. +/// limit and return a different error. See that comment for an explanation on +/// the arguments. /// /// This case is more interesting because the burnt gas can be below used gas, /// when the prepaid gas was exceeded by burnt burnt + promised gas but not by /// burnt gas alone. +/// +/// Consequently, `num_action_paid` here is even more important to calculate +/// exactly what the gas costs should be to trigger the limits. #[track_caller] fn check_action_gas_exceeds_attached( cost: ActionCosts, @@ -356,8 +398,12 @@ fn check_action_gas_exceeds_attached( send_not_sir: 10, // make it easy to distinguish `sir` / `not_sir` execution: gas_attached / num_action_paid + 1, }; - let mut logic_builder = VMLogicBuilder::default().max_gas_burnt(gas_limit).gas_fee(cost, fee); - let mut logic = logic_builder.build(get_context_with_prepaid_gas(gas_attached)); + let mut logic_builder = VMLogicBuilder::default(); + logic_builder.config.limit_config.max_gas_burnt = gas_limit; + logic_builder.fees_config.action_fees[cost] = fee; + logic_builder.context.prepaid_gas = gas_attached; + logic_builder.context.output_data_receivers = vec!["alice.test".parse().unwrap()]; + let mut logic = logic_builder.build(); let result = exercise_action(&mut logic); assert!(result.is_err(), "expected out-of-gas error for {cost:?} but was ok"); @@ -371,6 +417,30 @@ fn check_action_gas_exceeds_attached( expected.assert_eq(&actual); } +// Below are a bunch of `out_of_gas_*` tests. These test that when we run out of +// gas while charging a specific action gas cost, we burn a consistent amount of +// gas. This is to prevent accidental changes in how we charge gas. 
It cannot +// cover all cases but it can detect things like a changed order of gas charging +// or splitting pay_gas(A+B) to pay_gas(A), pay_gas(B), which went through to +// master unnoticed before. +// +// The setup for these tests is as follows: +// - 1 test per action cost +// - each test checks for 2 types of out of gas errors, gas limit exceeded and +// gas attached exceeded +// - common code to create a test VMLogic setup is in checker functions +// `check_action_gas_exceeds_limit` and `check_action_gas_exceeds_attached` +// which are called from every test +// - each action cost must be triggered in a different way, so we define a small +// function that does something which charges the tested action cost, then we +// give this function to the checker functions +// - if an action cost is charged through different paths, the test defines +// multiple functions that trigger the cost and the checker functions are +// called once for each of them +// - these action cost triggering functions are defined in the test's inner +// scope, unless they are shared between multiple tests + +/// see longer comment above for how this test works #[test] fn out_of_gas_new_action_receipt() { // two different ways to create an action receipts, first check exceeding the burnt limit @@ -391,8 +461,15 @@ fn out_of_gas_new_action_receipt() { expect!["9411968532130 burnt 10000000000000 used"], create_promise_dependency, ); + + /// function to trigger action receipt action cost + fn create_action_receipt(logic: &mut TestVMLogic) -> Result<(), VMLogicError> { + promise_batch_create(logic, "rick.test")?; + Ok(()) + } } +/// see longer comment above for how this test works #[test] fn out_of_gas_new_data_receipt() { check_action_gas_exceeds_limit( @@ -409,11 +486,329 @@ fn out_of_gas_new_data_receipt() { ); } -fn create_action_receipt(logic: &mut TestVMLogic) -> Result<(), VMLogicError> { - promise_batch_create(logic, "rick.test")?; +/// see longer comment above for how this test works 
+#[test] +fn out_of_gas_new_data_receipt_byte() { + check_action_gas_exceeds_limit(ActionCosts::new_data_receipt_byte, 11, value_return); + + // expect to burn it all because send + exec fees are fully paid upfront + check_action_gas_exceeds_attached( + ActionCosts::new_data_receipt_byte, + 11, + expect!["10000000000000 burnt 10000000000000 used"], + value_return, + ); + + // value return will pay for the cost of returned data dependency bytes, if there are any. + fn value_return(logic: &mut TestVMLogic) -> Result<(), VMLogicError> { + // 11 characters long string + let value = logic.internal_mem_write(b"lorem ipsum"); + logic.value_return(11, value.ptr)?; + Ok(()) + } +} + +/// see longer comment above for how this test works +#[test] +fn out_of_gas_create_account() { + check_action_gas_exceeds_limit(ActionCosts::create_account, 1, create_account); + + check_action_gas_exceeds_attached( + ActionCosts::create_account, + 1, + expect!["116969114801 burnt 10000000000000 used"], + create_account, + ); + + fn create_account(logic: &mut TestVMLogic) -> Result<(), VMLogicError> { + let account_id = "rick.test"; + let idx = promise_batch_create(logic, account_id)?; + logic.promise_batch_action_create_account(idx)?; + Ok(()) + } +} + +/// see longer comment above for how this test works +#[test] +fn out_of_gas_delete_account() { + check_action_gas_exceeds_limit(ActionCosts::delete_account, 1, delete_account); + + check_action_gas_exceeds_attached( + ActionCosts::delete_account, + 1, + expect!["125349193370 burnt 10000000000000 used"], + delete_account, + ); + + fn delete_account(logic: &mut TestVMLogic) -> Result<(), VMLogicError> { + let beneficiary_account_id = "alice.test"; + let deleted_account_id = "bob.test"; + let idx = promise_batch_create(logic, deleted_account_id)?; + let beneficiary = logic.internal_mem_write(beneficiary_account_id.as_bytes()); + logic.promise_batch_action_delete_account(idx, beneficiary.len, beneficiary.ptr)?; + Ok(()) + } +} + +/// see longer 
comment above for how this test works +#[test] +fn out_of_gas_deploy_contract_base() { + check_action_gas_exceeds_limit(ActionCosts::deploy_contract_base, 1, deploy_contract); + + check_action_gas_exceeds_attached( + ActionCosts::deploy_contract_base, + 1, + expect!["119677812659 burnt 10000000000000 used"], + deploy_contract, + ); +} + +/// see longer comment above for how this test works +#[test] +fn out_of_gas_deploy_contract_byte() { + check_action_gas_exceeds_limit(ActionCosts::deploy_contract_byte, 26, deploy_contract); + + check_action_gas_exceeds_attached( + ActionCosts::deploy_contract_byte, + 26, + expect!["304443562909 burnt 10000000000000 used"], + deploy_contract, + ); +} + +/// function to trigger base + 26 bytes deployment costs (26 is arbitrary) +fn deploy_contract(logic: &mut TestVMLogic) -> Result<(), VMLogicError> { + let account_id = "rick.test"; + let idx = promise_batch_create(logic, account_id)?; + let code = logic.internal_mem_write(b"lorem ipsum with length 26"); + logic.promise_batch_action_deploy_contract(idx, code.len, code.ptr)?; Ok(()) } +/// see longer comment above for how this test works +#[test] +fn out_of_gas_function_call_base() { + check_action_gas_exceeds_limit(ActionCosts::function_call_base, 1, cross_contract_call); + check_action_gas_exceeds_limit( + ActionCosts::function_call_base, + 1, + cross_contract_call_gas_weight, + ); + + check_action_gas_exceeds_attached( + ActionCosts::function_call_base, + 1, + expect!["125011579049 burnt 10000000000000 used"], + cross_contract_call, + ); + check_action_gas_exceeds_attached( + ActionCosts::function_call_base, + 1, + expect!["125011579049 burnt 10000000000000 used"], + cross_contract_call_gas_weight, + ); +} + +/// see longer comment above for how this test works +#[test] +fn out_of_gas_function_call_byte() { + check_action_gas_exceeds_limit(ActionCosts::function_call_byte, 40, cross_contract_call); + check_action_gas_exceeds_limit( + ActionCosts::function_call_byte, + 40, + 
cross_contract_call_gas_weight, + ); + + check_action_gas_exceeds_attached( + ActionCosts::function_call_byte, + 40, + expect!["2444873079439 burnt 10000000000000 used"], + cross_contract_call, + ); + check_action_gas_exceeds_attached( + ActionCosts::function_call_byte, + 40, + expect!["2444873079439 burnt 10000000000000 used"], + cross_contract_call_gas_weight, + ); +} + +/// function to trigger base + 40 bytes function call action costs (40 is 26 + +/// 14 which are arbitrary) +fn cross_contract_call(logic: &mut TestVMLogic) -> Result<(), VMLogicError> { + let account_id = "rick.test"; + let idx = promise_batch_create(logic, account_id)?; + let arg = b"lorem ipsum with length 26"; + let name = b"fn_with_len_14"; + let attached_balance = 1u128; + let gas = 1; // attaching very little gas so it doesn't cause gas exceeded on its own + promise_batch_action_function_call_ext(logic, idx, name, arg, attached_balance, gas)?; + Ok(()) +} + +/// same as `cross_contract_call` but splits gas remainder among outgoing calls +fn cross_contract_call_gas_weight(logic: &mut TestVMLogic) -> Result<(), VMLogicError> { + let account_id = "rick.test"; + let idx = promise_batch_create(logic, account_id)?; + let arg = b"lorem ipsum with length 26"; + let name = b"fn_with_len_14"; + let attached_balance = 1u128; + let gas = 1; // attaching very little gas so it doesn't cause gas exceeded on its own + let gas_weight = 1; + promise_batch_action_function_call_weight_ext( + logic, + idx, + name, + arg, + attached_balance, + gas, + gas_weight, + )?; + Ok(()) +} + +/// see longer comment above for how this test works +#[test] +fn out_of_gas_transfer() { + check_action_gas_exceeds_limit(ActionCosts::transfer, 1, promise_transfer); + + check_action_gas_exceeds_attached( + ActionCosts::transfer, + 1, + expect!["119935181141 burnt 10000000000000 used"], + promise_transfer, + ); + + fn promise_transfer(logic: &mut TestVMLogic) -> Result<(), VMLogicError> { + let account_id = "alice.test"; + let idx 
= promise_batch_create(logic, account_id)?; + let attached_balance = logic.internal_mem_write(&1u128.to_be_bytes()); + logic.promise_batch_action_transfer(idx, attached_balance.ptr)?; + Ok(()) + } +} + +/// see longer comment above for how this test works +#[test] +fn out_of_gas_stake() { + check_action_gas_exceeds_limit(ActionCosts::stake, 1, promise_stake); + + check_action_gas_exceeds_attached( + ActionCosts::stake, + 1, + expect!["122375106518 burnt 10000000000000 used"], + promise_stake, + ); + + fn promise_stake(logic: &mut TestVMLogic) -> Result<(), VMLogicError> { + let account_id = "pool.test"; + let idx = promise_batch_create(logic, account_id)?; + let attached_balance = logic.internal_mem_write(&1u128.to_be_bytes()); + let pk = write_test_pk(logic); + logic.promise_batch_action_stake(idx, attached_balance.ptr, pk.len, pk.ptr)?; + Ok(()) + } +} + +/// see longer comment above for how this test works +#[test] +fn out_of_gas_add_full_access_key() { + check_action_gas_exceeds_limit(ActionCosts::add_full_access_key, 1, promise_full_access_key); + + check_action_gas_exceeds_attached( + ActionCosts::add_full_access_key, + 1, + expect!["119999803802 burnt 10000000000000 used"], + promise_full_access_key, + ); + + fn promise_full_access_key(logic: &mut TestVMLogic) -> Result<(), VMLogicError> { + let account_id = "alice.test"; + let idx = promise_batch_create(logic, account_id)?; + let pk = test_pk(); + let nonce = 0; + promise_batch_action_add_key_with_full_access(logic, idx, &pk, nonce)?; + Ok(()) + } +} + +/// see longer comment above for how this test works +#[test] +fn out_of_gas_add_function_call_key_base() { + check_action_gas_exceeds_limit( + ActionCosts::add_function_call_key_base, + 1, + promise_function_key, + ); + + check_action_gas_exceeds_attached( + ActionCosts::add_function_call_key_base, + 1, + expect!["133982421242 burnt 10000000000000 used"], + promise_function_key, + ); +} + +/// see longer comment above for how this test works +#[test] +fn 
out_of_gas_add_function_call_key_byte() { + check_action_gas_exceeds_limit( + ActionCosts::add_function_call_key_byte, + 7, + promise_function_key, + ); + + check_action_gas_exceeds_attached( + ActionCosts::add_function_call_key_byte, + 7, + expect!["236200046312 burnt 10000000000000 used"], + promise_function_key, + ); +} + +/// function to trigger base + 7 bytes action costs for adding a new function +/// call access key to an account (7 is arbitrary) +fn promise_function_key(logic: &mut TestVMLogic) -> Result<(), VMLogicError> { + let account_id = "alice.test"; + let idx = promise_batch_create(logic, account_id)?; + let allowance = 1u128; + let pk = test_pk(); + let nonce = 0; + let methods = b"foo,baz"; + promise_batch_action_add_key_with_function_call( + logic, + idx, + &pk, + nonce, + allowance, + account_id.as_bytes(), + methods, + )?; + Ok(()) +} + +/// see longer comment above for how this test works +#[test] +fn out_of_gas_delete_key() { + check_action_gas_exceeds_limit(ActionCosts::delete_key, 1, promise_delete_key); + + check_action_gas_exceeds_attached( + ActionCosts::delete_key, + 1, + expect!["119999803802 burnt 10000000000000 used"], + promise_delete_key, + ); + + fn promise_delete_key(logic: &mut TestVMLogic) -> Result<(), VMLogicError> { + let account_id = "alice.test"; + let idx = promise_batch_create(logic, account_id)?; + let pk = write_test_pk(logic); + logic.promise_batch_action_delete_key(idx, pk.len, pk.ptr)?; + Ok(()) + } +} + +/// function to trigger action + data receipt action costs fn create_promise_dependency(logic: &mut TestVMLogic) -> Result<(), VMLogicError> { let account_id = "rick.test"; let idx = promise_batch_create(logic, account_id)?; @@ -427,3 +822,16 @@ fn create_promise_dependency(logic: &mut TestVMLogic) -> Result<(), VMLogicError fn op_limit(gas_limit: Gas) -> u32 { (gas_limit / (VMConfig::test().regular_op_cost as u64)) as u32 } + +fn test_pk() -> Vec<u8> { + let pk = "ed25519:22W5rKuvbMRphnDoCj6nfrWhRKvh9Xf9SWXfGHaeXGde" + 
.parse::<near_crypto::PublicKey>() + .unwrap() + .try_to_vec() + .unwrap(); + pk +} + +fn write_test_pk(logic: &mut TestVMLogic) -> MemSlice { + logic.internal_mem_write(&test_pk()) +} diff --git a/runtime/near-vm-logic/src/tests/helpers.rs b/runtime/near-vm-logic/src/tests/helpers.rs index 33ed769dd51..24fe98390e2 100644 --- a/runtime/near-vm-logic/src/tests/helpers.rs +++ b/runtime/near-vm-logic/src/tests/helpers.rs @@ -42,8 +42,27 @@ pub(super) fn promise_batch_action_function_call( amount: u128, gas: Gas, ) -> Result<()> { - let method_id = logic.internal_mem_write(b"promise_batch_action"); - let args = logic.internal_mem_write(b"promise_batch_action_args"); + promise_batch_action_function_call_ext( + logic, + promise_index, + b"promise_batch_action", + b"promise_batch_action_args", + amount, + gas, + ) +} + +#[allow(dead_code)] +pub(super) fn promise_batch_action_function_call_ext( + logic: &mut TestVMLogic<'_>, + promise_index: u64, + method_id: &[u8], + args: &[u8], + amount: u128, + gas: Gas, +) -> Result<()> { + let method_id = logic.internal_mem_write(method_id); + let args = logic.internal_mem_write(args); let amount = logic.internal_mem_write(&amount.to_le_bytes()); logic.promise_batch_action_function_call( @@ -65,8 +84,29 @@ pub(super) fn promise_batch_action_function_call_weight( gas: Gas, weight: u64, ) -> Result<()> { - let method_id = logic.internal_mem_write(b"promise_batch_action"); - let args = logic.internal_mem_write(b"promise_batch_action_args"); + promise_batch_action_function_call_weight_ext( + logic, + promise_index, + b"promise_batch_action", + b"promise_batch_action_args", + amount, + gas, + weight, + ) +} + +#[allow(dead_code)] +pub(super) fn promise_batch_action_function_call_weight_ext( + logic: &mut TestVMLogic<'_>, + promise_index: u64, + method_id: &[u8], + args: &[u8], + amount: u128, + gas: Gas, + weight: u64, +) -> Result<()> { + let method_id = logic.internal_mem_write(method_id); + let args = logic.internal_mem_write(args); let amount = 
logic.internal_mem_write(&amount.to_le_bytes()); logic.promise_batch_action_function_call_weight( @@ -109,6 +149,23 @@ pub(super) fn promise_batch_action_add_key_with_function_call( ) } +#[allow(dead_code)] +pub(super) fn promise_batch_action_add_key_with_full_access( + logic: &mut TestVMLogic<'_>, + promise_index: u64, + public_key: &[u8], + nonce: u64, +) -> Result<()> { + let public_key = logic.internal_mem_write(public_key); + + logic.promise_batch_action_add_key_with_full_access( + promise_index, + public_key.len, + public_key.ptr, + nonce, + ) +} + #[macro_export] macro_rules! map( { $($key:path: $value:expr,)+ } => { diff --git a/runtime/near-vm-logic/src/tests/iterators.rs b/runtime/near-vm-logic/src/tests/iterators.rs index b8ab5f0f426..492ead46b0c 100644 --- a/runtime/near-vm-logic/src/tests/iterators.rs +++ b/runtime/near-vm-logic/src/tests/iterators.rs @@ -1,12 +1,10 @@ -use crate::tests::fixtures::get_context; use crate::tests::vm_logic_builder::VMLogicBuilder; use near_vm_errors::{HostError, VMLogicError}; #[test] fn test_iterator_deprecated() { - let context = get_context(vec![], false); let mut logic_builder = VMLogicBuilder::default(); - let mut logic = logic_builder.build(context); + let mut logic = logic_builder.build(); assert_eq!( Err(VMLogicError::HostError(HostError::Deprecated { method_name: "storage_iter_prefix".to_string() diff --git a/runtime/near-vm-logic/src/tests/logs.rs b/runtime/near-vm-logic/src/tests/logs.rs new file mode 100644 index 00000000000..04ee1cb814f --- /dev/null +++ b/runtime/near-vm-logic/src/tests/logs.rs @@ -0,0 +1,509 @@ +use crate::tests::helpers::*; +use crate::tests::vm_logic_builder::VMLogicBuilder; +use crate::{map, ExtCosts, MemSlice, VMLogic, VMLogicError}; +use near_vm_errors::HostError; + +#[test] +fn test_valid_utf8() { + let mut logic_builder = VMLogicBuilder::default(); + let mut logic = logic_builder.build(); + let string = "j ñ r'ø qò$`5 y'5 øò{%÷ `Võ%"; + let bytes = 
logic.internal_mem_write(string.as_bytes()); + logic.log_utf8(bytes.len, bytes.ptr).expect("Valid UTF-8 in bytes"); + let outcome = logic.compute_outcome_and_distribute_gas(); + assert_eq!(outcome.logs[0], string); + assert_costs(map! { + ExtCosts::base: 1, + ExtCosts::log_base: 1, + ExtCosts::log_byte: bytes.len, + ExtCosts::read_memory_base: 1, + ExtCosts::read_memory_byte: bytes.len, + ExtCosts::utf8_decoding_base: 1, + ExtCosts::utf8_decoding_byte: bytes.len, + }); +} + +#[test] +fn test_invalid_utf8() { + let mut logic_builder = VMLogicBuilder::default(); + let mut logic = logic_builder.build(); + let bytes = logic.internal_mem_write(b"\x80"); + assert_eq!(logic.log_utf8(bytes.len, bytes.ptr), Err(HostError::BadUTF8.into())); + let outcome = logic.compute_outcome_and_distribute_gas(); + assert_eq!(outcome.logs.len(), 0); + assert_costs(map! { + ExtCosts::base: 1, + ExtCosts::read_memory_base: 1, + ExtCosts::read_memory_byte: bytes.len, + ExtCosts::utf8_decoding_base: 1, + ExtCosts::utf8_decoding_byte: bytes.len, + }); +} + +#[test] +fn test_valid_null_terminated_utf8() { + let mut logic_builder = VMLogicBuilder::default(); + + let cstring = "j ñ r'ø qò$`5 y'5 øò{%÷ `Võ%\x00"; + let string = &cstring[..cstring.len() - 1]; + logic_builder.config.limit_config.max_total_log_length = string.len() as u64; + let mut logic = logic_builder.build(); + let bytes = logic.internal_mem_write(cstring.as_bytes()); + logic.log_utf8(u64::MAX, bytes.ptr).expect("Valid null-terminated utf-8 string_bytes"); + let outcome = logic.compute_outcome_and_distribute_gas(); + assert_costs(map! 
{ + ExtCosts::base: 1, + ExtCosts::log_base: 1, + ExtCosts::log_byte: string.len() as u64, + ExtCosts::read_memory_base: bytes.len, + ExtCosts::read_memory_byte: bytes.len, + ExtCosts::utf8_decoding_base: 1, + ExtCosts::utf8_decoding_byte: string.len() as u64, + }); + assert_eq!(outcome.logs[0], string); +} + +#[test] +fn test_log_max_limit() { + let mut logic_builder = VMLogicBuilder::default(); + let string = "j ñ r'ø qò$`5 y'5 øò{%÷ `Võ%"; + let limit = string.len() as u64 - 1; + logic_builder.config.limit_config.max_total_log_length = limit; + let mut logic = logic_builder.build(); + let bytes = logic.internal_mem_write(string.as_bytes()); + + assert_eq!( + logic.log_utf8(bytes.len, bytes.ptr), + Err(HostError::TotalLogLengthExceeded { length: bytes.len, limit }.into()) + ); + + assert_costs(map! { + ExtCosts::base: 1, + ExtCosts::utf8_decoding_base: 1, + }); + + let outcome = logic.compute_outcome_and_distribute_gas(); + assert_eq!(outcome.logs.len(), 0); +} + +#[test] +fn test_log_total_length_limit() { + let mut logic_builder = VMLogicBuilder::default(); + let string = "j ñ r'ø qò$`5 y'5 øò{%÷ `Võ%".as_bytes(); + let num_logs = 10; + let limit = string.len() as u64 * num_logs - 1; + logic_builder.config.limit_config.max_total_log_length = limit; + logic_builder.config.limit_config.max_number_logs = num_logs; + let mut logic = logic_builder.build(); + let bytes = logic.internal_mem_write(string); + + for _ in 0..num_logs - 1 { + logic.log_utf8(bytes.len, bytes.ptr).expect("total is still under the limit"); + } + assert_eq!( + logic.log_utf8(bytes.len, bytes.ptr), + Err(HostError::TotalLogLengthExceeded { length: limit + 1, limit }.into()) + ); + + let outcome = logic.compute_outcome_and_distribute_gas(); + assert_eq!(outcome.logs.len() as u64, num_logs - 1); +} + +#[test] +fn test_log_number_limit() { + let mut logic_builder = VMLogicBuilder::default(); + let string = "blabla"; + let max_number_logs = 3; + 
logic_builder.config.limit_config.max_total_log_length = + (string.len() + 1) as u64 * (max_number_logs + 1); + logic_builder.config.limit_config.max_number_logs = max_number_logs; + let mut logic = logic_builder.build(); + let bytes = logic.internal_mem_write(string.as_bytes()); + for _ in 0..max_number_logs { + logic + .log_utf8(bytes.len, bytes.ptr) + .expect("Valid utf-8 string_bytes under the log number limit"); + } + assert_eq!( + logic.log_utf8(bytes.len, bytes.ptr), + Err(HostError::NumberOfLogsExceeded { limit: max_number_logs }.into()) + ); + + assert_costs(map! { + ExtCosts::base: max_number_logs + 1, + ExtCosts::log_base: max_number_logs, + ExtCosts::log_byte: bytes.len * max_number_logs, + ExtCosts::read_memory_base: max_number_logs, + ExtCosts::read_memory_byte: bytes.len * max_number_logs, + ExtCosts::utf8_decoding_base: max_number_logs, + ExtCosts::utf8_decoding_byte: bytes.len * max_number_logs, + }); + + let outcome = logic.compute_outcome_and_distribute_gas(); + assert_eq!(outcome.logs.len() as u64, max_number_logs); +} + +fn append_utf16(dst: &mut Vec<u8>, string: &str) { + for code_unit in string.encode_utf16() { + dst.extend_from_slice(&code_unit.to_le_bytes()); + } +} + +#[test] +fn test_log_utf16_number_limit() { + let string = "$ qò$`"; + let mut bytes = Vec::new(); + append_utf16(&mut bytes, string); + + let mut logic_builder = VMLogicBuilder::default(); + let max_number_logs = 3; + logic_builder.config.limit_config.max_total_log_length = + (bytes.len() + 1) as u64 * (max_number_logs + 1); + logic_builder.config.limit_config.max_number_logs = max_number_logs; + + let mut logic = logic_builder.build(); + let bytes = logic.internal_mem_write(&bytes); + for _ in 0..max_number_logs { + logic + .log_utf16(bytes.len, bytes.ptr) + .expect("Valid utf-16 string_bytes under the log number limit"); + } + assert_eq!( + logic.log_utf16(bytes.len, bytes.ptr), + Err(HostError::NumberOfLogsExceeded { limit: max_number_logs }.into()) + ); + + assert_costs(map! 
{ + ExtCosts::base: max_number_logs + 1, + ExtCosts::log_base: max_number_logs, + ExtCosts::log_byte: string.len() as u64 * max_number_logs, + ExtCosts::read_memory_base: max_number_logs, + ExtCosts::read_memory_byte: bytes.len * max_number_logs, + ExtCosts::utf16_decoding_base: max_number_logs, + ExtCosts::utf16_decoding_byte: bytes.len * max_number_logs, + }); + + let outcome = logic.compute_outcome_and_distribute_gas(); + assert_eq!(outcome.logs.len() as u64, max_number_logs); +} + +#[test] +fn test_log_total_length_limit_mixed() { + let mut logic_builder = VMLogicBuilder::default(); + + let string = "abc"; + let mut utf16_bytes: Vec<u8> = vec![0u8; 0]; + append_utf16(&mut utf16_bytes, string); + + let num_logs_each = 10; + let limit = string.len() as u64 * (num_logs_each * 2 + 1) - 1; + logic_builder.config.limit_config.max_total_log_length = limit; + logic_builder.config.limit_config.max_number_logs = num_logs_each * 2 + 1; + let mut logic = logic_builder.build(); + + let utf8_bytes = logic.internal_mem_write(string.as_bytes()); + let utf16_bytes = logic.internal_mem_write(&utf16_bytes); + + for _ in 0..num_logs_each { + logic.log_utf16(utf16_bytes.len, utf16_bytes.ptr).expect("total is still under the limit"); + + logic.log_utf8(utf8_bytes.len, utf8_bytes.ptr).expect("total is still under the limit"); + } + assert_eq!( + logic.log_utf8(utf8_bytes.len, utf8_bytes.ptr), + Err(HostError::TotalLogLengthExceeded { length: limit + 1, limit }.into()) + ); + + let outcome = logic.compute_outcome_and_distribute_gas(); + assert_eq!(outcome.logs.len() as u64, num_logs_each * 2); +} + +#[test] +fn test_log_utf8_max_limit_null_terminated() { + let mut logic_builder = VMLogicBuilder::default(); + let bytes = "j ñ r'ø qò$`5 y'5 øò{%÷ `Võ%\x00".as_bytes(); + let limit = (bytes.len() - 2) as u64; + logic_builder.config.limit_config.max_total_log_length = limit; + let mut logic = logic_builder.build(); + let bytes = logic.internal_mem_write(bytes); + + assert_eq!( + 
logic.log_utf8(u64::MAX, bytes.ptr), + Err(HostError::TotalLogLengthExceeded { length: limit + 1, limit }.into()) + ); + + assert_costs(map! { + ExtCosts::base: 1, + ExtCosts::read_memory_base: bytes.len - 1, + ExtCosts::read_memory_byte: bytes.len - 1, + ExtCosts::utf8_decoding_base: 1, + }); + + let outcome = logic.compute_outcome_and_distribute_gas(); + assert_eq!(outcome.logs.len(), 0); +} + +#[test] +fn test_valid_log_utf16() { + let mut logic_builder = VMLogicBuilder::default(); + let mut logic = logic_builder.build(); + + let string = "$ qò$`"; + let mut bytes = Vec::new(); + append_utf16(&mut bytes, string); + let bytes = logic.internal_mem_write(&bytes); + + logic.log_utf16(bytes.len, bytes.ptr).expect("Valid utf-16 string_bytes"); + + assert_costs(map! { + ExtCosts::base: 1, + ExtCosts::read_memory_base: 1, + ExtCosts::read_memory_byte: bytes.len, + ExtCosts::utf16_decoding_base: 1, + ExtCosts::utf16_decoding_byte: bytes.len, + ExtCosts::log_base: 1, + ExtCosts::log_byte: string.len() as u64, + }); + let outcome = logic.compute_outcome_and_distribute_gas(); + assert_eq!(outcome.logs[0], string); +} + +#[test] +fn test_valid_log_utf16_max_log_len_not_even() { + let mut logic_builder = VMLogicBuilder::default(); + logic_builder.config.limit_config.max_total_log_length = 5; + let mut logic = logic_builder.build(); + + let string = "ab"; + let mut bytes = Vec::new(); + append_utf16(&mut bytes, string); + append_utf16(&mut bytes, "\0"); + let bytes = logic.internal_mem_write(&bytes); + logic.log_utf16(u64::MAX, bytes.ptr).expect("Valid utf-16 bytes"); + + assert_costs(map! 
{ + ExtCosts::base: 1, + ExtCosts::read_memory_base: bytes.len / 2, + ExtCosts::read_memory_byte: bytes.len, + ExtCosts::utf16_decoding_base: 1, + ExtCosts::utf16_decoding_byte: bytes.len - 2, + ExtCosts::log_base: 1, + ExtCosts::log_byte: string.len() as u64, + }); + + let string = "abc"; + let mut bytes = Vec::new(); + append_utf16(&mut bytes, string); + append_utf16(&mut bytes, "\0"); + let bytes = logic.internal_mem_write(&bytes); + assert_eq!( + logic.log_utf16(u64::MAX, bytes.ptr), + Err(HostError::TotalLogLengthExceeded { length: 6, limit: 5 }.into()) + ); + + assert_costs(map! { + ExtCosts::base: 1, + ExtCosts::read_memory_base: 2, + ExtCosts::read_memory_byte: 2 * 2, + ExtCosts::utf16_decoding_base: 1, + }); +} + +#[test] +fn test_log_utf8_max_limit_null_terminated_fail() { + let mut logic_builder = VMLogicBuilder::default(); + logic_builder.config.limit_config.max_total_log_length = 3; + let mut logic = logic_builder.build(); + let bytes = logic.internal_mem_write(b"abcdefgh\0"); + let res = logic.log_utf8(u64::MAX, bytes.ptr); + assert_eq!(res, Err(HostError::TotalLogLengthExceeded { length: 4, limit: 3 }.into())); + assert_costs(map! { + ExtCosts::base: 1, + ExtCosts::read_memory_base: logic_builder.config.limit_config.max_total_log_length + 1, + ExtCosts::read_memory_byte: logic_builder.config.limit_config.max_total_log_length + 1, + ExtCosts::utf8_decoding_base: 1, + }); +} + +#[test] +fn test_valid_log_utf16_null_terminated() { + let mut logic_builder = VMLogicBuilder::default(); + let mut logic = logic_builder.build(); + + let string = "$ qò$`"; + let mut bytes = Vec::new(); + append_utf16(&mut bytes, string); + bytes.extend_from_slice(&[0, 0]); + let bytes = logic.internal_mem_write(&bytes); + + logic.log_utf16(u64::MAX, bytes.ptr).expect("Valid utf-16 string_bytes"); + + let outcome = logic.compute_outcome_and_distribute_gas(); + assert_eq!(outcome.logs[0], string); + assert_costs(map! 
{ + ExtCosts::base: 1, + ExtCosts::read_memory_base: bytes.len / 2 , + ExtCosts::read_memory_byte: bytes.len, + ExtCosts::utf16_decoding_base: 1, + ExtCosts::utf16_decoding_byte: bytes.len - 2, + ExtCosts::log_base: 1, + ExtCosts::log_byte: string.len() as u64, + }); +} + +#[test] +fn test_invalid_log_utf16() { + let mut logic_builder = VMLogicBuilder::default(); + let mut logic = logic_builder.build(); + let mut bytes: Vec<u8> = Vec::new(); + for u16_ in [0xD834u16, 0xDD1E, 0x006d, 0x0075, 0xD800, 0x0069, 0x0063] { + bytes.extend_from_slice(&u16_.to_le_bytes()); + } + let bytes = logic.internal_mem_write(&bytes); + let res = logic.log_utf16(bytes.len, bytes.ptr); + assert_eq!(res, Err(HostError::BadUTF16.into())); + assert_costs(map! { + ExtCosts::base: 1, + ExtCosts::read_memory_base: 1, + ExtCosts::read_memory_byte: bytes.len, + ExtCosts::utf16_decoding_base: 1, + ExtCosts::utf16_decoding_byte: bytes.len, + }); +} + +#[test] +fn test_valid_log_utf16_null_terminated_fail() { + let mut logic_builder = VMLogicBuilder::default(); + let mut logic = logic_builder.build(); + + let mut bytes = Vec::new(); + append_utf16(&mut bytes, "$ qò$`"); + bytes.extend_from_slice(&[0x00, 0xD8]); // U+D800, unpaired surrogate + append_utf16(&mut bytes, "foobarbaz\0"); + let bytes = logic.internal_mem_write(&bytes); + + let res = logic.log_utf16(u64::MAX, bytes.ptr); + assert_eq!(res, Err(HostError::BadUTF16.into())); + assert_costs(map! 
{ + ExtCosts::base: 1, + ExtCosts::read_memory_base: bytes.len / 2, + ExtCosts::read_memory_byte: bytes.len, + ExtCosts::utf16_decoding_base: 1, + ExtCosts::utf16_decoding_byte: bytes.len - 2, + }); +} + +mod utf8_mem_violation { + use super::*; + + fn check(read_ok: bool, test: fn(&mut VMLogic<'_>, MemSlice) -> Result<(), VMLogicError>) { + let mut logic_builder = VMLogicBuilder::default(); + let mut logic = logic_builder.build(); + + let bytes = b"foo bar \xff baz qux"; + let bytes = logic.internal_mem_write_at(64 * 1024 - bytes.len() as u64, bytes); + let err = if read_ok { HostError::BadUTF8 } else { HostError::MemoryAccessViolation }; + assert_eq!(Err(err.into()), test(&mut logic, bytes)); + } + + #[test] + fn test_good_read() { + // The data is read correctly but it has invalid UTF-8 thus it ends up + // with BadUTF8 error and user being charged for decoding. + check(true, |logic, slice| logic.log_utf8(slice.len, slice.ptr)); + assert_costs(map! { + ExtCosts::base: 1, + ExtCosts::read_memory_base: 1, + ExtCosts::read_memory_byte: 17, + ExtCosts::utf8_decoding_base: 1, + ExtCosts::utf8_decoding_byte: 17, + }); + } + + #[test] + fn test_read_past_end() { + // The data goes past the end of the memory resulting in memory access + // violation. User is not charged for UTF-8 decoding (except for the + // base cost which is always charged). + check(false, |logic, slice| logic.log_utf8(slice.len + 1, slice.ptr)); + assert_costs(map! { + ExtCosts::base: 1, + ExtCosts::read_memory_base: 1, + ExtCosts::read_memory_byte: 18, + ExtCosts::utf8_decoding_base: 1, + }); + } + + #[test] + fn test_nul_past_end() { + // The call goes past the end of the memory trying to find NUL byte + // resulting in memory access violation. User is not charged for UTF-8 + // decoding (except for the base cost which is always charged). + check(false, |logic, slice| logic.log_utf8(u64::MAX, slice.ptr)); + assert_costs(map! 
{ + ExtCosts::base: 1, + ExtCosts::read_memory_base: 18, + ExtCosts::read_memory_byte: 18, + ExtCosts::utf8_decoding_base: 1, + }); + } +} + +mod utf16_mem_violation { + use super::*; + + fn check(read_ok: bool, test: fn(&mut VMLogic<'_>, MemSlice) -> Result<(), VMLogicError>) { + let mut logic_builder = VMLogicBuilder::default(); + let mut logic = logic_builder.build(); + + let mut bytes = Vec::new(); + append_utf16(&mut bytes, "$ qò$`"); + bytes.extend_from_slice(&[0x00, 0xD8]); // U+D800, unpaired surrogate + append_utf16(&mut bytes, "foobarbaz"); + let bytes = logic.internal_mem_write_at(64 * 1024 - bytes.len() as u64, &bytes); + let err = if read_ok { HostError::BadUTF16 } else { HostError::MemoryAccessViolation }; + assert_eq!(Err(err.into()), test(&mut logic, bytes)); + } + + #[test] + fn test_good_read() { + // The data is read correctly but it has invalid UTF-16 thus it ends up + // with BadUTF16 error and user being charged for decoding. + check(true, |logic, slice| logic.log_utf16(slice.len, slice.ptr)); + assert_costs(map! { + ExtCosts::base: 1, + ExtCosts::read_memory_base: 1, + ExtCosts::read_memory_byte: 32, + ExtCosts::utf16_decoding_base: 1, + ExtCosts::utf16_decoding_byte: 32, + }); + } + + #[test] + fn test_read_past_end() { + // The data goes past the end of the memory resulting in memory access + // violation. User is not charged for UTF-16 decoding (except for the + // base cost which is always charged). + check(false, |logic, slice| logic.log_utf16(slice.len + 2, slice.ptr)); + assert_costs(map! { + ExtCosts::base: 1, + ExtCosts::read_memory_base: 1, + ExtCosts::read_memory_byte: 34, + ExtCosts::utf16_decoding_base: 1, + }); + } + + #[test] + fn test_nul_past_end() { + // The call goes past the end of the memory trying to find NUL word + // resulting in memory access violation. User is not charged for UTF-16 + // decoding (except for the base cost which is always charged). 
+ check(false, |logic, slice| logic.log_utf16(u64::MAX, slice.ptr)); + assert_costs(map! { + ExtCosts::base: 1, + ExtCosts::read_memory_base: 17, + ExtCosts::read_memory_byte: 34, + ExtCosts::utf16_decoding_base: 1, + }); + } +} diff --git a/runtime/near-vm-logic/src/tests/miscs.rs b/runtime/near-vm-logic/src/tests/miscs.rs index 5bcf12e3944..980f916ed85 100644 --- a/runtime/near-vm-logic/src/tests/miscs.rs +++ b/runtime/near-vm-logic/src/tests/miscs.rs @@ -1,4 +1,3 @@ -use crate::tests::fixtures::get_context; use crate::tests::helpers::*; use crate::tests::vm_logic_builder::VMLogicBuilder; use crate::{map, ExtCosts}; @@ -8,400 +7,10 @@ use serde::{de::Error, Deserialize, Deserializer}; use serde_json::from_slice; use std::{fmt::Display, fs}; -#[test] -fn test_valid_utf8() { - let mut logic_builder = VMLogicBuilder::default(); - let mut logic = logic_builder.build(get_context(vec![], false)); - let string = "j ñ r'ø qò$`5 y'5 øò{%÷ `Võ%"; - let bytes = logic.internal_mem_write(string.as_bytes()); - logic.log_utf8(bytes.len, bytes.ptr).expect("Valid UTF-8 in bytes"); - let outcome = logic.compute_outcome_and_distribute_gas(); - assert_eq!(outcome.logs[0], string); - assert_costs(map! { - ExtCosts::base: 1, - ExtCosts::log_base: 1, - ExtCosts::log_byte: bytes.len, - ExtCosts::read_memory_base: 1, - ExtCosts::read_memory_byte: bytes.len, - ExtCosts::utf8_decoding_base: 1, - ExtCosts::utf8_decoding_byte: bytes.len, - }); -} - -#[test] -fn test_invalid_utf8() { - let mut logic_builder = VMLogicBuilder::default(); - let mut logic = logic_builder.build(get_context(vec![], false)); - let bytes = logic.internal_mem_write(b"\x80"); - assert_eq!(logic.log_utf8(bytes.len, bytes.ptr), Err(HostError::BadUTF8.into())); - let outcome = logic.compute_outcome_and_distribute_gas(); - assert_eq!(outcome.logs.len(), 0); - assert_costs(map! 
{ - ExtCosts::base: 1, - ExtCosts::read_memory_base: 1, - ExtCosts::read_memory_byte: bytes.len, - ExtCosts::utf8_decoding_base: 1, - ExtCosts::utf8_decoding_byte: bytes.len, - }); -} - -#[test] -fn test_valid_null_terminated_utf8() { - let mut logic_builder = VMLogicBuilder::default(); - - let cstring = "j ñ r'ø qò$`5 y'5 øò{%÷ `Võ%\x00"; - let string = &cstring[..cstring.len() - 1]; - logic_builder.config.limit_config.max_total_log_length = string.len() as u64; - let mut logic = logic_builder.build(get_context(vec![], false)); - let bytes = logic.internal_mem_write(cstring.as_bytes()); - logic.log_utf8(u64::MAX, bytes.ptr).expect("Valid null-terminated utf-8 string_bytes"); - let outcome = logic.compute_outcome_and_distribute_gas(); - assert_costs(map! { - ExtCosts::base: 1, - ExtCosts::log_base: 1, - ExtCosts::log_byte: string.len() as u64, - ExtCosts::read_memory_base: bytes.len, - ExtCosts::read_memory_byte: bytes.len, - ExtCosts::utf8_decoding_base: 1, - ExtCosts::utf8_decoding_byte: string.len() as u64, - }); - assert_eq!(outcome.logs[0], string); -} - -#[test] -fn test_log_max_limit() { - let mut logic_builder = VMLogicBuilder::default(); - let string = "j ñ r'ø qò$`5 y'5 øò{%÷ `Võ%"; - let limit = string.len() as u64 - 1; - logic_builder.config.limit_config.max_total_log_length = limit; - let mut logic = logic_builder.build(get_context(vec![], false)); - let bytes = logic.internal_mem_write(string.as_bytes()); - - assert_eq!( - logic.log_utf8(bytes.len, bytes.ptr), - Err(HostError::TotalLogLengthExceeded { length: bytes.len, limit }.into()) - ); - - assert_costs(map! 
{ - ExtCosts::base: 1, - ExtCosts::utf8_decoding_base: 1, - }); - - let outcome = logic.compute_outcome_and_distribute_gas(); - assert_eq!(outcome.logs.len(), 0); -} - -#[test] -fn test_log_total_length_limit() { - let mut logic_builder = VMLogicBuilder::default(); - let string = "j ñ r'ø qò$`5 y'5 øò{%÷ `Võ%".as_bytes(); - let num_logs = 10; - let limit = string.len() as u64 * num_logs - 1; - logic_builder.config.limit_config.max_total_log_length = limit; - logic_builder.config.limit_config.max_number_logs = num_logs; - let mut logic = logic_builder.build(get_context(vec![], false)); - let bytes = logic.internal_mem_write(string); - - for _ in 0..num_logs - 1 { - logic.log_utf8(bytes.len, bytes.ptr).expect("total is still under the limit"); - } - assert_eq!( - logic.log_utf8(bytes.len, bytes.ptr), - Err(HostError::TotalLogLengthExceeded { length: limit + 1, limit }.into()) - ); - - let outcome = logic.compute_outcome_and_distribute_gas(); - assert_eq!(outcome.logs.len() as u64, num_logs - 1); -} - -#[test] -fn test_log_number_limit() { - let mut logic_builder = VMLogicBuilder::default(); - let string = "blabla"; - let max_number_logs = 3; - logic_builder.config.limit_config.max_total_log_length = - (string.len() + 1) as u64 * (max_number_logs + 1); - logic_builder.config.limit_config.max_number_logs = max_number_logs; - let mut logic = logic_builder.build(get_context(vec![], false)); - let bytes = logic.internal_mem_write(string.as_bytes()); - for _ in 0..max_number_logs { - logic - .log_utf8(bytes.len, bytes.ptr) - .expect("Valid utf-8 string_bytes under the log number limit"); - } - assert_eq!( - logic.log_utf8(bytes.len, bytes.ptr), - Err(HostError::NumberOfLogsExceeded { limit: max_number_logs }.into()) - ); - - assert_costs(map! 
{ - ExtCosts::base: max_number_logs + 1, - ExtCosts::log_base: max_number_logs, - ExtCosts::log_byte: bytes.len * max_number_logs, - ExtCosts::read_memory_base: max_number_logs, - ExtCosts::read_memory_byte: bytes.len * max_number_logs, - ExtCosts::utf8_decoding_base: max_number_logs, - ExtCosts::utf8_decoding_byte: bytes.len * max_number_logs, - }); - - let outcome = logic.compute_outcome_and_distribute_gas(); - assert_eq!(outcome.logs.len() as u64, max_number_logs); -} - -fn append_utf16(dst: &mut Vec, string: &str) { - for code_unit in string.encode_utf16() { - dst.extend_from_slice(&code_unit.to_le_bytes()); - } -} - -#[test] -fn test_log_utf16_number_limit() { - let string = "$ qò$`"; - let mut bytes = Vec::new(); - append_utf16(&mut bytes, string); - - let mut logic_builder = VMLogicBuilder::default(); - let max_number_logs = 3; - logic_builder.config.limit_config.max_total_log_length = - (bytes.len() + 1) as u64 * (max_number_logs + 1); - logic_builder.config.limit_config.max_number_logs = max_number_logs; - - let mut logic = logic_builder.build(get_context(vec![], false)); - let bytes = logic.internal_mem_write(&bytes); - for _ in 0..max_number_logs { - logic - .log_utf16(bytes.len, bytes.ptr) - .expect("Valid utf-16 string_bytes under the log number limit"); - } - assert_eq!( - logic.log_utf16(bytes.len, bytes.ptr), - Err(HostError::NumberOfLogsExceeded { limit: max_number_logs }.into()) - ); - - assert_costs(map! 
{ - ExtCosts::base: max_number_logs + 1, - ExtCosts::log_base: max_number_logs, - ExtCosts::log_byte: string.len() as u64 * max_number_logs, - ExtCosts::read_memory_base: max_number_logs, - ExtCosts::read_memory_byte: bytes.len * max_number_logs, - ExtCosts::utf16_decoding_base: max_number_logs, - ExtCosts::utf16_decoding_byte: bytes.len * max_number_logs, - }); - - let outcome = logic.compute_outcome_and_distribute_gas(); - assert_eq!(outcome.logs.len() as u64, max_number_logs); -} - -#[test] -fn test_log_total_length_limit_mixed() { - let mut logic_builder = VMLogicBuilder::default(); - - let string = "abc"; - let mut utf16_bytes: Vec = vec![0u8; 0]; - append_utf16(&mut utf16_bytes, string); - - let num_logs_each = 10; - let limit = string.len() as u64 * (num_logs_each * 2 + 1) - 1; - logic_builder.config.limit_config.max_total_log_length = limit; - logic_builder.config.limit_config.max_number_logs = num_logs_each * 2 + 1; - let mut logic = logic_builder.build(get_context(vec![], false)); - - let utf8_bytes = logic.internal_mem_write(string.as_bytes()); - let utf16_bytes = logic.internal_mem_write(&utf16_bytes); - - for _ in 0..num_logs_each { - logic.log_utf16(utf16_bytes.len, utf16_bytes.ptr).expect("total is still under the limit"); - - logic.log_utf8(utf8_bytes.len, utf8_bytes.ptr).expect("total is still under the limit"); - } - assert_eq!( - logic.log_utf8(utf8_bytes.len, utf8_bytes.ptr), - Err(HostError::TotalLogLengthExceeded { length: limit + 1, limit }.into()) - ); - - let outcome = logic.compute_outcome_and_distribute_gas(); - assert_eq!(outcome.logs.len() as u64, num_logs_each * 2); -} - -#[test] -fn test_log_utf8_max_limit_null_terminated() { - let mut logic_builder = VMLogicBuilder::default(); - let bytes = "j ñ r'ø qò$`5 y'5 øò{%÷ `Võ%\x00".as_bytes(); - let limit = (bytes.len() - 2) as u64; - logic_builder.config.limit_config.max_total_log_length = limit; - let mut logic = logic_builder.build(get_context(vec![], false)); - let bytes = 
logic.internal_mem_write(bytes); - - assert_eq!( - logic.log_utf8(u64::MAX, bytes.ptr), - Err(HostError::TotalLogLengthExceeded { length: limit + 1, limit }.into()) - ); - - assert_costs(map! { - ExtCosts::base: 1, - ExtCosts::read_memory_base: bytes.len - 1, - ExtCosts::read_memory_byte: bytes.len - 1, - ExtCosts::utf8_decoding_base: 1, - }); - - let outcome = logic.compute_outcome_and_distribute_gas(); - assert_eq!(outcome.logs.len(), 0); -} - -#[test] -fn test_valid_log_utf16() { - let mut logic_builder = VMLogicBuilder::default(); - let mut logic = logic_builder.build(get_context(vec![], false)); - - let string = "$ qò$`"; - let mut bytes = Vec::new(); - append_utf16(&mut bytes, string); - let bytes = logic.internal_mem_write(&bytes); - - logic.log_utf16(bytes.len, bytes.ptr).expect("Valid utf-16 string_bytes"); - - assert_costs(map! { - ExtCosts::base: 1, - ExtCosts::read_memory_base: 1, - ExtCosts::read_memory_byte: bytes.len, - ExtCosts::utf16_decoding_base: 1, - ExtCosts::utf16_decoding_byte: bytes.len, - ExtCosts::log_base: 1, - ExtCosts::log_byte: string.len() as u64, - }); - let outcome = logic.compute_outcome_and_distribute_gas(); - assert_eq!(outcome.logs[0], string); -} - -#[test] -fn test_valid_log_utf16_max_log_len_not_even() { - let mut logic_builder = VMLogicBuilder::default(); - logic_builder.config.limit_config.max_total_log_length = 5; - let mut logic = logic_builder.build(get_context(vec![], false)); - - let string = "ab"; - let mut bytes = Vec::new(); - append_utf16(&mut bytes, string); - append_utf16(&mut bytes, "\0"); - let bytes = logic.internal_mem_write(&bytes); - logic.log_utf16(u64::MAX, bytes.ptr).expect("Valid utf-16 bytes"); - - assert_costs(map! 
{ - ExtCosts::base: 1, - ExtCosts::read_memory_base: bytes.len / 2, - ExtCosts::read_memory_byte: bytes.len, - ExtCosts::utf16_decoding_base: 1, - ExtCosts::utf16_decoding_byte: bytes.len - 2, - ExtCosts::log_base: 1, - ExtCosts::log_byte: string.len() as u64, - }); - - let string = "abc"; - let mut bytes = Vec::new(); - append_utf16(&mut bytes, string); - append_utf16(&mut bytes, "\0"); - let bytes = logic.internal_mem_write(&bytes); - assert_eq!( - logic.log_utf16(u64::MAX, bytes.ptr), - Err(HostError::TotalLogLengthExceeded { length: 6, limit: 5 }.into()) - ); - - assert_costs(map! { - ExtCosts::base: 1, - ExtCosts::read_memory_base: 2, - ExtCosts::read_memory_byte: 2 * 2, - ExtCosts::utf16_decoding_base: 1, - }); -} - -#[test] -fn test_log_utf8_max_limit_null_terminated_fail() { - let mut logic_builder = VMLogicBuilder::default(); - logic_builder.config.limit_config.max_total_log_length = 3; - let mut logic = logic_builder.build(get_context(vec![], false)); - let bytes = logic.internal_mem_write(b"abcdefgh\0"); - let res = logic.log_utf8(u64::MAX, bytes.ptr); - assert_eq!(res, Err(HostError::TotalLogLengthExceeded { length: 4, limit: 3 }.into())); - assert_costs(map! { - ExtCosts::base: 1, - ExtCosts::read_memory_base: logic_builder.config.limit_config.max_total_log_length + 1, - ExtCosts::read_memory_byte: logic_builder.config.limit_config.max_total_log_length + 1, - ExtCosts::utf8_decoding_base: 1, - }); -} - -#[test] -fn test_valid_log_utf16_null_terminated() { - let mut logic_builder = VMLogicBuilder::default(); - let mut logic = logic_builder.build(get_context(vec![], false)); - - let string = "$ qò$`"; - let mut bytes = Vec::new(); - append_utf16(&mut bytes, string); - bytes.extend_from_slice(&[0, 0]); - let bytes = logic.internal_mem_write(&bytes); - - logic.log_utf16(u64::MAX, bytes.ptr).expect("Valid utf-16 string_bytes"); - - let outcome = logic.compute_outcome_and_distribute_gas(); - assert_eq!(outcome.logs[0], string); - assert_costs(map! 
{ - ExtCosts::base: 1, - ExtCosts::read_memory_base: bytes.len / 2 , - ExtCosts::read_memory_byte: bytes.len, - ExtCosts::utf16_decoding_base: 1, - ExtCosts::utf16_decoding_byte: bytes.len - 2, - ExtCosts::log_base: 1, - ExtCosts::log_byte: string.len() as u64, - }); -} - -#[test] -fn test_invalid_log_utf16() { - let mut logic_builder = VMLogicBuilder::default(); - let mut logic = logic_builder.build(get_context(vec![], false)); - let mut bytes: Vec = Vec::new(); - for u16_ in [0xD834u16, 0xDD1E, 0x006d, 0x0075, 0xD800, 0x0069, 0x0063] { - bytes.extend_from_slice(&u16_.to_le_bytes()); - } - let bytes = logic.internal_mem_write(&bytes); - let res = logic.log_utf16(bytes.len, bytes.ptr); - assert_eq!(res, Err(HostError::BadUTF16.into())); - assert_costs(map! { - ExtCosts::base: 1, - ExtCosts::read_memory_base: 1, - ExtCosts::read_memory_byte: bytes.len, - ExtCosts::utf16_decoding_base: 1, - ExtCosts::utf16_decoding_byte: bytes.len, - }); -} - -#[test] -fn test_valid_log_utf16_null_terminated_fail() { - let mut logic_builder = VMLogicBuilder::default(); - let mut logic = logic_builder.build(get_context(vec![], false)); - - let mut bytes = Vec::new(); - append_utf16(&mut bytes, "$ qò$`"); - bytes.extend_from_slice(&[0x00, 0xD8]); // U+D800, unpaired surrogate - append_utf16(&mut bytes, "foobarbaz\0"); - let bytes = logic.internal_mem_write(&bytes); - - let res = logic.log_utf16(u64::MAX, bytes.ptr); - assert_eq!(res, Err(HostError::BadUTF16.into())); - assert_costs(map! 
{ - ExtCosts::base: 1, - ExtCosts::read_memory_base: bytes.len / 2, - ExtCosts::read_memory_byte: bytes.len, - ExtCosts::utf16_decoding_base: 1, - ExtCosts::utf16_decoding_byte: bytes.len - 2, - }); -} - #[test] fn test_sha256() { let mut logic_builder = VMLogicBuilder::default(); - let mut logic = logic_builder.build(get_context(vec![], false)); + let mut logic = logic_builder.build(); let data = logic.internal_mem_write(b"tesdsst"); logic.sha256(data.len, data.ptr, 0).unwrap(); @@ -430,7 +39,7 @@ fn test_sha256() { #[test] fn test_keccak256() { let mut logic_builder = VMLogicBuilder::default(); - let mut logic = logic_builder.build(get_context(vec![], false)); + let mut logic = logic_builder.build(); let data = logic.internal_mem_write(b"tesdsst"); logic.keccak256(data.len, data.ptr, 0).unwrap(); @@ -459,7 +68,7 @@ fn test_keccak256() { #[test] fn test_keccak512() { let mut logic_builder = VMLogicBuilder::default(); - let mut logic = logic_builder.build(get_context(vec![], false)); + let mut logic = logic_builder.build(); let data = logic.internal_mem_write(b"tesdsst"); logic.keccak512(data.len, data.ptr, 0).unwrap(); @@ -490,7 +99,7 @@ fn test_keccak512() { #[test] fn test_ripemd160() { let mut logic_builder = VMLogicBuilder::default(); - let mut logic = logic_builder.build(get_context(vec![], false)); + let mut logic = logic_builder.build(); let data = logic.internal_mem_write(b"tesdsst"); logic.ripemd160(data.len, data.ptr, 0).unwrap(); @@ -543,7 +152,7 @@ fn test_ecrecover() { .unwrap() { let mut logic_builder = VMLogicBuilder::default(); - let mut logic = logic_builder.build(get_context(vec![], false)); + let mut logic = logic_builder.build(); let m = logic.internal_mem_write(&m); let sig = logic.internal_mem_write(&sig); @@ -574,7 +183,7 @@ fn test_ecrecover() { #[test] fn test_hash256_register() { let mut logic_builder = VMLogicBuilder::default(); - let mut logic = logic_builder.build(get_context(vec![], false)); + let mut logic = logic_builder.build(); 
let data = b"tesdsst"; logic.wrapped_internal_write_register(1, data).unwrap(); @@ -606,7 +215,7 @@ fn test_key_length_limit() { let mut logic_builder = VMLogicBuilder::default(); let limit = 1024; logic_builder.config.limit_config.max_length_storage_key = limit; - let mut logic = logic_builder.build(get_context(vec![], false)); + let mut logic = logic_builder.build(); // Under the limit. Valid calls. let key = crate::MemSlice { ptr: 0, len: limit }; @@ -647,7 +256,7 @@ fn test_value_length_limit() { let mut logic_builder = VMLogicBuilder::default(); let limit = 1024; logic_builder.config.limit_config.max_length_storage_value = limit; - let mut logic = logic_builder.build(get_context(vec![], false)); + let mut logic = logic_builder.build(); let key = logic.internal_mem_write(b"hello"); logic @@ -667,7 +276,7 @@ fn test_num_promises() { let mut logic_builder = VMLogicBuilder::default(); let num_promises = 10; logic_builder.config.limit_config.max_promises_per_function_call_action = num_promises; - let mut logic = logic_builder.build(get_context(vec![], false)); + let mut logic = logic_builder.build(); let account_id = logic.internal_mem_write(b"alice"); for _ in 0..num_promises { logic @@ -689,7 +298,7 @@ fn test_num_joined_promises() { let mut logic_builder = VMLogicBuilder::default(); let num_deps = 10; logic_builder.config.limit_config.max_number_input_data_dependencies = num_deps; - let mut logic = logic_builder.build(get_context(vec![], false)); + let mut logic = logic_builder.build(); let account_id = logic.internal_mem_write(b"alice"); let promise_id = logic .promise_batch_create(account_id.len, account_id.ptr) @@ -714,7 +323,7 @@ fn test_num_input_dependencies_recursive_join() { let mut logic_builder = VMLogicBuilder::default(); let num_steps = 10; logic_builder.config.limit_config.max_number_input_data_dependencies = 1 << num_steps; - let mut logic = logic_builder.build(get_context(vec![], false)); + let mut logic = logic_builder.build(); let account_id = 
logic.internal_mem_write(b"alice"); let original_promise_id = logic .promise_batch_create(account_id.len, account_id.ptr) @@ -755,7 +364,7 @@ fn test_return_value_limit() { let mut logic_builder = VMLogicBuilder::default(); let limit = 1024; logic_builder.config.limit_config.max_length_returned_data = limit; - let mut logic = logic_builder.build(get_context(vec![], false)); + let mut logic = logic_builder.build(); logic.value_return(limit, 0).expect("Returned value length is under the limit"); assert_eq!( @@ -769,7 +378,7 @@ fn test_contract_size_limit() { let mut logic_builder = VMLogicBuilder::default(); let limit = 1024; logic_builder.config.limit_config.max_contract_size = limit; - let mut logic = logic_builder.build(get_context(vec![], false)); + let mut logic = logic_builder.build(); let account_id = logic.internal_mem_write(b"alice"); diff --git a/runtime/near-vm-logic/src/tests/mod.rs b/runtime/near-vm-logic/src/tests/mod.rs index 7a9a28b3552..fcdd60e234e 100644 --- a/runtime/near-vm-logic/src/tests/mod.rs +++ b/runtime/near-vm-logic/src/tests/mod.rs @@ -2,10 +2,10 @@ mod alt_bn128; mod context; #[cfg(feature = "protocol_feature_ed25519_verify")] mod ed25519_verify; -mod fixtures; mod gas_counter; pub(crate) mod helpers; mod iterators; +mod logs; mod miscs; mod promises; mod registers; diff --git a/runtime/near-vm-logic/src/tests/promises.rs b/runtime/near-vm-logic/src/tests/promises.rs index 2c54169c38e..0397e2a8732 100644 --- a/runtime/near-vm-logic/src/tests/promises.rs +++ b/runtime/near-vm-logic/src/tests/promises.rs @@ -1,4 +1,3 @@ -use crate::tests::fixtures::get_context; use crate::tests::helpers::*; use crate::tests::vm_logic_builder::VMLogicBuilder; use crate::types::PromiseResult; @@ -34,7 +33,7 @@ fn test_promise_results() { let mut logic_builder = VMLogicBuilder::default(); logic_builder.promise_results = promise_results; - let mut logic = logic_builder.build(get_context(vec![], false)); + let mut logic = logic_builder.build(); 
assert_eq!(logic.promise_results_count(), Ok(3), "Total count of registers must be 3"); assert_eq!(logic.promise_result(0, 0), Ok(1), "Must return code 1 on success"); @@ -48,7 +47,7 @@ fn test_promise_results() { #[test] fn test_promise_batch_action_function_call() { let mut logic_builder = VMLogicBuilder::default(); - let mut logic = logic_builder.build(get_context(vec![], false)); + let mut logic = logic_builder.build(); let index = promise_create(&mut logic, b"rick.test", 0, 0).expect("should create a promise"); let index_ptr = logic.internal_mem_write(&index.to_le_bytes()).ptr; @@ -80,7 +79,7 @@ fn test_promise_batch_action_function_call() { #[test] fn test_promise_batch_action_create_account() { let mut logic_builder = VMLogicBuilder::default(); - let mut logic = logic_builder.build(get_context(vec![], false)); + let mut logic = logic_builder.build(); let index = promise_create(&mut logic, b"rick.test", 0, 0).expect("should create a promise"); let index_ptr = logic.internal_mem_write(&index.to_le_bytes()).ptr; @@ -118,7 +117,7 @@ fn test_promise_batch_action_create_account() { #[test] fn test_promise_batch_action_deploy_contract() { let mut logic_builder = VMLogicBuilder::default(); - let mut logic = logic_builder.build(get_context(vec![], false)); + let mut logic = logic_builder.build(); let index = promise_create(&mut logic, b"rick.test", 0, 0).expect("should create a promise"); let index_ptr = logic.internal_mem_write(&index.to_le_bytes()).ptr; @@ -164,11 +163,8 @@ fn test_promise_batch_action_deploy_contract() { #[test] fn test_promise_batch_action_transfer() { - let mut context = get_context(vec![], false); - context.account_balance = 100; - context.attached_deposit = 10; let mut logic_builder = VMLogicBuilder::default(); - let mut logic = logic_builder.build(context); + let mut logic = logic_builder.build(); let index = promise_create(&mut logic, b"rick.test", 0, 0).expect("should create a promise"); let index_ptr = 
logic.internal_mem_write(&index.to_le_bytes()).ptr; @@ -216,11 +212,8 @@ fn test_promise_batch_action_transfer() { #[test] fn test_promise_batch_action_stake() { - let mut context = get_context(vec![], false); - // And there are 10N in attached balance to the transaction. - context.account_balance = 100; let mut logic_builder = VMLogicBuilder::default(); - let mut logic = logic_builder.build(context); + let mut logic = logic_builder.build(); let index = promise_create(&mut logic, b"rick.test", 0, 0).expect("should create a promise"); let key = "ed25519:5do5nkAEVhL8iteDvXNgxi4pWK78Y7DDadX11ArFNyrf" .parse::() @@ -272,11 +265,8 @@ fn test_promise_batch_action_stake() { #[test] fn test_promise_batch_action_add_key_with_function_call() { - let mut context = get_context(vec![], false); - context.account_balance = 100; - let mut logic_builder = VMLogicBuilder::default(); - let mut logic = logic_builder.build(context); + let mut logic = logic_builder.build(); let index = promise_create(&mut logic, b"rick.test", 0, 0).expect("should create a promise"); let index_ptr = logic.internal_mem_write(&index.to_le_bytes()).ptr; let key = "ed25519:5do5nkAEVhL8iteDvXNgxi4pWK78Y7DDadX11ArFNyrf" @@ -362,10 +352,8 @@ fn test_promise_batch_action_add_key_with_function_call() { #[test] fn test_promise_batch_then() { - let mut context = get_context(vec![], false); - context.account_balance = 100; let mut logic_builder = VMLogicBuilder::default(); - let mut logic = logic_builder.build(context); + let mut logic = logic_builder.build(); let account_id = b"rick.test"; let index = promise_create(&mut logic, account_id, 0, 0).expect("should create a promise"); diff --git a/runtime/near-vm-logic/src/tests/registers.rs b/runtime/near-vm-logic/src/tests/registers.rs index 103cccf7d0d..c098d6460cb 100644 --- a/runtime/near-vm-logic/src/tests/registers.rs +++ b/runtime/near-vm-logic/src/tests/registers.rs @@ -1,4 +1,3 @@ -use crate::tests::fixtures::get_context; use 
crate::tests::vm_logic_builder::VMLogicBuilder; use crate::VMConfig; use near_vm_errors::{HostError, VMLogicError}; @@ -6,7 +5,7 @@ use near_vm_errors::{HostError, VMLogicError}; #[test] fn test_one_register() { let mut logic_builder = VMLogicBuilder::default(); - let mut logic = logic_builder.build(get_context(vec![], false)); + let mut logic = logic_builder.build(); logic.wrapped_internal_write_register(0, &[0, 1, 2]).unwrap(); assert_eq!(logic.register_len(0).unwrap(), 3u64); @@ -16,7 +15,7 @@ fn test_one_register() { #[test] fn test_non_existent_register() { let mut logic_builder = VMLogicBuilder::default(); - let mut logic = logic_builder.build(get_context(vec![], false)); + let mut logic = logic_builder.build(); assert_eq!(logic.register_len(0), Ok(u64::MAX) as Result); let buffer = [0u8; 3]; @@ -30,7 +29,7 @@ fn test_non_existent_register() { fn test_many_registers() { let mut logic_builder = VMLogicBuilder::default(); let max_registers = logic_builder.config.limit_config.max_number_registers; - let mut logic = logic_builder.build(get_context(vec![], false)); + let mut logic = logic_builder.build(); for i in 0..max_registers { let value = (i * 10).to_le_bytes(); @@ -49,7 +48,7 @@ fn test_many_registers() { fn test_max_register_size() { let mut logic_builder = VMLogicBuilder::free(); let max_register_size = logic_builder.config.limit_config.max_register_size; - let mut logic = logic_builder.build(get_context(vec![], false)); + let mut logic = logic_builder.build(); let value = vec![0u8; (max_register_size + 1) as usize]; @@ -64,7 +63,7 @@ fn test_max_register_memory_limit() { let mut logic_builder = VMLogicBuilder::free(); let config = VMConfig::free(); logic_builder.config = config.clone(); - let mut logic = logic_builder.build(get_context(vec![], false)); + let mut logic = logic_builder.build(); let max_registers = config.limit_config.registers_memory_limit / config.limit_config.max_register_size; @@ -83,6 +82,6 @@ fn test_max_register_memory_limit() { 
#[test] fn test_register_is_not_used() { let mut logic_builder = VMLogicBuilder::default(); - let mut logic = logic_builder.build(get_context(vec![], false)); + let mut logic = logic_builder.build(); assert_eq!(logic.register_len(0), Ok(u64::MAX)); } diff --git a/runtime/near-vm-logic/src/tests/storage_read_write.rs b/runtime/near-vm-logic/src/tests/storage_read_write.rs index 7339a07c82a..31e93d4d1d7 100644 --- a/runtime/near-vm-logic/src/tests/storage_read_write.rs +++ b/runtime/near-vm-logic/src/tests/storage_read_write.rs @@ -1,11 +1,10 @@ -use crate::tests::fixtures::get_context; use crate::tests::vm_logic_builder::VMLogicBuilder; use crate::{External, StorageGetMode}; #[test] fn test_storage_write_with_register() { let mut logic_builder = VMLogicBuilder::default(); - let mut logic = logic_builder.build(get_context(vec![], false)); + let mut logic = logic_builder.build(); let key: &[u8] = b"foo"; let val: &[u8] = b"bar"; @@ -27,7 +26,7 @@ fn test_storage_read_with_register() { let val: &[u8] = b"bar"; logic_builder.ext.storage_set(key, val).unwrap(); - let mut logic = logic_builder.build(get_context(vec![], false)); + let mut logic = logic_builder.build(); logic.wrapped_internal_write_register(1, key).unwrap(); @@ -38,7 +37,7 @@ fn test_storage_read_with_register() { #[test] fn test_storage_remove_with_register() { let mut logic_builder = VMLogicBuilder::default(); - let mut logic = logic_builder.build(get_context(vec![], false)); + let mut logic = logic_builder.build(); let key = logic.internal_mem_write(b"foo"); let val = logic.internal_mem_write(b"bar"); @@ -58,7 +57,7 @@ fn test_storage_has_key_with_register() { let val: &[u8] = b"bar"; logic_builder.ext.storage_set(key, val).unwrap(); - let mut logic = logic_builder.build(get_context(vec![], false)); + let mut logic = logic_builder.build(); logic.wrapped_internal_write_register(1, key).unwrap(); diff --git a/runtime/near-vm-logic/src/tests/storage_usage.rs 
b/runtime/near-vm-logic/src/tests/storage_usage.rs index b941c122bff..26c54dd650c 100644 --- a/runtime/near-vm-logic/src/tests/storage_usage.rs +++ b/runtime/near-vm-logic/src/tests/storage_usage.rs @@ -1,11 +1,10 @@ -use crate::tests::fixtures::get_context; use crate::tests::vm_logic_builder::VMLogicBuilder; #[test] fn test_storage_write_counter() { let mut logic_builder = VMLogicBuilder::default(); let data_record_cost = logic_builder.fees_config.storage_usage_config.num_extra_bytes_record; - let mut logic = logic_builder.build(get_context(vec![], false)); + let mut logic = logic_builder.build(); let key = logic.internal_mem_write(b"foo"); let val = logic.internal_mem_write(b"bar"); @@ -23,7 +22,7 @@ fn test_storage_write_counter() { #[test] fn test_storage_remove() { let mut logic_builder = VMLogicBuilder::default(); - let mut logic = logic_builder.build(get_context(vec![], false)); + let mut logic = logic_builder.build(); let key = logic.internal_mem_write(b"foo"); let val = logic.internal_mem_write(b"bar"); diff --git a/runtime/near-vm-logic/src/tests/view_method.rs b/runtime/near-vm-logic/src/tests/view_method.rs index 417d4cf68ac..b286ba95dee 100644 --- a/runtime/near-vm-logic/src/tests/view_method.rs +++ b/runtime/near-vm-logic/src/tests/view_method.rs @@ -1,11 +1,10 @@ -use crate::tests::fixtures::get_context; use crate::tests::vm_logic_builder::VMLogicBuilder; macro_rules! 
test_prohibited { ($f: ident $(, $arg: expr )* ) => { - let mut logic_builder = VMLogicBuilder::default(); + let mut logic_builder = VMLogicBuilder::view(); #[allow(unused_mut)] - let mut logic = logic_builder.build(get_context(vec![], true)); + let mut logic = logic_builder.build(); let name = stringify!($f); logic.$f($($arg, )*).expect_err(&format!("{} is not allowed in view calls", name)) @@ -43,8 +42,7 @@ fn test_prohibited_view_methods() { #[test] fn test_allowed_view_method() { - let mut logic_builder = VMLogicBuilder::default(); - let context = get_context(vec![], true); - let mut logic = logic_builder.build(context.clone()); - assert_eq!(logic.block_index().unwrap(), context.block_height); + let mut logic_builder = VMLogicBuilder::view(); + let mut logic = logic_builder.build(); + assert_eq!(logic.block_index().unwrap(), logic_builder.context.block_height); } diff --git a/runtime/near-vm-logic/src/tests/vm_logic_builder.rs b/runtime/near-vm-logic/src/tests/vm_logic_builder.rs index 473fa540aad..7b1b02cb1e5 100644 --- a/runtime/near-vm-logic/src/tests/vm_logic_builder.rs +++ b/runtime/near-vm-logic/src/tests/vm_logic_builder.rs @@ -1,8 +1,8 @@ use crate::mocks::mock_external::MockedExternal; use crate::mocks::mock_memory::MockedMemory; -use crate::types::{Gas, PromiseResult}; -use crate::{ActionCosts, MemSlice, VMConfig, VMContext, VMLogic}; -use near_primitives_core::runtime::fees::{Fee, RuntimeFeesConfig}; +use crate::types::PromiseResult; +use crate::{MemSlice, VMConfig, VMContext, VMLogic}; +use near_primitives_core::runtime::fees::RuntimeFeesConfig; use near_primitives_core::types::ProtocolVersion; pub(super) const LATEST_PROTOCOL_VERSION: ProtocolVersion = ProtocolVersion::MAX; @@ -14,6 +14,7 @@ pub(super) struct VMLogicBuilder { pub promise_results: Vec, pub memory: MockedMemory, pub current_protocol_version: ProtocolVersion, + pub context: VMContext, } impl Default for VMLogicBuilder { @@ -25,12 +26,21 @@ impl Default for VMLogicBuilder { memory: 
MockedMemory::default(), promise_results: vec![], current_protocol_version: LATEST_PROTOCOL_VERSION, + context: get_context(), } } } impl VMLogicBuilder { - pub fn build(&mut self, context: VMContext) -> TestVMLogic<'_> { + pub fn view() -> Self { + let mut builder = Self::default(); + let max_gas_burnt = builder.config.limit_config.max_gas_burnt; + builder.context.view_config = Some(crate::ViewConfig { max_gas_burnt }); + builder + } + + pub fn build(&mut self) -> TestVMLogic<'_> { + let context = self.context.clone(); TestVMLogic::from(VMLogic::new_with_protocol_version( &mut self.ext, context, @@ -50,17 +60,29 @@ impl VMLogicBuilder { memory: MockedMemory::default(), promise_results: vec![], current_protocol_version: LATEST_PROTOCOL_VERSION, + context: get_context(), } } +} - pub fn max_gas_burnt(mut self, max_gas_burnt: Gas) -> Self { - self.config.limit_config.max_gas_burnt = max_gas_burnt; - self - } - - pub fn gas_fee(mut self, cost: ActionCosts, fee: Fee) -> Self { - self.fees_config.action_fees[cost] = fee; - self +fn get_context() -> VMContext { + VMContext { + current_account_id: "alice.near".parse().unwrap(), + signer_account_id: "bob.near".parse().unwrap(), + signer_account_pk: vec![0, 1, 2, 3, 4], + predecessor_account_id: "carol.near".parse().unwrap(), + input: vec![0, 1, 2, 3, 4], + block_height: 10, + block_timestamp: 42, + epoch_height: 1, + account_balance: 100, + storage_usage: 0, + account_locked_balance: 50, + attached_deposit: 10, + prepaid_gas: 10u64.pow(14), + random_seed: vec![0, 1, 2], + view_config: None, + output_data_receivers: vec![], } } @@ -97,11 +119,15 @@ impl TestVMLogic<'_> { /// makes it convenient to populate the memory with various different data /// to later use in function calls. 
pub(super) fn internal_mem_write(&mut self, data: &[u8]) -> MemSlice { - let ptr = self.mem_write_offset; + let slice = self.internal_mem_write_at(self.mem_write_offset, data); + self.mem_write_offset += slice.len; + slice + } + + /// Writes data into guest memory at given location. + pub(super) fn internal_mem_write_at(&mut self, ptr: u64, data: &[u8]) -> MemSlice { self.memory().set_for_free(ptr, data).unwrap(); - let len = data.len() as u64; - self.mem_write_offset += len; - MemSlice { len, ptr } + MemSlice { len: u64::try_from(data.len()).unwrap(), ptr } } /// Reads data from guest memory into a Vector. diff --git a/runtime/near-vm-logic/src/vmstate.rs b/runtime/near-vm-logic/src/vmstate.rs index ee660205ee3..fe7b1a87d01 100644 --- a/runtime/near-vm-logic/src/vmstate.rs +++ b/runtime/near-vm-logic/src/vmstate.rs @@ -69,7 +69,6 @@ impl<'a> Memory<'a> { self.0.view_memory(slice).map_err(|_| HostError::MemoryAccessViolation.into()) } - #[cfg(any(test, feature = "sandbox"))] /// Like [`Self::view`] but does not pay gas fees. pub(super) fn view_for_free(&self, slice: MemSlice) -> Result> { self.0.view_memory(slice).map_err(|_| HostError::MemoryAccessViolation.into()) diff --git a/runtime/near-vm-runner/src/imports.rs b/runtime/near-vm-runner/src/imports.rs index 58589a8da23..2136be895e9 100644 --- a/runtime/near-vm-runner/src/imports.rs +++ b/runtime/near-vm-runner/src/imports.rs @@ -463,12 +463,29 @@ pub(crate) mod wasmer2 { pub(crate) mod wasmtime { use super::str_eq; use near_vm_logic::{ProtocolVersion, VMLogic, VMLogicError}; - use std::cell::{RefCell, UnsafeCell}; + use std::cell::UnsafeCell; use std::ffi::c_void; + /// This is a container from which an error can be taken out by value. This is necessary as + /// `anyhow` does not really give any opportunity to grab causes by value and the VM Logic + /// errors end up a couple layers deep in a causal chain. 
+ #[derive(Debug)] + pub(crate) struct ErrorContainer(std::sync::Mutex>); + impl ErrorContainer { + pub(crate) fn take(&self) -> Option { + let mut guard = self.0.lock().unwrap_or_else(|e| e.into_inner()); + guard.take() + } + } + impl std::error::Error for ErrorContainer {} + impl std::fmt::Display for ErrorContainer { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str("VMLogic error occurred and is now stored in an opaque storage container") + } + } + thread_local! { static CALLER_CONTEXT: UnsafeCell<*mut c_void> = UnsafeCell::new(0 as *mut c_void); - static EMBEDDER_ERROR: RefCell> = RefCell::new(None); } pub(crate) fn link( @@ -485,7 +502,7 @@ pub(crate) mod wasmtime { $func:ident < [ $( $arg_name:ident : $arg_type:ident ),* ] -> [ $( $returns:ident ),* ] > ) => { #[allow(unused_parens)] - fn $func(caller: wasmtime::Caller<'_, ()>, $( $arg_name: $arg_type ),* ) -> Result<($( $returns ),*), wasmtime::Trap> { + fn $func(caller: wasmtime::Caller<'_, ()>, $( $arg_name: $arg_type ),* ) -> anyhow::Result<($( $returns ),*)> { const IS_GAS: bool = str_eq(stringify!($func), "gas"); let _span = if IS_GAS { None @@ -508,12 +525,7 @@ pub(crate) mod wasmtime { match logic.$func( $( $arg_name as $arg_type, )* ) { Ok(result) => Ok(result as ($( $returns ),* ) ), Err(err) => { - // Wasmtime doesn't have proper mechanism for wrapping custom errors - // into traps. So, just store error into TLS and use special exit code here. 
- EMBEDDER_ERROR.with(|embedder_error| { - *embedder_error.borrow_mut() = Some(err) - }); - Err(wasmtime::Trap::i32_exit(239)) + Err(ErrorContainer(std::sync::Mutex::new(Some(err))).into()) } } } @@ -523,10 +535,6 @@ pub(crate) mod wasmtime { } for_each_available_import!(protocol_version, add_import); } - - pub(crate) fn last_error() -> Option { - EMBEDDER_ERROR.with(|embedder_error| embedder_error.replace(None)) - } } /// Constant-time string equality, work-around for `"foo" == "bar"` not working diff --git a/runtime/near-vm-runner/src/wasmtime_runner.rs b/runtime/near-vm-runner/src/wasmtime_runner.rs index 029314b529a..d21bd6fe8fc 100644 --- a/runtime/near-vm-runner/src/wasmtime_runner.rs +++ b/runtime/near-vm-runner/src/wasmtime_runner.rs @@ -16,7 +16,7 @@ use std::borrow::Cow; use std::cell::RefCell; use std::ffi::c_void; use wasmtime::ExternType::Func; -use wasmtime::{Engine, Linker, Memory, MemoryType, Module, Store, TrapCode}; +use wasmtime::{Engine, Linker, Memory, MemoryType, Module, Store}; type Caller = wasmtime::Caller<'static, ()>; thread_local! 
{ @@ -79,68 +79,43 @@ impl MemoryLike for WasmtimeMemory { } } -fn trap_to_error(trap: &wasmtime::Trap) -> Result { - if trap.i32_exit_status() == Some(239) { - match imports::wasmtime::last_error() { - Some(VMLogicError::HostError(h)) => Ok(FunctionCallError::HostError(h)), - Some(VMLogicError::ExternalError(s)) => Err(VMRunnerError::ExternalError(s)), - Some(VMLogicError::InconsistentStateError(e)) => { - Err(VMRunnerError::InconsistentStateError(e)) - } - None => panic!("Error is not properly set"), - } - } else { - Ok(match trap.trap_code() { - Some(TrapCode::StackOverflow) => FunctionCallError::WasmTrap(WasmTrap::StackOverflow), - Some(TrapCode::MemoryOutOfBounds) => { - FunctionCallError::WasmTrap(WasmTrap::MemoryOutOfBounds) - } - Some(TrapCode::TableOutOfBounds) => { - FunctionCallError::WasmTrap(WasmTrap::MemoryOutOfBounds) - } - Some(TrapCode::IndirectCallToNull) => { - FunctionCallError::WasmTrap(WasmTrap::IndirectCallToNull) - } - Some(TrapCode::BadSignature) => { - FunctionCallError::WasmTrap(WasmTrap::IncorrectCallIndirectSignature) - } - Some(TrapCode::IntegerOverflow) => { - FunctionCallError::WasmTrap(WasmTrap::IllegalArithmetic) - } - Some(TrapCode::IntegerDivisionByZero) => { - FunctionCallError::WasmTrap(WasmTrap::IllegalArithmetic) - } - Some(TrapCode::BadConversionToInteger) => { - FunctionCallError::WasmTrap(WasmTrap::IllegalArithmetic) - } - Some(TrapCode::UnreachableCodeReached) => { - FunctionCallError::WasmTrap(WasmTrap::Unreachable) - } - Some(TrapCode::Interrupt) => { - return Err(VMRunnerError::Nondeterministic("interrupt".to_string())); - } - _ => { - return Err(VMRunnerError::WasmUnknownError { - debug_message: "unknown trap".to_string(), - }); - } - }) - } -} - impl IntoVMError for anyhow::Error { fn into_vm_error(self) -> Result { let cause = self.root_cause(); - match cause.downcast_ref::() { - Some(trap) => trap_to_error(trap), - None => Ok(FunctionCallError::LinkError { msg: format!("{:#?}", cause) }), + if let Some(container) = 
cause.downcast_ref::() { + use {VMLogicError as LE, VMRunnerError as RE}; + return match container.take() { + Some(LE::HostError(h)) => Ok(FunctionCallError::HostError(h)), + Some(LE::ExternalError(s)) => Err(RE::ExternalError(s)), + Some(LE::InconsistentStateError(e)) => Err(RE::InconsistentStateError(e)), + None => panic!("error has already been taken out of the container?!"), + }; } - } -} - -impl IntoVMError for wasmtime::Trap { - fn into_vm_error(self) -> Result { - trap_to_error(&self) + if let Some(trap) = cause.downcast_ref::() { + use wasmtime::Trap as T; + let nondeterministic_message = 'nondet: { + return Ok(FunctionCallError::WasmTrap(match *trap { + T::StackOverflow => WasmTrap::StackOverflow, + T::MemoryOutOfBounds => WasmTrap::MemoryOutOfBounds, + T::TableOutOfBounds => WasmTrap::MemoryOutOfBounds, + T::IndirectCallToNull => WasmTrap::IndirectCallToNull, + T::BadSignature => WasmTrap::IncorrectCallIndirectSignature, + T::IntegerOverflow => WasmTrap::IllegalArithmetic, + T::IntegerDivisionByZero => WasmTrap::IllegalArithmetic, + T::BadConversionToInteger => WasmTrap::IllegalArithmetic, + T::UnreachableCodeReached => WasmTrap::Unreachable, + T::Interrupt => break 'nondet "interrupt", + T::HeapMisaligned => break 'nondet "heap misaligned", + t => { + return Err(VMRunnerError::WasmUnknownError { + debug_message: format!("unhandled trap type: {:?}", t), + }) + } + })); + }; + return Err(VMRunnerError::Nondeterministic(nondeterministic_message.into())); + } + Ok(FunctionCallError::LinkError { msg: format!("{:#?}", cause) }) } } @@ -156,7 +131,7 @@ pub fn get_engine(config: &mut wasmtime::Config) -> Engine { pub(super) fn default_config() -> wasmtime::Config { let mut config = wasmtime::Config::default(); - config.max_wasm_stack(1024 * 1024 * 1024).unwrap(); // wasm stack metering is implemented by pwasm-utils, we don't want wasmtime to trap before that + config.max_wasm_stack(1024 * 1024 * 1024); // wasm stack metering is implemented by instrumentation, we 
don't want wasmtime to trap before that config.wasm_threads(WASM_FEATURES.threads); config.wasm_reference_types(WASM_FEATURES.reference_types); config.wasm_simd(WASM_FEATURES.simd); @@ -277,7 +252,7 @@ impl crate::runner::VM for WasmtimeVM { } match linker.instantiate(&mut store, &module) { Ok(instance) => match instance.get_func(&mut store, method_name) { - Some(func) => match func.typed::<(), (), _>(&mut store) { + Some(func) => match func.typed::<(), ()>(&mut store) { Ok(run) => match run.call(&mut store, ()) { Ok(_) => Ok(VMOutcome::ok(logic)), Err(err) => Ok(VMOutcome::abort(logic, err.into_vm_error()?)), diff --git a/runtime/runtime-params-estimator/Cargo.toml b/runtime/runtime-params-estimator/Cargo.toml index 368ec24d863..875a34b609b 100644 --- a/runtime/runtime-params-estimator/Cargo.toml +++ b/runtime/runtime-params-estimator/Cargo.toml @@ -68,7 +68,8 @@ nightly = [ nightly_protocol = [ "near-primitives/nightly_protocol", "near-test-contracts/nightly", - "protocol_feature_ed25519_verify" + "protocol_feature_ed25519_verify", + "protocol_feature_nep366_delegate_action", ] sandbox = ["node-runtime/sandbox"] io_trace = ["near-store/io_trace", "near-o11y/io_trace", "near-vm-logic/io_trace"] @@ -76,3 +77,4 @@ protocol_feature_ed25519_verify = [ "near-vm-logic/protocol_feature_ed25519_verify", "near-vm-runner/protocol_feature_ed25519_verify" ] +protocol_feature_nep366_delegate_action = [] diff --git a/runtime/runtime-params-estimator/src/cost.rs b/runtime/runtime-params-estimator/src/cost.rs index bc4e1ff3c48..c64389ceec0 100644 --- a/runtime/runtime-params-estimator/src/cost.rs +++ b/runtime/runtime-params-estimator/src/cost.rs @@ -199,7 +199,9 @@ pub enum Cost { ActionDeleteAccountSendNotSir, ActionDeleteAccountSendSir, ActionDeleteAccountExec, - + /// Estimates `action_creation_config.delegate_cost` which is charged + /// for `DelegateAction` actions. 
+ ActionDelegate, /// Estimates `wasm_config.ext_costs.base` which is intended to be charged /// once on every host function call. However, this is currently /// inconsistent. First, we do not charge on Math API methods (`sha256`, diff --git a/runtime/runtime-params-estimator/src/costs_to_runtime_config.rs b/runtime/runtime-params-estimator/src/costs_to_runtime_config.rs index 91b146ab22e..83398c9d675 100644 --- a/runtime/runtime-params-estimator/src/costs_to_runtime_config.rs +++ b/runtime/runtime-params-estimator/src/costs_to_runtime_config.rs @@ -54,6 +54,8 @@ fn runtime_fees_config(cost_table: &CostTable) -> anyhow::Result fee(Cost::ActionCreateAccount)?, + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + ActionCosts::delegate => fee(Cost::ActionDelegate)?, ActionCosts::delete_account => fee(Cost::ActionDeleteAccount)?, ActionCosts::deploy_contract_base => fee(Cost::ActionDeployContractBase)?, ActionCosts::deploy_contract_byte => fee(Cost::ActionDeployContractPerByte)?, diff --git a/runtime/runtime/Cargo.toml b/runtime/runtime/Cargo.toml index e5a8ac3465e..bf60b0e9cd2 100644 --- a/runtime/runtime/Cargo.toml +++ b/runtime/runtime/Cargo.toml @@ -8,7 +8,6 @@ edition.workspace = true [dependencies] borsh.workspace = true -byteorder.workspace = true hex.workspace = true num-bigint.workspace = true num-rational.workspace = true @@ -38,6 +37,7 @@ protocol_feature_flat_state = ["near-store/protocol_feature_flat_state", "near-v protocol_feature_zero_balance_account = ["near-primitives/protocol_feature_zero_balance_account"] nightly_protocol = ["near-primitives/nightly_protocol"] no_cpu_compatibility_checks = ["near-vm-runner/no_cpu_compatibility_checks"] +protocol_feature_nep366_delegate_action = [] no_cache = [ "near-vm-runner/no_cache", diff --git a/runtime/runtime/src/actions.rs b/runtime/runtime/src/actions.rs index 59c93618da3..0cad3370edf 100644 --- a/runtime/runtime/src/actions.rs +++ b/runtime/runtime/src/actions.rs @@ -35,6 +35,17 @@ use 
near_vm_logic::types::PromiseResult; use near_vm_logic::{VMContext, VMOutcome}; use near_vm_runner::precompile_contract; +#[cfg(feature = "protocol_feature_nep366_delegate_action")] +use crate::config::{total_prepaid_exec_fees, total_prepaid_gas, total_prepaid_send_fees}; +#[cfg(feature = "protocol_feature_nep366_delegate_action")] +use near_primitives::errors::InvalidAccessKeyError; +#[cfg(feature = "protocol_feature_nep366_delegate_action")] +use near_primitives::transaction::{DelegateAction, SignedDelegateAction}; +#[cfg(feature = "protocol_feature_nep366_delegate_action")] +use near_primitives::types::Gas; +#[cfg(feature = "protocol_feature_nep366_delegate_action")] +use near_vm_logic::ActionCosts; + /// Runs given function call with given context / apply state. pub(crate) fn execute_function_call( apply_state: &ApplyState, @@ -624,6 +635,221 @@ pub(crate) fn action_add_key( Ok(()) } +#[cfg(feature = "protocol_feature_nep366_delegate_action")] +pub(crate) fn apply_delegate_action( + state_update: &mut TrieUpdate, + apply_state: &ApplyState, + action_receipt: &ActionReceipt, + sender_id: &AccountId, + signed_delegate_action: &SignedDelegateAction, + result: &mut ActionResult, +) -> Result<(), RuntimeError> { + let delegate_action = &signed_delegate_action.delegate_action; + + if !signed_delegate_action.verify() { + result.result = Err(ActionErrorKind::DelegateActionInvalidSignature.into()); + return Ok(()); + } + if apply_state.block_height > delegate_action.max_block_height { + result.result = Err(ActionErrorKind::DelegateActionExpired.into()); + return Ok(()); + } + if delegate_action.sender_id.as_str() != sender_id.as_str() { + result.result = Err(ActionErrorKind::DelegateActionSenderDoesNotMatchTxReceiver { + sender_id: delegate_action.sender_id.clone(), + receiver_id: sender_id.clone(), + } + .into()); + return Ok(()); + } + + validate_delegate_action_key(state_update, apply_state, delegate_action, result)?; + if result.result.is_err() { + // Validation 
failed. Need to return Ok() because this is not a runtime error. + // "result.result" will be return to the User as the action execution result. + return Ok(()); + } + + // Generate a new receipt from DelegateAction. + let new_receipt = Receipt { + predecessor_id: sender_id.clone(), + receiver_id: delegate_action.receiver_id.clone(), + receipt_id: CryptoHash::default(), + + receipt: ReceiptEnum::Action(ActionReceipt { + signer_id: action_receipt.signer_id.clone(), + signer_public_key: action_receipt.signer_public_key.clone(), + gas_price: action_receipt.gas_price, + output_data_receivers: vec![], + input_data_ids: vec![], + actions: delegate_action.get_actions(), + }), + }; + + // Note, Relayer prepaid all fees and all things required by actions: attached deposits and attached gas. + // If something goes wrong, deposit is refunded to the predecessor, this is sender_id/Sender in DelegateAction. + // Gas is refunded to the signer, this is Relayer. + // Some contracts refund the deposit. Usually they refund the deposit to the predecessor and this is sender_id/Sender from DelegateAction. + // Therefore Relayer should verify DelegateAction before submitting it because it spends the attached deposit. + + let prepaid_send_fees = total_prepaid_send_fees( + &apply_state.config.fees, + &action_receipt.actions, + apply_state.current_protocol_version, + )?; + let required_gas = receipt_required_gas(apply_state, &new_receipt)?; + // This gas will be burnt by the receiver of the created receipt, + result.gas_used = safe_add_gas(result.gas_used, required_gas)?; + // This gas was prepaid on Relayer shard. Need to burn it because the receipt is going to be sent. + // gas_used is incremented because otherwise the gas will be refunded. Refund function checks only gas_used. 
+ result.gas_used = safe_add_gas(result.gas_used, prepaid_send_fees)?; + result.gas_burnt = safe_add_gas(result.gas_burnt, prepaid_send_fees)?; + result.new_receipts.push(new_receipt); + + Ok(()) +} + +/// Returns Gas amount is required to execute Receipt and all actions it contains +#[cfg(feature = "protocol_feature_nep366_delegate_action")] +fn receipt_required_gas(apply_state: &ApplyState, receipt: &Receipt) -> Result { + Ok(match &receipt.receipt { + ReceiptEnum::Action(action_receipt) => { + let mut required_gas = safe_add_gas( + total_prepaid_exec_fees( + &apply_state.config.fees, + &action_receipt.actions, + &receipt.receiver_id, + apply_state.current_protocol_version, + )?, + total_prepaid_gas(&action_receipt.actions)?, + )?; + required_gas = safe_add_gas( + required_gas, + apply_state.config.fees.fee(ActionCosts::new_action_receipt).exec_fee(), + )?; + + required_gas + } + ReceiptEnum::Data(_) => 0, + }) +} + +/// Validate access key which was used for signing DelegateAction: +/// +/// - Checks whether the access key is present fo given public_key and sender_id. +/// - Validates nonce and updates it if it's ok. +/// - Validates access key permissions. +#[cfg(feature = "protocol_feature_nep366_delegate_action")] +fn validate_delegate_action_key( + state_update: &mut TrieUpdate, + apply_state: &ApplyState, + delegate_action: &DelegateAction, + result: &mut ActionResult, +) -> Result<(), RuntimeError> { + // 'delegate_action.sender_id' account existence must be checked by a caller + let mut access_key = match get_access_key( + state_update, + &delegate_action.sender_id, + &delegate_action.public_key, + )? 
{ + Some(access_key) => access_key, + None => { + result.result = Err(ActionErrorKind::DelegateActionAccessKeyError( + InvalidAccessKeyError::AccessKeyNotFound { + account_id: delegate_action.sender_id.clone(), + public_key: delegate_action.public_key.clone(), + }, + ) + .into()); + return Ok(()); + } + }; + + if delegate_action.nonce <= access_key.nonce { + result.result = Err(ActionErrorKind::DelegateActionInvalidNonce { + delegate_nonce: delegate_action.nonce, + ak_nonce: access_key.nonce, + } + .into()); + return Ok(()); + } + + let upper_bound = apply_state.block_height + * near_primitives::account::AccessKey::ACCESS_KEY_NONCE_RANGE_MULTIPLIER; + if delegate_action.nonce >= upper_bound { + result.result = Err(ActionErrorKind::DelegateActionNonceTooLarge { + delegate_nonce: delegate_action.nonce, + upper_bound, + } + .into()); + return Ok(()); + } + + access_key.nonce = delegate_action.nonce; + + let actions = delegate_action.get_actions(); + + // The restriction of "function call" access keys: + // the transaction must contain the only `FunctionCall` if "function call" access key is used + if let AccessKeyPermission::FunctionCall(ref function_call_permission) = access_key.permission { + if actions.len() != 1 { + result.result = Err(ActionErrorKind::DelegateActionAccessKeyError( + InvalidAccessKeyError::RequiresFullAccess, + ) + .into()); + return Ok(()); + } + if let Some(Action::FunctionCall(ref function_call)) = actions.get(0) { + if function_call.deposit > 0 { + result.result = Err(ActionErrorKind::DelegateActionAccessKeyError( + InvalidAccessKeyError::DepositWithFunctionCall, + ) + .into()); + } + if delegate_action.receiver_id.as_ref() != function_call_permission.receiver_id { + result.result = Err(ActionErrorKind::DelegateActionAccessKeyError( + InvalidAccessKeyError::ReceiverMismatch { + tx_receiver: delegate_action.receiver_id.clone(), + ak_receiver: function_call_permission.receiver_id.clone(), + }, + ) + .into()); + return Ok(()); + } + if 
!function_call_permission.method_names.is_empty() + && function_call_permission + .method_names + .iter() + .all(|method_name| &function_call.method_name != method_name) + { + result.result = Err(ActionErrorKind::DelegateActionAccessKeyError( + InvalidAccessKeyError::MethodNameMismatch { + method_name: function_call.method_name.clone(), + }, + ) + .into()); + return Ok(()); + } + } else { + // There should Action::FunctionCall when "function call" permission is used + result.result = Err(ActionErrorKind::DelegateActionAccessKeyError( + InvalidAccessKeyError::RequiresFullAccess, + ) + .into()); + return Ok(()); + } + }; + + set_access_key( + state_update, + delegate_action.sender_id.clone(), + delegate_action.public_key.clone(), + &access_key, + ); + + Ok(()) +} + pub(crate) fn check_actor_permissions( action: &Action, account: &Option, @@ -657,6 +883,8 @@ pub(crate) fn check_actor_permissions( } } Action::CreateAccount(_) | Action::FunctionCall(_) | Action::Transfer(_) => (), + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + Action::Delegate(_) => (), }; Ok(()) } @@ -731,12 +959,22 @@ pub(crate) fn check_account_existence( .into()); } } + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + Action::Delegate(_) => { + if account.is_none() { + return Err(ActionErrorKind::AccountDoesNotExist { + account_id: account_id.clone(), + } + .into()); + } + } }; Ok(()) } #[cfg(test)] mod tests { + use near_primitives::hash::hash; use near_primitives::trie_key::TrieKey; use near_store::test_utils::create_tries; @@ -744,6 +982,21 @@ mod tests { use super::*; use crate::near_primitives::shard_layout::ShardUId; + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + use near_primitives::account::FunctionCallPermission; + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + use near_primitives::errors::InvalidAccessKeyError; + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + use 
near_primitives::runtime::migration_data::MigrationFlags; + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + use near_primitives::transaction::{CreateAccountAction, NonDelegateAction}; + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + use near_primitives::types::{EpochId, StateChangeCause}; + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + use near_store::set_account; + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + use std::sync::Arc; + fn test_action_create_account( account_id: AccountId, predecessor_id: AccountId, @@ -921,4 +1174,602 @@ mod tests { }) ); } + + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + fn create_delegate_action_receipt() -> (ActionReceipt, SignedDelegateAction) { + let signed_delegate_action = SignedDelegateAction { + delegate_action: DelegateAction { + sender_id: "bob.test.near".parse().unwrap(), + receiver_id: "token.test.near".parse().unwrap(), + actions: vec![ + NonDelegateAction( + Action::FunctionCall( + FunctionCallAction { + method_name: "ft_transfer".parse().unwrap(), + args: vec![123, 34, 114, 101, 99, 101, 105, 118, 101, 114, 95, 105, 100, 34, 58, 34, 106, 97, 110, 101, 46, 116, 101, 115, 116, 46, 110, 101, 97, 114, 34, 44, 34, 97, 109, 111, 117, 110, 116, 34, 58, 34, 52, 34, 125], + gas: 30000000000000, + deposit: 1, + } + ) + ) + ], + nonce: 19000001, + max_block_height: 57, + public_key: "ed25519:HaYUbyeiNRnyHtQceRgT3gyMBigZFEW9EYYU1KTHtdR1".parse::().unwrap(), + }, + signature: "ed25519:2b1NHmrj7LVgA5H9aDtQmd6JgZqy4nPAYHtNQc88PiEY3xMjpkKMDN1wVWZaXMGx9tjWbXzp4jXSCyTPqUfPdRUB".parse().unwrap() + }; + + let action_receipt = ActionReceipt { + signer_id: "alice.test.near".parse().unwrap(), + signer_public_key: PublicKey::empty(near_crypto::KeyType::ED25519), + gas_price: 1, + output_data_receivers: Vec::new(), + input_data_ids: Vec::new(), + actions: vec![Action::Delegate(signed_delegate_action.clone())], + }; + + (action_receipt, signed_delegate_action) + } + + 
#[cfg(feature = "protocol_feature_nep366_delegate_action")] + fn create_apply_state(block_height: BlockHeight) -> ApplyState { + ApplyState { + block_height, + prev_block_hash: CryptoHash::default(), + block_hash: CryptoHash::default(), + epoch_id: EpochId::default(), + epoch_height: 3, + gas_price: 2, + block_timestamp: 1, + gas_limit: None, + random_seed: CryptoHash::default(), + current_protocol_version: 1, + config: Arc::new(RuntimeConfig::test()), + cache: None, + is_new_chunk: false, + migration_data: Arc::default(), + migration_flags: MigrationFlags::default(), + } + } + + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + fn setup_account( + account_id: &AccountId, + public_key: &PublicKey, + access_key: &AccessKey, + ) -> TrieUpdate { + let tries = create_tries(); + let mut state_update = + tries.new_trie_update(ShardUId::single_shard(), CryptoHash::default()); + let account = Account::new(100, 0, CryptoHash::default(), 100); + set_account(&mut state_update, account_id.clone(), &account); + set_access_key(&mut state_update, account_id.clone(), public_key.clone(), access_key); + + state_update.commit(StateChangeCause::InitialState); + let trie_changes = state_update.finalize().unwrap().0; + let mut store_update = tries.store_update(); + let root = tries.apply_all(&trie_changes, ShardUId::single_shard(), &mut store_update); + store_update.commit().unwrap(); + + tries.new_trie_update(ShardUId::single_shard(), root) + } + + #[test] + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + fn test_delegate_action() { + let mut result = ActionResult::default(); + let (action_receipt, signed_delegate_action) = create_delegate_action_receipt(); + let sender_id = signed_delegate_action.delegate_action.sender_id.clone(); + let sender_pub_key = signed_delegate_action.delegate_action.public_key.clone(); + let access_key = AccessKey { nonce: 19000000, permission: AccessKeyPermission::FullAccess }; + + let apply_state = + 
create_apply_state(signed_delegate_action.delegate_action.max_block_height); + let mut state_update = setup_account(&sender_id, &sender_pub_key, &access_key); + + apply_delegate_action( + &mut state_update, + &apply_state, + &action_receipt, + &sender_id, + &signed_delegate_action, + &mut result, + ) + .expect("Expect ok"); + + assert!(result.result.is_ok(), "Result error: {:?}", result.result.err()); + assert_eq!( + result.new_receipts, + vec![Receipt { + predecessor_id: sender_id.clone(), + receiver_id: signed_delegate_action.delegate_action.receiver_id.clone(), + receipt_id: CryptoHash::default(), + receipt: ReceiptEnum::Action(ActionReceipt { + signer_id: action_receipt.signer_id.clone(), + signer_public_key: action_receipt.signer_public_key.clone(), + gas_price: action_receipt.gas_price, + output_data_receivers: Vec::new(), + input_data_ids: Vec::new(), + actions: signed_delegate_action.delegate_action.get_actions(), + }) + }] + ); + } + + #[test] + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + fn test_delegate_action_signature_verification() { + let mut result = ActionResult::default(); + let (action_receipt, mut signed_delegate_action) = create_delegate_action_receipt(); + let sender_id = signed_delegate_action.delegate_action.sender_id.clone(); + let sender_pub_key = signed_delegate_action.delegate_action.public_key.clone(); + let access_key = AccessKey { nonce: 19000000, permission: AccessKeyPermission::FullAccess }; + + let apply_state = + create_apply_state(signed_delegate_action.delegate_action.max_block_height); + let mut state_update = setup_account(&sender_id, &sender_pub_key, &access_key); + + // Corrupt receiver_id. Signature verifycation must fail. 
+ signed_delegate_action.delegate_action.receiver_id = "www.test.near".parse().unwrap(); + + apply_delegate_action( + &mut state_update, + &apply_state, + &action_receipt, + &sender_id, + &signed_delegate_action, + &mut result, + ) + .expect("Expect ok"); + + assert_eq!(result.result, Err(ActionErrorKind::DelegateActionInvalidSignature.into())); + } + + #[test] + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + fn test_delegate_action_max_height() { + let mut result = ActionResult::default(); + let (action_receipt, signed_delegate_action) = create_delegate_action_receipt(); + let sender_id = signed_delegate_action.delegate_action.sender_id.clone(); + let sender_pub_key = signed_delegate_action.delegate_action.public_key.clone(); + let access_key = AccessKey { nonce: 19000000, permission: AccessKeyPermission::FullAccess }; + + // Setup current block as higher than max_block_height. Must fail. + let apply_state = + create_apply_state(signed_delegate_action.delegate_action.max_block_height + 1); + let mut state_update = setup_account(&sender_id, &sender_pub_key, &access_key); + + apply_delegate_action( + &mut state_update, + &apply_state, + &action_receipt, + &sender_id, + &signed_delegate_action, + &mut result, + ) + .expect("Expect ok"); + + assert_eq!(result.result, Err(ActionErrorKind::DelegateActionExpired.into())); + } + + #[test] + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + fn test_delegate_action_validate_sender_account() { + let mut result = ActionResult::default(); + let (action_receipt, signed_delegate_action) = create_delegate_action_receipt(); + let sender_id = signed_delegate_action.delegate_action.sender_id.clone(); + let sender_pub_key = signed_delegate_action.delegate_action.public_key.clone(); + let access_key = AccessKey { nonce: 19000000, permission: AccessKeyPermission::FullAccess }; + + let apply_state = + create_apply_state(signed_delegate_action.delegate_action.max_block_height); + let mut state_update = 
setup_account(&sender_id, &sender_pub_key, &access_key); + + // Use a different sender_id. Must fail. + apply_delegate_action( + &mut state_update, + &apply_state, + &action_receipt, + &"www.test.near".parse().unwrap(), + &signed_delegate_action, + &mut result, + ) + .expect("Expect ok"); + + assert_eq!( + result.result, + Err(ActionErrorKind::DelegateActionSenderDoesNotMatchTxReceiver { + sender_id: sender_id.clone(), + receiver_id: "www.test.near".parse().unwrap(), + } + .into()) + ); + + // Sender account doesn't exist. Must fail. + assert_eq!( + check_account_existence( + &Action::Delegate(signed_delegate_action), + &mut None, + &sender_id, + 1, + false, + false + ), + Err(ActionErrorKind::AccountDoesNotExist { account_id: sender_id.clone() }.into()) + ); + } + + #[test] + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + fn test_validate_delegate_action_key_update_nonce() { + let (_, signed_delegate_action) = create_delegate_action_receipt(); + let sender_id = signed_delegate_action.delegate_action.sender_id.clone(); + let sender_pub_key = signed_delegate_action.delegate_action.public_key.clone(); + let access_key = AccessKey { nonce: 19000000, permission: AccessKeyPermission::FullAccess }; + + let apply_state = + create_apply_state(signed_delegate_action.delegate_action.max_block_height); + let mut state_update = setup_account(&sender_id, &sender_pub_key, &access_key); + + // Everything is ok + let mut result = ActionResult::default(); + validate_delegate_action_key( + &mut state_update, + &apply_state, + &signed_delegate_action.delegate_action, + &mut result, + ) + .expect("Expect ok"); + assert!(result.result.is_ok(), "Result error: {:?}", result.result); + + // Must fail, Nonce had been updated by previous step. 
+ result = ActionResult::default(); + validate_delegate_action_key( + &mut state_update, + &apply_state, + &signed_delegate_action.delegate_action, + &mut result, + ) + .expect("Expect ok"); + assert_eq!( + result.result, + Err(ActionErrorKind::DelegateActionInvalidNonce { + delegate_nonce: signed_delegate_action.delegate_action.nonce, + ak_nonce: signed_delegate_action.delegate_action.nonce, + } + .into()) + ); + + // Increment nonce. Must pass. + result = ActionResult::default(); + let mut delegate_action = signed_delegate_action.delegate_action.clone(); + delegate_action.nonce += 1; + validate_delegate_action_key( + &mut state_update, + &apply_state, + &delegate_action, + &mut result, + ) + .expect("Expect ok"); + assert!(result.result.is_ok(), "Result error: {:?}", result.result); + } + + #[test] + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + fn test_delegate_action_key_doesnt_exist() { + let mut result = ActionResult::default(); + let (_, signed_delegate_action) = create_delegate_action_receipt(); + let sender_id = signed_delegate_action.delegate_action.sender_id.clone(); + let sender_pub_key = signed_delegate_action.delegate_action.public_key.clone(); + let access_key = AccessKey { nonce: 19000000, permission: AccessKeyPermission::FullAccess }; + + let apply_state = + create_apply_state(signed_delegate_action.delegate_action.max_block_height); + let mut state_update = setup_account( + &sender_id, + &PublicKey::empty(near_crypto::KeyType::ED25519), + &access_key, + ); + + validate_delegate_action_key( + &mut state_update, + &apply_state, + &signed_delegate_action.delegate_action, + &mut result, + ) + .expect("Expect ok"); + assert_eq!( + result.result, + Err(ActionErrorKind::DelegateActionAccessKeyError( + InvalidAccessKeyError::AccessKeyNotFound { + account_id: sender_id.clone(), + public_key: sender_pub_key.clone(), + }, + ) + .into()) + ); + } + + #[test] + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + fn 
test_delegate_action_key_incorrect_nonce() { + let mut result = ActionResult::default(); + let (_, signed_delegate_action) = create_delegate_action_receipt(); + let sender_id = signed_delegate_action.delegate_action.sender_id.clone(); + let sender_pub_key = signed_delegate_action.delegate_action.public_key.clone(); + let access_key = AccessKey { + nonce: signed_delegate_action.delegate_action.nonce, + permission: AccessKeyPermission::FullAccess, + }; + + let apply_state = + create_apply_state(signed_delegate_action.delegate_action.max_block_height); + let mut state_update = setup_account(&sender_id, &sender_pub_key, &access_key); + + validate_delegate_action_key( + &mut state_update, + &apply_state, + &signed_delegate_action.delegate_action, + &mut result, + ) + .expect("Expect ok"); + assert_eq!( + result.result, + Err(ActionErrorKind::DelegateActionInvalidNonce { + delegate_nonce: signed_delegate_action.delegate_action.nonce, + ak_nonce: signed_delegate_action.delegate_action.nonce, + } + .into()) + ); + } + + #[test] + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + fn test_delegate_action_key_nonce_too_large() { + let mut result = ActionResult::default(); + let (_, signed_delegate_action) = create_delegate_action_receipt(); + let sender_id = signed_delegate_action.delegate_action.sender_id.clone(); + let sender_pub_key = signed_delegate_action.delegate_action.public_key.clone(); + let access_key = AccessKey { nonce: 19000000, permission: AccessKeyPermission::FullAccess }; + + let apply_state = create_apply_state(1); + let mut state_update = setup_account(&sender_id, &sender_pub_key, &access_key); + + validate_delegate_action_key( + &mut state_update, + &apply_state, + &signed_delegate_action.delegate_action, + &mut result, + ) + .expect("Expect ok"); + assert_eq!( + result.result, + Err(ActionErrorKind::DelegateActionNonceTooLarge { + delegate_nonce: signed_delegate_action.delegate_action.nonce, + upper_bound: 1000000, + } + .into()) + ); + } + + 
#[cfg(feature = "protocol_feature_nep366_delegate_action")] + fn test_delegate_action_key_permissions( + access_key: &AccessKey, + delegate_action: &DelegateAction, + ) -> ActionResult { + let mut result = ActionResult::default(); + let sender_id = delegate_action.sender_id.clone(); + let sender_pub_key = delegate_action.public_key.clone(); + + let apply_state = create_apply_state(delegate_action.max_block_height); + let mut state_update = setup_account(&sender_id, &sender_pub_key, &access_key); + + validate_delegate_action_key( + &mut state_update, + &apply_state, + &delegate_action, + &mut result, + ) + .expect("Expect ok"); + + result + } + + #[test] + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + fn test_delegate_action_key_permissions_fncall() { + let (_, signed_delegate_action) = create_delegate_action_receipt(); + let access_key = AccessKey { + nonce: 19000000, + permission: AccessKeyPermission::FunctionCall(FunctionCallPermission { + allowance: None, + receiver_id: signed_delegate_action.delegate_action.receiver_id.to_string(), + method_names: vec!["test_method".parse().unwrap()], + }), + }; + + let mut delegate_action = signed_delegate_action.delegate_action.clone(); + delegate_action.actions = + vec![NonDelegateAction(Action::FunctionCall(FunctionCallAction { + args: Vec::new(), + deposit: 0, + gas: 300, + method_name: "test_method".parse().unwrap(), + }))]; + let result = test_delegate_action_key_permissions(&access_key, &delegate_action); + assert!(result.result.is_ok(), "Result error {:?}", result.result); + } + + #[test] + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + fn test_delegate_action_key_permissions_incorrect_action() { + let (_, signed_delegate_action) = create_delegate_action_receipt(); + let access_key = AccessKey { + nonce: 19000000, + permission: AccessKeyPermission::FunctionCall(FunctionCallPermission { + allowance: None, + receiver_id: signed_delegate_action.delegate_action.receiver_id.to_string(), + 
method_names: vec!["test_method".parse().unwrap()], + }), + }; + + let mut delegate_action = signed_delegate_action.delegate_action.clone(); + delegate_action.actions = + vec![NonDelegateAction(Action::CreateAccount(CreateAccountAction {}))]; + + let result = test_delegate_action_key_permissions(&access_key, &delegate_action); + + assert_eq!( + result.result, + Err(ActionErrorKind::DelegateActionAccessKeyError( + InvalidAccessKeyError::RequiresFullAccess, + ) + .into()) + ); + } + + #[test] + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + fn test_delegate_action_key_permissions_actions_number() { + let (_, signed_delegate_action) = create_delegate_action_receipt(); + let access_key = AccessKey { + nonce: 19000000, + permission: AccessKeyPermission::FunctionCall(FunctionCallPermission { + allowance: None, + receiver_id: signed_delegate_action.delegate_action.receiver_id.to_string(), + method_names: vec!["test_method".parse().unwrap()], + }), + }; + + let mut delegate_action = signed_delegate_action.delegate_action.clone(); + delegate_action.actions = vec![ + NonDelegateAction(Action::FunctionCall(FunctionCallAction { + args: Vec::new(), + deposit: 0, + gas: 300, + method_name: "test_method".parse().unwrap(), + })), + NonDelegateAction(Action::FunctionCall(FunctionCallAction { + args: Vec::new(), + deposit: 0, + gas: 300, + method_name: "test_method".parse().unwrap(), + })), + ]; + + let result = test_delegate_action_key_permissions(&access_key, &delegate_action); + + assert_eq!( + result.result, + Err(ActionErrorKind::DelegateActionAccessKeyError( + InvalidAccessKeyError::RequiresFullAccess, + ) + .into()) + ); + } + + #[test] + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + fn test_delegate_action_key_permissions_fncall_deposit() { + let (_, signed_delegate_action) = create_delegate_action_receipt(); + let access_key = AccessKey { + nonce: 19000000, + permission: AccessKeyPermission::FunctionCall(FunctionCallPermission { + allowance: 
None, + receiver_id: signed_delegate_action.delegate_action.receiver_id.to_string(), + method_names: Vec::new(), + }), + }; + + let mut delegate_action = signed_delegate_action.delegate_action.clone(); + delegate_action.actions = + vec![NonDelegateAction(Action::FunctionCall(FunctionCallAction { + args: Vec::new(), + deposit: 1, + gas: 300, + method_name: "test_method".parse().unwrap(), + }))]; + + let result = test_delegate_action_key_permissions(&access_key, &delegate_action); + + assert_eq!( + result.result, + Err(ActionErrorKind::DelegateActionAccessKeyError( + InvalidAccessKeyError::DepositWithFunctionCall, + ) + .into()) + ); + } + + #[test] + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + fn test_delegate_action_key_permissions_receiver_id() { + let (_, signed_delegate_action) = create_delegate_action_receipt(); + let access_key = AccessKey { + nonce: 19000000, + permission: AccessKeyPermission::FunctionCall(FunctionCallPermission { + allowance: None, + receiver_id: "another.near".parse().unwrap(), + method_names: Vec::new(), + }), + }; + + let mut delegate_action = signed_delegate_action.delegate_action.clone(); + delegate_action.actions = + vec![NonDelegateAction(Action::FunctionCall(FunctionCallAction { + args: Vec::new(), + deposit: 0, + gas: 300, + method_name: "test_method".parse().unwrap(), + }))]; + + let result = test_delegate_action_key_permissions(&access_key, &delegate_action); + + assert_eq!( + result.result, + Err(ActionErrorKind::DelegateActionAccessKeyError( + InvalidAccessKeyError::ReceiverMismatch { + tx_receiver: delegate_action.receiver_id.clone(), + ak_receiver: "another.near".parse().unwrap(), + }, + ) + .into()) + ); + } + + #[test] + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + fn test_delegate_action_key_permissions_method() { + let (_, signed_delegate_action) = create_delegate_action_receipt(); + let access_key = AccessKey { + nonce: 19000000, + permission: 
AccessKeyPermission::FunctionCall(FunctionCallPermission { + allowance: None, + receiver_id: signed_delegate_action.delegate_action.receiver_id.to_string(), + method_names: vec!["another_method".parse().unwrap()], + }), + }; + + let mut delegate_action = signed_delegate_action.delegate_action.clone(); + delegate_action.actions = + vec![NonDelegateAction(Action::FunctionCall(FunctionCallAction { + args: Vec::new(), + deposit: 0, + gas: 300, + method_name: "test_method".parse().unwrap(), + }))]; + + let result = test_delegate_action_key_permissions(&access_key, &delegate_action); + + assert_eq!( + result.result, + Err(ActionErrorKind::DelegateActionAccessKeyError( + InvalidAccessKeyError::MethodNameMismatch { + method_name: "test_method".parse().unwrap(), + }, + ) + .into()) + ); + } } diff --git a/runtime/runtime/src/balance_checker.rs b/runtime/runtime/src/balance_checker.rs index ca6feb59918..34cef10e967 100644 --- a/runtime/runtime/src/balance_checker.rs +++ b/runtime/runtime/src/balance_checker.rs @@ -2,7 +2,7 @@ use crate::safe_add_balance_apply; use crate::config::{ safe_add_balance, safe_add_gas, safe_gas_to_balance, total_deposit, total_prepaid_exec_fees, - total_prepaid_gas, + total_prepaid_gas, total_prepaid_send_fees, }; use crate::{ApplyStats, DelayedReceiptIndices, ValidatorAccountsUpdate}; use near_primitives::errors::{ @@ -55,6 +55,14 @@ fn receipt_cost( )?, )?; total_gas = safe_add_gas(total_gas, total_prepaid_gas(&action_receipt.actions)?)?; + total_gas = safe_add_gas( + total_gas, + total_prepaid_send_fees( + transaction_costs, + &action_receipt.actions, + current_protocol_version, + )?, + )?; let total_gas_cost = safe_gas_to_balance(action_receipt.gas_price, total_gas)?; total_cost = safe_add_balance(total_cost, total_gas_cost)?; } diff --git a/runtime/runtime/src/config.rs b/runtime/runtime/src/config.rs index a6e516e51c3..fbf655a46ff 100644 --- a/runtime/runtime/src/config.rs +++ b/runtime/runtime/src/config.rs @@ -8,7 +8,7 @@ use 
num_traits::pow::Pow; use near_primitives::account::AccessKeyPermission; use near_primitives::errors::IntegerOverflowError; // Just re-exporting RuntimeConfig for backwards compatibility. -pub use near_primitives::num_rational::Rational; +pub use near_primitives::num_rational::Rational32; pub use near_primitives::runtime::config::RuntimeConfig; use near_primitives::runtime::fees::{transfer_exec_fee, transfer_send_fee, RuntimeFeesConfig}; use near_primitives::transaction::{ @@ -37,7 +37,7 @@ pub struct TransactionCost { /// Multiplies `gas_price` by the power of `inflation_base` with exponent `inflation_exponent`. pub fn safe_gas_price_inflated( gas_price: Balance, - inflation_base: Rational, + inflation_base: Rational32, inflation_exponent: u8, ) -> Result { let numer = BigUint::from(*inflation_base.numer() as usize).pow(inflation_exponent as u32); @@ -125,12 +125,75 @@ pub fn total_send_fees( DeleteAccount(_) => { config.fee(ActionCosts::delete_account).send_fee(sender_is_receiver) } + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + Delegate(signed_delegate_action) => { + let delegate_cost = config.fee(ActionCosts::delegate).send_fee(sender_is_receiver); + let delegate_action = &signed_delegate_action.delegate_action; + + delegate_cost + + total_send_fees( + config, + sender_is_receiver, + &delegate_action.get_actions(), + &delegate_action.receiver_id, + current_protocol_version, + )? + } + }; + result = safe_add_gas(result, delta)?; + } + Ok(result) +} + +/// Total sum of gas that needs to be burnt to send the inner actions of DelegateAction +/// +/// This is only relevant for DelegateAction, where the send fees of the inner actions +/// need to be prepaid. All other actions burn send fees directly, so calling this function +/// with other actions will return 0. 
+#[cfg(feature = "protocol_feature_nep366_delegate_action")] +pub fn total_prepaid_send_fees( + config: &RuntimeFeesConfig, + actions: &[Action], + current_protocol_version: ProtocolVersion, +) -> Result { + let mut result = 0; + + for action in actions { + use Action::*; + let delta = match action { + Delegate(signed_delegate_action) => { + let delegate_action = &signed_delegate_action.delegate_action; + let sender_is_receiver = delegate_action.sender_id == delegate_action.receiver_id; + + total_send_fees( + config, + sender_is_receiver, + &delegate_action.get_actions(), + &delegate_action.receiver_id, + current_protocol_version, + )? + } + _ => 0, }; result = safe_add_gas(result, delta)?; } Ok(result) } +/// Total sum of gas that needs to be burnt to send the inner actions of DelegateAction +/// +/// This is only relevant for DelegateAction, where the send fees of the inner actions +/// need to be prepaid. All other actions burn send fees directly, so calling this function +/// with other actions will return 0. 
+#[cfg(not(feature = "protocol_feature_nep366_delegate_action"))] +pub fn total_prepaid_send_fees( + _config: &RuntimeFeesConfig, + _actions: &[Action], + _current_protocol_version: ProtocolVersion, +) -> Result { + Ok(0) +} + pub fn exec_fee( config: &RuntimeFeesConfig, action: &Action, @@ -176,6 +239,8 @@ pub fn exec_fee( }, DeleteKey(_) => config.fee(ActionCosts::delete_key).exec_fee(), DeleteAccount(_) => config.fee(ActionCosts::delete_account).exec_fee(), + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + Delegate(_) => config.fee(ActionCosts::delegate).exec_fee(), } } @@ -199,7 +264,10 @@ pub fn tx_cost( current_protocol_version, )?, )?; - let prepaid_gas = total_prepaid_gas(&transaction.actions)?; + let prepaid_gas = safe_add_gas( + total_prepaid_gas(&transaction.actions)?, + total_prepaid_send_fees(config, &transaction.actions, current_protocol_version)?, + )?; // If signer is equals to receiver the receipt will be processed at the same block as this // transaction. Otherwise it will processed in the next block and the gas might be inflated. let initial_receipt_hop = if transaction.signer_id == transaction.receiver_id { 0 } else { 1 }; @@ -246,7 +314,36 @@ pub fn total_prepaid_exec_fees( ) -> Result { let mut result = 0; for action in actions { - let delta = exec_fee(config, action, receiver_id, current_protocol_version); + #[cfg_attr(not(feature = "protocol_feature_nep366_delegate_action"), allow(unused_mut))] + let mut delta; + // In case of Action::Delegate it's needed to add Gas which is required for the inner actions. 
+ #[cfg(feature = "protocol_feature_nep366_delegate_action")] + if let Action::Delegate(signed_delegate_action) = action { + let actions = signed_delegate_action.delegate_action.get_actions(); + delta = total_prepaid_exec_fees( + config, + &actions, + &signed_delegate_action.delegate_action.receiver_id, + current_protocol_version, + )?; + delta = safe_add_gas( + delta, + exec_fee( + config, + action, + &signed_delegate_action.delegate_action.receiver_id, + current_protocol_version, + ), + )?; + delta = safe_add_gas(delta, config.fee(ActionCosts::new_action_receipt).exec_fee())?; + } else { + delta = exec_fee(config, action, receiver_id, current_protocol_version); + } + #[cfg(not(feature = "protocol_feature_nep366_delegate_action"))] + { + delta = exec_fee(config, action, receiver_id, current_protocol_version); + } + result = safe_add_gas(result, delta)?; } Ok(result) @@ -255,14 +352,46 @@ pub fn total_prepaid_exec_fees( pub fn total_deposit(actions: &[Action]) -> Result { let mut total_balance: Balance = 0; for action in actions { - total_balance = safe_add_balance(total_balance, action.get_deposit_balance())?; + let action_balance; + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + if let Action::Delegate(signed_delegate_action) = action { + // Note, here Relayer pays the deposit but if actions fail, the deposit is + // refunded to Sender of DelegateAction + let actions = signed_delegate_action.delegate_action.get_actions(); + action_balance = total_deposit(&actions)?; + } else { + action_balance = action.get_deposit_balance(); + } + #[cfg(not(feature = "protocol_feature_nep366_delegate_action"))] + { + action_balance = action.get_deposit_balance(); + } + + total_balance = safe_add_balance(total_balance, action_balance)?; } Ok(total_balance) } /// Get the total sum of prepaid gas for given actions. 
pub fn total_prepaid_gas(actions: &[Action]) -> Result { - actions.iter().try_fold(0, |acc, action| safe_add_gas(acc, action.get_prepaid_gas())) + let mut total_gas: Gas = 0; + for action in actions { + let action_gas; + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + if let Action::Delegate(signed_delegate_action) = action { + let actions = signed_delegate_action.delegate_action.get_actions(); + action_gas = total_prepaid_gas(&actions)?; + } else { + action_gas = action.get_prepaid_gas(); + } + #[cfg(not(feature = "protocol_feature_nep366_delegate_action"))] + { + action_gas = action.get_prepaid_gas(); + } + + total_gas = safe_add_gas(total_gas, action_gas)?; + } + Ok(total_gas) } #[cfg(test)] @@ -271,10 +400,10 @@ mod tests { #[test] fn test_safe_gas_price_inflated() { - assert_eq!(safe_gas_price_inflated(10000, Rational::new(101, 100), 1).unwrap(), 10100); - assert_eq!(safe_gas_price_inflated(10000, Rational::new(101, 100), 2).unwrap(), 10201); + assert_eq!(safe_gas_price_inflated(10000, Rational32::new(101, 100), 1).unwrap(), 10100); + assert_eq!(safe_gas_price_inflated(10000, Rational32::new(101, 100), 2).unwrap(), 10201); // Rounded up - assert_eq!(safe_gas_price_inflated(10000, Rational::new(101, 100), 3).unwrap(), 10304); - assert_eq!(safe_gas_price_inflated(10000, Rational::new(101, 100), 32).unwrap(), 13750); + assert_eq!(safe_gas_price_inflated(10000, Rational32::new(101, 100), 3).unwrap(), 10304); + assert_eq!(safe_gas_price_inflated(10000, Rational32::new(101, 100), 32).unwrap(), 13750); } } diff --git a/runtime/runtime/src/lib.rs b/runtime/runtime/src/lib.rs index dd96e9bd2f6..281ca1f097d 100644 --- a/runtime/runtime/src/lib.rs +++ b/runtime/runtime/src/lib.rs @@ -3,6 +3,7 @@ use std::collections::{HashMap, HashSet}; use std::rc::Rc; use std::sync::Arc; +use config::total_prepaid_send_fees; use tracing::debug; use near_chain_configs::Genesis; @@ -442,6 +443,17 @@ impl Runtime { apply_state.current_protocol_version, )?; } + #[cfg(feature 
= "protocol_feature_nep366_delegate_action")] + Action::Delegate(signed_delegate_action) => { + apply_delegate_action( + state_update, + apply_state, + action_receipt, + account_id, + signed_delegate_action, + &mut result, + )?; + } }; Ok(result) } @@ -758,7 +770,14 @@ impl Runtime { transaction_costs: &RuntimeFeesConfig, ) -> Result { let total_deposit = total_deposit(&action_receipt.actions)?; - let prepaid_gas = total_prepaid_gas(&action_receipt.actions)?; + let prepaid_gas = safe_add_gas( + total_prepaid_gas(&action_receipt.actions)?, + total_prepaid_send_fees( + transaction_costs, + &action_receipt.actions, + current_protocol_version, + )?, + )?; let prepaid_exec_gas = safe_add_gas( total_prepaid_exec_fees( transaction_costs, @@ -803,6 +822,7 @@ impl Runtime { )?, )?; } + if deposit_refund > 0 { result .new_receipts diff --git a/runtime/runtime/src/verifier.rs b/runtime/runtime/src/verifier.rs index b17c11a1790..79a1fac1410 100644 --- a/runtime/runtime/src/verifier.rs +++ b/runtime/runtime/src/verifier.rs @@ -131,6 +131,9 @@ fn is_zero_balance_account( Ok(true) } +#[cfg(feature = "protocol_feature_nep366_delegate_action")] +use near_primitives::transaction::SignedDelegateAction; + /// Validates the transaction without using the state. It allows any node to validate a /// transaction before forwarding it to the node that tracks the `signer_id` account. pub fn validate_transaction( @@ -380,6 +383,7 @@ fn validate_data_receipt( /// /// - Checks limits if applicable. /// - Checks that the total number of actions doesn't exceed the limit. +/// - Checks that there not other action if Action::Delegate is present. /// - Validates each individual action. /// - Checks that the total prepaid gas doesn't exceed the limit. 
pub(crate) fn validate_actions( @@ -393,12 +397,22 @@ pub(crate) fn validate_actions( }); } + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + let mut found_delegate_action = false; let mut iter = actions.iter().peekable(); while let Some(action) = iter.next() { if let Action::DeleteAccount(_) = action { if iter.peek().is_some() { return Err(ActionsValidationError::DeleteActionMustBeFinal); } + } else { + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + if let Action::Delegate(_) = action { + if found_delegate_action { + return Err(ActionsValidationError::DelegateActionMustBeOnlyOne); + } + found_delegate_action = true; + } } validate_action(limit_config, action)?; } @@ -429,9 +443,21 @@ pub fn validate_action( Action::AddKey(a) => validate_add_key_action(limit_config, a), Action::DeleteKey(_) => Ok(()), Action::DeleteAccount(_) => Ok(()), + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + Action::Delegate(a) => validate_delegate_action(limit_config, a), } } +#[cfg(feature = "protocol_feature_nep366_delegate_action")] +fn validate_delegate_action( + limit_config: &VMLimitConfig, + signed_delegate_action: &SignedDelegateAction, +) -> Result<(), ActionsValidationError> { + let actions = signed_delegate_action.delegate_action.get_actions(); + validate_actions(limit_config, &actions)?; + Ok(()) +} + /// Validates `DeployContractAction`. Checks that the given contract size doesn't exceed the limit. fn validate_deploy_contract_action( limit_config: &VMLimitConfig, @@ -578,6 +604,11 @@ mod tests { use crate::near_primitives::trie_key::TrieKey; use near_store::{set, set_code}; + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + use near_crypto::Signature; + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + use near_primitives::transaction::{DelegateAction, NonDelegateAction}; + /// Initial balance used in tests. 
const TESTING_INIT_BALANCE: Balance = 1_000_000_000 * NEAR_BASE; @@ -1817,4 +1848,47 @@ mod tests { ) .expect("valid action"); } + + #[test] + #[cfg(feature = "protocol_feature_nep366_delegate_action")] + fn test_delegate_action_must_be_only_one() { + let signed_delegate_action = SignedDelegateAction { + delegate_action: DelegateAction { + sender_id: "bob.test.near".parse().unwrap(), + receiver_id: "token.test.near".parse().unwrap(), + actions: vec![NonDelegateAction(Action::CreateAccount(CreateAccountAction {}))], + nonce: 19000001, + max_block_height: 57, + public_key: PublicKey::empty(KeyType::ED25519), + }, + signature: Signature::default(), + }; + assert_eq!( + validate_actions( + &VMLimitConfig::test(), + &[ + Action::Delegate(signed_delegate_action.clone()), + Action::Delegate(signed_delegate_action.clone()), + ] + ), + Err(ActionsValidationError::DelegateActionMustBeOnlyOne), + ); + assert_eq!( + validate_actions( + &&VMLimitConfig::test(), + &[Action::Delegate(signed_delegate_action.clone()),] + ), + Ok(()), + ); + assert_eq!( + validate_actions( + &VMLimitConfig::test(), + &[ + Action::CreateAccount(CreateAccountAction {}), + Action::Delegate(signed_delegate_action.clone()), + ] + ), + Ok(()), + ); + } } diff --git a/runtime/runtime/tests/runtime_group_tools/random_config.rs b/runtime/runtime/tests/runtime_group_tools/random_config.rs index 7b35087111c..e40268f3687 100644 --- a/runtime/runtime/tests/runtime_group_tools/random_config.rs +++ b/runtime/runtime/tests/runtime_group_tools/random_config.rs @@ -1,4 +1,4 @@ -use near_primitives::num_rational::Rational; +use near_primitives::num_rational::Rational32; use near_primitives::runtime::config::RuntimeConfig; use near_primitives::runtime::fees::{Fee, RuntimeFeesConfig, StorageUsageConfig}; use rand::{thread_rng, RngCore}; @@ -20,8 +20,8 @@ pub fn random_config() -> RuntimeConfig { num_extra_bytes_record: rng.next_u64() % 10000, storage_amount_per_byte: rng.next_u64() as u128, }, - burnt_gas_reward: 
Rational::new((rng.next_u32() % 100).try_into().unwrap(), 100), - pessimistic_gas_price_inflation_ratio: Rational::new( + burnt_gas_reward: Rational32::new((rng.next_u32() % 100).try_into().unwrap(), 100), + pessimistic_gas_price_inflation_ratio: Rational32::new( (101 + rng.next_u32() % 10).try_into().unwrap(), 100, ), diff --git a/test-utils/store-validator/src/main.rs b/test-utils/store-validator/src/main.rs index 9acfb61590c..e0f74a09576 100644 --- a/test-utils/store-validator/src/main.rs +++ b/test-utils/store-validator/src/main.rs @@ -6,7 +6,7 @@ use ansi_term::Color::{Green, Red, White, Yellow}; use clap::{Arg, Command}; use near_chain::store_validator::StoreValidator; -use near_chain::RuntimeAdapter; +use near_chain::RuntimeWithEpochManagerAdapter; use near_chain_configs::GenesisValidationMode; use near_o11y::testonly::init_integration_logger; use nearcore::{get_default_home, load_config}; @@ -34,7 +34,7 @@ fn main() { .open() .unwrap() .get_store(near_store::Temperature::Hot); - let runtime_adapter: Arc = + let runtime_adapter: Arc = Arc::new(nearcore::NightshadeRuntime::from_config(home_dir, store.clone(), &near_config)); let mut store_validator = StoreValidator::new( diff --git a/tools/cold-store/Cargo.toml b/tools/cold-store/Cargo.toml index 23d3a8a1e75..618f4b82bba 100644 --- a/tools/cold-store/Cargo.toml +++ b/tools/cold-store/Cargo.toml @@ -13,9 +13,3 @@ near-chain-configs = { path = "../../core/chain-configs"} near-epoch-manager = { path = "../../chain/epoch-manager" } near-primitives = { path = "../../core/primitives" } near-store = { path = "../../core/store"} - -[features] -cold_store = [ - "near-store/cold_store", - "nearcore/cold_store", -] \ No newline at end of file diff --git a/tools/cold-store/src/cli.rs b/tools/cold-store/src/cli.rs index dc438b8caef..0d1f82b7078 100644 --- a/tools/cold-store/src/cli.rs +++ b/tools/cold-store/src/cli.rs @@ -2,10 +2,11 @@ use near_epoch_manager::EpochManagerAdapter; use near_primitives::block::Tip; use 
near_primitives::hash::CryptoHash; use near_store::cold_storage::{update_cold_db, update_cold_head}; -use near_store::{DBCol, NodeStorage, Temperature, FINAL_HEAD_KEY, HEAD_KEY}; +use near_store::{DBCol, NodeStorage, Temperature, COLD_HEAD_KEY, FINAL_HEAD_KEY, HEAD_KEY}; use nearcore::{NearConfig, NightshadeRuntime}; use clap::Parser; +use std::io::Result; use std::path::Path; use std::sync::Arc; @@ -41,14 +42,11 @@ impl ColdStoreCommand { ); let store = opener.open().unwrap_or_else(|e| panic!("Error opening storage: {:#}", e)); - let hot_runtime = Arc::new(NightshadeRuntime::from_config( - home_dir, - store.get_store(Temperature::Hot), - &near_config, - )); + let hot_runtime = + Arc::new(NightshadeRuntime::from_config(home_dir, store.get_hot_store(), &near_config)); match self.subcmd { SubCommand::Open => check_open(&store), - SubCommand::Head => print_heads(&store), + SubCommand::Head => print_heads(&store).unwrap(), SubCommand::CopyNextBlocks(cmd) => { for _ in 0..cmd.number_of_blocks { copy_next_block(&store, &near_config, &hot_runtime); @@ -68,19 +66,30 @@ fn check_open(store: &NodeStorage) { assert!(store.has_cold()); } -fn print_heads(store: &NodeStorage) { - println!( - "HOT HEAD is at {:#?}", - store.get_store(Temperature::Hot).get_ser::(DBCol::BlockMisc, HEAD_KEY) - ); - println!( - "HOT FINAL HEAD is at {:#?}", - store.get_store(Temperature::Hot).get_ser::(DBCol::BlockMisc, FINAL_HEAD_KEY) - ); - println!( - "COLD HEAD is at {:#?}", - store.get_store(Temperature::Cold).get_ser::(DBCol::BlockMisc, HEAD_KEY) - ); +fn print_heads(store: &NodeStorage) -> Result<()> { + let hot_store = store.get_hot_store(); + let cold_store = store.get_cold_store(); + + // hot store + { + let kind = hot_store.get_db_kind()?; + let head = hot_store.get_ser::(DBCol::BlockMisc, HEAD_KEY)?; + let final_head = hot_store.get_ser::(DBCol::BlockMisc, FINAL_HEAD_KEY)?; + let cold_head = hot_store.get_ser::(DBCol::BlockMisc, COLD_HEAD_KEY)?; + println!("HOT STORE KIND is {:#?}", 
kind); + println!("HOT STORE HEAD is at {:#?}", head); + println!("HOT STORE FINAL_HEAD is at {:#?}", final_head); + println!("HOT STORE COLD_HEAD is at {:#?}", cold_head); + } + + // cold store + if let Some(cold_store) = cold_store { + let kind = cold_store.get_db_kind()?; + let head_in_cold = cold_store.get_ser::(DBCol::BlockMisc, HEAD_KEY)?; + println!("COLD STORE KIND is {:#?}", kind); + println!("COLD STORE HEAD is at {:#?}", head_in_cold); + } + Ok(()) } fn copy_next_block(store: &NodeStorage, config: &NearConfig, hot_runtime: &Arc) { diff --git a/tools/cold-store/src/lib.rs b/tools/cold-store/src/lib.rs index ea746796b94..7d392b8dbe7 100644 --- a/tools/cold-store/src/lib.rs +++ b/tools/cold-store/src/lib.rs @@ -1,4 +1,2 @@ -#[cfg(feature = "cold_store")] pub mod cli; -#[cfg(feature = "cold_store")] pub use cli::ColdStoreCommand; diff --git a/tools/mirror/src/offline.rs b/tools/mirror/src/offline.rs index b805314e9ec..05b9466bb4d 100644 --- a/tools/mirror/src/offline.rs +++ b/tools/mirror/src/offline.rs @@ -1,7 +1,8 @@ use crate::{ChainError, SourceBlock}; use anyhow::Context; use async_trait::async_trait; -use near_chain::{ChainStore, ChainStoreAccess, RuntimeAdapter}; +use near_chain::types::RuntimeAdapter; +use near_chain::{ChainStore, ChainStoreAccess}; use near_chain_configs::GenesisValidationMode; use near_crypto::PublicKey; use near_epoch_manager::EpochManagerAdapter; diff --git a/tools/mock-node/src/lib.rs b/tools/mock-node/src/lib.rs index 693e2c6f9d1..e22d6402b9b 100644 --- a/tools/mock-node/src/lib.rs +++ b/tools/mock-node/src/lib.rs @@ -525,7 +525,7 @@ impl ChainHistoryAccess { mod test { use crate::ChainHistoryAccess; use near_chain::ChainGenesis; - use near_chain::{Chain, RuntimeAdapter}; + use near_chain::{Chain, RuntimeWithEpochManagerAdapter}; use near_chain_configs::Genesis; use near_client::test_utils::TestEnv; use near_network::types::PartialEncodedChunkRequestMsg; @@ -544,7 +544,7 @@ mod test { Path::new("../../../.."), 
create_test_store(), &genesis, - )) as Arc]; + )) as Arc]; let mut env = TestEnv::builder(chain_genesis.clone()) .validator_seats(1) .runtime_adapters(runtimes.clone()) diff --git a/tools/mock-node/src/setup.rs b/tools/mock-node/src/setup.rs index f95fbd1a3a3..dbba86b6438 100644 --- a/tools/mock-node/src/setup.rs +++ b/tools/mock-node/src/setup.rs @@ -3,10 +3,9 @@ use crate::{MockNetworkConfig, MockPeerManagerActor}; use actix::{Actor, Addr, Arbiter}; use anyhow::Context; +use near_chain::types::RuntimeAdapter; use near_chain::ChainStoreUpdate; -use near_chain::{ - Chain, ChainGenesis, ChainStore, ChainStoreAccess, DoomslugThresholdMode, RuntimeAdapter, -}; +use near_chain::{Chain, ChainGenesis, ChainStore, ChainStoreAccess, DoomslugThresholdMode}; use near_chain_configs::GenesisConfig; use near_client::{start_client, start_view_client, ClientActor, ViewClientActor}; use near_epoch_manager::{EpochManager, EpochManagerAdapter}; @@ -246,6 +245,7 @@ pub fn setup_mock_node( telemetry, None, adv.clone(), + None, ); let view_client = start_view_client( diff --git a/tools/state-viewer/Cargo.toml b/tools/state-viewer/Cargo.toml index f91d2398613..046e2be170d 100644 --- a/tools/state-viewer/Cargo.toml +++ b/tools/state-viewer/Cargo.toml @@ -14,9 +14,11 @@ once_cell.workspace = true rand.workspace = true rayon.workspace = true redis.workspace = true +rust-s3.workspace = true serde.workspace = true serde_json.workspace = true tempfile.workspace = true +thiserror.workspace = true tracing.workspace = true near-chain = { path = "../../chain/chain" } @@ -47,4 +49,3 @@ nightly = [ ] nightly_protocol = ["nearcore/nightly_protocol"] protocol_feature_flat_state = ["nearcore/protocol_feature_flat_state"] -cold_store = ["near-store/cold_store"] diff --git a/tools/state-viewer/src/apply_chain_range.rs b/tools/state-viewer/src/apply_chain_range.rs index 16c65dad813..9ed3ca26b20 100644 --- a/tools/state-viewer/src/apply_chain_range.rs +++ b/tools/state-viewer/src/apply_chain_range.rs @@ 
-8,7 +8,7 @@ use rayon::iter::{IntoParallelIterator, ParallelIterator}; use near_chain::chain::collect_receipts_from_response; use near_chain::migrations::check_if_block_is_first_with_chunk_of_version; use near_chain::types::ApplyTransactionResult; -use near_chain::{ChainStore, ChainStoreAccess, ChainStoreUpdate, RuntimeAdapter}; +use near_chain::{ChainStore, ChainStoreAccess, ChainStoreUpdate, RuntimeWithEpochManagerAdapter}; use near_chain_configs::Genesis; use near_primitives::borsh::maybestd::sync::Arc; use near_primitives::hash::CryptoHash; @@ -115,7 +115,7 @@ fn apply_block_from_range( shard_id: ShardId, store: Store, genesis: &Genesis, - runtime_adapter: Arc, + runtime_adapter: Arc, progress_reporter: &ProgressReporter, verbose_output: bool, csv_file_mutex: &Mutex>, @@ -351,7 +351,7 @@ pub fn apply_chain_range( only_contracts, sequential) .entered(); - let runtime_adapter: Arc = Arc::new(runtime); + let runtime_adapter: Arc = Arc::new(runtime); let chain_store = ChainStore::new(store.clone(), genesis.config.genesis_height, false); let end_height = end_height.unwrap_or_else(|| chain_store.head().unwrap().height); let start_height = start_height.unwrap_or_else(|| chain_store.tail().unwrap()); diff --git a/tools/state-viewer/src/apply_chunk.rs b/tools/state-viewer/src/apply_chunk.rs index ed518ec38b6..a3b5eb440ab 100644 --- a/tools/state-viewer/src/apply_chunk.rs +++ b/tools/state-viewer/src/apply_chunk.rs @@ -3,7 +3,7 @@ use borsh::BorshDeserialize; use near_chain::chain::collect_receipts_from_response; use near_chain::migrations::check_if_block_is_first_with_chunk_of_version; use near_chain::types::ApplyTransactionResult; -use near_chain::{ChainStore, ChainStoreAccess, RuntimeAdapter}; +use near_chain::{ChainStore, ChainStoreAccess, RuntimeWithEpochManagerAdapter}; use near_primitives::hash::CryptoHash; use near_primitives::merkle::combine_hash; use near_primitives::receipt::Receipt; @@ -72,7 +72,7 @@ fn get_incoming_receipts( // returns (apply_result, gas 
limit) pub(crate) fn apply_chunk( - runtime: &dyn RuntimeAdapter, + runtime: &dyn RuntimeWithEpochManagerAdapter, chain_store: &mut ChainStore, chunk_hash: ChunkHash, target_height: Option, @@ -149,7 +149,7 @@ enum HashType { fn find_tx_or_receipt( hash: &CryptoHash, block_hash: &CryptoHash, - runtime: &dyn RuntimeAdapter, + runtime: &dyn RuntimeWithEpochManagerAdapter, chain_store: &mut ChainStore, ) -> anyhow::Result> { let block = chain_store.get_block(&block_hash)?; @@ -176,7 +176,7 @@ fn find_tx_or_receipt( } fn apply_tx_in_block( - runtime: &dyn RuntimeAdapter, + runtime: &dyn RuntimeWithEpochManagerAdapter, chain_store: &mut ChainStore, tx_hash: &CryptoHash, block_hash: CryptoHash, @@ -203,7 +203,7 @@ fn apply_tx_in_block( } fn apply_tx_in_chunk( - runtime: &dyn RuntimeAdapter, + runtime: &dyn RuntimeWithEpochManagerAdapter, store: Store, chain_store: &mut ChainStore, tx_hash: &CryptoHash, @@ -262,7 +262,7 @@ fn apply_tx_in_chunk( pub(crate) fn apply_tx( genesis_height: BlockHeight, - runtime: &dyn RuntimeAdapter, + runtime: &dyn RuntimeWithEpochManagerAdapter, store: Store, tx_hash: CryptoHash, ) -> anyhow::Result> { @@ -277,7 +277,7 @@ pub(crate) fn apply_tx( } fn apply_receipt_in_block( - runtime: &dyn RuntimeAdapter, + runtime: &dyn RuntimeWithEpochManagerAdapter, chain_store: &mut ChainStore, id: &CryptoHash, block_hash: CryptoHash, @@ -305,7 +305,7 @@ fn apply_receipt_in_block( } fn apply_receipt_in_chunk( - runtime: &dyn RuntimeAdapter, + runtime: &dyn RuntimeWithEpochManagerAdapter, store: Store, chain_store: &mut ChainStore, id: &CryptoHash, @@ -390,7 +390,7 @@ fn apply_receipt_in_chunk( pub(crate) fn apply_receipt( genesis_height: BlockHeight, - runtime: &dyn RuntimeAdapter, + runtime: &dyn RuntimeWithEpochManagerAdapter, store: Store, id: CryptoHash, ) -> anyhow::Result> { diff --git a/tools/state-viewer/src/cli.rs b/tools/state-viewer/src/cli.rs index 49c2120be0d..6d1a9600815 100644 --- a/tools/state-viewer/src/cli.rs +++ 
b/tools/state-viewer/src/cli.rs @@ -39,6 +39,9 @@ pub enum StateViewerSubCommand { CheckBlock, /// Looks up a certain chunk. Chunks(ChunksCmd), + /// List account names with contracts deployed. + #[clap(alias = "contract_accounts")] + ContractAccounts(ContractAccountsCmd), /// Dump contract data in storage of given account to binary file. #[clap(alias = "dump_account_storage")] DumpAccountStorage(DumpAccountStorageCmd), @@ -92,11 +95,8 @@ impl StateViewerSubCommand { let near_config = load_config(home_dir, genesis_validation) .unwrap_or_else(|e| panic!("Error loading config: {:#}", e)); - #[cfg(feature = "cold_store")] let cold_store_config: Option<&near_store::StoreConfig> = near_config.config.cold_store.as_ref(); - #[cfg(not(feature = "cold_store"))] - let cold_store_config: Option = None; let store_opener = NodeStorage::opener(home_dir, &near_config.config.store, cold_store_config); @@ -113,6 +113,7 @@ impl StateViewerSubCommand { StateViewerSubCommand::Chain(cmd) => cmd.run(home_dir, near_config, store), StateViewerSubCommand::CheckBlock => check_block_chunk_existence(near_config, store), StateViewerSubCommand::Chunks(cmd) => cmd.run(near_config, store), + StateViewerSubCommand::ContractAccounts(cmd) => cmd.run(home_dir, near_config, store), StateViewerSubCommand::DumpAccountStorage(cmd) => cmd.run(home_dir, near_config, store), StateViewerSubCommand::DumpCode(cmd) => cmd.run(home_dir, near_config, store), StateViewerSubCommand::DumpState(cmd) => cmd.run(home_dir, near_config, store), @@ -260,6 +261,18 @@ impl ChunksCmd { } } +#[derive(Parser)] +pub struct ContractAccountsCmd { + // TODO: add filter options, e.g. 
only contracts that execute certain + // actions +} + +impl ContractAccountsCmd { + pub fn run(self, home_dir: &Path, near_config: NearConfig, store: Store) { + contract_accounts(home_dir, store, near_config).unwrap(); + } +} + #[derive(Parser)] pub struct DumpAccountStorageCmd { #[clap(long)] @@ -357,11 +370,27 @@ pub struct DumpStatePartsCmd { part_id: Option, /// Where to write the state parts to. #[clap(long)] - output_dir: PathBuf, + output_dir: Option, + /// S3 bucket to store state parts. + #[clap(long)] + s3_bucket: Option, + /// S3 region to store state parts. + #[clap(long)] + s3_region: Option, } impl DumpStatePartsCmd { pub fn run(self, home_dir: &Path, near_config: NearConfig, store: Store) { + assert_eq!( + self.s3_bucket.is_some(), + self.s3_region.is_some(), + "Need to provide either both or none of --s3-bucket and --s3-region" + ); + let s3 = if let Some(s3_bucket) = self.s3_bucket { + Some((s3_bucket, self.s3_region.unwrap())) + } else { + None + }; dump_state_parts( self.epoch_selection, self.shard_id, @@ -369,7 +398,8 @@ impl DumpStatePartsCmd { home_dir, near_config, store, - &self.output_dir, + self.output_dir, + s3, ); } } diff --git a/tools/state-viewer/src/commands.rs b/tools/state-viewer/src/commands.rs index 9b8b53264ae..0ecbf3958c4 100644 --- a/tools/state-viewer/src/commands.rs +++ b/tools/state-viewer/src/commands.rs @@ -1,4 +1,5 @@ use crate::apply_chain_range::apply_chain_range; +use crate::contract_accounts::ContractAccount; use crate::state_dump::state_dump; use crate::state_dump::state_dump_redis; use crate::tx_dump::dump_tx_from_block; @@ -6,14 +7,18 @@ use crate::{apply_chunk, epoch_info}; use ansi_term::Color::Red; use near_chain::chain::collect_receipts_from_response; use near_chain::migrations::check_if_block_is_first_with_chunk_of_version; +use near_chain::types::RuntimeAdapter; use near_chain::types::{ApplyTransactionResult, BlockHeaderInfo}; -use near_chain::{ChainStore, ChainStoreAccess, ChainStoreUpdate, Error, 
RuntimeAdapter}; +use near_chain::{ + ChainStore, ChainStoreAccess, ChainStoreUpdate, Error, RuntimeWithEpochManagerAdapter, +}; use near_chain_configs::GenesisChangeConfig; use near_epoch_manager::{EpochManager, EpochManagerAdapter}; use near_network::iter_peers_from_store; use near_primitives::account::id::AccountId; use near_primitives::block::{Block, BlockHeader}; use near_primitives::hash::CryptoHash; +use near_primitives::shard_layout::ShardLayout; use near_primitives::shard_layout::ShardUId; use near_primitives::sharding::ChunkHash; use near_primitives::state_record::StateRecord; @@ -22,6 +27,7 @@ use near_primitives::types::{chunk_extra::ChunkExtra, BlockHeight, ShardId, Stat use near_primitives_core::types::Gas; use near_store::db::Database; use near_store::test_utils::create_test_store; +use near_store::TrieDBStorage; use near_store::{Store, Trie, TrieCache, TrieCachingStorage, TrieConfig}; use nearcore::{NearConfig, NightshadeRuntime}; use node_runtime::adapter::ViewRuntimeAdapter; @@ -35,7 +41,7 @@ use std::sync::Arc; pub(crate) fn apply_block( block_hash: CryptoHash, shard_id: ShardId, - runtime_adapter: &dyn RuntimeAdapter, + runtime_adapter: &dyn RuntimeWithEpochManagerAdapter, chain_store: &mut ChainStore, ) -> (Block, ApplyTransactionResult) { let block = chain_store.get_block(&block_hash).unwrap(); @@ -125,7 +131,7 @@ pub(crate) fn apply_block_at_height( near_config.genesis.config.genesis_height, near_config.client_config.save_trie_changes, ); - let runtime_adapter: Arc = + let runtime_adapter: Arc = Arc::new(NightshadeRuntime::from_config(home_dir, store, &near_config)); let block_hash = chain_store.get_block_hash_by_height(height).unwrap(); let (block, apply_result) = @@ -418,7 +424,7 @@ pub(crate) fn peers(db: Arc) { pub(crate) fn print_apply_block_result( block: &Block, apply_result: &ApplyTransactionResult, - runtime_adapter: &dyn RuntimeAdapter, + runtime_adapter: &dyn RuntimeWithEpochManagerAdapter, chain_store: &mut ChainStore, shard_id: 
ShardId, ) { @@ -717,7 +723,7 @@ pub(crate) fn print_epoch_info( let mut epoch_manager = EpochManager::new_from_genesis_config(store.clone(), &near_config.genesis.config) .expect("Failed to start Epoch Manager"); - let runtime_adapter: Arc = + let runtime_adapter: Arc = Arc::new(NightshadeRuntime::from_config(&home_dir, store.clone(), &near_config)); epoch_info::print_epoch_info( @@ -841,3 +847,33 @@ fn format_hash(h: CryptoHash, show_full_hashes: bool) -> String { pub fn chunk_mask_to_str(mask: &[bool]) -> String { mask.iter().map(|f| if *f { '.' } else { 'X' }).collect() } + +pub(crate) fn contract_accounts( + home_dir: &Path, + store: Store, + near_config: NearConfig, +) -> anyhow::Result<()> { + let (_runtime, state_roots, _header) = load_trie(store.clone(), home_dir, &near_config); + + for (shard_id, &state_root) in state_roots.iter().enumerate() { + eprintln!("Starting shard {shard_id}"); + // TODO: This assumes simple nightshade layout, it will need an update when we reshard. + let shard_uid = ShardUId::from_shard_id_and_layout( + shard_id as u64, + &ShardLayout::get_simple_nightshade_layout(), + ); + // Use simple non-caching storage, we don't expect many duplicate lookups while iterating. + let storage = TrieDBStorage::new(store.clone(), shard_uid); + // We don't need flat state to traverse all accounts. + let flat_state = None; + let trie = Trie::new(Box::new(storage), state_root, flat_state); + + for contract in ContractAccount::in_trie(&trie)? { + match contract { + Ok(contract) => println!("{contract}"), + Err(err) => eprintln!("{err}"), + } + } + } + Ok(()) +} diff --git a/tools/state-viewer/src/contract_accounts.rs b/tools/state-viewer/src/contract_accounts.rs new file mode 100644 index 00000000000..5260a1aa17c --- /dev/null +++ b/tools/state-viewer/src/contract_accounts.rs @@ -0,0 +1,163 @@ +//! State viewer functions to list and filter accounts that have contracts +//! deployed. 
+ +use near_primitives::hash::CryptoHash; +use near_primitives::trie_key::trie_key_parsers::parse_account_id_from_contract_code_key; +use near_primitives::trie_key::TrieKey; +use near_primitives::types::AccountId; +use near_store::{NibbleSlice, StorageError, Trie, TrieTraversalItem}; +use std::collections::VecDeque; +use std::sync::Arc; + +/// Output type for contract account queries with all relevant data around a +/// single contract. +pub(crate) struct ContractAccount { + pub(crate) account_id: AccountId, + pub(crate) source_wasm: Arc<[u8]>, +} + +#[derive(Debug, thiserror::Error)] +pub enum ContractAccountError { + #[error("could not parse key {1:?}")] + InvalidKey(#[source] std::io::Error, Vec), + #[error("failed loading contract code for account {1}")] + NoCode(#[source] StorageError, AccountId), +} + +impl std::fmt::Display for ContractAccount { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:<64} {:>9}", self.account_id, self.source_wasm.len()) + } +} + +impl ContractAccount { + /// Iterate over all contracts stored in the given trie, in lexicographic + /// order of the account IDs. + pub(crate) fn in_trie(trie: &Trie) -> anyhow::Result { + ContractAccountIterator::new(trie) + } + + fn from_contract_trie_node( + trie_key: &[u8], + value_hash: CryptoHash, + trie: &Trie, + ) -> Result { + let account_id = parse_account_id_from_contract_code_key(trie_key) + .map_err(|err| ContractAccountError::InvalidKey(err, trie_key.to_vec()))?; + let source_wasm = trie + .storage + .retrieve_raw_bytes(&value_hash) + .map_err(|err| ContractAccountError::NoCode(err, account_id.clone()))?; + Ok(Self { account_id, source_wasm }) + } +} + +pub(crate) struct ContractAccountIterator<'a> { + /// Trie nodes that point to the contracts. 
+ contract_nodes: VecDeque, + trie: &'a Trie, +} + +impl<'a> ContractAccountIterator<'a> { + pub(crate) fn new(trie: &'a Trie) -> anyhow::Result { + let mut trie_iter = trie.iter()?; + // TODO(#8376): Consider changing the interface to TrieKey to make this easier. + // `TrieKey::ContractCode` requires a valid `AccountId`, we use "xx" + let key = TrieKey::ContractCode { account_id: "xx".parse()? }.to_vec(); + let (prefix, suffix) = key.split_at(key.len() - 2); + assert_eq!(suffix, "xx".as_bytes()); + + // `visit_nodes_interval` wants nibbles stored in `Vec` as input + let nibbles_before: Vec = NibbleSlice::new(prefix).iter().collect(); + let nibbles_after = { + let mut tmp = nibbles_before.clone(); + *tmp.last_mut().unwrap() += 1; + tmp + }; + + // finally, use trie iterator to find all contract nodes + let vec_of_nodes = trie_iter.visit_nodes_interval(&nibbles_before, &nibbles_after)?; + let contract_nodes = VecDeque::from(vec_of_nodes); + Ok(Self { contract_nodes, trie }) + } +} + +impl Iterator for ContractAccountIterator<'_> { + type Item = Result; + + fn next(&mut self) -> Option { + while let Some(item) = self.contract_nodes.pop_front() { + // only look at nodes with a value, ignoring intermediate nodes + // without values + if let TrieTraversalItem { hash, key: Some(trie_key) } = item { + let contract = ContractAccount::from_contract_trie_node(&trie_key, hash, self.trie); + return Some(contract); + } + } + None + } +} + +#[cfg(test)] +mod tests { + use super::ContractAccount; + use near_primitives::trie_key::TrieKey; + use near_store::test_utils::{create_tries, test_populate_trie}; + use near_store::{ShardUId, Trie}; + + #[test] + fn test_three_contracts() { + let tries = create_tries(); + let initial = vec![ + contract_tuple("caroline.near", 3), + contract_tuple("alice.near", 1), + contract_tuple("alice.nearx", 2), + // data right before contracts in trie order + account_tuple("xeno.near", 1), + // data right after contracts in trie order + 
access_key_tuple("alan.near", 1), + ]; + let root = test_populate_trie(&tries, &Trie::EMPTY_ROOT, ShardUId::single_shard(), initial); + let trie = tries.get_trie_for_shard(ShardUId::single_shard(), root); + + let contract_accounts: Vec<_> = + ContractAccount::in_trie(&trie).expect("failed creating iterator").collect(); + assert_eq!(3, contract_accounts.len(), "wrong number of contracts returned by iterator"); + + // expect reordering toe lexicographic order + let contract1 = contract_accounts[0].as_ref().expect("returned error instead of contract"); + let contract2 = contract_accounts[1].as_ref().expect("returned error instead of contract"); + let contract3 = contract_accounts[2].as_ref().expect("returned error instead of contract"); + assert_eq!(contract1.account_id.as_str(), "alice.near"); + assert_eq!(contract2.account_id.as_str(), "alice.nearx"); + assert_eq!(contract3.account_id.as_str(), "caroline.near"); + assert_eq!(&*contract1.source_wasm, &[1u8, 1, 1]); + assert_eq!(&*contract2.source_wasm, &[2u8, 2, 2]); + assert_eq!(&*contract3.source_wasm, &[3u8, 3, 3]); + } + + /// Create a test contract key-value pair to insert in the test trie. + fn contract_tuple(account: &str, num: u8) -> (Vec, Option>) { + ( + TrieKey::ContractCode { account_id: account.parse().unwrap() }.to_vec(), + Some(vec![num, num, num]), + ) + } + + /// Create a test account key-value pair to insert in the test trie. + fn account_tuple(account: &str, num: u8) -> (Vec, Option>) { + (TrieKey::Account { account_id: account.parse().unwrap() }.to_vec(), Some(vec![num, num])) + } + + /// Create a test access key key-value pair to insert in the test trie. 
+ fn access_key_tuple(account: &str, num: u8) -> (Vec, Option>) { + ( + TrieKey::AccessKey { + account_id: account.parse().unwrap(), + public_key: near_crypto::PublicKey::empty(near_crypto::KeyType::ED25519), + } + .to_vec(), + Some(vec![num, num, num, num]), + ) + } +} diff --git a/tools/state-viewer/src/dump_state_parts.rs b/tools/state-viewer/src/dump_state_parts.rs index 1dbad67dc4c..36ffb021af6 100644 --- a/tools/state-viewer/src/dump_state_parts.rs +++ b/tools/state-viewer/src/dump_state_parts.rs @@ -1,6 +1,6 @@ use crate::epoch_info::iterate_and_filter; use clap::Subcommand; -use near_chain::{ChainStore, ChainStoreAccess, RuntimeAdapter}; +use near_chain::{ChainStore, ChainStoreAccess, RuntimeWithEpochManagerAdapter}; use near_epoch_manager::EpochManager; use near_primitives::epoch_manager::epoch_info::EpochInfo; use near_primitives::state_part::PartId; @@ -10,7 +10,7 @@ use near_primitives_core::hash::CryptoHash; use near_primitives_core::types::{BlockHeight, EpochHeight, ShardId}; use near_store::Store; use nearcore::{NearConfig, NightshadeRuntime}; -use std::path::Path; +use std::path::{Path, PathBuf}; use std::str::FromStr; use std::sync::Arc; use std::time::Instant; @@ -108,9 +108,10 @@ pub(crate) fn dump_state_parts( home_dir: &Path, near_config: NearConfig, store: Store, - output_dir: &Path, + output_dir: Option, + s3_bucket_and_region: Option<(String, String)>, ) { - let runtime_adapter: Arc = + let runtime_adapter: Arc = Arc::new(NightshadeRuntime::from_config(home_dir, store.clone(), &near_config)); let mut epoch_manager = EpochManager::new_from_genesis_config(store.clone(), &near_config.genesis.config) @@ -148,7 +149,19 @@ pub(crate) fn dump_state_parts( "Dumping state as seen at the beginning of the specified epoch.", ); - std::fs::create_dir_all(output_dir).unwrap(); + let part_storage: Box = if let Some(output_dir) = output_dir { + Box::new(FileSystemStorage::new(output_dir)) + } else { + let (s3_bucket, s3_region) = 
s3_bucket_and_region.unwrap(); + Box::new(S3Storage::new( + &s3_bucket, + &s3_region, + &near_config.client_config.chain_id, + epoch.epoch_height(), + shard_id, + )) + }; + for part_id in if let Some(part_id) = part_id { part_id..part_id + 1 } else { 0..num_parts } { let now = Instant::now(); assert!(part_id < num_parts, "part_id: {}, num_parts: {}", part_id, num_parts); @@ -160,14 +173,70 @@ pub(crate) fn dump_state_parts( PartId::new(part_id, num_parts), ) .unwrap(); - let filename = output_dir.join(format!("state_part_{:06}", part_id)); - let len = state_part.len(); + part_storage.store(&state_part, part_id, now); + } +} + +trait StatePartRecorder { + fn store(&self, state_part: &[u8], part_id: u64, timer: Instant); +} + +struct FileSystemStorage { + output_dir: PathBuf, +} + +impl FileSystemStorage { + fn new(output_dir: PathBuf) -> Self { + std::fs::create_dir_all(&output_dir).unwrap(); + Self { output_dir } + } +} + +impl StatePartRecorder for FileSystemStorage { + fn store(&self, state_part: &[u8], part_id: u64, timer: Instant) { + let filename = self.output_dir.join(format!("state_part_{:06}", part_id)); std::fs::write(&filename, state_part).unwrap(); - tracing::info!( - target: "dump-state-parts", - part_id, - part_length = len, - ?filename, - elapsed_sec = now.elapsed().as_secs_f64()); + let len = state_part.len(); + tracing::info!(target: "dump-state-parts", part_id, part_length = len, ?filename, elapsed_sec = timer.elapsed().as_secs_f64(), "Wrote a state part on disk"); + } +} + +struct S3Storage { + prefix: String, + bucket: s3::Bucket, +} + +impl S3Storage { + fn new( + s3_bucket: &str, + s3_region: &str, + chain_id: &str, + epoch_height: u64, + shard_id: u64, + ) -> Self { + let prefix = + format!("/chain_id={}/epoch_height={}/shard_id={}", chain_id, epoch_height, shard_id); + let bucket = s3::Bucket::new( + &s3_bucket, + s3_region.parse().unwrap(), + s3::creds::Credentials::default().unwrap(), + ) + .unwrap(); + + tracing::info!(target: 
"dump-state-parts", s3_bucket, s3_region, prefix, "Initialized an S3 bucket"); + Self { prefix, bucket } + } + + fn get_location(&self, part_id: u64) -> String { + format!("{}/state_part_{:06}", self.prefix, part_id) + } +} + +impl StatePartRecorder for S3Storage { + fn store(&self, state_part: &[u8], part_id: u64, timer: Instant) { + let location = self.get_location(part_id); + self.bucket.put_object_blocking(&location, &state_part).unwrap(); + let len = state_part.len(); + tracing::info!(target: "dump-state-parts", part_id, part_length = len, ?location, elapsed_sec = timer.elapsed().as_secs_f64(), "Wrote a state part to S3"); } } diff --git a/tools/state-viewer/src/epoch_info.rs b/tools/state-viewer/src/epoch_info.rs index 0f8bb02a5b9..376d0ad59d3 100644 --- a/tools/state-viewer/src/epoch_info.rs +++ b/tools/state-viewer/src/epoch_info.rs @@ -1,7 +1,7 @@ use borsh::BorshDeserialize; use clap::Subcommand; use core::ops::Range; -use near_chain::{ChainStore, ChainStoreAccess, RuntimeAdapter}; +use near_chain::{ChainStore, ChainStoreAccess, RuntimeWithEpochManagerAdapter}; use near_epoch_manager::EpochManager; use near_primitives::account::id::AccountId; use near_primitives::epoch_manager::epoch_info::EpochInfo; @@ -36,7 +36,7 @@ pub(crate) fn print_epoch_info( store: Store, chain_store: &mut ChainStore, epoch_manager: &mut EpochManager, - runtime_adapter: Arc, + runtime_adapter: Arc, ) { let epoch_ids = get_epoch_ids(epoch_selection, store, chain_store, epoch_manager); @@ -82,7 +82,7 @@ fn display_block_and_chunk_producers( epoch_info: &EpochInfo, chain_store: &mut ChainStore, epoch_manager: &mut EpochManager, - runtime_adapter: Arc, + runtime_adapter: Arc, ) { let block_height_range: Range = get_block_height_range(&epoch_info, &chain_store, epoch_manager); @@ -212,7 +212,7 @@ fn display_epoch_info( head_epoch_height: &EpochHeight, chain_store: &mut ChainStore, epoch_manager: &mut EpochManager, - runtime_adapter: Arc, + runtime_adapter: Arc, ) { println!("{:?}: 
{:#?}", epoch_id, epoch_info); if epoch_info.epoch_height() >= *head_epoch_height { @@ -237,7 +237,7 @@ fn display_validator_info( account_id: AccountId, chain_store: &mut ChainStore, epoch_manager: &mut EpochManager, - runtime_adapter: Arc, + runtime_adapter: Arc, ) { if let Some(kickout) = epoch_info.validator_kickout().get(&account_id) { println!("Validator {} kickout: {:#?}", account_id, kickout); diff --git a/tools/state-viewer/src/lib.rs b/tools/state-viewer/src/lib.rs index 555c65b8ff3..73fea0ab710 100644 --- a/tools/state-viewer/src/lib.rs +++ b/tools/state-viewer/src/lib.rs @@ -4,6 +4,7 @@ mod apply_chain_range; mod apply_chunk; pub mod cli; mod commands; +mod contract_accounts; mod dump_state_parts; mod epoch_info; mod rocksdb_stats; diff --git a/tools/state-viewer/src/state_dump.rs b/tools/state-viewer/src/state_dump.rs index d8bc7bd6406..d23fa869063 100644 --- a/tools/state-viewer/src/state_dump.rs +++ b/tools/state-viewer/src/state_dump.rs @@ -1,5 +1,5 @@ use borsh::BorshSerialize; -use near_chain::RuntimeAdapter; +use near_chain::types::RuntimeAdapter; use near_chain_configs::{Genesis, GenesisChangeConfig, GenesisConfig}; use near_crypto::PublicKey; use near_epoch_manager::EpochManagerAdapter;