From 80511bbcd1b4bfe21dea58a9cffeeaffeef8db65 Mon Sep 17 00:00:00 2001 From: Ryota Sakamoto Date: Sat, 18 May 2024 12:57:31 +0900 Subject: [PATCH 01/21] chore: add aws-sdk-rust for dependencies Signed-off-by: Ryota Sakamoto --- Cargo.lock | 553 ++++++++++++++++++++++++++++++++++++++++++++++++++++- Cargo.toml | 5 + 2 files changed, 557 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index ff4ba12..30795f4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -156,6 +156,352 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1fdabc7756949593fe60f30ec81974b613357de856987752631dea1e3394c80" +[[package]] +name = "aws-config" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40ddbfb5db93d62521f47b3f223da0884a2f02741ff54cb9cda192a0e73ba08b" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-sdk-sso", + "aws-sdk-ssooidc", + "aws-sdk-sts", + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-json", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-types", + "bytes", + "fastrand", + "hex", + "http 0.2.12", + "hyper 0.14.28", + "ring", + "time", + "tokio", + "tracing", + "url", + "zeroize", +] + +[[package]] +name = "aws-credential-types" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e16838e6c9e12125face1c1eff1343c75e3ff540de98ff7ebd61874a89bcfeb9" +dependencies = [ + "aws-smithy-async", + "aws-smithy-runtime-api", + "aws-smithy-types", + "zeroize", +] + +[[package]] +name = "aws-runtime" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75588e7ee5e8496eed939adac2035a6dbab9f7eb2acdd9ab2d31856dab6f3955" +dependencies = [ + "aws-credential-types", + "aws-sigv4", + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-types", + "bytes", + "fastrand", + "http 0.2.12", + "http-body 0.4.6", + "percent-encoding", + "pin-project-lite", + "tracing", + "uuid", +] + +[[package]] +name = "aws-sdk-dynamodb" +version = "1.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1da0290e57949a362d3f106285bb539e8a282e6c1b0053f6e02b3fc14f2d730" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-json", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-types", + "bytes", + "fastrand", + "http 0.2.12", + "once_cell", + "regex-lite", + "tracing", +] + +[[package]] +name = "aws-sdk-ec2" +version = "1.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c68f8f8eae987440aac23ea70cf69c811d998f1ebce70276da754eb776451433" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-json", + "aws-smithy-query", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-smithy-xml", + "aws-types", + "fastrand", + "http 0.2.12", + "once_cell", + "regex-lite", + "tracing", +] + +[[package]] +name = "aws-sdk-sso" +version = "1.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fef2d9ca2b43051224ed326ed9960a85e277b7d554a2cd0397e57c0553d86e64" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-json", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + 
"aws-types", + "bytes", + "http 0.2.12", + "once_cell", + "regex-lite", + "tracing", +] + +[[package]] +name = "aws-sdk-ssooidc" +version = "1.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c869d1f5c4ee7437b79c3c1664ddbf7a60231e893960cf82b2b299a5ccf2cc5d" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-json", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-types", + "bytes", + "http 0.2.12", + "once_cell", + "regex-lite", + "tracing", +] + +[[package]] +name = "aws-sdk-sts" +version = "1.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e2b4a632a59e4fab7abf1db0d94a3136ad7871aba46bebd1fdb95c7054afcdb" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-json", + "aws-smithy-query", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-smithy-xml", + "aws-types", + "http 0.2.12", + "once_cell", + "regex-lite", + "tracing", +] + +[[package]] +name = "aws-sigv4" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58b56f1cbe6fd4d0c2573df72868f20ab1c125ca9c9dbce17927a463433a2e57" +dependencies = [ + "aws-credential-types", + "aws-smithy-http", + "aws-smithy-runtime-api", + "aws-smithy-types", + "bytes", + "form_urlencoded", + "hex", + "hmac 0.12.1", + "http 0.2.12", + "http 1.1.0", + "once_cell", + "percent-encoding", + "sha2 0.10.8", + "time", + "tracing", +] + +[[package]] +name = "aws-smithy-async" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62220bc6e97f946ddd51b5f1361f78996e704677afc518a4ff66b7a72ea1378c" +dependencies = [ + "futures-util", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "aws-smithy-http" +version = "0.60.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a7de001a1b9a25601016d8057ea16e31a45fdca3751304c8edf4ad72e706c08" +dependencies = [ + "aws-smithy-runtime-api", + "aws-smithy-types", + "bytes", + "bytes-utils", + "futures-core", + "http 0.2.12", + "http-body 0.4.6", + "once_cell", + "percent-encoding", + "pin-project-lite", + "pin-utils", + "tracing", +] + +[[package]] +name = "aws-smithy-json" +version = "0.60.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4683df9469ef09468dad3473d129960119a0d3593617542b7d52086c8486f2d6" +dependencies = [ + "aws-smithy-types", +] + +[[package]] +name = "aws-smithy-query" +version = "0.60.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2fbd61ceb3fe8a1cb7352e42689cec5335833cd9f94103a61e98f9bb61c64bb" +dependencies = [ + "aws-smithy-types", + "urlencoding", +] + +[[package]] +name = "aws-smithy-runtime" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9ac79e9f3a4d576f3cd4a470a0275b138d9e7b11b1cd514a6858ae0a79dd5bb" +dependencies = [ + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-runtime-api", + "aws-smithy-types", + "bytes", + "fastrand", + "h2 0.3.26", + "http 0.2.12", + "http-body 0.4.6", + "http-body 1.0.0", + "hyper 0.14.28", + "hyper-rustls", + "once_cell", + "pin-project-lite", + "pin-utils", + "rustls", + "tokio", + "tracing", +] + +[[package]] +name = "aws-smithy-runtime-api" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"04ec42c2f5c0e7796a2848dde4d9f3bf8ce12ccbb3d5aa40c52fa0cdd61a1c47" +dependencies = [ + "aws-smithy-async", + "aws-smithy-types", + "bytes", + "http 0.2.12", + "http 1.1.0", + "pin-project-lite", + "tokio", + "tracing", + "zeroize", +] + +[[package]] +name = "aws-smithy-types" +version = "1.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf98d97bba6ddaba180f1b1147e202d8fe04940403a95a3f826c790f931bbd1" +dependencies = [ + "base64-simd", + "bytes", + "bytes-utils", + "futures-core", + "http 0.2.12", + "http 1.1.0", + "http-body 0.4.6", + "http-body 1.0.0", + "http-body-util", + "itoa", + "num-integer", + "pin-project-lite", + "pin-utils", + "ryu", + "serde", + "time", + "tokio", + "tokio-util", +] + +[[package]] +name = "aws-smithy-xml" +version = "0.60.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d123fbc2a4adc3c301652ba8e149bf4bc1d1725affb9784eb20c953ace06bf55" +dependencies = [ + "xmlparser", +] + +[[package]] +name = "aws-types" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a807d90cd50a969b3d95e4e7ad1491fcae13c6e83948d8728363ecc09d66343a" +dependencies = [ + "aws-credential-types", + "aws-smithy-async", + "aws-smithy-runtime-api", + "aws-smithy-types", + "http 0.2.12", + "rustc_version", + "tracing", +] + [[package]] name = "backon" version = "0.4.4" @@ -189,12 +535,28 @@ version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" +[[package]] +name = "base64" +version = "0.21.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" + [[package]] name = "base64" version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9475866fec1451be56a3c2400fd081ff546538961565ccb5b7142cbd22bc7a51" +[[package]] +name = "base64-simd" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "339abbe78e73178762e23bea9dfd08e697eb3f3301cd4be981c0f78ba5859195" +dependencies = [ + "outref", + "vsimd", +] + [[package]] name = "bitflags" version = "1.3.2" @@ -275,6 +637,16 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" +[[package]] +name = "bytes-utils" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7dafe3a8757b027e2be6e4e5601ed563c55989fcf1546e933c66c8eb3a058d35" +dependencies = [ + "bytes", + "either", +] + [[package]] name = "bzip2" version = "0.4.4" @@ -639,6 +1011,11 @@ name = "dynein" version = "0.2.1" dependencies = [ "assert_cmd", + "aws-config", + "aws-sdk-dynamodb", + "aws-sdk-ec2", + "aws-smithy-runtime-api", + "aws-types", "backon", "base64 0.22.0", "brotli", @@ -1134,6 +1511,22 @@ dependencies = [ "want", ] +[[package]] +name = "hyper-rustls" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" +dependencies = [ + "futures-util", + "http 0.2.12", + "hyper 0.14.28", + "log", + "rustls", + "rustls-native-certs", + "tokio", + "tokio-rustls", +] + [[package]] name = "hyper-tls" version = "0.5.0" @@ -1411,6 +1804,15 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum 
= "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +dependencies = [ + "num-traits", +] + [[package]] name = "num-traits" version = "0.2.18" @@ -1511,6 +1913,12 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "outref" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4030760ffd992bef45b0ae3f10ce1aba99e33464c90d14dd7c039884963ddc7a" + [[package]] name = "parking_lot" version = "0.12.2" @@ -1795,6 +2203,12 @@ dependencies = [ "regex-syntax", ] +[[package]] +name = "regex-lite" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30b661b2f27137bdbc16f00eda72866a92bb28af1753ffbd56744fb6e2e9cd8e" + [[package]] name = "regex-syntax" version = "0.8.3" @@ -1827,7 +2241,7 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "rustls-pemfile", + "rustls-pemfile 2.1.2", "serde", "serde_json", "serde_urlencoded", @@ -1843,6 +2257,21 @@ dependencies = [ "winreg", ] +[[package]] +name = "ring" +version = "0.17.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" +dependencies = [ + "cc", + "cfg-if", + "getrandom", + "libc", + "spin", + "untrusted", + "windows-sys 0.52.0", +] + [[package]] name = "rusoto_core" version = "0.48.0" @@ -1968,6 +2397,39 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "rustls" +version = "0.21.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" +dependencies = [ + "log", + "ring", + "rustls-webpki", + "sct", +] + +[[package]] +name = "rustls-native-certs" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" +dependencies = [ + "openssl-probe", + "rustls-pemfile 1.0.4", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-pemfile" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" +dependencies = [ + "base64 0.21.7", +] + [[package]] name = "rustls-pemfile" version = "2.1.2" @@ -1984,6 +2446,16 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "beb461507cee2c2ff151784c52762cf4d9ff6a61f3e80968600ed24fa837fa54" +[[package]] +name = "rustls-webpki" +version = "0.101.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "ryu" version = "1.0.17" @@ -2014,6 +2486,16 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" +[[package]] +name = "sct" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "security-framework" version = "2.10.0" @@ -2232,6 +2714,12 @@ dependencies = [ 
"windows-sys 0.52.0", ] +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" + [[package]] name = "strsim" version = "0.11.1" @@ -2349,6 +2837,7 @@ dependencies = [ "powerfmt", "serde", "time-core", + "time-macros", ] [[package]] @@ -2357,6 +2846,16 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" +[[package]] +name = "time-macros" +version = "0.2.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" +dependencies = [ + "num-conv", + "time-core", +] + [[package]] name = "tinyvec" version = "1.6.0" @@ -2412,6 +2911,16 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-rustls" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" +dependencies = [ + "rustls", + "tokio", +] + [[package]] name = "tokio-util" version = "0.7.10" @@ -2484,9 +2993,21 @@ checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ "log", "pin-project-lite", + "tracing-attributes", "tracing-core", ] +[[package]] +name = "tracing-attributes" +version = "0.1.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.60", +] + [[package]] name = "tracing-core" version = "0.1.32" @@ -2569,6 +3090,12 @@ version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + [[package]] name = "url" version = "2.5.0" @@ -2580,12 +3107,24 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "urlencoding" +version = "2.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" + [[package]] name = "utf8parse" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" +[[package]] +name = "uuid" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" + [[package]] name = "vcpkg" version = "0.2.15" @@ -2598,6 +3137,12 @@ version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +[[package]] +name = "vsimd" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c3082ca00d5a5ef149bb8b555a72ae84c9c59f7250f013ac822ac2e49b19c64" + [[package]] name = "wait-timeout" version = "0.2.0" @@ -2912,6 +3457,12 @@ version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "791978798f0597cfc70478424c2b4fdc2b7a8024aaff78497ef00f24ef674193" +[[package]] +name = "xmlparser" +version = "0.13.6" +source 
= "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66fee0b777b0f5ac1c69bb06d361268faafa61cd4682ae064a171c16c433e9e4" + [[package]] name = "zeroize" version = "1.7.0" diff --git a/Cargo.toml b/Cargo.toml index 00b330d..2387d48 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -30,6 +30,11 @@ name = "dy" path = "src/main.rs" [dependencies] +aws-config = "1.4.0" +aws-sdk-dynamodb = "1.28.0" +aws-sdk-ec2 = "1.42.0" +aws-smithy-runtime-api = "1.6.0" +aws-types = "1.2.1" chrono = "0.4" clap = { version = "4.5.4", features = ["derive"] } dialoguer = "0.11.0" From f71ebaf3ded44d06f21123bf6d1315f9a5aeec26 Mon Sep 17 00:00:00 2001 From: Ryota Sakamoto Date: Sat, 18 May 2024 13:08:25 +0900 Subject: [PATCH 02/21] refactor: use sdk in list_tables_api Signed-off-by: Ryota Sakamoto --- src/app.rs | 14 ++++++++++++++ src/control.rs | 10 ++++++---- 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/src/app.rs b/src/app.rs index 69c9113..625f568 100644 --- a/src/app.rs +++ b/src/app.rs @@ -15,6 +15,8 @@ */ use ::serde::{Deserialize, Serialize}; +use aws_config::{meta::region::RegionProviderChain, BehaviorVersion, SdkConfig}; +use aws_types::region::Region as SdkRegion; use backon::ExponentialBuilder; use log::{debug, error, info}; use rusoto_dynamodb::{AttributeDefinition, KeySchemaElement, TableDescription}; @@ -325,6 +327,18 @@ impl Context { }) } + pub async fn effective_sdk_config(&self) -> SdkConfig { + let region = self.effective_region(); + let region_name = region.name(); + let sdk_region = SdkRegion::new(region_name.to_owned()); + + let provider = RegionProviderChain::first_try(sdk_region); + aws_config::defaults(BehaviorVersion::v2024_03_28()) + .region(provider) + .load() + .await + } + pub fn effective_region(&self) -> Region { // if region is overwritten by --region comamnd, use it. if let Some(ow_region) = &self.overwritten_region { diff --git a/src/control.rs b/src/control.rs index f7efaba..b49f46c 100644 --- a/src/control.rs +++ b/src/control.rs @@ -16,6 +16,7 @@ // This module interact with DynamoDB Control Plane APIs use ::serde::{Deserialize, Serialize}; +use aws_sdk_dynamodb::Client as DynamoDbSdkClient; use chrono::DateTime; use futures::future::join_all; use log::{debug, error}; @@ -23,7 +24,7 @@ use rusoto_dynamodb::{ AttributeDefinition, BackupSummary, BillingModeSummary, CreateBackupInput, CreateGlobalSecondaryIndexAction, CreateTableInput, DeleteTableInput, DescribeTableInput, DynamoDb, DynamoDbClient, GlobalSecondaryIndexDescription, GlobalSecondaryIndexUpdate, - KeySchemaElement, ListBackupsInput, ListTablesInput, LocalSecondaryIndexDescription, + KeySchemaElement, ListBackupsInput, LocalSecondaryIndexDescription, Projection, ProvisionedThroughput, ProvisionedThroughputDescription, RestoreTableFromBackupInput, StreamSpecification, TableDescription, UpdateTableInput, }; @@ -737,9 +738,10 @@ fn generate_essential_key_definitions( /// Basically called by list_tables function, which is called from `$ dy list`. /// To make ListTables API result reusable, separated API logic into this standalone function. 
async fn list_tables_api(cx: app::Context) -> Vec { - let ddb = DynamoDbClient::new(cx.effective_region()); - let req: ListTablesInput = Default::default(); - match ddb.list_tables(req).await { + let config = cx.effective_sdk_config().await; + let ddb = DynamoDbSdkClient::new(&config); + + match ddb.list_tables().send().await { Err(e) => { debug!("ListTables API call got an error -- {:#?}", e); error!("{}", e.to_string()); From 9636019e84694094545e146e221166c490826b19 Mon Sep 17 00:00:00 2001 From: Ryota Sakamoto Date: Sat, 18 May 2024 13:13:32 +0900 Subject: [PATCH 03/21] refactor: use sdk in list_tables_all_regions Signed-off-by: Ryota Sakamoto --- src/app.rs | 15 ++++++++++----- src/control.rs | 27 ++++++++++++++------------- 2 files changed, 24 insertions(+), 18 deletions(-) diff --git a/src/app.rs b/src/app.rs index 625f568..56acbbe 100644 --- a/src/app.rs +++ b/src/app.rs @@ -46,6 +46,7 @@ const CONFIG_DIR: &str = ".dynein"; const CONFIG_PATH_ENV_VAR_NAME: &str = "DYNEIN_CONFIG_DIR"; const CONFIG_FILE_NAME: &str = "config.yml"; const CACHE_FILE_NAME: &str = "cache.yml"; +const LOCAL_REGION: &str = "local"; pub enum DyneinFileType { ConfigFile, @@ -419,9 +420,8 @@ impl Context { found_table_schema.map(|schema| schema.to_owned()) } - pub fn with_region(mut self, ec2_region: &rusoto_ec2::Region) -> Self { - self.overwritten_region = - Some(Region::from_str(&ec2_region.to_owned().region_name.unwrap()).unwrap()); + pub fn with_region(mut self, ec2_region: &str) -> Self { + self.overwritten_region = Some(Region::from_str(ec2_region).unwrap()); self } @@ -434,6 +434,11 @@ impl Context { self.should_strict_for_query .unwrap_or_else(|| self.config.as_ref().map_or(false, |c| c.query.strict_mode)) } + + pub fn is_local(&self) -> bool { + let region = self.effective_region(); + region.name() == LOCAL_REGION + } } #[derive(Error, Debug)] @@ -496,7 +501,7 @@ Public functions pub fn region_from_str(s: Option, p: Option) -> Option { let port = p.unwrap_or(8000); match s.as_deref() { - Some("local") => Some(region_dynamodb_local(port)), + Some(LOCAL_REGION) => Some(region_dynamodb_local(port)), Some(x) => Region::from_str(x).ok(), // convert Result into Option None => None, } @@ -778,7 +783,7 @@ fn region_dynamodb_local(port: u32) -> Region { &endpoint_url ); Region::Custom { - name: "local".to_owned(), + name: LOCAL_REGION.to_owned(), endpoint: endpoint_url, } } diff --git a/src/control.rs b/src/control.rs index b49f46c..796659c 100644 --- a/src/control.rs +++ b/src/control.rs @@ -17,6 +17,7 @@ // This module interact with DynamoDB Control Plane APIs use ::serde::{Deserialize, Serialize}; use aws_sdk_dynamodb::Client as DynamoDbSdkClient; +use aws_sdk_ec2::Client as Ec2SdkClient; use chrono::DateTime; use futures::future::join_all; use log::{debug, error}; @@ -24,11 +25,10 @@ use rusoto_dynamodb::{ AttributeDefinition, BackupSummary, BillingModeSummary, CreateBackupInput, CreateGlobalSecondaryIndexAction, CreateTableInput, DeleteTableInput, DescribeTableInput, DynamoDb, DynamoDbClient, GlobalSecondaryIndexDescription, GlobalSecondaryIndexUpdate, - KeySchemaElement, ListBackupsInput, LocalSecondaryIndexDescription, - Projection, ProvisionedThroughput, ProvisionedThroughputDescription, - RestoreTableFromBackupInput, StreamSpecification, TableDescription, UpdateTableInput, + KeySchemaElement, ListBackupsInput, LocalSecondaryIndexDescription, Projection, + ProvisionedThroughput, ProvisionedThroughputDescription, RestoreTableFromBackupInput, + StreamSpecification, TableDescription, UpdateTableInput, }; 
-use rusoto_ec2::{DescribeRegionsRequest, Ec2, Ec2Client}; use rusoto_signature::Region; use std::{ io::{self, Error as IOError, Write}, @@ -99,13 +99,14 @@ Public functions ================================================= */ pub async fn list_tables_all_regions(cx: app::Context) { - let region = cx.effective_region(); - let ec2 = Ec2Client::new(match region.name() { - "local" => Region::UsEast1, - _ => region.clone(), - }); - let input: DescribeRegionsRequest = Default::default(); - match ec2.describe_regions(input).await { + // get all regions from us-east-1 regardless specified region + let config = cx + .clone() + .with_region("us-east-1") + .effective_sdk_config() + .await; + let ec2 = Ec2SdkClient::new(&config); + match ec2.describe_regions().send().await { Err(e) => { error!("{}", e.to_string()); std::process::exit(1); @@ -115,11 +116,11 @@ pub async fn list_tables_all_regions(cx: app::Context) { res.regions .expect("regions should exist") // Vec .iter() - .map(|r| list_tables(cx.clone().with_region(r))), + .map(|r| list_tables(cx.clone().with_region(r.region_name.as_ref().unwrap()))), ) .await; - if region.name() == "local" { + if cx.is_local() { list_tables(cx.clone()).await; } } From 4570dc4f49a2b5e7a8ac4fb49325c26314dec9a4 Mon Sep 17 00:00:00 2001 From: Ryota Sakamoto Date: Sat, 18 May 2024 13:15:55 +0900 Subject: [PATCH 04/21] refactor: use sdk in delete_table Signed-off-by: Ryota Sakamoto --- src/control.rs | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/src/control.rs b/src/control.rs index 796659c..9233c99 100644 --- a/src/control.rs +++ b/src/control.rs @@ -23,7 +23,7 @@ use futures::future::join_all; use log::{debug, error}; use rusoto_dynamodb::{ AttributeDefinition, BackupSummary, BillingModeSummary, CreateBackupInput, - CreateGlobalSecondaryIndexAction, CreateTableInput, DeleteTableInput, DescribeTableInput, + CreateGlobalSecondaryIndexAction, CreateTableInput, DescribeTableInput, DynamoDb, DynamoDbClient, GlobalSecondaryIndexDescription, GlobalSecondaryIndexUpdate, KeySchemaElement, ListBackupsInput, LocalSecondaryIndexDescription, Projection, ProvisionedThroughput, ProvisionedThroughputDescription, RestoreTableFromBackupInput, @@ -493,13 +493,10 @@ pub async fn delete_table(cx: app::Context, name: String, skip_confirmation: boo return; } - let ddb = DynamoDbClient::new(cx.effective_region()); - - // The only argument can be passed to DeleteTable operation is "table_name". 
- // https://rusoto.github.io/rusoto/rusoto_dynamodb/struct.DeleteTableInput.html - let req: DeleteTableInput = DeleteTableInput { table_name: name }; + let config = cx.effective_sdk_config().await; + let ddb = DynamoDbSdkClient::new(&config); - match ddb.delete_table(req).await { + match ddb.delete_table().table_name(name).send().await { Err(e) => { debug!("DeleteTable API call got an error -- {:#?}", e); error!("{}", e.to_string()); From 23d391d1cb009e966e29fb6b39aea20395ddcc91 Mon Sep 17 00:00:00 2001 From: Ryota Sakamoto Date: Sat, 18 May 2024 13:21:32 +0900 Subject: [PATCH 05/21] refactor: use sdk in list_backups_api Signed-off-by: Ryota Sakamoto --- src/control.rs | 48 ++++++++++++++++++++++++++++-------------------- 1 file changed, 28 insertions(+), 20 deletions(-) diff --git a/src/control.rs b/src/control.rs index 9233c99..06d2b5a 100644 --- a/src/control.rs +++ b/src/control.rs @@ -16,16 +16,19 @@ // This module interact with DynamoDB Control Plane APIs use ::serde::{Deserialize, Serialize}; -use aws_sdk_dynamodb::Client as DynamoDbSdkClient; +use aws_sdk_dynamodb::{ + types::{BackupStatus as SdkBackupStatus, BackupSummary as SdkBackupSummary}, + Client as DynamoDbSdkClient, +}; use aws_sdk_ec2::Client as Ec2SdkClient; use chrono::DateTime; use futures::future::join_all; use log::{debug, error}; use rusoto_dynamodb::{ - AttributeDefinition, BackupSummary, BillingModeSummary, CreateBackupInput, + AttributeDefinition, BillingModeSummary, CreateBackupInput, CreateGlobalSecondaryIndexAction, CreateTableInput, DescribeTableInput, DynamoDb, DynamoDbClient, GlobalSecondaryIndexDescription, GlobalSecondaryIndexUpdate, - KeySchemaElement, ListBackupsInput, LocalSecondaryIndexDescription, Projection, + KeySchemaElement, LocalSecondaryIndexDescription, Projection, ProvisionedThroughput, ProvisionedThroughputDescription, RestoreTableFromBackupInput, StreamSpecification, TableDescription, UpdateTableInput, }; @@ -574,11 +577,16 @@ pub async fn list_backups(cx: app::Context, all_tables: bool) -> Result<(), IOEr for backup in backups { let line = [ backup.table_name.expect("table name should exist"), - backup.backup_status.expect("status should exist"), + backup + .backup_status + .expect("status should exist") + .as_str() + .to_string(), epoch_to_rfc3339( backup .backup_creation_date_time - .expect("creation date should exist"), + .expect("creation date should exist") + .as_secs_f64(), ), backup.backup_name.expect("backup name should exist") + &format!( @@ -598,10 +606,12 @@ pub async fn list_backups(cx: app::Context, all_tables: bool) -> Result<(), IOEr /// Currently overwriting properties during rstore is not supported. 
pub async fn restore(cx: app::Context, backup_name: Option, restore_name: Option) { // let backups = list_backups_api(&cx, false).await; - let available_backups: Vec = list_backups_api(&cx, false) + let available_backups: Vec = list_backups_api(&cx, false) .await .into_iter() - .filter(|b: &BackupSummary| b.to_owned().backup_status.unwrap() == "AVAILABLE") + .filter(|b: &SdkBackupSummary| { + b.to_owned().backup_status == Some(SdkBackupStatus::Available) + }) .collect(); // let available_backups: Vec = backups.iter().filter(|b| b.backup_status.to_owned().unwrap() == "AVAILABLE").collect(); if available_backups.is_empty() { @@ -618,7 +628,7 @@ pub async fn restore(cx: app::Context, backup_name: Option, restore_name format!( "{} ({}, {} bytes)", b.to_owned().backup_name.unwrap(), - epoch_to_rfc3339(b.backup_creation_date_time.unwrap()), + epoch_to_rfc3339(b.backup_creation_date_time.unwrap().as_secs_f64()), b.backup_size_bytes.unwrap() ) }) @@ -751,18 +761,16 @@ async fn list_tables_api(cx: app::Context) -> Vec { } /// This function is a private function that simply calls ListBackups API and return results -async fn list_backups_api(cx: &app::Context, all_tables: bool) -> Vec { - let ddb = DynamoDbClient::new(cx.effective_region()); - let req: ListBackupsInput = ListBackupsInput { - table_name: if all_tables { - None - } else { - Some(cx.effective_table_name()) - }, - ..Default::default() - }; +async fn list_backups_api(cx: &app::Context, all_tables: bool) -> Vec { + let config = cx.effective_sdk_config().await; + let ddb = DynamoDbSdkClient::new(&config); + + let mut req = ddb.list_backups(); + if !all_tables { + req = req.table_name(cx.effective_table_name()); + } - match ddb.list_backups(req).await { + match req.send().await { Err(e) => { debug!("ListBackups API call got an error -- {:#?}", e); // app::bye(1, &e.to_string()) // it doesn't meet return value requirement. 
@@ -777,7 +785,7 @@ async fn list_backups_api(cx: &app::Context, all_tables: bool) -> Vec, + available_backups: Vec, ) -> String { available_backups .into_iter() From 88ab10185fb0258565e7b8bf282294e83ff2ce4b Mon Sep 17 00:00:00 2001 From: Ryota Sakamoto Date: Sat, 18 May 2024 13:22:32 +0900 Subject: [PATCH 06/21] chore: remove rusoto_ec2 from dependencies Signed-off-by: Ryota Sakamoto --- Cargo.lock | 15 --------------- Cargo.toml | 1 - 2 files changed, 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 30795f4..c558c27 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1038,7 +1038,6 @@ dependencies = [ "reqwest", "rusoto_core", "rusoto_dynamodb", - "rusoto_ec2", "rusoto_signature", "serde", "serde_json", @@ -2329,20 +2328,6 @@ dependencies = [ "serde_json", ] -[[package]] -name = "rusoto_ec2" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "666c2f36b125e43229892f1a0d81ad28c0d0231d3b8b00ab0e8120975d6138ca" -dependencies = [ - "async-trait", - "bytes", - "futures", - "rusoto_core", - "serde_urlencoded", - "xml-rs", -] - [[package]] name = "rusoto_signature" version = "0.48.0" diff --git a/Cargo.toml b/Cargo.toml index 2387d48..dbc76bf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -47,7 +47,6 @@ reqwest = "0.12.4" rusoto_signature = "0.48.0" rusoto_core = "0.48.0" rusoto_dynamodb = "0.48.0" -rusoto_ec2 = "0.48.0" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" serde_yaml = "0.9.34" From b5737fa4fa5c464d74ad5763d59895d1d9960091 Mon Sep 17 00:00:00 2001 From: Ryota Sakamoto Date: Sat, 18 May 2024 13:27:02 +0900 Subject: [PATCH 07/21] refactor: use sdk in backup Signed-off-by: Ryota Sakamoto --- src/control.rs | 32 ++++++++++++++------------------ 1 file changed, 14 insertions(+), 18 deletions(-) diff --git a/src/control.rs b/src/control.rs index 06d2b5a..c70bfb0 100644 --- a/src/control.rs +++ b/src/control.rs @@ -25,10 +25,9 @@ use chrono::DateTime; use futures::future::join_all; use log::{debug, error}; use rusoto_dynamodb::{ - AttributeDefinition, BillingModeSummary, CreateBackupInput, - CreateGlobalSecondaryIndexAction, CreateTableInput, DescribeTableInput, - DynamoDb, DynamoDbClient, GlobalSecondaryIndexDescription, GlobalSecondaryIndexUpdate, - KeySchemaElement, LocalSecondaryIndexDescription, Projection, + AttributeDefinition, BillingModeSummary, CreateGlobalSecondaryIndexAction, CreateTableInput, + DescribeTableInput, DynamoDb, DynamoDbClient, GlobalSecondaryIndexDescription, + GlobalSecondaryIndexUpdate, KeySchemaElement, LocalSecondaryIndexDescription, Projection, ProvisionedThroughput, ProvisionedThroughputDescription, RestoreTableFromBackupInput, StreamSpecification, TableDescription, UpdateTableInput, }; @@ -524,27 +523,24 @@ pub async fn backup(cx: app::Context, all_tables: bool) { if all_tables { println!("NOTE: --all-tables option is ignored without --list option. Just trying to create a backup for the target table...") }; - debug!( - "Taking a backof of the table '{}'", - cx.effective_table_name() - ); + + let table_name = cx.effective_table_name(); + debug!("Taking a backof of the table '{}'", table_name); let epoch: u64 = time::SystemTime::now() .duration_since(time::SystemTime::UNIX_EPOCH) .expect("should be able to generate UNIX EPOCH") .as_secs(); - let ddb = DynamoDbClient::new(cx.effective_region()); - - // You need to pass "table_name" and "backup_name". There's no other fields. 
- // https://rusoto.github.io/rusoto/rusoto_dynamodb/struct.CreateBackupInput.html - let req: CreateBackupInput = CreateBackupInput { - table_name: cx.effective_table_name(), - backup_name: format!("{}--dynein-{}", cx.effective_table_name(), epoch), - }; + let config = cx.effective_sdk_config().await; + let ddb = DynamoDbSdkClient::new(&config); - debug!("this is the req: {:?}", req); + let req = ddb + .create_backup() + .table_name(&table_name) + .backup_name(format!("{}--dynein-{}", table_name, epoch)); + debug!("backup req: {:?}", req); - match ddb.create_backup(req).await { + match req.send().await { Err(e) => { debug!("CreateBackup API call got an error -- {:#?}", e); app::bye(1, &e.to_string()); From c2e3a8fcfa76ee5c0998b3459c573ad1c95f83b8 Mon Sep 17 00:00:00 2001 From: Ryota Sakamoto Date: Sat, 18 May 2024 13:37:45 +0900 Subject: [PATCH 08/21] refactor: move key from app.rs to key.rs Key is used by some functions so it should be divided from app.rs Signed-off-by: Ryota Sakamoto --- src/app.rs | 138 ++++++------------------------------------------- src/batch.rs | 9 ++-- src/control.rs | 9 ++-- src/data.rs | 20 +++---- src/key.rs | 128 +++++++++++++++++++++++++++++++++++++++++++++ src/main.rs | 1 + 6 files changed, 164 insertions(+), 141 deletions(-) create mode 100644 src/key.rs diff --git a/src/app.rs b/src/app.rs index 56acbbe..c9d5d53 100644 --- a/src/app.rs +++ b/src/app.rs @@ -19,7 +19,7 @@ use aws_config::{meta::region::RegionProviderChain, BehaviorVersion, SdkConfig}; use aws_types::region::Region as SdkRegion; use backon::ExponentialBuilder; use log::{debug, error, info}; -use rusoto_dynamodb::{AttributeDefinition, KeySchemaElement, TableDescription}; +use rusoto_dynamodb::{AttributeDefinition, TableDescription}; use rusoto_signature::Region; use serde_yaml::Error as SerdeYAMLError; use std::convert::{TryFrom, TryInto}; @@ -27,7 +27,7 @@ use std::time::Duration; use std::{ collections::HashMap, env, error, - fmt::{self, Display, Error as FmtError, Formatter}, + fmt::{self, Formatter}, fs, io::Error as IOError, path, @@ -37,6 +37,7 @@ use tempfile::NamedTempFile; use thiserror::Error; use super::control; +use super::key; /* ================================================= struct / enum / const @@ -57,8 +58,8 @@ pub enum DyneinFileType { pub struct TableSchema { pub region: String, pub name: String, - pub pk: Key, - pub sk: Option, + pub pk: key::Key, + pub sk: Option, pub indexes: Option>, pub mode: control::Mode, } @@ -69,86 +70,8 @@ pub struct IndexSchema { /// Type of index. i.e. GSI (Global Secondary Index) or LSI (Local Secondary Index). /// Use 'kind' as 'type' is a keyword in Rust. pub kind: IndexType, - pub pk: Key, - pub sk: Option, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct Key { - pub name: String, - /// Data type of the primary key. i.e. "S" (String), "N" (Number), or "B" (Binary). - /// Use 'kind' as 'type' is a keyword in Rust. - pub kind: KeyType, -} - -impl Key { - /// return String with " ()", e.g. "myPk (S)". Used in desc command outputs. - pub fn display(&self) -> String { - format!("{} ({})", self.name, self.kind) - } -} - -/// Restrict acceptable DynamoDB data types for primary keys. -/// enum witn methods/FromStr ref: https://docs.rs/rusoto_signature/0.42.0/src/rusoto_signature/region.rs.html#226-258 -#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)] -pub enum KeyType { - S, - N, - B, -} - -/// implement Display for KeyType to simply print a single letter "S", "N", or "B". 
-impl Display for KeyType { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "{}", - match self { - KeyType::S => "S", - KeyType::N => "N", - KeyType::B => "B", - } - ) - } -} - -#[derive(Debug, PartialEq)] -pub struct ParseKeyTypeError { - message: String, -} - -impl std::error::Error for ParseKeyTypeError { - fn description(&self) -> &str { - &self.message - } -} - -impl Display for ParseKeyTypeError { - fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), FmtError> { - write!(f, "{}", self.message) - } -} - -impl ParseKeyTypeError { - /// Parses a region given as a string literal into a type `KeyType' - pub fn new(input: &str) -> Self { - Self { - message: format!("Not a valid DynamoDB primary key type: {}", input), - } - } -} - -impl FromStr for KeyType { - type Err = ParseKeyTypeError; - - fn from_str(s: &str) -> Result { - match s { - "S" => Ok(Self::S), - "N" => Ok(Self::N), - "B" => Ok(Self::B), - x => Err(ParseKeyTypeError::new(x)), - } - } + pub pk: key::Key, + pub sk: Option, } #[derive(Serialize, Deserialize, Debug, Clone)] @@ -632,8 +555,8 @@ pub fn insert_to_table_cache( TableSchema { region: String::from(region.name()), name: table_name, - pk: typed_key("HASH", &desc).expect("pk should exist"), - sk: typed_key("RANGE", &desc), + pk: key::typed_key("HASH", &desc).expect("pk should exist"), + sk: key::typed_key("RANGE", &desc), indexes: index_schemas(&desc), mode: control::extract_mode(&desc.billing_mode_summary), }, @@ -658,37 +581,6 @@ pub fn remove_dynein_files() -> Result<(), DyneinConfigError> { Ok(()) } -/// returns Option of a tuple (attribute_name, attribute_type (S/N/B)). -/// Used when you want to know "what is the Partition Key name and its data type of this table". -pub fn typed_key(pk_or_sk: &str, desc: &TableDescription) -> Option { - // extracting key schema of "base table" here - let ks = desc.clone().key_schema.unwrap(); - typed_key_for_schema(pk_or_sk, &ks, &desc.clone().attribute_definitions.unwrap()) -} - -/// Receives key data type (HASH or RANGE), KeySchemaElement(s), and AttributeDefinition(s), -/// In many cases it's called by typed_key, but when retrieving index schema, this method can be used directly so put it as public. -pub fn typed_key_for_schema( - pk_or_sk: &str, - ks: &[KeySchemaElement], - attrs: &[AttributeDefinition], -) -> Option { - // Fetch Partition Key ("HASH") or Sort Key ("RANGE") from given Key Schema. pk should always exists, but sk may not. - let target_key = ks.iter().find(|x| x.key_type == pk_or_sk); - target_key.map(|key| Key { - name: key.clone().attribute_name, - // kind should be one of S/N/B, Which can be retrieved from AttributeDefinition's attribute_type. - kind: KeyType::from_str( - &attrs - .iter() - .find(|at| at.attribute_name == key.attribute_name) - .expect("primary key should be in AttributeDefinition.") - .attribute_type, - ) - .unwrap(), - }) -} - // If you explicitly specify target table by `--table/-t` option, this function executes DescribeTable API to gather table schema info. // Otherwise, load table schema info from config file. 
// fn table_schema(region: &Region, config: &config::Config, table_overwritten: Option) -> TableSchema { @@ -706,8 +598,8 @@ pub async fn table_schema(cx: &Context) -> TableSchema { TableSchema { region: String::from(cx.effective_region().name()), name: desc.clone().table_name.unwrap(), - pk: typed_key("HASH", &desc).expect("pk should exist"), - sk: typed_key("RANGE", &desc), + pk: key::typed_key("HASH", &desc).expect("pk should exist"), + sk: key::typed_key("RANGE", &desc), indexes: index_schemas(&desc), mode: control::extract_mode(&desc.billing_mode_summary), } @@ -741,9 +633,9 @@ pub fn index_schemas(desc: &TableDescription) -> Option> { indexes.push(IndexSchema { name: gsi.index_name.unwrap(), kind: IndexType::Gsi, - pk: typed_key_for_schema("HASH", &gsi.key_schema.clone().unwrap(), attr_defs) + pk: key::typed_key_for_schema("HASH", &gsi.key_schema.clone().unwrap(), attr_defs) .expect("pk should exist"), - sk: typed_key_for_schema("RANGE", &gsi.key_schema.unwrap(), attr_defs), + sk: key::typed_key_for_schema("RANGE", &gsi.key_schema.unwrap(), attr_defs), }); } }; @@ -753,9 +645,9 @@ pub fn index_schemas(desc: &TableDescription) -> Option> { indexes.push(IndexSchema { name: lsi.index_name.unwrap(), kind: IndexType::Lsi, - pk: typed_key_for_schema("HASH", &lsi.key_schema.clone().unwrap(), attr_defs) + pk: key::typed_key_for_schema("HASH", &lsi.key_schema.clone().unwrap(), attr_defs) .expect("pk should exist"), - sk: typed_key_for_schema("RANGE", &lsi.key_schema.unwrap(), attr_defs), + sk: key::typed_key_for_schema("RANGE", &lsi.key_schema.unwrap(), attr_defs), }); } }; diff --git a/src/batch.rs b/src/batch.rs index fae04d7..f8440ff 100644 --- a/src/batch.rs +++ b/src/batch.rs @@ -29,6 +29,7 @@ use std::{collections::HashMap, error, fmt, fs, future::Future, io::Error as IOE use super::app; use super::data; +use super::key; /* ================================================= struct / enum / const @@ -646,25 +647,25 @@ fn validate_item_keys( fn validate_key_type( key_name: &str, - expected_key_type: &app::KeyType, + expected_key_type: &key::KeyType, attrs: &HashMap, ) -> Result<(), DyneinBatchError> { match expected_key_type { - app::KeyType::S => { + key::KeyType::S => { if attrs[key_name].s.is_none() { return Err(DyneinBatchError::InvalidInput( generate_type_mismatch_error_message(key_name, "String"), )); } } - app::KeyType::N => { + key::KeyType::N => { if attrs[key_name].n.is_none() { return Err(DyneinBatchError::InvalidInput( generate_type_mismatch_error_message(key_name, "Number"), )); } } - app::KeyType::B => { + key::KeyType::B => { if attrs[key_name].b.is_none() { return Err(DyneinBatchError::InvalidInput( generate_type_mismatch_error_message(key_name, "Binary"), diff --git a/src/control.rs b/src/control.rs index c70bfb0..5ab7b95 100644 --- a/src/control.rs +++ b/src/control.rs @@ -41,6 +41,7 @@ use dialoguer::{theme::ColorfulTheme, Confirm, Select}; use tabwriter::TabWriter; use super::app; +use super::key; /* ================================================= struct / enum / const @@ -244,10 +245,10 @@ pub fn print_table_description(region: Region, desc: TableDescription) { region: String::from(region.name()), status: String::from(&desc.clone().table_status.unwrap()), schema: PrintPrimaryKeys { - pk: app::typed_key("HASH", &desc) + pk: key::typed_key("HASH", &desc) .expect("pk should exist") .display(), - sk: app::typed_key("RANGE", &desc).map(|k| k.display()), + sk: key::typed_key("RANGE", &desc).map(|k| k.display()), }, mode: mode.clone(), @@ -867,10 +868,10 @@ fn 
extract_secondary_indexes( let idx = PrintSecondaryIndex { name: String::from(idx.retrieve_index_name().as_ref().unwrap()), schema: PrintPrimaryKeys { - pk: app::typed_key_for_schema("HASH", ks, attr_defs) + pk: key::typed_key_for_schema("HASH", ks, attr_defs) .expect("pk should exist") .display(), - sk: app::typed_key_for_schema("RANGE", ks, attr_defs).map(|k| k.display()), + sk: key::typed_key_for_schema("RANGE", ks, attr_defs).map(|k| k.display()), }, capacity: idx.extract_index_capacity(mode), }; diff --git a/src/data.rs b/src/data.rs index a860467..80ee2b6 100644 --- a/src/data.rs +++ b/src/data.rs @@ -23,7 +23,6 @@ use std::{ vec::Vec, }; -use crate::app::{Key, KeyType}; use crate::parser::{AttributeDefinition, AttributeType, DyneinParser, ParseError}; use log::{debug, error}; use rusoto_dynamodb::{ @@ -35,6 +34,7 @@ use tabwriter::TabWriter; // use bytes::Bytes; use super::app; +use super::key; /* ================================================= struct / enum / const @@ -784,8 +784,8 @@ fn strip_item( .collect() } -impl From for AttributeDefinition { - fn from(value: Key) -> Self { +impl From for AttributeDefinition { + fn from(value: key::Key) -> Self { AttributeDefinition::new(value.name, value.kind) } } @@ -800,7 +800,7 @@ fn generate_query_expressions( let expression: String = String::from("#DYNEIN_PKNAME = :DYNEIN_PKVAL"); let mut names = HashMap::::new(); let mut vals = HashMap::::new(); - let mut sort_key_of_target_table_or_index: Option = None; + let mut sort_key_of_target_table_or_index: Option = None; match index { None => @@ -881,12 +881,12 @@ fn generate_query_expressions( } } -impl From for AttributeType { - fn from(value: KeyType) -> Self { +impl From for AttributeType { + fn from(value: key::KeyType) -> Self { match value { - KeyType::S => AttributeType::S, - KeyType::N => AttributeType::N, - KeyType::B => AttributeType::B, + key::KeyType::S => AttributeType::S, + key::KeyType::N => AttributeType::N, + key::KeyType::B => AttributeType::B, } } } @@ -894,7 +894,7 @@ impl From for AttributeType { /// Using existing key condition expr (e.g. "myId <= :idVal") and supplementary mappings (expression_attribute_names, expression_attribute_values), /// this method returns GeneratedQueryParams struct. Note that it's called only when sort key expression (ske) exists. fn append_sort_key_expression( - sort_key: Option, + sort_key: Option, partition_key_expression: &str, sort_key_expression: &str, mut names: HashMap, diff --git a/src/key.rs b/src/key.rs new file mode 100644 index 0000000..8ebccaa --- /dev/null +++ b/src/key.rs @@ -0,0 +1,128 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use ::serde::{Deserialize, Serialize}; +use rusoto_dynamodb::{AttributeDefinition, KeySchemaElement, TableDescription}; +use std::str::FromStr; + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct Key { + pub name: String, + /// Data type of the primary key. i.e. 
"S" (String), "N" (Number), or "B" (Binary). + /// Use 'kind' as 'type' is a keyword in Rust. + pub kind: KeyType, +} + +impl Key { + /// return String with " ()", e.g. "myPk (S)". Used in desc command outputs. + pub fn display(&self) -> String { + format!("{} ({})", self.name, self.kind) + } +} + +/// Restrict acceptable DynamoDB data types for primary keys. +/// enum witn methods/FromStr ref: https://docs.rs/rusoto_signature/0.42.0/src/rusoto_signature/region.rs.html#226-258 +#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)] +pub enum KeyType { + S, + N, + B, +} + +/// implement Display for KeyType to simply print a single letter "S", "N", or "B". +impl std::fmt::Display for KeyType { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "{}", + match self { + KeyType::S => "S", + KeyType::N => "N", + KeyType::B => "B", + } + ) + } +} + +#[derive(Debug, PartialEq)] +pub struct ParseKeyTypeError { + message: String, +} + +impl std::error::Error for ParseKeyTypeError { + fn description(&self) -> &str { + &self.message + } +} + +impl std::fmt::Display for ParseKeyTypeError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { + write!(f, "{}", self.message) + } +} + +impl ParseKeyTypeError { + /// Parses a region given as a string literal into a type `KeyType' + pub fn new(input: &str) -> Self { + Self { + message: format!("Not a valid DynamoDB primary key type: {}", input), + } + } +} + +impl FromStr for KeyType { + type Err = ParseKeyTypeError; + + fn from_str(s: &str) -> Result { + match s { + "S" => Ok(Self::S), + "N" => Ok(Self::N), + "B" => Ok(Self::B), + x => Err(ParseKeyTypeError::new(x)), + } + } +} + +/// returns Option of a tuple (attribute_name, attribute_type (S/N/B)). +/// Used when you want to know "what is the Partition Key name and its data type of this table". +pub fn typed_key(pk_or_sk: &str, desc: &TableDescription) -> Option { + // extracting key schema of "base table" here + let ks = desc.clone().key_schema.unwrap(); + typed_key_for_schema(pk_or_sk, &ks, &desc.clone().attribute_definitions.unwrap()) +} + +/// Receives key data type (HASH or RANGE), KeySchemaElement(s), and AttributeDefinition(s), +/// In many cases it's called by typed_key, but when retrieving index schema, this method can be used directly so put it as public. +pub fn typed_key_for_schema( + pk_or_sk: &str, + ks: &[KeySchemaElement], + attrs: &[AttributeDefinition], +) -> Option { + // Fetch Partition Key ("HASH") or Sort Key ("RANGE") from given Key Schema. pk should always exists, but sk may not. + let target_key = ks.iter().find(|x| x.key_type == pk_or_sk); + target_key.map(|key| Key { + name: key.clone().attribute_name, + // kind should be one of S/N/B, Which can be retrieved from AttributeDefinition's attribute_type. 
+ kind: KeyType::from_str( + &attrs + .iter() + .find(|at| at.attribute_name == key.attribute_name) + .expect("primary key should be in AttributeDefinition.") + .attribute_type, + ) + .unwrap(), + }) +} diff --git a/src/main.rs b/src/main.rs index 48c99f9..ee7b854 100644 --- a/src/main.rs +++ b/src/main.rs @@ -32,6 +32,7 @@ mod bootstrap; mod cmd; mod control; mod data; +mod key; mod parser; mod shell; mod transfer; From ba99b53cb67003b7b679c087ca03f8c903737907 Mon Sep 17 00:00:00 2001 From: Ryota Sakamoto Date: Sat, 18 May 2024 13:49:01 +0900 Subject: [PATCH 09/21] refactor: move utility functions from control.rs to util.rs Signed-off-by: Ryota Sakamoto --- Cargo.lock | 2 +- src/app.rs | 7 +- src/control.rs | 288 +++++------------------------------------------- src/main.rs | 1 + src/transfer.rs | 6 +- src/util.rs | 273 +++++++++++++++++++++++++++++++++++++++++++++ 6 files changed, 310 insertions(+), 267 deletions(-) create mode 100644 src/util.rs diff --git a/Cargo.lock b/Cargo.lock index c558c27..8181af5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2990,7 +2990,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn", ] [[package]] diff --git a/src/app.rs b/src/app.rs index c9d5d53..798e5f7 100644 --- a/src/app.rs +++ b/src/app.rs @@ -38,6 +38,7 @@ use thiserror::Error; use super::control; use super::key; +use super::util; /* ================================================= struct / enum / const @@ -61,7 +62,7 @@ pub struct TableSchema { pub pk: key::Key, pub sk: Option, pub indexes: Option>, - pub mode: control::Mode, + pub mode: util::Mode, } #[derive(Serialize, Deserialize, Debug, Clone)] @@ -558,7 +559,7 @@ pub fn insert_to_table_cache( pk: key::typed_key("HASH", &desc).expect("pk should exist"), sk: key::typed_key("RANGE", &desc), indexes: index_schemas(&desc), - mode: control::extract_mode(&desc.billing_mode_summary), + mode: util::extract_mode(&desc.billing_mode_summary), }, ); cache.tables = Some(table_schema_hashmap); @@ -601,7 +602,7 @@ pub async fn table_schema(cx: &Context) -> TableSchema { pk: key::typed_key("HASH", &desc).expect("pk should exist"), sk: key::typed_key("RANGE", &desc), indexes: index_schemas(&desc), - mode: control::extract_mode(&desc.billing_mode_summary), + mode: util::extract_mode(&desc.billing_mode_summary), } } None => { diff --git a/src/control.rs b/src/control.rs index 5ab7b95..1e7ee84 100644 --- a/src/control.rs +++ b/src/control.rs @@ -15,21 +15,17 @@ */ // This module interact with DynamoDB Control Plane APIs -use ::serde::{Deserialize, Serialize}; use aws_sdk_dynamodb::{ types::{BackupStatus as SdkBackupStatus, BackupSummary as SdkBackupSummary}, Client as DynamoDbSdkClient, }; use aws_sdk_ec2::Client as Ec2SdkClient; -use chrono::DateTime; use futures::future::join_all; use log::{debug, error}; use rusoto_dynamodb::{ - AttributeDefinition, BillingModeSummary, CreateGlobalSecondaryIndexAction, CreateTableInput, - DescribeTableInput, DynamoDb, DynamoDbClient, GlobalSecondaryIndexDescription, - GlobalSecondaryIndexUpdate, KeySchemaElement, LocalSecondaryIndexDescription, Projection, - ProvisionedThroughput, ProvisionedThroughputDescription, RestoreTableFromBackupInput, - StreamSpecification, TableDescription, UpdateTableInput, + BillingModeSummary, CreateGlobalSecondaryIndexAction, CreateTableInput, DescribeTableInput, + DynamoDb, DynamoDbClient, GlobalSecondaryIndexUpdate, Projection, ProvisionedThroughput, + RestoreTableFromBackupInput, TableDescription, 
UpdateTableInput, }; use rusoto_signature::Region; use std::{ @@ -41,61 +37,7 @@ use dialoguer::{theme::ColorfulTheme, Confirm, Select}; use tabwriter::TabWriter; use super::app; -use super::key; - -/* ================================================= -struct / enum / const -================================================= */ - -// TableDescription doesn't implement Serialize -// https://docs.rs/rusoto_dynamodb/0.42.0/rusoto_dynamodb/struct.TableDescription.html -#[derive(Serialize, Deserialize, Debug)] -struct PrintDescribeTable { - name: String, - region: String, - status: String, - schema: PrintPrimaryKeys, - - mode: Mode, - capacity: Option, - - gsi: Option>, - lsi: Option>, - - stream: Option, - - count: i64, - size_bytes: i64, - created_at: String, -} - -const PROVISIONED_API_SPEC: &str = "PROVISIONED"; -const ONDEMAND_API_SPEC: &str = "PAY_PER_REQUEST"; - -#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] -pub enum Mode { - Provisioned, - OnDemand, -} - -#[derive(Serialize, Deserialize, Debug)] -struct PrintPrimaryKeys { - pk: String, - sk: Option, -} - -#[derive(Serialize, Deserialize, Debug)] -struct PrintCapacityUnits { - wcu: i64, - rcu: i64, -} - -#[derive(Serialize, Deserialize, Debug)] -struct PrintSecondaryIndex { - name: String, - schema: PrintPrimaryKeys, - capacity: Option, -} +use super::util; /* ================================================= Public functions @@ -173,7 +115,7 @@ pub async fn describe_all_tables(cx: app::Context) { } /// Executed when you call `$ dy desc (table)`. Retrieve TableDescription via describe_table_api function, -/// then print them in convenient way using print_table_description function (default/yaml). +/// then print them in convenient way using util::print_table_description function (default/yaml). pub async fn describe_table(cx: app::Context, target_table_to_desc: Option) { debug!("context: {:#?}", &cx); debug!("positional arg table name: {:?}", &target_table_to_desc); @@ -205,7 +147,7 @@ pub async fn describe_table(cx: app::Context, target_table_to_desc: Option print_table_description(new_context.effective_region(), desc), + None | Some("yaml") => util::print_table_description(new_context.effective_region(), desc), // Some("raw") => println!("{:#?}", desc), Some(_) => { println!("ERROR: unsupported output type."); @@ -234,37 +176,6 @@ pub async fn describe_table_api(region: &Region, table_name: String) -> TableDes } } -/// Receives region (just to show in one line for reference) and TableDescription, -/// print them in readable YAML format. NOTE: '~' representes 'null' or 'no value' in YAML syntax. 
-pub fn print_table_description(region: Region, desc: TableDescription) { - let attr_defs = desc.clone().attribute_definitions.unwrap(); - let mode = extract_mode(&desc.billing_mode_summary); - - let print_table: PrintDescribeTable = PrintDescribeTable { - name: String::from(&desc.clone().table_name.unwrap()), - region: String::from(region.name()), - status: String::from(&desc.clone().table_status.unwrap()), - schema: PrintPrimaryKeys { - pk: key::typed_key("HASH", &desc) - .expect("pk should exist") - .display(), - sk: key::typed_key("RANGE", &desc).map(|k| k.display()), - }, - - mode: mode.clone(), - capacity: extract_capacity(&mode, &desc.provisioned_throughput), - - gsi: extract_secondary_indexes(&mode, &attr_defs, desc.global_secondary_indexes), - lsi: extract_secondary_indexes(&mode, &attr_defs, desc.local_secondary_indexes), - stream: extract_stream(desc.latest_stream_arn, desc.stream_specification), - - size_bytes: desc.table_size_bytes.unwrap(), - count: desc.item_count.unwrap(), - created_at: epoch_to_rfc3339(desc.creation_date_time.unwrap()), - }; - println!("{}", serde_yaml::to_string(&print_table).unwrap()); -} - /// This function is designed to be called from dynein command, mapped in main.rs. /// Note that it simply ignores --table option if specified. Newly created table name should be given by the 1st argument "name". pub async fn create_table(cx: app::Context, name: String, given_keys: Vec) { @@ -274,7 +185,7 @@ pub async fn create_table(cx: app::Context, name: String, given_keys: Vec print_table_description(cx.effective_region(), desc), + Ok(desc) => util::print_table_description(cx.effective_region(), desc), Err(e) => { debug!("CreateTable API call got an error -- {:#?}", e); error!("{}", e.to_string()); @@ -293,12 +204,12 @@ pub async fn create_table_api( &name, &given_keys ); - let (key_schema, attribute_definitions) = generate_essential_key_definitions(&given_keys); + let (key_schema, attribute_definitions) = util::generate_essential_key_definitions(&given_keys); let ddb = DynamoDbClient::new(cx.effective_region()); let req: CreateTableInput = CreateTableInput { table_name: name, - billing_mode: Some(String::from(ONDEMAND_API_SPEC)), + billing_mode: Some(String::from(util::ONDEMAND_API_SPEC)), key_schema, // Vec attribute_definitions, // Vec ..Default::default() @@ -322,7 +233,7 @@ pub async fn create_index(cx: app::Context, index_name: String, given_keys: Vec< &cx.effective_table_name() ); - let (key_schema, attribute_definitions) = generate_essential_key_definitions(&given_keys); + let (key_schema, attribute_definitions) = util::generate_essential_key_definitions(&given_keys); let ddb = DynamoDbClient::new(cx.effective_region()); let create_gsi_action = CreateGlobalSecondaryIndexAction { @@ -354,7 +265,7 @@ pub async fn create_index(cx: app::Context, index_name: String, given_keys: Vec< } Ok(res) => { debug!("Returned result: {:#?}", res); - print_table_description(cx.effective_region(), res.table_description.unwrap()); + util::print_table_description(cx.effective_region(), res.table_description.unwrap()); } } } @@ -371,11 +282,11 @@ pub async fn update_table( describe_table_api(&cx.effective_region(), table_name_to_update.clone()).await; // Map given string into "Mode" enum. Note that in cmd.rs clap already limits acceptable values. 
- let switching_to_mode: Option = match mode_string { + let switching_to_mode: Option = match mode_string { None => None, Some(ms) => match ms.as_str() { - "provisioned" => Some(Mode::Provisioned), - "ondemand" => Some(Mode::OnDemand), + "provisioned" => Some(util::Mode::Provisioned), + "ondemand" => Some(util::Mode::OnDemand), _ => panic!("You shouldn't see this message as --mode can takes only 'provisioned' or 'ondemand'."), }, }; @@ -386,7 +297,7 @@ pub async fn update_table( None => { match extract_mode(&desc.clone().billing_mode_summary) { // When currently OnDemand mode and you're not going to change the it, set None for CU. - Mode::OnDemand => { + util::Mode::OnDemand => { if wcu.is_some() || rcu.is_some() { println!("Ignoring --rcu/--wcu options as the table mode is OnDemand."); }; @@ -394,7 +305,7 @@ pub async fn update_table( } // When currently Provisioned mode and you're not going to change the it, // pass given rcu/wcu, and use current values if missing. Provisioned table should have valid capacity units so unwrap() here. - Mode::Provisioned => Some(ProvisionedThroughput { + util::Mode::Provisioned => Some(ProvisionedThroughput { read_capacity_units: rcu.unwrap_or_else(|| { desc.clone() .provisioned_throughput @@ -415,14 +326,14 @@ pub async fn update_table( // When the user trying to switch mode. Some(target_mode) => match target_mode { // when switching Provisioned->OnDemand mode, ProvisionedThroughput can be None. - Mode::OnDemand => { + util::Mode::OnDemand => { if wcu.is_some() || rcu.is_some() { println!("Ignoring --rcu/--wcu options as --mode ondemand."); }; None } // when switching OnDemand->Provisioned mode, set given wcu/rcu, fill with "5" as a default if not given. - Mode::Provisioned => Some(ProvisionedThroughput { + util::Mode::Provisioned => Some(ProvisionedThroughput { read_capacity_units: rcu.unwrap_or(5), write_capacity_units: wcu.unwrap_or(5), }), @@ -442,7 +353,7 @@ pub async fn update_table( ) .await { - Ok(desc) => print_table_description(cx.effective_region(), desc), + Ok(desc) => util::print_table_description(cx.effective_region(), desc), Err(e) => { debug!("UpdateTable API call got an error -- {:#?}", e); error!("{}", e.to_string()); @@ -465,7 +376,7 @@ pub async fn update_table( async fn update_table_api( cx: app::Context, table_name_to_update: String, - switching_to_mode: Option, + switching_to_mode: Option, provisioned_throughput: Option, ) -> Result> { debug!("Trying to update the table '{}'.", &table_name_to_update); @@ -474,7 +385,7 @@ async fn update_table_api( let req: UpdateTableInput = UpdateTableInput { table_name: table_name_to_update, - billing_mode: switching_to_mode.map(mode_to_billing_mode_api_spec), + billing_mode: switching_to_mode.map(util::mode_to_billing_mode_api_spec), provisioned_throughput, // NOTE: In this function we set `global_secondary_index_updates` to None. GSI update is handled in different commands (e.g. 
dy admin create index xxx --keys) global_secondary_index_updates: None, /* intentional */ @@ -579,7 +490,7 @@ pub async fn list_backups(cx: app::Context, all_tables: bool) -> Result<(), IOEr .expect("status should exist") .as_str() .to_string(), - epoch_to_rfc3339( + util::epoch_to_rfc3339( backup .backup_creation_date_time .expect("creation date should exist") @@ -625,7 +536,7 @@ pub async fn restore(cx: app::Context, backup_name: Option, restore_name format!( "{} ({}, {} bytes)", b.to_owned().backup_name.unwrap(), - epoch_to_rfc3339(b.backup_creation_date_time.unwrap().as_secs_f64()), + util::epoch_to_rfc3339(b.backup_creation_date_time.unwrap().as_secs_f64()), b.backup_size_bytes.unwrap() ) }) @@ -673,21 +584,21 @@ pub async fn restore(cx: app::Context, backup_name: Option, restore_name debug!("Returned result: {:#?}", res); println!("Table restoration from: '{}' has been started", &backup_arn); let desc = res.table_description.unwrap(); - print_table_description(cx.effective_region(), desc); + util::print_table_description(cx.effective_region(), desc); } } } /// Map "BilingModeSummary" field in table description returned from DynamoDB API, /// into convenient mode name ("Provisioned" or "OnDemand") -pub fn extract_mode(bs: &Option) -> Mode { - let provisioned_mode = Mode::Provisioned; - let ondemand_mode = Mode::OnDemand; +pub fn extract_mode(bs: &Option) -> util::Mode { + let provisioned_mode = util::Mode::Provisioned; + let ondemand_mode = util::Mode::OnDemand; match bs { // if BillingModeSummary field doesn't exist, the table is Provisioned Mode. None => provisioned_mode, Some(x) => { - if x.clone().billing_mode.unwrap() == ONDEMAND_API_SPEC { + if x.clone().billing_mode.unwrap() == util::ONDEMAND_API_SPEC { ondemand_mode } else { provisioned_mode @@ -700,46 +611,6 @@ pub fn extract_mode(bs: &Option) -> Mode { Private functions ================================================= */ -/// Using Vec of String which is passed via command line, -/// generate KeySchemaElement(s) & AttributeDefinition(s), that are essential information to create DynamoDB tables or GSIs. -fn generate_essential_key_definitions( - given_keys: &[String], -) -> (Vec, Vec) { - let mut key_schema: Vec = vec![]; - let mut attribute_definitions: Vec = vec![]; - for (key_id, key_str) in given_keys.iter().enumerate() { - let key_and_type = key_str.split(',').collect::>(); - if key_and_type.len() >= 3 { - error!( - "Invalid format for --keys option: '{}'. Valid format is '--keys myPk,S mySk,N'", - &key_str - ); - std::process::exit(1); - } - - // assumes first given key is Partition key, and second given key is Sort key (if any). - key_schema.push(KeySchemaElement { - attribute_name: String::from(key_and_type[0]), - key_type: if key_id == 0 { - String::from("HASH") - } else { - String::from("RANGE") - }, - }); - - // If data type of key is omitted, dynein assumes it as String (S). - attribute_definitions.push(AttributeDefinition { - attribute_name: String::from(key_and_type[0]), - attribute_type: if key_and_type.len() == 2 { - key_and_type[1].to_uppercase() - } else { - String::from("S") - }, - }); - } - (key_schema, attribute_definitions) -} - /// Basically called by list_tables function, which is called from `$ dy list`. /// To make ListTables API result reusable, separated API logic into this standalone function. 
async fn list_tables_api(cx: app::Context) -> Vec { @@ -791,106 +662,3 @@ fn fetch_arn_from_backup_name( .backup_arn /* Option */ .unwrap() } - -fn epoch_to_rfc3339(epoch: f64) -> String { - let utc_datetime = DateTime::from_timestamp(epoch as i64, 0).unwrap(); - utc_datetime.to_rfc3339() -} - -/// Takes "Mode" enum and return exact string value required by DynamoDB API. -/// i.e. this function returns "PROVISIONED" or "PAY_PER_REQUEST". -fn mode_to_billing_mode_api_spec(mode: Mode) -> String { - match mode { - Mode::OnDemand => String::from(ONDEMAND_API_SPEC), - Mode::Provisioned => String::from(PROVISIONED_API_SPEC), - } -} - -fn extract_capacity( - mode: &Mode, - cap_desc: &Option, -) -> Option { - if mode == &Mode::OnDemand { - None - } else { - let desc = cap_desc.as_ref().unwrap(); - Some(PrintCapacityUnits { - wcu: desc.write_capacity_units.unwrap(), - rcu: desc.read_capacity_units.unwrap(), - }) - } -} - -trait IndexDesc { - fn retrieve_index_name(&self) -> &Option; - fn retrieve_key_schema(&self) -> &Option>; - fn extract_index_capacity(&self, m: &Mode) -> Option; -} - -impl IndexDesc for GlobalSecondaryIndexDescription { - fn retrieve_index_name(&self) -> &Option { - &self.index_name - } - fn retrieve_key_schema(&self) -> &Option> { - &self.key_schema - } - fn extract_index_capacity(&self, m: &Mode) -> Option { - if m == &Mode::OnDemand { - None - } else { - extract_capacity(m, &self.provisioned_throughput) - } - } -} - -impl IndexDesc for LocalSecondaryIndexDescription { - fn retrieve_index_name(&self) -> &Option { - &self.index_name - } - fn retrieve_key_schema(&self) -> &Option> { - &self.key_schema - } - fn extract_index_capacity(&self, _: &Mode) -> Option { - None // Unlike GSI, LSI doesn't have it's own capacity. - } -} - -// FYI: https://grammarist.com/usage/indexes-indices/ -fn extract_secondary_indexes( - mode: &Mode, - attr_defs: &[AttributeDefinition], - option_indexes: Option>, -) -> Option> { - if let Some(indexes) = option_indexes { - let mut xs = Vec::::new(); - for idx in &indexes { - let ks = &idx.retrieve_key_schema().as_ref().unwrap(); - let idx = PrintSecondaryIndex { - name: String::from(idx.retrieve_index_name().as_ref().unwrap()), - schema: PrintPrimaryKeys { - pk: key::typed_key_for_schema("HASH", ks, attr_defs) - .expect("pk should exist") - .display(), - sk: key::typed_key_for_schema("RANGE", ks, attr_defs).map(|k| k.display()), - }, - capacity: idx.extract_index_capacity(mode), - }; - xs.push(idx); - } - Some(xs) - } else { - None - } -} - -fn extract_stream(arn: Option, spec: Option) -> Option { - if arn.is_none() { - None - } else { - Some(format!( - "{} ({})", - arn.unwrap(), - spec.unwrap().stream_view_type.unwrap() - )) - } -} diff --git a/src/main.rs b/src/main.rs index ee7b854..714e1ee 100644 --- a/src/main.rs +++ b/src/main.rs @@ -36,6 +36,7 @@ mod key; mod parser; mod shell; mod transfer; +mod util; /* ================================================= helper functions diff --git a/src/transfer.rs b/src/transfer.rs index 0ae614e..3ea64fd 100644 --- a/src/transfer.rs +++ b/src/transfer.rs @@ -33,8 +33,8 @@ use thiserror::Error; use super::app; use super::batch; -use super::control; use super::data; +use super::util; #[derive(Error, Debug)] pub enum DyneinExportError { @@ -148,7 +148,7 @@ pub async fn export( let ts: app::TableSchema = app::table_schema(&cx).await; let format_str: Option<&str> = format.as_deref(); - if ts.mode == control::Mode::Provisioned { + if ts.mode == util::Mode::Provisioned { let msg = "WARN: For the best performance on 
import/export, dynein recommends OnDemand mode. However the target table is Provisioned mode now. Proceed anyway?"; if !Confirm::new().with_prompt(msg).interact()? { app::bye(0, "Operation has been cancelled."); @@ -294,7 +294,7 @@ pub async fn import( let format_str: Option<&str> = format.as_deref(); let ts: app::TableSchema = app::table_schema(&cx).await; - if ts.mode == control::Mode::Provisioned { + if ts.mode == util::Mode::Provisioned { let msg = "WARN: For the best performance on import/export, dynein recommends OnDemand mode. However the target table is Provisioned mode now. Proceed anyway?"; if !Confirm::new().with_prompt(msg).interact()? { println!("Operation has been cancelled."); diff --git a/src/util.rs b/src/util.rs new file mode 100644 index 0000000..76deffe --- /dev/null +++ b/src/util.rs @@ -0,0 +1,273 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use ::serde::{Deserialize, Serialize}; +use chrono::DateTime; +use log::error; +use rusoto_dynamodb::{ + AttributeDefinition, BillingModeSummary, GlobalSecondaryIndexDescription, KeySchemaElement, + LocalSecondaryIndexDescription, ProvisionedThroughputDescription, StreamSpecification, + TableDescription, +}; +use rusoto_signature::Region; + +use super::key; + +/* ================================================= +struct / enum / const +================================================= */ + +// TableDescription doesn't implement Serialize +// https://docs.rs/rusoto_dynamodb/0.42.0/rusoto_dynamodb/struct.TableDescription.html +#[derive(Serialize, Deserialize, Debug)] +struct PrintDescribeTable { + name: String, + region: String, + status: String, + schema: PrintPrimaryKeys, + + mode: Mode, + capacity: Option, + + gsi: Option>, + lsi: Option>, + + stream: Option, + + count: i64, + size_bytes: i64, + created_at: String, +} + +pub const PROVISIONED_API_SPEC: &str = "PROVISIONED"; +pub const ONDEMAND_API_SPEC: &str = "PAY_PER_REQUEST"; + +#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] +pub enum Mode { + Provisioned, + OnDemand, +} + +#[derive(Serialize, Deserialize, Debug)] +struct PrintPrimaryKeys { + pk: String, + sk: Option, +} + +#[derive(Serialize, Deserialize, Debug)] +struct PrintCapacityUnits { + wcu: i64, + rcu: i64, +} + +#[derive(Serialize, Deserialize, Debug)] +struct PrintSecondaryIndex { + name: String, + schema: PrintPrimaryKeys, + capacity: Option, +} + +/// Receives region (just to show in one line for reference) and TableDescription, +/// print them in readable YAML format. NOTE: '~' representes 'null' or 'no value' in YAML syntax. 
+pub fn print_table_description(region: Region, desc: TableDescription) { + let attr_defs = desc.clone().attribute_definitions.unwrap(); + let mode = extract_mode(&desc.billing_mode_summary); + + let print_table: PrintDescribeTable = PrintDescribeTable { + name: String::from(&desc.clone().table_name.unwrap()), + region: String::from(region.name()), + status: String::from(&desc.clone().table_status.unwrap()), + schema: PrintPrimaryKeys { + pk: key::typed_key("HASH", &desc) + .expect("pk should exist") + .display(), + sk: key::typed_key("RANGE", &desc).map(|k| k.display()), + }, + + mode: mode.clone(), + capacity: extract_capacity(&mode, &desc.provisioned_throughput), + + gsi: extract_secondary_indexes(&mode, &attr_defs, desc.global_secondary_indexes), + lsi: extract_secondary_indexes(&mode, &attr_defs, desc.local_secondary_indexes), + stream: extract_stream(desc.latest_stream_arn, desc.stream_specification), + + size_bytes: desc.table_size_bytes.unwrap(), + count: desc.item_count.unwrap(), + created_at: epoch_to_rfc3339(desc.creation_date_time.unwrap()), + }; + println!("{}", serde_yaml::to_string(&print_table).unwrap()); +} + +/// Using Vec of String which is passed via command line, +/// generate KeySchemaElement(s) & AttributeDefinition(s), that are essential information to create DynamoDB tables or GSIs. +pub fn generate_essential_key_definitions( + given_keys: &[String], +) -> (Vec, Vec) { + let mut key_schema: Vec = vec![]; + let mut attribute_definitions: Vec = vec![]; + for (key_id, key_str) in given_keys.iter().enumerate() { + let key_and_type = key_str.split(',').collect::>(); + if key_and_type.len() >= 3 { + error!( + "Invalid format for --keys option: '{}'. Valid format is '--keys myPk,S mySk,N'", + &key_str + ); + std::process::exit(1); + } + + // assumes first given key is Partition key, and second given key is Sort key (if any). + key_schema.push(KeySchemaElement { + attribute_name: String::from(key_and_type[0]), + key_type: if key_id == 0 { + String::from("HASH") + } else { + String::from("RANGE") + }, + }); + + // If data type of key is omitted, dynein assumes it as String (S). + attribute_definitions.push(AttributeDefinition { + attribute_name: String::from(key_and_type[0]), + attribute_type: if key_and_type.len() == 2 { + key_and_type[1].to_uppercase() + } else { + String::from("S") + }, + }); + } + (key_schema, attribute_definitions) +} + +/// Map "BilingModeSummary" field in table description returned from DynamoDB API, +/// into convenient mode name ("Provisioned" or "OnDemand") +pub fn extract_mode(bs: &Option) -> Mode { + let provisioned_mode = Mode::Provisioned; + let ondemand_mode = Mode::OnDemand; + match bs { + // if BillingModeSummary field doesn't exist, the table is Provisioned Mode. 
+ None => provisioned_mode, + Some(x) => { + if x.clone().billing_mode.unwrap() == ONDEMAND_API_SPEC { + ondemand_mode + } else { + provisioned_mode + } + } + } +} + +// FYI: https://grammarist.com/usage/indexes-indices/ +fn extract_secondary_indexes( + mode: &Mode, + attr_defs: &[AttributeDefinition], + option_indexes: Option>, +) -> Option> { + if let Some(indexes) = option_indexes { + let mut xs = Vec::::new(); + for idx in &indexes { + let ks = &idx.retrieve_key_schema().as_ref().unwrap(); + let idx = PrintSecondaryIndex { + name: String::from(idx.retrieve_index_name().as_ref().unwrap()), + schema: PrintPrimaryKeys { + pk: key::typed_key_for_schema("HASH", ks, attr_defs) + .expect("pk should exist") + .display(), + sk: key::typed_key_for_schema("RANGE", ks, attr_defs).map(|k| k.display()), + }, + capacity: idx.extract_index_capacity(mode), + }; + xs.push(idx); + } + Some(xs) + } else { + None + } +} + +fn extract_stream(arn: Option, spec: Option) -> Option { + if arn.is_none() { + None + } else { + Some(format!( + "{} ({})", + arn.unwrap(), + spec.unwrap().stream_view_type.unwrap() + )) + } +} + +pub fn epoch_to_rfc3339(epoch: f64) -> String { + let utc_datetime = DateTime::from_timestamp(epoch as i64, 0).unwrap(); + utc_datetime.to_rfc3339() +} + +/// Takes "Mode" enum and return exact string value required by DynamoDB API. +/// i.e. this function returns "PROVISIONED" or "PAY_PER_REQUEST". +pub fn mode_to_billing_mode_api_spec(mode: Mode) -> String { + match mode { + Mode::OnDemand => String::from(ONDEMAND_API_SPEC), + Mode::Provisioned => String::from(PROVISIONED_API_SPEC), + } +} + +fn extract_capacity( + mode: &Mode, + cap_desc: &Option, +) -> Option { + if mode == &Mode::OnDemand { + None + } else { + let desc = cap_desc.as_ref().unwrap(); + Some(PrintCapacityUnits { + wcu: desc.write_capacity_units.unwrap(), + rcu: desc.read_capacity_units.unwrap(), + }) + } +} + +trait IndexDesc { + fn retrieve_index_name(&self) -> &Option; + fn retrieve_key_schema(&self) -> &Option>; + fn extract_index_capacity(&self, m: &Mode) -> Option; +} + +impl IndexDesc for GlobalSecondaryIndexDescription { + fn retrieve_index_name(&self) -> &Option { + &self.index_name + } + fn retrieve_key_schema(&self) -> &Option> { + &self.key_schema + } + fn extract_index_capacity(&self, m: &Mode) -> Option { + if m == &Mode::OnDemand { + None + } else { + extract_capacity(m, &self.provisioned_throughput) + } + } +} + +impl IndexDesc for LocalSecondaryIndexDescription { + fn retrieve_index_name(&self) -> &Option { + &self.index_name + } + fn retrieve_key_schema(&self) -> &Option> { + &self.key_schema + } + fn extract_index_capacity(&self, _: &Mode) -> Option { + None // Unlike GSI, LSI doesn't have it's own capacity. 
+ } +} From 543c935e4fc6bfcb44642d4900848c0929a6b1f7 Mon Sep 17 00:00:00 2001 From: Ryota Sakamoto Date: Sat, 18 May 2024 14:22:24 +0900 Subject: [PATCH 10/21] feat: remove rusoto_dynamodb Signed-off-by: Ryota Sakamoto --- Cargo.lock | 62 +------- Cargo.toml | 2 - src/app.rs | 10 +- src/batch.rs | 198 +++++++++++-------------- src/bootstrap.rs | 33 +++-- src/cmd.rs | 4 +- src/control.rs | 225 ++++++++++++++-------------- src/data.rs | 379 ++++++++++++++++++----------------------------- src/key.rs | 6 +- src/parser.rs | 72 +++------ src/transfer.rs | 7 +- src/util.rs | 73 ++++----- 12 files changed, 442 insertions(+), 629 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8181af5..038a5e4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1036,8 +1036,6 @@ dependencies = [ "rand", "regex", "reqwest", - "rusoto_core", - "rusoto_dynamodb", "rusoto_signature", "serde", "serde_json", @@ -1526,19 +1524,6 @@ dependencies = [ "tokio-rustls", ] -[[package]] -name = "hyper-tls" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" -dependencies = [ - "bytes", - "hyper 0.14.28", - "native-tls", - "tokio", - "tokio-native-tls", -] - [[package]] name = "hyper-tls" version = "0.6.0" @@ -2230,7 +2215,7 @@ dependencies = [ "http-body 1.0.0", "http-body-util", "hyper 1.3.1", - "hyper-tls 0.6.0", + "hyper-tls", "hyper-util", "ipnet", "js-sys", @@ -2271,31 +2256,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "rusoto_core" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1db30db44ea73551326269adcf7a2169428a054f14faf9e1768f2163494f2fa2" -dependencies = [ - "async-trait", - "base64 0.13.1", - "bytes", - "crc32fast", - "futures", - "http 0.2.12", - "hyper 0.14.28", - "hyper-tls 0.5.0", - "lazy_static", - "log", - "rusoto_credential", - "rusoto_signature", - "rustc_version", - "serde", - "serde_json", - "tokio", - "xml-rs", -] - [[package]] name = "rusoto_credential" version = "0.48.0" @@ -2314,20 +2274,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "rusoto_dynamodb" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63ad8e126a46122a171587bbee590c5a51f311b65a5e83bb78a1f2adee720762" -dependencies = [ - "async-trait", - "bytes", - "futures", - "rusoto_core", - "serde", - "serde_json", -] - [[package]] name = "rusoto_signature" version = "0.48.0" @@ -3436,12 +3382,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "xml-rs" -version = "0.8.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "791978798f0597cfc70478424c2b4fdc2b7a8024aaff78497ef00f24ef674193" - [[package]] name = "xmlparser" version = "0.13.6" diff --git a/Cargo.toml b/Cargo.toml index dbc76bf..c6d8a2a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -45,8 +45,6 @@ log = "0.4.21" regex = "1.10.4" reqwest = "0.12.4" rusoto_signature = "0.48.0" -rusoto_core = "0.48.0" -rusoto_dynamodb = "0.48.0" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" serde_yaml = "0.9.34" diff --git a/src/app.rs b/src/app.rs index 798e5f7..165faea 100644 --- a/src/app.rs +++ b/src/app.rs @@ -16,10 +16,10 @@ use ::serde::{Deserialize, Serialize}; use aws_config::{meta::region::RegionProviderChain, BehaviorVersion, SdkConfig}; +use aws_sdk_dynamodb::types::{AttributeDefinition, TableDescription}; use aws_types::region::Region as SdkRegion; use backon::ExponentialBuilder; use 
log::{debug, error, info}; -use rusoto_dynamodb::{AttributeDefinition, TableDescription}; use rusoto_signature::Region; use serde_yaml::Error as SerdeYAMLError; use std::convert::{TryFrom, TryInto}; @@ -255,6 +255,11 @@ impl Context { pub async fn effective_sdk_config(&self) -> SdkConfig { let region = self.effective_region(); let region_name = region.name(); + + self.effective_sdk_config_with_region(region_name).await + } + + pub async fn effective_sdk_config_with_region(&self, region_name: &str) -> SdkConfig { let sdk_region = SdkRegion::new(region_name.to_owned()); let provider = RegionProviderChain::first_try(sdk_region); @@ -511,7 +516,7 @@ pub async fn use_table( debug!("describing the table: {}", tbl); let region = cx.effective_region(); let tbl = tbl.clone(); - let desc: TableDescription = control::describe_table_api(®ion, tbl.clone()).await; + let desc: TableDescription = control::describe_table_api(cx, ®ion, tbl.clone()).await; save_using_target(cx, desc)?; println!("Now you're using the table '{}' ({}).", tbl, ®ion.name()); }, @@ -591,6 +596,7 @@ pub async fn table_schema(cx: &Context) -> TableSchema { Some(table_name) => { // TODO: reduce # of DescribeTable API calls. table_schema function is called every time you do something. let desc: TableDescription = control::describe_table_api( + cx, &cx.effective_region(), table_name, /* should be equal to 'cx.effective_table_name()' */ ) diff --git a/src/batch.rs b/src/batch.rs index f8440ff..34a4f8a 100644 --- a/src/batch.rs +++ b/src/batch.rs @@ -15,15 +15,15 @@ */ use crate::parser::DyneinParser; +use aws_sdk_dynamodb::{ + operation::batch_write_item::BatchWriteItemError, + types::{AttributeValue, DeleteRequest, PutRequest, WriteRequest}, + Client as DynamoDbSdkClient, +}; use backon::Retryable; use base64::{engine::general_purpose, Engine as _}; use bytes::Bytes; use log::{debug, error, warn}; -use rusoto_core::RusotoError; -use rusoto_dynamodb::{ - AttributeValue, BatchWriteItemError, BatchWriteItemInput, DeleteRequest, DynamoDb, - DynamoDbClient, PutRequest, WriteRequest, -}; use serde_json::Value as JsonValue; use std::{collections::HashMap, error, fmt, fs, future::Future, io::Error as IOError, pin::Pin}; @@ -39,7 +39,7 @@ struct / enum / const pub enum DyneinBatchError { LoadData(IOError), PraseJSON(serde_json::Error), - BatchWriteError(RusotoError), + BatchWriteError(aws_sdk_dynamodb::error::SdkError), InvalidInput(String), ParseError(crate::parser::ParseError), } @@ -75,8 +75,8 @@ impl From for DyneinBatchError { Self::PraseJSON(e) } } -impl From> for DyneinBatchError { - fn from(e: RusotoError) -> Self { +impl From> for DyneinBatchError { + fn from(e: aws_sdk_dynamodb::error::SdkError) -> Self { Self::BatchWriteError(e) } } @@ -156,10 +156,11 @@ pub fn build_batch_request_items_from_json( */ let item: HashMap = ddbjson_attributes_to_attrvals(raw_item); - write_requests.push(WriteRequest { - put_request: Some(PutRequest { item }), - delete_request: None, - }); + write_requests.push( + WriteRequest::builder() + .put_request(PutRequest::builder().set_item(Some(item)).build().unwrap()) + .build(), + ); } else { error!("[skip] no field named 'Item' under PutRequest"); } @@ -188,10 +189,11 @@ pub fn build_batch_request_items_from_json( */ let key: HashMap = ddbjson_attributes_to_attrvals(raw_key); - write_requests.push(WriteRequest { - put_request: None, - delete_request: Some(DeleteRequest { key }), - }); + write_requests.push( + WriteRequest::builder() + 
.delete_request(DeleteRequest::builder().set_key(Some(key)).build().unwrap()) + .build(), + ); } else { error!("[skip] no field named 'Key' under DeleteRequest"); } @@ -216,55 +218,65 @@ pub fn build_batch_request_items_from_json( async fn batch_write_item_api( cx: app::Context, request_items: HashMap>, -) -> Result>>, RusotoError> { +) -> Result< + Option>>, + aws_sdk_dynamodb::error::SdkError, +> { debug!( "Calling BatchWriteItem API with request_items: {:?}", &request_items ); - let ddb = DynamoDbClient::new(cx.effective_region()); - let req: BatchWriteItemInput = BatchWriteItemInput { - request_items, - ..Default::default() - }; + let config = cx.effective_sdk_config().await; + let ddb = DynamoDbSdkClient::new(&config); let retry_setting = cx .retry .map(|v| v.batch_write_item.to_owned().unwrap_or(v.default)); let res = match retry_setting { Some(backoff) => { - let f = || async { ddb.clone().batch_write_item(req.clone()).await }; + let f = || async { + ddb.batch_write_item() + .set_request_items(Some(request_items.clone())) + .send() + .await + }; f.retry(&backoff) - .when(|err| match err { - RusotoError::Service(BatchWriteItemError::ProvisionedThroughputExceeded(e)) => { + .when(|err| match err.as_service_error() { + Some(BatchWriteItemError::ProvisionedThroughputExceededException(e)) => { warn!("Retry batch_write_item : {}", e); true } - RusotoError::Service(BatchWriteItemError::InternalServerError(e)) => { + Some(BatchWriteItemError::InternalServerError(e)) => { warn!("Retry batch_write_item : {}", e); true } - RusotoError::Service(BatchWriteItemError::RequestLimitExceeded(e)) => { + Some(BatchWriteItemError::RequestLimitExceeded(e)) => { warn!("Retry batch_write_item : {}", e); true } - RusotoError::HttpDispatch(e) => { - warn!("Retry batch_write_item : {}", e); - true - } - RusotoError::Unknown(response) => { - if response.body_as_str().contains("ThrottlingException") { - warn!("Retry batch_write_item : {}", err); - true - } else { - false - } - } + // aws_sdk_dynamodb::error::SdkError::DispatchFailure(e) => { + // warn!("Retry batch_write_item : {}", &e); + // true + // } + // aws_sdk_dynamodb::error::SdkError::a(response) => { + // if response.body_as_str().contains("ThrottlingException") { + // warn!("Retry batch_write_item : {}", err); + // true + // } else { + // false + // } + // } _ => false, }) .await } - None => ddb.batch_write_item(req).await, + None => { + ddb.batch_write_item() + .set_request_items(Some(request_items)) + .send() + .await + } }; match res { Ok(res) => Ok(res.unprocessed_items), @@ -277,7 +289,8 @@ async fn batch_write_item_api( pub fn batch_write_untill_processed( cx: app::Context, request_items: HashMap>, -) -> Pin>>>> { +) -> Pin>>>> +{ Box::pin(async move { match batch_write_item_api(cx.clone(), request_items).await { Ok(result) => { @@ -326,10 +339,11 @@ pub async fn batch_write_item( for item in items.iter() { let attrs = parser.parse_dynein_format(None, item)?; validate_item_keys(&attrs, &ts)?; - write_requests.push(WriteRequest { - put_request: Some(PutRequest { item: attrs }), - delete_request: None, - }); + write_requests.push( + WriteRequest::builder() + .put_request(PutRequest::builder().set_item(Some(attrs)).build().unwrap()) + .build(), + ); } } @@ -337,10 +351,11 @@ pub async fn batch_write_item( for key in keys.iter() { let attrs = parser.parse_dynein_format(None, key)?; validate_item_keys(&attrs, &ts)?; - write_requests.push(WriteRequest { - put_request: None, - delete_request: Some(DeleteRequest { key: attrs }), - }); + 
write_requests.push( + WriteRequest::builder() + .delete_request(DeleteRequest::builder().set_key(Some(attrs)).build().unwrap()) + .build(), + ); } } @@ -380,12 +395,6 @@ pub async fn convert_jsonvals_to_request_items( let mut write_requests = Vec::::new(); for item_jsonval in items_jsonval { - // Initialize a WriteRequest, which consists of a put_request for a single item. - let mut write_request = WriteRequest { - delete_request: None, - put_request: None, - }; - // Focusing on an item - iterate over attributes in an item. let mut item = HashMap::::new(); for (attr_name, body) in item_jsonval @@ -400,8 +409,11 @@ pub async fn convert_jsonvals_to_request_items( } // Fill meaningful put_request here, then push it to the write_requests. Then go to the next item. - write_request.put_request = Some(PutRequest { item }); - write_requests.push(write_request); + write_requests.push( + WriteRequest::builder() + .put_request(PutRequest::builder().set_item(Some(item)).build().unwrap()) + .build(), + ); } // A single table name as a key, and insert all (up to 25) write_requests under the single table. @@ -438,12 +450,6 @@ pub async fn csv_matrix_to_request_items( let mut write_requests = Vec::::new(); for cells in matrix { - // Initialize a WriteRequest, which consists of a put_request for a single item. - let mut write_request = WriteRequest { - delete_request: None, - put_request: None, - }; - // Build an item. Note that DynamoDB data type of attributes are left to how serde_json::from_str parse the value in the cell. let mut item = HashMap::::new(); for i in 0..headers.len() { @@ -459,8 +465,11 @@ pub async fn csv_matrix_to_request_items( } // Fill meaningful put_request here, then push it to the write_requests. Then go to the next item. - write_request.put_request = Some(PutRequest { item }); - write_requests.push(write_request); + write_requests.push( + WriteRequest::builder() + .put_request(PutRequest::builder().set_item(Some(item)).build().unwrap()) + .build(), + ); } // A single table name as a key, and insert all (up to 25) write_requests under the single table. @@ -541,47 +550,27 @@ fn ddbjson_val_to_attrval(ddb_jsonval: &JsonValue) -> Option { // following list of if-else statements would be return value of this function. 
if let Some(x) = ddb_jsonval.get("S") { - Some(AttributeValue { - s: Some(x.as_str().unwrap().to_string()), - ..Default::default() - }) + Some(AttributeValue::S(x.as_str().unwrap().to_string())) } else if let Some(x) = ddb_jsonval.get("N") { - Some(AttributeValue { - n: Some(x.as_str().unwrap().to_string()), - ..Default::default() - }) + Some(AttributeValue::N(x.as_str().unwrap().to_string())) } else if let Some(x) = ddb_jsonval.get("B") { - Some(AttributeValue { - b: Some(json_binary_val_to_bytes(x)), - ..Default::default() - }) + Some(AttributeValue::B(aws_sdk_dynamodb::primitives::Blob::new(json_binary_val_to_bytes(x)))) } else if let Some(x) = ddb_jsonval.get("BOOL") { - Some(AttributeValue { - bool: Some(x.as_bool().unwrap()), - ..Default::default() - }) + Some(AttributeValue::Bool(x.as_bool().unwrap())) } else if let Some(x) = ddb_jsonval.get("SS") { - Some(AttributeValue { - ss: Some(set_logic(x)), - ..Default::default() - }) + Some(AttributeValue::Ss(set_logic(x))) } else if let Some(x) = ddb_jsonval.get("NS") { - Some(AttributeValue { - ns: Some(set_logic(x)), - ..Default::default() - }) + Some(AttributeValue::Ns(set_logic(x))) } else if let Some(x) = ddb_jsonval.get("BS") { let binary_set = x .as_array() .expect("should be valid JSON array") .iter() .map(json_binary_val_to_bytes) - .collect::>(); + .map(aws_sdk_dynamodb::primitives::Blob::new) + .collect::>(); debug!("Binary Set: {:?}", binary_set); - Some(AttributeValue { - bs: Some(binary_set), - ..Default::default() - }) + Some(AttributeValue::Bs(binary_set)) } else if let Some(x) = ddb_jsonval.get("L") { let list_element = x .as_array() @@ -590,21 +579,12 @@ fn ddbjson_val_to_attrval(ddb_jsonval: &JsonValue) -> Option { .map(|el| ddbjson_val_to_attrval(el).expect("failed to digest a list element")) .collect::>(); debug!("List Element: {:?}", list_element); - Some(AttributeValue { - l: Some(list_element), - ..Default::default() - }) + Some(AttributeValue::L(list_element)) } else if let Some(x) = ddb_jsonval.get("M") { let inner_map: HashMap = ddbjson_attributes_to_attrvals(x); - Some(AttributeValue { - m: Some(inner_map), - ..Default::default() - }) + Some(AttributeValue::M(inner_map)) } else if ddb_jsonval.get("NULL").is_some() { - Some(AttributeValue { - null: Some(true), - ..Default::default() - }) + Some(AttributeValue::Null(true)) } else { None } @@ -652,21 +632,21 @@ fn validate_key_type( ) -> Result<(), DyneinBatchError> { match expected_key_type { key::KeyType::S => { - if attrs[key_name].s.is_none() { + if attrs[key_name].as_s().is_err() { return Err(DyneinBatchError::InvalidInput( generate_type_mismatch_error_message(key_name, "String"), )); } } key::KeyType::N => { - if attrs[key_name].n.is_none() { + if attrs[key_name].as_n().is_err() { return Err(DyneinBatchError::InvalidInput( generate_type_mismatch_error_message(key_name, "Number"), )); } } key::KeyType::B => { - if attrs[key_name].b.is_none() { + if attrs[key_name].as_b().is_err() { return Err(DyneinBatchError::InvalidInput( generate_type_mismatch_error_message(key_name, "Binary"), )); diff --git a/src/bootstrap.rs b/src/bootstrap.rs index e0786bf..f9c09d1 100644 --- a/src/bootstrap.rs +++ b/src/bootstrap.rs @@ -21,12 +21,12 @@ use std::{ thread, time, }; +use aws_sdk_dynamodb::{ + operation::{batch_write_item::BatchWriteItemError, create_table::CreateTableError}, + types::{AttributeValue, PutRequest, WriteRequest}, +}; use futures::future::join_all; use log::{debug, error}; -use rusoto_core::RusotoError; -use rusoto_dynamodb::{ - AttributeValue, 
BatchWriteItemError, CreateTableError, PutRequest, WriteRequest, -}; use rusoto_signature::Region; use brotli::Decompressor; @@ -47,7 +47,7 @@ pub enum DyneinBootstrapError { PraseJSON(serde_json::Error), ReqwestError(reqwest::Error), ZipError(zip::result::ZipError), - BatchError(RusotoError), + BatchError(aws_sdk_dynamodb::error::SdkError), } impl fmt::Display for DyneinBootstrapError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { @@ -91,8 +91,8 @@ impl From for DyneinBootstrapError { Self::ZipError(e) } } -impl From> for DyneinBootstrapError { - fn from(e: RusotoError) -> Self { +impl From> for DyneinBootstrapError { + fn from(e: aws_sdk_dynamodb::error::SdkError) -> Self { Self::BatchError(e) } } @@ -199,10 +199,11 @@ see https://github.com/awslabs/dynein#working-with-dynamodb-items for detail ) }) .collect(); - write_requests.push(WriteRequest { - put_request: Some(PutRequest { item: item_attrval }), - delete_request: None, - }); + write_requests.push( + WriteRequest::builder() + .put_request(PutRequest::builder().set_item(Some(item_attrval)).build().unwrap()) + .build(), + ); if write_requests.len() == 25 { break 'batch; }; @@ -320,13 +321,13 @@ async fn prepare_table(cx: &app::Context, table_name: &str, keys: &[&str]) { desc.table_status.unwrap() ); } - Err(e) => match e { - RusotoError::Service(CreateTableError::ResourceInUse(_)) => println!( + Err(e) => match e.into_service_error() { + CreateTableError::ResourceInUseException(_) => println!( "[skip] Table '{}' already exists in {} region, skipping to create new one.", &table_name, &cx.effective_region().name() ), - _ => { + e => { debug!("CreateTable API call got an error -- {:#?}", e); error!("{}", e.to_string()); std::process::exit(1); @@ -342,12 +343,12 @@ async fn wait_table_creation(cx: &app::Context, mut processing_tables: Vec<&str> let create_table_results = join_all( processing_tables .iter() - .map(|t| control::describe_table_api(r, (*t).to_string())), + .map(|t| control::describe_table_api(cx, r, (*t).to_string())), ) .await; let statuses: Vec = create_table_results .iter() - .map(|desc| desc.table_status.to_owned().unwrap()) + .map(|desc| desc.table_status.to_owned().unwrap().to_string()) .collect(); debug!("Current table statues: {:?}", statuses); processing_tables = processing_tables diff --git a/src/cmd.rs b/src/cmd.rs index 495a633..7e3ff9c 100644 --- a/src/cmd.rs +++ b/src/cmd.rs @@ -119,7 +119,7 @@ pub enum Sub { Scan { /// Limit number of items to return. #[clap(short, long, default_value = "100", verbatim_doc_comment)] - limit: i64, + limit: i32, /// Attributes to show, separated by commas, which is mapped to ProjectionExpression (e.g. --attributes name,address,age). /// Note that primary key(s) are always included in results regardless of what you've passed to --attributes. @@ -184,7 +184,7 @@ pub enum Sub { /// Limit the number of items to return. By default, the number of items is determined by DynamoDB. #[clap(short, long, verbatim_doc_comment)] - limit: Option, + limit: Option, /// Attributes to show, separated by commas, which is mapped to ProjectionExpression (e.g. --attributes name,address,age). /// Note that primary key(s) are always included in results regardless of what you've passed to --attributes. 
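The hunks in this patch follow one recurring pattern: rusoto's `DynamoDbClient` plus per-operation `*Input` structs give way to aws-sdk-dynamodb's fluent builders on `Client`, and numeric fields such as `limit` narrow from `i64` to `i32` to match the SDK's `Scan`/`Query` signatures. A minimal sketch of that calling convention, using a hypothetical table name and default region/credential resolution rather than dynein's `Context`:

    use aws_config::BehaviorVersion;
    use aws_sdk_dynamodb::Client;

    // Sketch only: resolves region/credentials from the environment, then calls
    // DescribeTable through the fluent builder instead of a DescribeTableInput struct.
    async fn describe_example() -> Result<(), aws_sdk_dynamodb::Error> {
        let config = aws_config::load_defaults(BehaviorVersion::latest()).await;
        let ddb = Client::new(&config);
        let out = ddb
            .describe_table()
            .table_name("ExampleTable") // hypothetical table name, not part of this patch
            .send()
            .await?;
        println!("{:?}", out.table().and_then(|t| t.table_status()));
        Ok(())
    }

In dynein itself the same chain is fed by `cx.effective_sdk_config()`, as the control.rs hunks below show.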
diff --git a/src/control.rs b/src/control.rs index 1e7ee84..56aef68 100644 --- a/src/control.rs +++ b/src/control.rs @@ -16,17 +16,16 @@ // This module interact with DynamoDB Control Plane APIs use aws_sdk_dynamodb::{ - types::{BackupStatus as SdkBackupStatus, BackupSummary as SdkBackupSummary}, + types::{ + BackupStatus, BackupSummary, BillingMode, CreateGlobalSecondaryIndexAction, + GlobalSecondaryIndexUpdate, Projection, ProjectionType, ProvisionedThroughput, + TableDescription, + }, Client as DynamoDbSdkClient, }; use aws_sdk_ec2::Client as Ec2SdkClient; use futures::future::join_all; use log::{debug, error}; -use rusoto_dynamodb::{ - BillingModeSummary, CreateGlobalSecondaryIndexAction, CreateTableInput, DescribeTableInput, - DynamoDb, DynamoDbClient, GlobalSecondaryIndexUpdate, Projection, ProvisionedThroughput, - RestoreTableFromBackupInput, TableDescription, UpdateTableInput, -}; use rusoto_signature::Region; use std::{ io::{self, Error as IOError, Write}, @@ -126,6 +125,7 @@ pub async fn describe_table(cx: app::Context, target_table_to_desc: Option TableDescription { - let ddb = DynamoDbClient::new(region.clone()); - let req: DescribeTableInput = DescribeTableInput { table_name }; +pub async fn describe_table_api( + cx: &app::Context, + region: &Region, + table_name: String, +) -> TableDescription { + let config = cx.effective_sdk_config_with_region(region.name()).await; + let ddb = DynamoDbSdkClient::new(&config); - match ddb.describe_table(req).await { + match ddb.describe_table().table_name(table_name).send().await { Err(e) => { debug!("DescribeTable API call got an error -- {:#?}", e); error!("{}", e.to_string()); @@ -198,7 +202,10 @@ pub async fn create_table_api( cx: app::Context, name: String, given_keys: Vec, -) -> Result> { +) -> Result< + TableDescription, + aws_sdk_dynamodb::error::SdkError, +> { debug!( "Trying to create a table '{}' with keys '{:?}'", &name, &given_keys @@ -206,19 +213,20 @@ pub async fn create_table_api( let (key_schema, attribute_definitions) = util::generate_essential_key_definitions(&given_keys); - let ddb = DynamoDbClient::new(cx.effective_region()); - let req: CreateTableInput = CreateTableInput { - table_name: name, - billing_mode: Some(String::from(util::ONDEMAND_API_SPEC)), - key_schema, // Vec - attribute_definitions, // Vec - ..Default::default() - }; + let config = cx.effective_sdk_config().await; + let ddb = DynamoDbSdkClient::new(&config); - ddb.create_table(req).await.map(|res| { - res.table_description - .expect("Table Description returned from API should be valid.") - }) + ddb.create_table() + .table_name(name) + .billing_mode(BillingMode::PayPerRequest) + .set_key_schema(Some(key_schema)) + .set_attribute_definitions(Some(attribute_definitions)) + .send() + .await + .map(|res| { + res.table_description + .expect("Table Description returned from API should be valid.") + }) } pub async fn create_index(cx: app::Context, index_name: String, given_keys: Vec) { @@ -235,29 +243,32 @@ pub async fn create_index(cx: app::Context, index_name: String, given_keys: Vec< let (key_schema, attribute_definitions) = util::generate_essential_key_definitions(&given_keys); - let ddb = DynamoDbClient::new(cx.effective_region()); - let create_gsi_action = CreateGlobalSecondaryIndexAction { - index_name, - key_schema, - projection: Projection { - projection_type: Some(String::from("ALL")), - non_key_attributes: None, - }, - provisioned_throughput: None, // TODO: assign default rcu/wcu if base table is Provisioned mode. 
currently it works only for OnDemand talbe. - }; - let gsi_update = GlobalSecondaryIndexUpdate { - create: Some(create_gsi_action), - update: None, - delete: None, - }; - let req: UpdateTableInput = UpdateTableInput { - table_name: cx.effective_table_name(), - attribute_definitions: Some(attribute_definitions), // contains minimum necessary/missing attributes to add to define new GSI. - global_secondary_index_updates: Some(vec![gsi_update]), - ..Default::default() - }; + let config = cx.effective_sdk_config().await; + let ddb = DynamoDbSdkClient::new(&config); - match ddb.update_table(req).await { + let create_gsi_action = CreateGlobalSecondaryIndexAction::builder() + .index_name(index_name) + .set_key_schema(Some(key_schema)) + .projection( + Projection::builder() + .projection_type(ProjectionType::All) + .build(), + ) + .set_provisioned_throughput(None) // TODO: assign default rcu/wcu if base table is Provisioned mode. currently it works only for OnDemand talbe. + .build().unwrap(); + + let gsi_update = GlobalSecondaryIndexUpdate::builder() + .create(create_gsi_action) + .build(); + + match ddb + .update_table() + .table_name(cx.effective_table_name()) + .set_attribute_definitions(Some(attribute_definitions)) + .global_secondary_index_updates(gsi_update) + .send() + .await + { Err(e) => { debug!("UpdateTable API call got an error -- {:#?}", e); error!("{}", e.to_string()); @@ -279,7 +290,7 @@ pub async fn update_table( ) { // Retrieve TableDescription of the table to update, current (before update) status. let desc: TableDescription = - describe_table_api(&cx.effective_region(), table_name_to_update.clone()).await; + describe_table_api(&cx, &cx.effective_region(), table_name_to_update.clone()).await; // Map given string into "Mode" enum. Note that in cmd.rs clap already limits acceptable values. let switching_to_mode: Option = match mode_string { @@ -295,7 +306,7 @@ pub async fn update_table( let provisioned_throughput: Option = match &switching_to_mode { // when --mode is not given, no mode switch happens. Check the table's current mode. None => { - match extract_mode(&desc.clone().billing_mode_summary) { + match util::extract_mode(&desc.clone().billing_mode_summary) { // When currently OnDemand mode and you're not going to change the it, set None for CU. util::Mode::OnDemand => { if wcu.is_some() || rcu.is_some() { @@ -305,22 +316,24 @@ pub async fn update_table( } // When currently Provisioned mode and you're not going to change the it, // pass given rcu/wcu, and use current values if missing. Provisioned table should have valid capacity units so unwrap() here. - util::Mode::Provisioned => Some(ProvisionedThroughput { - read_capacity_units: rcu.unwrap_or_else(|| { - desc.clone() - .provisioned_throughput - .unwrap() - .read_capacity_units - .unwrap() - }), - write_capacity_units: wcu.unwrap_or_else(|| { - desc.clone() - .provisioned_throughput - .unwrap() - .write_capacity_units - .unwrap() - }), - }), + util::Mode::Provisioned => Some( + ProvisionedThroughput::builder() + .read_capacity_units(rcu.unwrap_or_else(|| { + desc.clone() + .provisioned_throughput + .unwrap() + .read_capacity_units + .unwrap() + })) + .write_capacity_units(wcu.unwrap_or_else(|| { + desc.clone() + .provisioned_throughput + .unwrap() + .write_capacity_units + .unwrap() + })) + .build().unwrap(), + ), } } // When the user trying to switch mode. @@ -333,10 +346,12 @@ pub async fn update_table( None } // when switching OnDemand->Provisioned mode, set given wcu/rcu, fill with "5" as a default if not given. 
- util::Mode::Provisioned => Some(ProvisionedThroughput { - read_capacity_units: rcu.unwrap_or(5), - write_capacity_units: wcu.unwrap_or(5), - }), + util::Mode::Provisioned => Some( + ProvisionedThroughput::builder() + .read_capacity_units(rcu.unwrap_or(5)) + .write_capacity_units(wcu.unwrap_or(5)) + .build().unwrap(), + ), }, }; @@ -378,24 +393,25 @@ async fn update_table_api( table_name_to_update: String, switching_to_mode: Option, provisioned_throughput: Option, -) -> Result> { +) -> Result< + TableDescription, + aws_sdk_dynamodb::error::SdkError, +> { debug!("Trying to update the table '{}'.", &table_name_to_update); - let ddb = DynamoDbClient::new(cx.effective_region()); - - let req: UpdateTableInput = UpdateTableInput { - table_name: table_name_to_update, - billing_mode: switching_to_mode.map(util::mode_to_billing_mode_api_spec), - provisioned_throughput, - // NOTE: In this function we set `global_secondary_index_updates` to None. GSI update is handled in different commands (e.g. dy admin create index xxx --keys) - global_secondary_index_updates: None, /* intentional */ - ..Default::default() - }; + let config = cx.effective_sdk_config().await; + let ddb = DynamoDbSdkClient::new(&config); - ddb.update_table(req).await.map(|res| { - res.table_description - .expect("Table Description returned from API should be valid.") - }) + ddb.update_table() + .table_name(table_name_to_update) + .set_billing_mode(switching_to_mode.map(|v| v.into())) + .set_provisioned_throughput(provisioned_throughput) + .send() + .await + .map(|res| { + res.table_description + .expect("Table Description returned from API should be valid.") + }) } pub async fn delete_table(cx: app::Context, name: String, skip_confirmation: bool) { @@ -514,12 +530,10 @@ pub async fn list_backups(cx: app::Context, all_tables: bool) -> Result<(), IOEr /// Currently overwriting properties during rstore is not supported. pub async fn restore(cx: app::Context, backup_name: Option, restore_name: Option) { // let backups = list_backups_api(&cx, false).await; - let available_backups: Vec = list_backups_api(&cx, false) + let available_backups: Vec = list_backups_api(&cx, false) .await .into_iter() - .filter(|b: &SdkBackupSummary| { - b.to_owned().backup_status == Some(SdkBackupStatus::Available) - }) + .filter(|b: &BackupSummary| b.to_owned().backup_status == Some(BackupStatus::Available)) .collect(); // let available_backups: Vec = backups.iter().filter(|b| b.backup_status.to_owned().unwrap() == "AVAILABLE").collect(); if available_backups.is_empty() { @@ -565,15 +579,16 @@ pub async fn restore(cx: app::Context, backup_name: Option, restore_name Some(restore) => restore, }; - let ddb = DynamoDbClient::new(cx.effective_region()); - // https://docs.rs/rusoto_dynamodb/0.44.0/rusoto_dynamodb/struct.RestoreTableFromBackupInput.html - let req: RestoreTableFromBackupInput = RestoreTableFromBackupInput { - backup_arn: backup_arn.clone(), - target_table_name, - ..Default::default() - }; + let config = cx.effective_sdk_config().await; + let ddb = DynamoDbSdkClient::new(&config); - match ddb.restore_table_from_backup(req).await { + match ddb + .restore_table_from_backup() + .backup_arn(backup_arn.clone()) + .target_table_name(target_table_name) + .send() + .await + { Err(e) => { debug!("RestoreTableFromBackup API call got an error -- {:#?}", e); /* e.g. ... 
Possibly see "BackupInUse" error: @@ -589,24 +604,6 @@ pub async fn restore(cx: app::Context, backup_name: Option, restore_name } } -/// Map "BilingModeSummary" field in table description returned from DynamoDB API, -/// into convenient mode name ("Provisioned" or "OnDemand") -pub fn extract_mode(bs: &Option) -> util::Mode { - let provisioned_mode = util::Mode::Provisioned; - let ondemand_mode = util::Mode::OnDemand; - match bs { - // if BillingModeSummary field doesn't exist, the table is Provisioned Mode. - None => provisioned_mode, - Some(x) => { - if x.clone().billing_mode.unwrap() == util::ONDEMAND_API_SPEC { - ondemand_mode - } else { - provisioned_mode - } - } - } -} - /* ================================================= Private functions ================================================= */ @@ -629,7 +626,7 @@ async fn list_tables_api(cx: app::Context) -> Vec { } /// This function is a private function that simply calls ListBackups API and return results -async fn list_backups_api(cx: &app::Context, all_tables: bool) -> Vec { +async fn list_backups_api(cx: &app::Context, all_tables: bool) -> Vec { let config = cx.effective_sdk_config().await; let ddb = DynamoDbSdkClient::new(&config); @@ -653,7 +650,7 @@ async fn list_backups_api(cx: &app::Context, all_tables: bool) -> Vec, + available_backups: Vec, ) -> String { available_backups .into_iter() diff --git a/src/data.rs b/src/data.rs index 80ee2b6..d3d0b55 100644 --- a/src/data.rs +++ b/src/data.rs @@ -24,11 +24,12 @@ use std::{ }; use crate::parser::{AttributeDefinition, AttributeType, DyneinParser, ParseError}; -use log::{debug, error}; -use rusoto_dynamodb::{ - AttributeValue, DeleteItemInput, DynamoDb, DynamoDbClient, GetItemInput, PutItemInput, - QueryInput, ScanInput, ScanOutput, UpdateItemInput, +use aws_sdk_dynamodb::{ + operation::scan::ScanOutput, + types::{AttributeValue, ReturnValue}, + Client as DynamoDbSdkClient, }; +use log::{debug, error}; use serde_json::Value as JsonValue; use tabwriter::TabWriter; // use bytes::Bytes; @@ -126,7 +127,7 @@ pub async fn scan( consistent_read: bool, attributes: &Option, keys_only: bool, - limit: i64, + limit: i32, ) { let ts: app::TableSchema = app::table_schema(&cx).await; @@ -165,7 +166,7 @@ pub async fn scan_api( consistent_read: bool, attributes: &Option, keys_only: bool, - limit: Option, + limit: Option, esk: Option>, ) -> ScanOutput { debug!("context: {:#?}", &cx); @@ -173,30 +174,31 @@ pub async fn scan_api( let scan_params: GeneratedScanParams = generate_scan_expressions(&ts, attributes, keys_only); - let ddb = DynamoDbClient::new(cx.effective_region()); - let req: ScanInput = ScanInput { - table_name: ts.name.to_string(), - index_name: index, - limit, - projection_expression: scan_params.exp, - expression_attribute_names: scan_params.names, - consistent_read: Some(consistent_read), - exclusive_start_key: esk, - ..Default::default() - }; - - ddb.scan(req).await.unwrap_or_else(|e| { - debug!("Scan API call got an error -- {:?}", e); - error!("{}", e.to_string()); - std::process::exit(1); - }) + let config = cx.effective_sdk_config().await; + let ddb = DynamoDbSdkClient::new(&config); + + ddb.scan() + .table_name(ts.name) + .set_index_name(index) + .set_limit(limit) + .set_projection_expression(scan_params.exp) + .set_expression_attribute_names(scan_params.names) + .consistent_read(consistent_read) + .set_exclusive_start_key(esk) + .send() + .await + .unwrap_or_else(|e| { + debug!("Scan API call got an error -- {:?}", e); + error!("{}", e.to_string()); + std::process::exit(1); + 
}) } pub struct QueryParams { pub pval: String, pub sort_key_expression: Option, pub index: Option, - pub limit: Option, + pub limit: Option, pub consistent_read: bool, pub descending: bool, pub attributes: Option, @@ -231,21 +233,22 @@ pub async fn query(cx: app::Context, params: QueryParams) { &ts.name, &query_params ); - let ddb = DynamoDbClient::new(cx.effective_region()); - let req: QueryInput = QueryInput { - table_name: ts.name.to_string(), - index_name: params.index, - limit: params.limit, - key_condition_expression: query_params.exp, - expression_attribute_names: query_params.names, - expression_attribute_values: query_params.vals, - consistent_read: Some(params.consistent_read), - scan_index_forward: params.descending.then_some(false), - ..Default::default() - }; + let config = cx.effective_sdk_config().await; + let ddb = DynamoDbSdkClient::new(&config); + + let req = ddb + .query() + .table_name(ts.name.to_string()) + .set_index_name(params.index) + .set_limit(params.limit) + .set_key_condition_expression(query_params.exp) + .set_expression_attribute_names(query_params.names) + .set_expression_attribute_values(query_params.vals) + .consistent_read(params.consistent_read) + .set_scan_index_forward(params.descending.then_some(false)); debug!("Request: {:#?}", req); - match ddb.query(req).await { + match req.send().await { Ok(res) => { match res.items { None => panic!("This message should not be shown"), // as Query returns 'Some([])' if there's no item to return. @@ -288,15 +291,17 @@ pub async fn get_item(cx: app::Context, pval: String, sval: Option, cons &ts.name, &primary_keys ); - let ddb = DynamoDbClient::new(cx.effective_region()); - let req: GetItemInput = GetItemInput { - table_name: ts.name, - key: primary_keys, - consistent_read: Some(consistent_read), - ..Default::default() - }; - - match ddb.get_item(req).await { + let config = cx.effective_sdk_config().await; + let ddb = DynamoDbSdkClient::new(&config); + + match ddb + .get_item() + .table_name(ts.name) + .set_key(Some(primary_keys)) + .consistent_read(consistent_read) + .send() + .await + { Ok(res) => match res.item { None => println!("No item found."), Some(item) => match cx.output.as_deref() { @@ -358,15 +363,16 @@ pub async fn put_item(cx: app::Context, pval: String, sval: Option, item debug!("Calling PutItem API to insert: {:?}", &full_item_image); - let ddb = DynamoDbClient::new(cx.effective_region()); - let req: PutItemInput = PutItemInput { - table_name: ts.name.to_string(), - item: full_item_image, // HashMap, - // return_values: `PutItem does not recognize any values other than NONE or ALL_OLD`. So leave it as default (NONE). 
- ..Default::default() - }; + let config = cx.effective_sdk_config().await; + let ddb = DynamoDbSdkClient::new(&config); - match ddb.put_item(req).await { + match ddb + .put_item() + .table_name(ts.name.to_string()) + .set_item(Some(full_item_image)) + .send() + .await + { Ok(_) => { println!("Successfully put an item to the table '{}'.", &ts.name); } @@ -389,14 +395,16 @@ pub async fn delete_item(cx: app::Context, pval: String, sval: Option) { &ts.name, &primary_keys ); - let ddb = DynamoDbClient::new(cx.effective_region()); - let req: DeleteItemInput = DeleteItemInput { - table_name: ts.name.to_string(), - key: primary_keys, - ..Default::default() - }; + let config = cx.effective_sdk_config().await; + let ddb = DynamoDbSdkClient::new(&config); - match ddb.delete_item(req).await { + match ddb + .delete_item() + .table_name(ts.name.to_string()) + .set_key(Some(primary_keys)) + .send() + .await + { // NOTE: DynamoDB DeleteItem API is idempotent and returns "OK" even if an item trying to delete doesn't exist. Ok(_) => { println!( @@ -444,18 +452,20 @@ pub async fn update_item( panic!("Neither --set nor --remove is not specified, but this should not be catched here."); }; - let ddb = DynamoDbClient::new(cx.effective_region()); - let req: UpdateItemInput = UpdateItemInput { - table_name: ts.name.to_string(), - key: primary_keys, - update_expression: update_params.exp, - expression_attribute_names: update_params.names, - expression_attribute_values: update_params.vals, - return_values: Some(String::from("ALL_NEW")), // ask DynamoDB to return updated item. - ..Default::default() - }; - - match ddb.update_item(req).await { + let config = cx.effective_sdk_config().await; + let ddb = DynamoDbSdkClient::new(&config); + + match ddb + .update_item() + .table_name(ts.name.to_string()) + .set_key(Some(primary_keys)) + .set_update_expression(update_params.exp) + .set_expression_attribute_names(update_params.names) + .set_expression_attribute_values(update_params.vals) + .return_values(ReturnValue::AllNew) // ask DynamoDB to return updated item. + .send() + .await + { Ok(res) => { println!("Successfully updated an item in the table '{}'.", &ts.name); println!( @@ -613,17 +623,12 @@ fn build_attrval_scalar(_ktype: &str, _kval: &str) -> AttributeValue { _ktype, _kval ); - let mut attrval: AttributeValue = AttributeValue { - ..Default::default() - }; match _ktype { - "S" => attrval.s = Some(String::from(_kval)), - "N" => attrval.n = Some(String::from(_kval)), // NOTE: pass string, not number + "S" => AttributeValue::S(String::from(_kval)), + "N" => AttributeValue::N(String::from(_kval)), // NOTE: pass string, not number // "B" => { attrval.b = Some(Bytes::from(_kval.clone().as_str())) }, _ => panic!("ERROR: Unknown DynamoDB Data Type: {}", _ktype), } - - attrval } // for SS and NS DynamoDB Attributes. @@ -635,47 +640,32 @@ fn build_attrval_set(ktype: &str, kval: &[JsonValue]) -> AttributeValue { ktype, kval ); - let mut attrval: AttributeValue = AttributeValue { - ..Default::default() - }; match ktype { - "SS" => { - attrval.ss = Some( - kval.iter() - .map(|x| x.as_str().unwrap().to_string()) - .collect(), - ) - } - "NS" => { - attrval.ns = Some( - kval.iter() - .map(|x| x.as_i64().unwrap().to_string()) - .collect(), - ) - } + "SS" => AttributeValue::Ss( + kval.iter() + .map(|x| x.as_str().unwrap().to_string()) + .collect(), + ), + "NS" => AttributeValue::Ns( + kval.iter() + .map(|x| x.as_i64().unwrap().to_string()) + .collect(), + ), // NOTE: Currently BS is not supported. 
// "BS": Vec (serialize_with = "::rusoto_core::serialization::SerdeBlobList::serialize_blob_list") _ => panic!("ERROR: Unknown DynamoDB Data Type: {}", ktype), } - - attrval } /// for "L" DynamoDB Attributes /// used only for 'simplified JSON' format. Not compatible with DynamoDB JSON. fn build_attrval_list(vec: &[JsonValue], enable_set_inference: bool) -> AttributeValue { - let mut attrval: AttributeValue = AttributeValue { - ..Default::default() - }; - let mut inside_attrvals = Vec::::new(); for v in vec { debug!("this is an element of vec: {:?}", v); inside_attrvals.push(dispatch_jsonvalue_to_attrval(v, enable_set_inference)); } - attrval.l = Some(inside_attrvals); - - attrval + AttributeValue::L(inside_attrvals) } /// for "M" DynamoDB Attributes @@ -684,10 +674,6 @@ fn build_attrval_map( json_map: &serde_json::Map, enable_set_inference: bool, ) -> AttributeValue { - let mut result = AttributeValue { - ..Default::default() - }; - let mut mapval = HashMap::::new(); for (k, v) in json_map { debug!("working on key '{}', and value '{:?}'", k, v); @@ -696,31 +682,17 @@ fn build_attrval_map( dispatch_jsonvalue_to_attrval(v, enable_set_inference), ); } - result.m = Some(mapval); - - result + AttributeValue::M(mapval) } /// Convert from serde_json::Value (standard JSON values) into DynamoDB style AttributeValue pub fn dispatch_jsonvalue_to_attrval(jv: &JsonValue, enable_set_inference: bool) -> AttributeValue { match jv { // scalar types - JsonValue::String(val) => AttributeValue { - s: Some(val.to_string()), - ..Default::default() - }, - JsonValue::Number(val) => AttributeValue { - n: Some(val.to_string()), - ..Default::default() - }, - JsonValue::Bool(val) => AttributeValue { - bool: Some(*val), - ..Default::default() - }, - JsonValue::Null => AttributeValue { - null: Some(true), - ..Default::default() - }, + JsonValue::String(val) => AttributeValue::S(val.to_string()), + JsonValue::Number(val) => AttributeValue::N(val.to_string()), + JsonValue::Bool(val) => AttributeValue::Bool(*val), + JsonValue::Null => AttributeValue::Null(true), // document types. they can be recursive. JsonValue::Object(obj) => build_attrval_map(obj, enable_set_inference), @@ -747,7 +719,7 @@ pub fn dispatch_jsonvalue_to_attrval(jv: &JsonValue, enable_set_inference: bool) /// `strip_items` calls `strip_item` for each item. 
fn strip_items( - items: &[HashMap], + items: &[HashMap], ) -> Vec> { items.iter().map(strip_item).collect() } @@ -774,13 +746,12 @@ fn strip_items( /// by utilizing Serialize derive of the struct: /// https://docs.rs/rusoto_dynamodb/0.42.0/src/rusoto_dynamodb/generated.rs.html#38 /// https://docs.rs/rusoto_dynamodb/0.42.0/rusoto_dynamodb/struct.AttributeValue.html -fn strip_item( - item: &HashMap, -) -> HashMap { +fn strip_item(item: &HashMap) -> HashMap { item.iter() .map(|attr| // Serialization: `serde_json::to_value(sth: rusoto_dynamodb::AttributeValue)` - (attr.0.to_string(), serde_json::to_value(attr.1).unwrap())) + // TODO: fix attr.1 + (attr.0.to_string(), serde_json::to_value("attr.1").unwrap())) .collect() } @@ -1029,55 +1000,38 @@ fn attrval_to_cell_print(optional_attrval: Option) -> String { match optional_attrval { None => String::from(""), Some(attrval) => { - if let Some(v) = &attrval.s { - String::from(v) - } else if let Some(v) = &attrval.n { - String::from(v) - } else if let Some(v) = &attrval.bool { - v.to_string() - } else if let Some(vs) = &attrval.ss { - serde_json::to_string(&vs).unwrap() - } else if let Some(vs) = &attrval.ns { - serde_json::to_string( - &vs.iter() + match attrval { + AttributeValue::S(v) => v, + AttributeValue::N(v) => v, + AttributeValue::Bool(v) => v.to_string(), + AttributeValue::Ss(v) => serde_json::to_string(&v).unwrap(), + AttributeValue::Ns(v) => serde_json::to_string( + &v.iter() .map(|v| str_to_json_num(v)) .collect::>(), ) - .unwrap() - } else if attrval.null.is_some() { - String::from("null") - } else { - String::from("(snip)") - } // B, BS, L, and M are not shown. + .unwrap(), + AttributeValue::Null(_) => String::from("null"), + _ => String::from("(snip)"), // B, BS, L, and M are not shown. + } } } } /// https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html pub fn attrval_to_type(attrval: &AttributeValue) -> Option { - // following list of if-else statements would be return value of this function. 
- if attrval.s.is_some() { - Some(String::from("String")) - } else if attrval.n.is_some() { - Some(String::from("Number")) - } else if attrval.b.is_some() { - Some(String::from("Binary")) - } else if attrval.bool.is_some() { - Some(String::from("Boolian")) - } else if attrval.null.is_some() { - Some(String::from("Null")) - } else if attrval.ss.is_some() { - Some(String::from("Set (String)")) - } else if attrval.ns.is_some() { - Some(String::from("Set (Number)")) - } else if attrval.bs.is_some() { - Some(String::from("Set (Binary)")) - } else if attrval.m.is_some() { - Some(String::from("Map")) - } else if attrval.l.is_some() { - Some(String::from("List")) - } else { - None + match attrval { + AttributeValue::S(_) => Some(String::from("String")), + AttributeValue::N(_) => Some(String::from("Number")), + AttributeValue::B(_) => Some(String::from("Binary")), + AttributeValue::Bool(_) => Some(String::from("Boolian")), + AttributeValue::Null(_) => Some(String::from("Null")), + AttributeValue::Ss(_) => Some(String::from("Set (String)")), + AttributeValue::Ns(_) => Some(String::from("Set (Number)")), + AttributeValue::Bs(_) => Some(String::from("Set (Binary)")), + AttributeValue::M(_) => Some(String::from("Map")), + AttributeValue::L(_) => Some(String::from("List")), + _ => None, } } @@ -1169,33 +1123,20 @@ fn str_to_json_num(s: &str) -> JsonValue { fn attrval_to_jsonval(attrval: &AttributeValue) -> JsonValue { let unsupported: &str = "<<>>"; // following list of if-else statements would be return value of this function. - if let Some(v) = &attrval.s { - serde_json::to_value(v).unwrap() - } else if let Some(v) = &attrval.n { - str_to_json_num(v) - } else if let Some(v) = &attrval.bool { - serde_json::to_value(v).unwrap() - } else if let Some(vs) = &attrval.ss { - serde_json::to_value(vs).unwrap() - } else if let Some(vs) = &attrval.ns { - vs.iter().map(|v| str_to_json_num(v)).collect() - } - // In List (L) type, each element is a DynamoDB AttributeValue (e.g. {"S": "xxxx"}). recursively apply this method to elements. - else if let Some(vlst) = &attrval.l { - vlst.iter().map(attrval_to_jsonval).collect() - } else if let Some(vmap) = &attrval.m { - attrval_to_json_map(vmap) - } else if attrval.null.is_some() { - serde_json::to_value(()).unwrap() - } - // Binary (B) and BinarySet (BS) attributes are not supported to display in JSON output format. 
- else if attrval.b.is_some() || attrval.bs.is_some() { - serde_json::to_value(unsupported).unwrap() - } else { - panic!( + match attrval { + AttributeValue::S(v) => serde_json::to_value(v).unwrap(), + AttributeValue::N(v) => str_to_json_num(v), + AttributeValue::Bool(v) => serde_json::to_value(v).unwrap(), + AttributeValue::Null(_) => serde_json::to_value(()).unwrap(), + AttributeValue::Ss(v) => serde_json::to_value(v).unwrap(), + AttributeValue::Ns(v) => v.iter().map(|v| str_to_json_num(v)).collect(), + AttributeValue::B(_) | AttributeValue::Bs(_) => serde_json::to_value(unsupported).unwrap(), + AttributeValue::M(v) => attrval_to_json_map(v), + AttributeValue::L(v) => v.iter().map(attrval_to_jsonval).collect(), + _ => panic!( "DynamoDB AttributeValue is not in valid status: {:#?}", &attrval - ); + ), } } @@ -1287,10 +1228,7 @@ mod tests { actual.vals, Some(HashMap::from([( ":DYNEIN_ATTRVAL0".to_owned(), - AttributeValue { - n: Some("123".to_owned()), - ..Default::default() - }, + AttributeValue::N("123".to_owned()), )])) ); } @@ -1318,17 +1256,11 @@ mod tests { Some(HashMap::from([ ( ":DYNEIN_ATTRVAL0".to_owned(), - AttributeValue { - n: Some("0".to_owned()), - ..Default::default() - }, + AttributeValue::N("0".to_owned()), ), ( ":DYNEIN_ATTRVAL1".to_owned(), - AttributeValue { - s: Some("OPEN".to_owned()), - ..Default::default() - }, + AttributeValue::S("OPEN".to_owned()), ), ])), ); @@ -1352,10 +1284,7 @@ mod tests { actual.vals, Some(HashMap::from([( ":DYNEIN_ATTRVAL0".to_owned(), - AttributeValue { - s: Some("Math".to_owned()), - ..Default::default() - }, + AttributeValue::S("Math".to_owned()), )])), ); } @@ -1378,10 +1307,7 @@ mod tests { actual.vals, Some(HashMap::from([( ":DYNEIN_ATTRVAL0".to_owned(), - AttributeValue { - n: Some("1".to_owned()), - ..Default::default() - }, + AttributeValue::N("1".to_owned()), )])), ); } @@ -1404,10 +1330,7 @@ mod tests { actual.vals, Some(HashMap::from([( ":DYNEIN_ATTRVAL0".to_owned(), - AttributeValue { - n: Some("1".to_owned()), - ..Default::default() - }, + AttributeValue::N("1".to_owned()), )])), ); } @@ -1466,17 +1389,11 @@ mod tests { Some(HashMap::from([ ( ":DYNEIN_ATTRVAL0".to_owned(), - AttributeValue { - n: Some("0".to_owned()), - ..Default::default() - } + AttributeValue::S("0".to_owned()) ), ( ":DYNEIN_ATTRVAL1".to_owned(), - AttributeValue { - s: Some("2020-02-24T22:22:22Z".to_owned()), - ..Default::default() - } + AttributeValue::S("2020-02-24T22:22:22Z".to_owned()) ), ])) ); diff --git a/src/key.rs b/src/key.rs index 8ebccaa..96a9350 100644 --- a/src/key.rs +++ b/src/key.rs @@ -15,7 +15,7 @@ */ use ::serde::{Deserialize, Serialize}; -use rusoto_dynamodb::{AttributeDefinition, KeySchemaElement, TableDescription}; +use aws_sdk_dynamodb::types::{AttributeDefinition, KeySchemaElement, TableDescription}; use std::str::FromStr; #[derive(Serialize, Deserialize, Debug, Clone)] @@ -112,7 +112,7 @@ pub fn typed_key_for_schema( attrs: &[AttributeDefinition], ) -> Option { // Fetch Partition Key ("HASH") or Sort Key ("RANGE") from given Key Schema. pk should always exists, but sk may not. - let target_key = ks.iter().find(|x| x.key_type == pk_or_sk); + let target_key = ks.iter().find(|x| x.key_type == pk_or_sk.into()); target_key.map(|key| Key { name: key.clone().attribute_name, // kind should be one of S/N/B, Which can be retrieved from AttributeDefinition's attribute_type. 
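// Background (illustrative sketch; `kse` and `ad` below are hypothetical locals, not part of
// this patch): aws-sdk-dynamodb models key metadata as enums instead of bare strings, which is
// why the comparisons in the hunks above and below change shape, roughly like this:
//     let is_partition_key = kse.key_type == KeyType::Hash;     // KeyType also accepts "HASH".into()
//     let is_string_attr = ad.attribute_type.as_str() == "S";   // ScalarAttributeType -> &str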
@@ -121,7 +121,7 @@ pub fn typed_key_for_schema( .iter() .find(|at| at.attribute_name == key.attribute_name) .expect("primary key should be in AttributeDefinition.") - .attribute_type, + .attribute_type.as_str(), ) .unwrap(), }) diff --git a/src/parser.rs b/src/parser.rs index 8bc21f9..e4f21f5 100644 --- a/src/parser.rs +++ b/src/parser.rs @@ -15,12 +15,12 @@ */ use crate::pest::Parser; +use aws_sdk_dynamodb::types::AttributeValue; use base64::engine::{general_purpose, DecodePaddingMode, GeneralPurpose, GeneralPurposeConfig}; use base64::{DecodeError, Engine}; use bytes::Bytes; use itertools::Itertools; use pest::iterators::Pair; -use rusoto_dynamodb::AttributeValue; use std::collections::hash_map::Entry; use std::collections::HashMap; use std::fmt::{Debug, Display, Formatter, Write}; @@ -452,54 +452,24 @@ impl AttrVal { fn convert_attribute_value(self) -> AttributeValue { match self { - AttrVal::N(number) => AttributeValue { - n: Some(number), - ..Default::default() - }, - AttrVal::S(str) => AttributeValue { - s: Some(str), - ..Default::default() - }, - AttrVal::Bool(boolean) => AttributeValue { - bool: Some(boolean), - ..Default::default() - }, - AttrVal::Null(isnull) => AttributeValue { - null: Some(isnull), - ..Default::default() - }, - AttrVal::B(binary) => AttributeValue { - b: Some(binary), - ..Default::default() - }, - AttrVal::L(list) => AttributeValue { - l: Some( - list.into_iter() - .map(|x| x.convert_attribute_value()) - .collect(), - ), - ..Default::default() - }, - AttrVal::M(map) => AttributeValue { - m: Some( - map.into_iter() - .map(|(key, val)| (key, val.convert_attribute_value())) - .collect(), - ), - ..Default::default() - }, - AttrVal::NS(list) => AttributeValue { - ns: Some(list), - ..Default::default() - }, - AttrVal::SS(list) => AttributeValue { - ss: Some(list), - ..Default::default() - }, - AttrVal::BS(list) => AttributeValue { - bs: Some(list), - ..Default::default() - }, + AttrVal::N(number) => AttributeValue::N(number), + AttrVal::S(str) => AttributeValue::S(str), + AttrVal::Bool(boolean) => AttributeValue::Bool(boolean), + AttrVal::Null(isnull) => AttributeValue::Null(isnull), + AttrVal::B(binary) => AttributeValue::B(aws_sdk_dynamodb::primitives::Blob::new(binary)), + AttrVal::L(list) => AttributeValue::L( + list.into_iter() + .map(|x| x.convert_attribute_value()) + .collect(), + ), + AttrVal::M(map) => AttributeValue::M( + map.into_iter() + .map(|(key, val)| (key, val.convert_attribute_value())) + .collect(), + ), + AttrVal::NS(list) => AttributeValue::Ns(list), + AttrVal::SS(list) => AttributeValue::Ss(list), + AttrVal::BS(list) => AttributeValue::Bs(list.into_iter().map(aws_sdk_dynamodb::primitives::Blob::new).collect()), } } } @@ -1474,13 +1444,13 @@ impl DyneinParser { let result = GeneratedParser::parse(Rule::map_literal, exp); match result { Ok(mut pair) => { - let item = parse_literal(pair.next().unwrap())?.convert_attribute_value(); + let item = parse_literal(pair.next().unwrap())?.convert_attribute_value().as_m().unwrap().to_owned(); // content must be map literal let mut image = match initial_item { Some(init_item) => init_item, None => HashMap::new(), }; - image.extend(item.m.unwrap()); + image.extend(item); Ok(image) } Err(err) => Err(ParseError::ParsingError(Box::new(err))), diff --git a/src/transfer.rs b/src/transfer.rs index 3ea64fd..0c71aa9 100644 --- a/src/transfer.rs +++ b/src/transfer.rs @@ -28,7 +28,10 @@ use dialoguer::Confirm; use log::{debug, error}; use serde_json::{de::StrRead, Deserializer, StreamDeserializer, Value as JsonValue}; 
-use rusoto_dynamodb::{AttributeValue, ScanOutput, WriteRequest}; +use aws_sdk_dynamodb::{ + operation::scan::ScanOutput, + types::{AttributeValue, WriteRequest}, +}; use thiserror::Error; use super::app; @@ -201,7 +204,7 @@ pub async fn export( .open(tmp_output_filename)?; tmp_output_file.set_len(0)?; - let mut last_evaluated_key: Option> = None; + let mut last_evaluated_key: Option> = None; let mut progress_status = ProgressState::new(MAX_NUMBER_OF_OBSERVES); loop { // Invoke Scan API here. At the 1st iteration exclusive_start_key would be "None" as defined above, outside of the loop. diff --git a/src/util.rs b/src/util.rs index 76deffe..fb113c9 100644 --- a/src/util.rs +++ b/src/util.rs @@ -15,13 +15,13 @@ */ use ::serde::{Deserialize, Serialize}; +use aws_sdk_dynamodb::types::{ + AttributeDefinition, BillingMode, BillingModeSummary, GlobalSecondaryIndexDescription, + KeySchemaElement, KeyType, LocalSecondaryIndexDescription, ProvisionedThroughputDescription, + ScalarAttributeType, StreamSpecification, TableDescription, +}; use chrono::DateTime; use log::error; -use rusoto_dynamodb::{ - AttributeDefinition, BillingModeSummary, GlobalSecondaryIndexDescription, KeySchemaElement, - LocalSecondaryIndexDescription, ProvisionedThroughputDescription, StreamSpecification, - TableDescription, -}; use rusoto_signature::Region; use super::key; @@ -52,15 +52,21 @@ struct PrintDescribeTable { created_at: String, } -pub const PROVISIONED_API_SPEC: &str = "PROVISIONED"; -pub const ONDEMAND_API_SPEC: &str = "PAY_PER_REQUEST"; - #[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] pub enum Mode { Provisioned, OnDemand, } +impl Into for Mode { + fn into(self) -> BillingMode { + match self { + Mode::Provisioned => BillingMode::Provisioned, + Mode::OnDemand => BillingMode::PayPerRequest, + } + } +} + #[derive(Serialize, Deserialize, Debug)] struct PrintPrimaryKeys { pk: String, @@ -89,7 +95,7 @@ pub fn print_table_description(region: Region, desc: TableDescription) { let print_table: PrintDescribeTable = PrintDescribeTable { name: String::from(&desc.clone().table_name.unwrap()), region: String::from(region.name()), - status: String::from(&desc.clone().table_status.unwrap()), + status: String::from(desc.clone().table_status.unwrap().as_str()), schema: PrintPrimaryKeys { pk: key::typed_key("HASH", &desc) .expect("pk should exist") @@ -106,7 +112,7 @@ pub fn print_table_description(region: Region, desc: TableDescription) { size_bytes: desc.table_size_bytes.unwrap(), count: desc.item_count.unwrap(), - created_at: epoch_to_rfc3339(desc.creation_date_time.unwrap()), + created_at: epoch_to_rfc3339(desc.creation_date_time.unwrap().as_secs_f64()), }; println!("{}", serde_yaml::to_string(&print_table).unwrap()); } @@ -129,24 +135,28 @@ pub fn generate_essential_key_definitions( } // assumes first given key is Partition key, and second given key is Sort key (if any). - key_schema.push(KeySchemaElement { - attribute_name: String::from(key_and_type[0]), - key_type: if key_id == 0 { - String::from("HASH") - } else { - String::from("RANGE") - }, - }); + key_schema.push( + KeySchemaElement::builder() + .attribute_name(String::from(key_and_type[0])) + .key_type(if key_id == 0 { + KeyType::Hash + } else { + KeyType::Range + }) + .build().unwrap(), + ); // If data type of key is omitted, dynein assumes it as String (S). 
- attribute_definitions.push(AttributeDefinition { - attribute_name: String::from(key_and_type[0]), - attribute_type: if key_and_type.len() == 2 { - key_and_type[1].to_uppercase() - } else { - String::from("S") - }, - }); + attribute_definitions.push( + AttributeDefinition::builder() + .attribute_name(String::from(key_and_type[0])) + .attribute_type(if key_and_type.len() == 2 { + ScalarAttributeType::from(key_and_type[1].to_uppercase().as_ref()) + } else { + ScalarAttributeType::S + }) + .build().unwrap(), + ) } (key_schema, attribute_definitions) } @@ -160,7 +170,7 @@ pub fn extract_mode(bs: &Option) -> Mode { // if BillingModeSummary field doesn't exist, the table is Provisioned Mode. None => provisioned_mode, Some(x) => { - if x.clone().billing_mode.unwrap() == ONDEMAND_API_SPEC { + if x.clone().billing_mode.unwrap() == BillingMode::PayPerRequest { ondemand_mode } else { provisioned_mode @@ -214,15 +224,6 @@ pub fn epoch_to_rfc3339(epoch: f64) -> String { utc_datetime.to_rfc3339() } -/// Takes "Mode" enum and return exact string value required by DynamoDB API. -/// i.e. this function returns "PROVISIONED" or "PAY_PER_REQUEST". -pub fn mode_to_billing_mode_api_spec(mode: Mode) -> String { - match mode { - Mode::OnDemand => String::from(ONDEMAND_API_SPEC), - Mode::Provisioned => String::from(PROVISIONED_API_SPEC), - } -} - fn extract_capacity( mode: &Mode, cap_desc: &Option, From 1d4ba74931aca696d9ea44b89cb7fbce67cb8973 Mon Sep 17 00:00:00 2001 From: Ryota Sakamoto Date: Mon, 20 May 2024 12:08:34 +0900 Subject: [PATCH 11/21] chore: remove rusoto_signature Signed-off-by: Ryota Sakamoto --- Cargo.lock | 194 +++-------------------------------------------- Cargo.toml | 1 - src/app.rs | 43 +++++------ src/bootstrap.rs | 18 ++--- src/control.rs | 24 +++--- src/util.rs | 5 +- 6 files changed, 50 insertions(+), 235 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 038a5e4..a3ce9d8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -139,17 +139,6 @@ dependencies = [ "wait-timeout", ] -[[package]] -name = "async-trait" -version = "0.1.80" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "autocfg" version = "1.2.0" @@ -349,12 +338,12 @@ dependencies = [ "bytes", "form_urlencoded", "hex", - "hmac 0.12.1", + "hmac", "http 0.2.12", "http 1.1.0", "once_cell", "percent-encoding", - "sha2 0.10.8", + "sha2", "time", "tracing", ] @@ -529,12 +518,6 @@ dependencies = [ "rustc-demangle", ] -[[package]] -name = "base64" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" - [[package]] name = "base64" version = "0.21.7" @@ -569,15 +552,6 @@ version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" -[[package]] -name = "block-buffer" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" -dependencies = [ - "generic-array", -] - [[package]] name = "block-buffer" version = "0.10.4" @@ -695,7 +669,6 @@ dependencies = [ "iana-time-zone", "js-sys", "num-traits", - "serde", "wasm-bindgen", "windows-targets 0.52.5", ] @@ -877,16 +850,6 @@ dependencies = [ "typenum", ] -[[package]] -name = "crypto-mac" -version = 
"0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" -dependencies = [ - "generic-array", - "subtle", -] - [[package]] name = "deflate64" version = "0.1.8" @@ -932,22 +895,13 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8" -[[package]] -name = "digest" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" -dependencies = [ - "generic-array", -] - [[package]] name = "digest" version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ - "block-buffer 0.10.4", + "block-buffer", "crypto-common", "subtle", ] @@ -961,16 +915,6 @@ dependencies = [ "dirs-sys", ] -[[package]] -name = "dirs-next" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1" -dependencies = [ - "cfg-if", - "dirs-sys-next", -] - [[package]] name = "dirs-sys" version = "0.4.1" @@ -983,17 +927,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "dirs-sys-next" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" -dependencies = [ - "libc", - "redox_users", - "winapi", -] - [[package]] name = "doc-comment" version = "0.3.3" @@ -1036,7 +969,6 @@ dependencies = [ "rand", "regex", "reqwest", - "rusoto_signature", "serde", "serde_json", "serde_yaml", @@ -1361,23 +1293,13 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" -[[package]] -name = "hmac" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b" -dependencies = [ - "crypto-mac", - "digest 0.9.0", -] - [[package]] name = "hmac" version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ - "digest 0.10.7", + "digest", ] [[package]] @@ -1715,17 +1637,6 @@ dependencies = [ "crc", ] -[[package]] -name = "md-5" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b5a279bb9607f9f53c22d496eade00d138d1bdcccd07d74650387cf94942a15" -dependencies = [ - "block-buffer 0.9.0", - "digest 0.9.0", - "opaque-debug", -] - [[package]] name = "memchr" version = "2.7.2" @@ -1831,12 +1742,6 @@ version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" -[[package]] -name = "opaque-debug" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" - [[package]] name = "openssl" version = "0.10.64" @@ -1932,8 +1837,8 @@ version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8ed6a7761f76e3b9f92dfb0a60a6a6477c61024b775147ff0973a02653abaf2" dependencies = [ - "digest 
0.10.7", - "hmac 0.12.1", + "digest", + "hmac", ] [[package]] @@ -1984,7 +1889,7 @@ checksum = "d7a240022f37c361ec1878d646fc5b7d7c4d28d5946e1a80ad5a7a4f4ca0bdcd" dependencies = [ "once_cell", "pest", - "sha2 0.10.8", + "sha2", ] [[package]] @@ -2256,50 +2161,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "rusoto_credential" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee0a6c13db5aad6047b6a44ef023dbbc21a056b6dab5be3b79ce4283d5c02d05" -dependencies = [ - "async-trait", - "chrono", - "dirs-next", - "futures", - "hyper 0.14.28", - "serde", - "serde_json", - "shlex", - "tokio", - "zeroize", -] - -[[package]] -name = "rusoto_signature" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5ae95491c8b4847931e291b151127eccd6ff8ca13f33603eb3d0035ecb05272" -dependencies = [ - "base64 0.13.1", - "bytes", - "chrono", - "digest 0.9.0", - "futures", - "hex", - "hmac 0.11.0", - "http 0.2.12", - "hyper 0.14.28", - "log", - "md-5", - "percent-encoding", - "pin-project-lite", - "rusoto_credential", - "rustc_version", - "serde", - "sha2 0.9.9", - "tokio", -] - [[package]] name = "rustc-demangle" version = "0.1.23" @@ -2529,20 +2390,7 @@ checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.7", -] - -[[package]] -name = "sha2" -version = "0.9.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if", - "cpufeatures", - "digest 0.9.0", - "opaque-debug", + "digest", ] [[package]] @@ -2553,7 +2401,7 @@ checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.7", + "digest", ] [[package]] @@ -3184,22 +3032,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "winapi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - [[package]] name = "winapi-util" version = "0.1.8" @@ -3209,12 +3041,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - [[package]] name = "windows-core" version = "0.52.0" @@ -3409,7 +3235,7 @@ dependencies = [ "crossbeam-utils", "deflate64", "flate2", - "hmac 0.12.1", + "hmac", "lzma-rs", "pbkdf2", "sha1", diff --git a/Cargo.toml b/Cargo.toml index c6d8a2a..e55fa2b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -44,7 +44,6 @@ futures = "0.3.30" log = "0.4.21" regex = "1.10.4" reqwest = "0.12.4" -rusoto_signature = "0.48.0" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" serde_yaml = "0.9.34" diff --git a/src/app.rs b/src/app.rs index 165faea..eb2a4c4 100644 --- a/src/app.rs +++ b/src/app.rs @@ -17,10 +17,9 @@ use ::serde::{Deserialize, Serialize}; use aws_config::{meta::region::RegionProviderChain, BehaviorVersion, 
SdkConfig}; use aws_sdk_dynamodb::types::{AttributeDefinition, TableDescription}; -use aws_types::region::Region as SdkRegion; +use aws_types::region::Region; use backon::ExponentialBuilder; use log::{debug, error, info}; -use rusoto_signature::Region; use serde_yaml::Error as SerdeYAMLError; use std::convert::{TryFrom, TryInto}; use std::time::Duration; @@ -31,7 +30,6 @@ use std::{ fs, io::Error as IOError, path, - str::FromStr, }; use tempfile::NamedTempFile; use thiserror::Error; @@ -254,13 +252,13 @@ impl Context { pub async fn effective_sdk_config(&self) -> SdkConfig { let region = self.effective_region(); - let region_name = region.name(); + let region_name = region.as_ref(); self.effective_sdk_config_with_region(region_name).await } pub async fn effective_sdk_config_with_region(&self, region_name: &str) -> SdkConfig { - let sdk_region = SdkRegion::new(region_name.to_owned()); + let sdk_region = Region::new(region_name.to_owned()); let provider = RegionProviderChain::first_try(sdk_region); aws_config::defaults(BehaviorVersion::v2024_03_28()) @@ -290,7 +288,8 @@ impl Context { // e.g. region set via AWS CLI (check: $ aws configure get region), or environment variable `AWS_DEFAULT_REGION`. // ref: https://docs.rs/rusoto_signature/0.42.0/src/rusoto_signature/region.rs.html#282-290 // ref: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html - Region::default() + // TODO: fix + Region::from_static("us-east-1") } pub fn effective_table_name(&self) -> String { @@ -324,7 +323,7 @@ impl Context { pub fn effective_cache_key(&self) -> String { format!( "{}/{}", - &self.effective_region().name(), + &self.effective_region().as_ref(), &self.effective_table_name() ) } @@ -350,7 +349,7 @@ impl Context { } pub fn with_region(mut self, ec2_region: &str) -> Self { - self.overwritten_region = Some(Region::from_str(ec2_region).unwrap()); + self.overwritten_region = Some(Region::new(ec2_region.to_owned())); self } @@ -366,7 +365,7 @@ impl Context { pub fn is_local(&self) -> bool { let region = self.effective_region(); - region.name() == LOCAL_REGION + region.as_ref() == LOCAL_REGION } } @@ -431,7 +430,7 @@ pub fn region_from_str(s: Option, p: Option) -> Option { let port = p.unwrap_or(8000); match s.as_deref() { Some(LOCAL_REGION) => Some(region_dynamodb_local(port)), - Some(x) => Region::from_str(x).ok(), // convert Result into Option + Some(x) => Some(Region::new(x.to_owned())), // convert Result into Option None => None, } } @@ -514,11 +513,10 @@ pub async fn use_table( match target_table { Some(tbl) => { debug!("describing the table: {}", tbl); - let region = cx.effective_region(); let tbl = tbl.clone(); - let desc: TableDescription = control::describe_table_api(cx, ®ion, tbl.clone()).await; + let desc: TableDescription = control::describe_table_api(cx, tbl.clone()).await; save_using_target(cx, desc)?; - println!("Now you're using the table '{}' ({}).", tbl, ®ion.name()); + println!("Now you're using the table '{}' ({}).", tbl, &cx.effective_region().as_ref()); }, None => bye(1, "You have to specify a table. How to use (1). 'dy use --table mytable', or (2) 'dy use mytable'."), }; @@ -538,14 +536,14 @@ pub fn insert_to_table_cache( let region: Region = cx.effective_region(); debug!( "Under the region '{}', trying to save table schema of '{}'", - ®ion.name(), + ®ion.as_ref(), &table_name ); // retrieve current cache from Context and update target table desc. // key to save the table desc is "/" -- e.g. 
"us-west-2/app_data" let mut cache: Cache = cx.clone().cache.expect("cx should have cache"); - let cache_key = format!("{}/{}", region.name(), table_name); + let cache_key = format!("{}/{}", region.as_ref(), table_name); let mut table_schema_hashmap: HashMap = match cache.tables { Some(ts) => ts, @@ -559,7 +557,7 @@ pub fn insert_to_table_cache( table_schema_hashmap.insert( cache_key, TableSchema { - region: String::from(region.name()), + region: String::from(region.as_ref()), name: table_name, pk: key::typed_key("HASH", &desc).expect("pk should exist"), sk: key::typed_key("RANGE", &desc), @@ -597,13 +595,12 @@ pub async fn table_schema(cx: &Context) -> TableSchema { // TODO: reduce # of DescribeTable API calls. table_schema function is called every time you do something. let desc: TableDescription = control::describe_table_api( cx, - &cx.effective_region(), table_name, /* should be equal to 'cx.effective_table_name()' */ ) .await; TableSchema { - region: String::from(cx.effective_region().name()), + region: String::from(cx.effective_region().as_ref()), name: desc.clone().table_name.unwrap(), pk: key::typed_key("HASH", &desc).expect("pk should exist"), sk: key::typed_key("RANGE", &desc), @@ -681,10 +678,8 @@ fn region_dynamodb_local(port: u32) -> Region { "setting DynamoDB Local '{}' as target region.", &endpoint_url ); - Region::Custom { - name: LOCAL_REGION.to_owned(), - endpoint: endpoint_url, - } + // TODO: fix + Region::from_static(LOCAL_REGION) } fn retrieve_dynein_file_path(file_type: DyneinFileType) -> Result { @@ -726,7 +721,7 @@ fn save_using_target(cx: &mut Context, desc: TableDescription) -> Result<(), Dyn let port: u32 = cx.effective_port(); // retrieve current config from Context and update "using target". - let region = Some(String::from(cx.effective_region().name())); + let region = Some(String::from(cx.effective_region().as_ref())); let config = cx.config.as_mut().expect("cx should have config"); config.using_region = region; config.using_table = Some(table_name); @@ -776,7 +771,7 @@ mod tests { should_strict_for_query: None, retry: None, }; - assert_eq!(cx1.effective_region(), Region::default()); + assert_eq!(cx1.effective_region(), Region::from_static("us-east-1")); // cx1.effective_table_name(); ... exit(1) let cx2 = Context { diff --git a/src/bootstrap.rs b/src/bootstrap.rs index f9c09d1..3992bca 100644 --- a/src/bootstrap.rs +++ b/src/bootstrap.rs @@ -27,7 +27,6 @@ use aws_sdk_dynamodb::{ }; use futures::future::join_all; use log::{debug, error}; -use rusoto_signature::Region; use brotli::Decompressor; use serde_json::Value as JsonValue; @@ -278,24 +277,24 @@ https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/AppendixSampleT println!( "\n\nNow all tables have sample data. Try following commands to play with dynein. Enjoy!" 
); - println!(" $ dy --region {} ls", &cx.effective_region().name()); + println!(" $ dy --region {} ls", &cx.effective_region().as_ref()); println!( " $ dy --region {} desc --table Thread", - &cx.effective_region().name() + &cx.effective_region().as_ref() ); println!( " $ dy --region {} scan --table Thread", - &cx.effective_region().name() + &cx.effective_region().as_ref() ); println!( " $ dy --region {} use --table Thread", - &cx.effective_region().name() + &cx.effective_region().as_ref() ); println!(" $ dy scan"); println!("\nAfter you 'use' a table like above, dynein assume you're using the same region & table, which info is stored at ~/.dynein/config.yml and ~/.dynein/cache.yml"); println!( "Let's move on with the '{}' region you've just 'use'd...", - &cx.effective_region().name() + &cx.effective_region().as_ref() ); println!(" $ dy scan --table Forum"); println!(" $ dy scan -t ProductCatalog"); @@ -317,7 +316,7 @@ async fn prepare_table(cx: &app::Context, table_name: &str, keys: &[&str]) { println!( "Started to create table '{}' in {} region. status: {}", &table_name, - &cx.effective_region().name(), + &cx.effective_region().as_ref(), desc.table_status.unwrap() ); } @@ -325,7 +324,7 @@ async fn prepare_table(cx: &app::Context, table_name: &str, keys: &[&str]) { CreateTableError::ResourceInUseException(_) => println!( "[skip] Table '{}' already exists in {} region, skipping to create new one.", &table_name, - &cx.effective_region().name() + &cx.effective_region().as_ref() ), e => { debug!("CreateTable API call got an error -- {:#?}", e); @@ -339,11 +338,10 @@ async fn prepare_table(cx: &app::Context, table_name: &str, keys: &[&str]) { async fn wait_table_creation(cx: &app::Context, mut processing_tables: Vec<&str>) { debug!("tables in progress: {:?}", processing_tables); loop { - let r: &Region = &cx.effective_region(); let create_table_results = join_all( processing_tables .iter() - .map(|t| control::describe_table_api(cx, r, (*t).to_string())), + .map(|t| control::describe_table_api(cx, (*t).to_string())), ) .await; let statuses: Vec = create_table_results diff --git a/src/control.rs b/src/control.rs index 56aef68..94c0dd4 100644 --- a/src/control.rs +++ b/src/control.rs @@ -26,7 +26,6 @@ use aws_sdk_dynamodb::{ use aws_sdk_ec2::Client as Ec2SdkClient; use futures::future::join_all; use log::{debug, error}; -use rusoto_signature::Region; use std::{ io::{self, Error as IOError, Write}, time, @@ -76,7 +75,7 @@ pub async fn list_tables(cx: app::Context) { println!( "DynamoDB tables in region: {}", - cx.effective_region().name() + cx.effective_region().as_ref() ); if table_names.is_empty() { return println!(" No table in this region."); @@ -85,7 +84,7 @@ pub async fn list_tables(cx: app::Context) { // if let Some(table_in_config) = cx.clone().config.and_then(|x| x.table) { if let Some(table_in_config) = cx.clone().cached_using_table_schema() { for table_name in table_names { - if cx.clone().effective_region().name() == table_in_config.region + if cx.clone().effective_region().as_ref() == table_in_config.region && table_name == table_in_config.name { println!("* {}", table_name); @@ -126,14 +125,13 @@ pub async fn describe_table(cx: app::Context, target_table_to_desc: Option util::print_table_description(new_context.effective_region(), desc), + None | Some("yaml") => util::print_table_description(new_context.effective_region().as_ref(), desc), // Some("raw") => println!("{:#?}", desc), Some(_) => { println!("ERROR: unsupported output type."); @@ -160,10 +158,10 @@ pub async fn 
describe_table(cx: app::Context, target_table_to_desc: Option TableDescription { - let config = cx.effective_sdk_config_with_region(region.name()).await; + let region = cx.effective_region(); + let config = cx.effective_sdk_config_with_region(region.as_ref()).await; let ddb = DynamoDbSdkClient::new(&config); match ddb.describe_table().table_name(table_name).send().await { @@ -189,7 +187,7 @@ pub async fn create_table(cx: app::Context, name: String, given_keys: Vec util::print_table_description(cx.effective_region(), desc), + Ok(desc) => util::print_table_description(cx.effective_region().as_ref(), desc), Err(e) => { debug!("CreateTable API call got an error -- {:#?}", e); error!("{}", e.to_string()); @@ -276,7 +274,7 @@ pub async fn create_index(cx: app::Context, index_name: String, given_keys: Vec< } Ok(res) => { debug!("Returned result: {:#?}", res); - util::print_table_description(cx.effective_region(), res.table_description.unwrap()); + util::print_table_description(cx.effective_region().as_ref(), res.table_description.unwrap()); } } } @@ -290,7 +288,7 @@ pub async fn update_table( ) { // Retrieve TableDescription of the table to update, current (before update) status. let desc: TableDescription = - describe_table_api(&cx, &cx.effective_region(), table_name_to_update.clone()).await; + describe_table_api(&cx, table_name_to_update.clone()).await; // Map given string into "Mode" enum. Note that in cmd.rs clap already limits acceptable values. let switching_to_mode: Option = match mode_string { @@ -368,7 +366,7 @@ pub async fn update_table( ) .await { - Ok(desc) => util::print_table_description(cx.effective_region(), desc), + Ok(desc) => util::print_table_description(cx.effective_region().as_ref(), desc), Err(e) => { debug!("UpdateTable API call got an error -- {:#?}", e); error!("{}", e.to_string()); @@ -599,7 +597,7 @@ pub async fn restore(cx: app::Context, backup_name: Option, restore_name debug!("Returned result: {:#?}", res); println!("Table restoration from: '{}' has been started", &backup_arn); let desc = res.table_description.unwrap(); - util::print_table_description(cx.effective_region(), desc); + util::print_table_description(cx.effective_region().as_ref(), desc); } } } diff --git a/src/util.rs b/src/util.rs index fb113c9..af86e03 100644 --- a/src/util.rs +++ b/src/util.rs @@ -22,7 +22,6 @@ use aws_sdk_dynamodb::types::{ }; use chrono::DateTime; use log::error; -use rusoto_signature::Region; use super::key; @@ -88,13 +87,13 @@ struct PrintSecondaryIndex { /// Receives region (just to show in one line for reference) and TableDescription, /// print them in readable YAML format. NOTE: '~' representes 'null' or 'no value' in YAML syntax. 
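// With the signature below changed to take the region name as a &str, call sites in this patch
// pass it as, for example (sketch): print_table_description(cx.effective_region().as_ref(), desc);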
-pub fn print_table_description(region: Region, desc: TableDescription) { +pub fn print_table_description(region: &str, desc: TableDescription) { let attr_defs = desc.clone().attribute_definitions.unwrap(); let mode = extract_mode(&desc.billing_mode_summary); let print_table: PrintDescribeTable = PrintDescribeTable { name: String::from(&desc.clone().table_name.unwrap()), - region: String::from(region.name()), + region: String::from(region), status: String::from(desc.clone().table_status.unwrap().as_str()), schema: PrintPrimaryKeys { pk: key::typed_key("HASH", &desc) From 1d4e3bf65e78ca102749049e3e30e5fe0af5ea29 Mon Sep 17 00:00:00 2001 From: Ryota Sakamoto Date: Mon, 20 May 2024 13:08:34 +0900 Subject: [PATCH 12/21] test: fix test Signed-off-by: Ryota Sakamoto --- Cargo.lock | 1 + Cargo.toml | 1 + src/app.rs | 26 ++-- src/batch.rs | 19 ++- src/bootstrap.rs | 7 +- src/control.rs | 46 +++---- src/data.rs | 171 ++++++++++++-------------- src/key.rs | 5 +- src/parser.rs | 307 ++++++++++------------------------------------ src/util.rs | 12 +- tests/backup.rs | 2 +- tests/export.rs | 2 +- tests/restore.rs | 2 +- tests/util/mod.rs | 18 +-- 14 files changed, 224 insertions(+), 395 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a3ce9d8..0f7637a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -948,6 +948,7 @@ dependencies = [ "aws-sdk-dynamodb", "aws-sdk-ec2", "aws-smithy-runtime-api", + "aws-smithy-types", "aws-types", "backon", "base64 0.22.0", diff --git a/Cargo.toml b/Cargo.toml index e55fa2b..44eb54d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -34,6 +34,7 @@ aws-config = "1.4.0" aws-sdk-dynamodb = "1.28.0" aws-sdk-ec2 = "1.42.0" aws-smithy-runtime-api = "1.6.0" +aws-smithy-types = "1.1.9" aws-types = "1.2.1" chrono = "0.4" clap = { version = "4.5.4", features = ["derive"] } diff --git a/src/app.rs b/src/app.rs index eb2a4c4..9141a0b 100644 --- a/src/app.rs +++ b/src/app.rs @@ -594,8 +594,7 @@ pub async fn table_schema(cx: &Context) -> TableSchema { Some(table_name) => { // TODO: reduce # of DescribeTable API calls. table_schema function is called every time you do something. let desc: TableDescription = control::describe_table_api( - cx, - table_name, /* should be equal to 'cx.effective_table_name()' */ + cx, table_name, /* should be equal to 'cx.effective_table_name()' */ ) .await; @@ -663,7 +662,7 @@ pub fn index_schemas(desc: &TableDescription) -> Option> { } } -pub fn bye(code: i32, msg: &str) { +pub fn bye(code: i32, msg: &str) -> ! 
{ println!("{}", msg); std::process::exit(code); } @@ -757,7 +756,6 @@ mod tests { use super::*; use std::convert::TryInto; use std::error::Error; - use std::str::FromStr; // to utilize Region::from_str // for unit tests #[test] fn test_context_functions() -> Result<(), Box> { @@ -790,29 +788,35 @@ mod tests { should_strict_for_query: None, retry: Some(RetryConfig::default().try_into()?), }; - assert_eq!(cx2.effective_region(), Region::from_str("ap-northeast-1")?); + assert_eq!( + cx2.effective_region(), + Region::from_static("ap-northeast-1") + ); assert_eq!(cx2.effective_table_name(), String::from("cfgtbl")); let cx3 = Context { - overwritten_region: Some(Region::from_str("us-east-1")?), // --region us-east-1 - overwritten_table_name: Some(String::from("argtbl")), // --table argtbl + overwritten_region: Some(Region::from_static("us-east-1")), // --region us-east-1 + overwritten_table_name: Some(String::from("argtbl")), // --table argtbl ..cx2.clone() }; - assert_eq!(cx3.effective_region(), Region::from_str("us-east-1")?); + assert_eq!(cx3.effective_region(), Region::from_static("us-east-1")); assert_eq!(cx3.effective_table_name(), String::from("argtbl")); let cx4 = Context { - overwritten_region: Some(Region::from_str("us-east-1")?), // --region us-east-1 + overwritten_region: Some(Region::from_static("us-east-1")), // --region us-east-1 ..cx2.clone() }; - assert_eq!(cx4.effective_region(), Region::from_str("us-east-1")?); + assert_eq!(cx4.effective_region(), Region::from_static("us-east-1")); assert_eq!(cx4.effective_table_name(), String::from("cfgtbl")); let cx5 = Context { overwritten_table_name: Some(String::from("argtbl")), // --table argtbl ..cx2.clone() }; - assert_eq!(cx5.effective_region(), Region::from_str("ap-northeast-1")?); + assert_eq!( + cx5.effective_region(), + Region::from_static("ap-northeast-1") + ); assert_eq!(cx5.effective_table_name(), String::from("argtbl")); Ok(()) diff --git a/src/batch.rs b/src/batch.rs index 34a4f8a..60b8bcd 100644 --- a/src/batch.rs +++ b/src/batch.rs @@ -158,7 +158,9 @@ pub fn build_batch_request_items_from_json( ddbjson_attributes_to_attrvals(raw_item); write_requests.push( WriteRequest::builder() - .put_request(PutRequest::builder().set_item(Some(item)).build().unwrap()) + .put_request( + PutRequest::builder().set_item(Some(item)).build().unwrap(), + ) .build(), ); } else { @@ -191,7 +193,9 @@ pub fn build_batch_request_items_from_json( ddbjson_attributes_to_attrvals(raw_key); write_requests.push( WriteRequest::builder() - .delete_request(DeleteRequest::builder().set_key(Some(key)).build().unwrap()) + .delete_request( + DeleteRequest::builder().set_key(Some(key)).build().unwrap(), + ) .build(), ); } else { @@ -353,7 +357,12 @@ pub async fn batch_write_item( validate_item_keys(&attrs, &ts)?; write_requests.push( WriteRequest::builder() - .delete_request(DeleteRequest::builder().set_key(Some(attrs)).build().unwrap()) + .delete_request( + DeleteRequest::builder() + .set_key(Some(attrs)) + .build() + .unwrap(), + ) .build(), ); } @@ -554,7 +563,9 @@ fn ddbjson_val_to_attrval(ddb_jsonval: &JsonValue) -> Option { } else if let Some(x) = ddb_jsonval.get("N") { Some(AttributeValue::N(x.as_str().unwrap().to_string())) } else if let Some(x) = ddb_jsonval.get("B") { - Some(AttributeValue::B(aws_sdk_dynamodb::primitives::Blob::new(json_binary_val_to_bytes(x)))) + Some(AttributeValue::B(aws_sdk_dynamodb::primitives::Blob::new( + json_binary_val_to_bytes(x), + ))) } else if let Some(x) = ddb_jsonval.get("BOOL") { 
Some(AttributeValue::Bool(x.as_bool().unwrap())) } else if let Some(x) = ddb_jsonval.get("SS") { diff --git a/src/bootstrap.rs b/src/bootstrap.rs index 3992bca..4b8748b 100644 --- a/src/bootstrap.rs +++ b/src/bootstrap.rs @@ -200,7 +200,12 @@ see https://github.com/awslabs/dynein#working-with-dynamodb-items for detail .collect(); write_requests.push( WriteRequest::builder() - .put_request(PutRequest::builder().set_item(Some(item_attrval)).build().unwrap()) + .put_request( + PutRequest::builder() + .set_item(Some(item_attrval)) + .build() + .unwrap(), + ) .build(), ); if write_requests.len() == 25 { diff --git a/src/control.rs b/src/control.rs index 94c0dd4..ffe2b36 100644 --- a/src/control.rs +++ b/src/control.rs @@ -123,11 +123,8 @@ pub async fn describe_table(cx: app::Context, target_table_to_desc: Option util::print_table_description(new_context.effective_region().as_ref(), desc), + None | Some("yaml") => { + util::print_table_description(new_context.effective_region().as_ref(), desc) + } // Some("raw") => println!("{:#?}", desc), Some(_) => { println!("ERROR: unsupported output type."); @@ -156,10 +155,7 @@ pub async fn describe_table(cx: app::Context, target_table_to_desc: Option TableDescription { +pub async fn describe_table_api(cx: &app::Context, table_name: String) -> TableDescription { let region = cx.effective_region(); let config = cx.effective_sdk_config_with_region(region.as_ref()).await; let ddb = DynamoDbSdkClient::new(&config); @@ -167,7 +163,7 @@ pub async fn describe_table_api( match ddb.describe_table().table_name(table_name).send().await { Err(e) => { debug!("DescribeTable API call got an error -- {:#?}", e); - error!("{}", e.to_string()); + error!("{}", e.into_service_error().meta()); std::process::exit(1); } Ok(res) => { @@ -190,7 +186,7 @@ pub async fn create_table(cx: app::Context, name: String, given_keys: Vec util::print_table_description(cx.effective_region().as_ref(), desc), Err(e) => { debug!("CreateTable API call got an error -- {:#?}", e); - error!("{}", e.to_string()); + error!("{}", e.into_service_error()); std::process::exit(1); } } @@ -253,7 +249,8 @@ pub async fn create_index(cx: app::Context, index_name: String, given_keys: Vec< .build(), ) .set_provisioned_throughput(None) // TODO: assign default rcu/wcu if base table is Provisioned mode. currently it works only for OnDemand talbe. - .build().unwrap(); + .build() + .unwrap(); let gsi_update = GlobalSecondaryIndexUpdate::builder() .create(create_gsi_action) @@ -274,7 +271,10 @@ pub async fn create_index(cx: app::Context, index_name: String, given_keys: Vec< } Ok(res) => { debug!("Returned result: {:#?}", res); - util::print_table_description(cx.effective_region().as_ref(), res.table_description.unwrap()); + util::print_table_description( + cx.effective_region().as_ref(), + res.table_description.unwrap(), + ); } } } @@ -287,8 +287,7 @@ pub async fn update_table( rcu: Option, ) { // Retrieve TableDescription of the table to update, current (before update) status. - let desc: TableDescription = - describe_table_api(&cx, table_name_to_update.clone()).await; + let desc: TableDescription = describe_table_api(&cx, table_name_to_update.clone()).await; // Map given string into "Mode" enum. Note that in cmd.rs clap already limits acceptable values. 
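// Note (illustrative sketch, not a line of this hunk): the old PROVISIONED / PAY_PER_REQUEST
// string constants are gone; with the Into<BillingMode> impl added to Mode in util.rs, a mode
// can be converted directly when a BillingMode is needed, e.g.:
//     let bm: BillingMode = Mode::OnDemand.into();   // == BillingMode::PayPerRequest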
let switching_to_mode: Option = match mode_string { @@ -330,7 +329,8 @@ pub async fn update_table( .write_capacity_units .unwrap() })) - .build().unwrap(), + .build() + .unwrap(), ), } } @@ -348,7 +348,8 @@ pub async fn update_table( ProvisionedThroughput::builder() .read_capacity_units(rcu.unwrap_or(5)) .write_capacity_units(wcu.unwrap_or(5)) - .build().unwrap(), + .build() + .unwrap(), ), }, }; @@ -427,7 +428,7 @@ pub async fn delete_table(cx: app::Context, name: String, skip_confirmation: boo match ddb.delete_table().table_name(name).send().await { Err(e) => { debug!("DeleteTable API call got an error -- {:#?}", e); - error!("{}", e.to_string()); + error!("{}", e.into_service_error()); std::process::exit(1); } Ok(res) => { @@ -469,7 +470,7 @@ pub async fn backup(cx: app::Context, all_tables: bool) { match req.send().await { Err(e) => { debug!("CreateBackup API call got an error -- {:#?}", e); - app::bye(1, &e.to_string()); + app::bye(1, &e.into_service_error().to_string()); } Ok(res) => { debug!("Returned result: {:#?}", res); @@ -589,6 +590,7 @@ pub async fn restore(cx: app::Context, backup_name: Option, restore_name { Err(e) => { debug!("RestoreTableFromBackup API call got an error -- {:#?}", e); + app::bye(1, &e.into_service_error().to_string()); /* e.g. ... Possibly see "BackupInUse" error: [2020-08-14T13:16:07Z DEBUG dy::control] RestoreTableFromBackup API call got an error -- Service( BackupInUse( "Backup is being used to restore another table: arn:aws:dynamodb:us-west-2:111111111111:table/Music/backup/01527492829107-81b9b3dd",)) */ @@ -636,9 +638,7 @@ async fn list_backups_api(cx: &app::Context, all_tables: bool) -> Vec { debug!("ListBackups API call got an error -- {:#?}", e); - // app::bye(1, &e.to_string()) // it doesn't meet return value requirement. - println!("{}", &e.to_string()); - std::process::exit(1); + app::bye(1, &e.into_service_error().to_string()); } Ok(res) => res .backup_summaries diff --git a/src/data.rs b/src/data.rs index d3d0b55..2644a05 100644 --- a/src/data.rs +++ b/src/data.rs @@ -30,6 +30,7 @@ use aws_sdk_dynamodb::{ Client as DynamoDbSdkClient, }; use log::{debug, error}; +use serde::{ser::SerializeStruct, Serialize, Serializer}; use serde_json::Value as JsonValue; use tabwriter::TabWriter; // use bytes::Bytes; @@ -378,7 +379,7 @@ pub async fn put_item(cx: app::Context, pval: String, sval: Option, item } Err(e) => { debug!("PutItem API call got an error -- {:?}", e); - error!("{}", e.to_string()); + error!("{}", e.into_service_error().meta()); std::process::exit(1); } } @@ -717,6 +718,51 @@ pub fn dispatch_jsonvalue_to_attrval(jv: &JsonValue, enable_set_inference: bool) } } +struct AttributeValueWrapper(AttributeValue); + +impl Serialize for AttributeValueWrapper { + fn serialize(&self, serializer: S) -> Result { + let mut state = serializer.serialize_struct("Object", 1)?; + + match &self.0 { + AttributeValue::S(v) => state.serialize_field("S", v)?, + AttributeValue::N(v) => state.serialize_field("N", v)?, + AttributeValue::Bool(v) => state.serialize_field("BOOL", v)?, + AttributeValue::Null(_) => state.serialize_field("NULL", &true)?, + AttributeValue::Ss(v) => state.serialize_field("SS", v)?, + AttributeValue::Ns(v) => state.serialize_field("NS", v)?, + AttributeValue::B(v) => { + state.serialize_field("B", &aws_smithy_types::base64::encode(v))? 
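// DynamoDB JSON represents binary values as base64 strings, hence the
// aws_smithy_types::base64::encode call above; e.g. the bytes "text"
// serialize to {"B": "dGV4dA=="}.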
+ } + AttributeValue::Bs(v) => state.serialize_field( + "BS", + &v.iter() + .map(aws_smithy_types::base64::encode) + .collect::>(), + )?, + AttributeValue::M(v) => { + state.serialize_field( + "M", + &v.iter() + .map(|(k, v)| (k, AttributeValueWrapper(v.clone()))) + .collect::>(), + )?; + } + AttributeValue::L(v) => state.serialize_field( + "L", + &v.iter() + .map(|item| AttributeValueWrapper(item.clone())) + .collect::>(), + )?, + _ => panic!( + "DynamoDB AttributeValue is not in valid status: {:#?}", + &self.0 + ), + }; + state.end() + } +} + /// `strip_items` calls `strip_item` for each item. fn strip_items( items: &[HashMap], @@ -742,16 +788,14 @@ fn strip_items( /// to something like this: /// /// { "pkA": { "S": "e0a170d9-5ce3-443b-bbce-d0d49c71d151" } -/// -/// by utilizing Serialize derive of the struct: -/// https://docs.rs/rusoto_dynamodb/0.42.0/src/rusoto_dynamodb/generated.rs.html#38 -/// https://docs.rs/rusoto_dynamodb/0.42.0/rusoto_dynamodb/struct.AttributeValue.html fn strip_item(item: &HashMap) -> HashMap { item.iter() - .map(|attr| - // Serialization: `serde_json::to_value(sth: rusoto_dynamodb::AttributeValue)` - // TODO: fix attr.1 - (attr.0.to_string(), serde_json::to_value("attr.1").unwrap())) + .map(|attr| { + ( + attr.0.to_string(), + serde_json::to_value(AttributeValueWrapper(attr.1.to_owned())).unwrap(), + ) + }) .collect() } @@ -1356,10 +1400,7 @@ mod tests { actual.vals, Some(HashMap::from([( ":DYNEIN_ATTRVAL0".to_owned(), - AttributeValue { - s: Some("2020-02-24T22:22:22Z".to_owned()), - ..Default::default() - }, + AttributeValue::S("2020-02-24T22:22:22Z".to_owned()), )])) ); } @@ -1389,7 +1430,7 @@ mod tests { Some(HashMap::from([ ( ":DYNEIN_ATTRVAL0".to_owned(), - AttributeValue::S("0".to_owned()) + AttributeValue::N("0".to_owned()) ), ( ":DYNEIN_ATTRVAL1".to_owned(), @@ -1418,10 +1459,7 @@ mod tests { actual.vals, Some(HashMap::from([( ":DYNEIN_ATTRVAL0".to_owned(), - AttributeValue { - s: Some("value".to_owned()), - ..Default::default() - }, + AttributeValue::S("value".to_owned()), )])) ); } @@ -1446,10 +1484,7 @@ mod tests { actual.vals, Some(HashMap::from([( ":DYNEIN_ATTRVAL0".to_owned(), - AttributeValue { - s: Some("item1".to_owned()), - ..Default::default() - } + AttributeValue::S("item1".to_owned()), )])) ); } @@ -1476,17 +1511,11 @@ mod tests { Some(HashMap::from([ ( ":DYNEIN_ATTRVAL0".to_owned(), - AttributeValue { - n: Some("7".to_owned()), - ..Default::default() - } + AttributeValue::N("7".to_owned()), ), ( ":DYNEIN_ATTRVAL1".to_owned(), - AttributeValue { - n: Some("3".to_owned()), - ..Default::default() - } + AttributeValue::N("3".to_owned()), ), ])) ) @@ -1516,13 +1545,7 @@ mod tests { actual.vals, Some(HashMap::from([( ":DYNEIN_ATTRVAL0".to_owned(), - AttributeValue { - l: Some(vec![AttributeValue { - s: Some("item2".to_owned()), - ..Default::default() - }]), - ..Default::default() - } + AttributeValue::L(vec![AttributeValue::S("item2".to_owned())]), )])) ); } @@ -1551,13 +1574,7 @@ mod tests { actual.vals, Some(HashMap::from([( ":DYNEIN_ATTRVAL0".to_owned(), - AttributeValue { - l: Some(vec![AttributeValue { - s: Some("item2".to_owned()), - ..Default::default() - }]), - ..Default::default() - } + AttributeValue::L(vec![AttributeValue::S("item2".to_owned())]) )])) ); } @@ -1585,10 +1602,7 @@ mod tests { actual.vals, Some(HashMap::from([( ":DYNEIN_ATTRVAL0".to_owned(), - AttributeValue { - n: Some("123".to_owned()), - ..Default::default() - } + AttributeValue::N("123".to_owned()), ),])) ) } @@ -1644,27 +1658,15 @@ mod tests { let actual = 
dispatch_jsonvalue_to_attrval(&string_list, false); assert_eq!( actual, - AttributeValue { - l: Some(vec!( - AttributeValue { - s: Some("+44 1234567".to_owned()), - ..Default::default() - }, - AttributeValue { - s: Some("+44 2345678".to_owned()), - ..Default::default() - } - )), - ..Default::default() - } + AttributeValue::L(vec![ + AttributeValue::S("+44 1234567".to_owned()), + AttributeValue::S("+44 2345678".to_owned()), + ]), ); let actual = dispatch_jsonvalue_to_attrval(&string_list, true); assert_eq!( actual, - AttributeValue { - ss: Some(vec!("+44 1234567".to_owned(), "+44 2345678".to_owned())), - ..Default::default() - } + AttributeValue::Ss(vec!("+44 1234567".to_owned(), "+44 2345678".to_owned())), ); let number_list = r#" @@ -1676,27 +1678,15 @@ mod tests { let actual = dispatch_jsonvalue_to_attrval(&number_list, false); assert_eq!( actual, - AttributeValue { - l: Some(vec!( - AttributeValue { - n: Some("12345".to_owned()), - ..Default::default() - }, - AttributeValue { - n: Some("67890".to_owned()), - ..Default::default() - } - )), - ..Default::default() - } + AttributeValue::L(vec![ + AttributeValue::N("12345".to_owned()), + AttributeValue::N("67890".to_owned()), + ]) ); let actual = dispatch_jsonvalue_to_attrval(&number_list, true); assert_eq!( actual, - AttributeValue { - ns: Some(vec!["12345".to_owned(), "67890".to_owned()]), - ..Default::default() - } + AttributeValue::Ns(vec!["12345".to_owned(), "67890".to_owned()]), ); let mix_list = r#" @@ -1709,19 +1699,10 @@ mod tests { let actual = dispatch_jsonvalue_to_attrval(&mix_list, flag); assert_eq!( actual, - AttributeValue { - l: Some(vec!( - AttributeValue { - s: Some("text".to_owned()), - ..Default::default() - }, - AttributeValue { - n: Some("1234".to_owned()), - ..Default::default() - } - )), - ..Default::default() - } + AttributeValue::L(vec![ + AttributeValue::S("text".to_owned()), + AttributeValue::N("1234".to_owned()), + ]) ); } } diff --git a/src/key.rs b/src/key.rs index 96a9350..5c907d0 100644 --- a/src/key.rs +++ b/src/key.rs @@ -117,11 +117,12 @@ pub fn typed_key_for_schema( name: key.clone().attribute_name, // kind should be one of S/N/B, Which can be retrieved from AttributeDefinition's attribute_type. 
kind: KeyType::from_str( - &attrs + attrs .iter() .find(|at| at.attribute_name == key.attribute_name) .expect("primary key should be in AttributeDefinition.") - .attribute_type.as_str(), + .attribute_type + .as_str(), ) .unwrap(), }) diff --git a/src/parser.rs b/src/parser.rs index e4f21f5..febd82f 100644 --- a/src/parser.rs +++ b/src/parser.rs @@ -15,7 +15,7 @@ */ use crate::pest::Parser; -use aws_sdk_dynamodb::types::AttributeValue; +use aws_sdk_dynamodb::{primitives::Blob, types::AttributeValue}; use base64::engine::{general_purpose, DecodePaddingMode, GeneralPurpose, GeneralPurposeConfig}; use base64::{DecodeError, Engine}; use bytes::Bytes; @@ -456,7 +456,7 @@ impl AttrVal { AttrVal::S(str) => AttributeValue::S(str), AttrVal::Bool(boolean) => AttributeValue::Bool(boolean), AttrVal::Null(isnull) => AttributeValue::Null(isnull), - AttrVal::B(binary) => AttributeValue::B(aws_sdk_dynamodb::primitives::Blob::new(binary)), + AttrVal::B(binary) => AttributeValue::B(Blob::new(binary)), AttrVal::L(list) => AttributeValue::L( list.into_iter() .map(|x| x.convert_attribute_value()) @@ -469,7 +469,7 @@ impl AttrVal { ), AttrVal::NS(list) => AttributeValue::Ns(list), AttrVal::SS(list) => AttributeValue::Ss(list), - AttrVal::BS(list) => AttributeValue::Bs(list.into_iter().map(aws_sdk_dynamodb::primitives::Blob::new).collect()), + AttrVal::BS(list) => AttributeValue::Bs(list.into_iter().map(Blob::new).collect()), } } } @@ -1444,7 +1444,11 @@ impl DyneinParser { let result = GeneratedParser::parse(Rule::map_literal, exp); match result { Ok(mut pair) => { - let item = parse_literal(pair.next().unwrap())?.convert_attribute_value().as_m().unwrap().to_owned(); + let item = parse_literal(pair.next().unwrap())? + .convert_attribute_value() + .as_m() + .unwrap() + .to_owned(); // content must be map literal let mut image = match initial_item { Some(init_item) => init_item, @@ -2384,92 +2388,44 @@ mod tests { do_test!( AttrVal::N("123".to_owned()), - AttributeValue { - n: Some("123".to_owned()), - ..Default::default() - } + AttributeValue::N("123".to_owned()) ); do_test!( AttrVal::S("string".to_owned()), - AttributeValue { - s: Some("string".to_owned()), - ..Default::default() - } - ); - do_test!( - AttrVal::Bool(true), - AttributeValue { - bool: Some(true), - ..Default::default() - } - ); - do_test!( - AttrVal::Bool(false), - AttributeValue { - bool: Some(false), - ..Default::default() - } - ); - do_test!( - AttrVal::Null(true), - AttributeValue { - null: Some(true), - ..Default::default() - } + AttributeValue::S("string".to_owned()) ); + do_test!(AttrVal::Bool(true), AttributeValue::Bool(true)); + do_test!(AttrVal::Bool(false), AttributeValue::Bool(false)); + do_test!(AttrVal::Null(true), AttributeValue::Null(true)); do_test!( AttrVal::B(Bytes::from_static(b"123")), - AttributeValue { - b: Some(Bytes::from_static(b"123")), - ..Default::default() - } + AttributeValue::B(Blob::new(Bytes::from_static(b"123"))) ); do_test!( AttrVal::L(vec![AttrVal::N("123".to_owned())]), - AttributeValue { - l: Some(vec![AttributeValue { - n: Some("123".to_owned()), - ..Default::default() - }]), - ..Default::default() - } + AttributeValue::L(vec![AttributeValue::N("123".to_owned())]) ); do_test!( AttrVal::M(HashMap::from([( "m".to_owned(), AttrVal::N("123".to_owned()), )])), - AttributeValue { - m: Some(HashMap::from([( - "m".to_owned(), - AttributeValue { - n: Some("123".to_owned()), - ..Default::default() - } - )])), - ..Default::default() - } + AttributeValue::M(HashMap::from([( + "m".to_owned(), + 
AttributeValue::N("123".to_owned()) + )])) ); do_test!( AttrVal::NS(vec!["123".to_owned()]), - AttributeValue { - ns: Some(vec!["123".to_owned()]), - ..Default::default() - } + AttributeValue::Ns(vec!["123".to_owned()]) ); do_test!( AttrVal::SS(vec!["123".to_owned()]), - AttributeValue { - ss: Some(vec!["123".to_owned()]), - ..Default::default() - } + AttributeValue::Ss(vec!["123".to_owned()]) ); do_test!( AttrVal::BS(vec![Bytes::from_static(b"123")]), - AttributeValue { - bs: Some(vec![Bytes::from_static(b"123")]), - ..Default::default() - } + AttributeValue::Bs(vec![Blob::new(Bytes::from_static(b"123"))]) ); } @@ -2488,13 +2444,7 @@ mod tests { ExpressionResult { exp: format!("{}={}", attr_name_ref(0), attr_val_ref(0)), names: HashMap::from([(attr_name_ref(0), "id".to_owned())]), - values: HashMap::from([( - attr_val_ref(0), - AttributeValue { - n: Some("1".to_owned()), - ..Default::default() - } - )]), + values: HashMap::from([(attr_val_ref(0), AttributeValue::N("1".to_owned()))]), } ); @@ -2510,13 +2460,7 @@ mod tests { ExpressionResult { exp: format!("{}={}", attr_name_ref(0), attr_val_ref(0)), names: HashMap::from([(attr_name_ref(0), "id".to_owned())]), - values: HashMap::from([( - attr_val_ref(0), - AttributeValue { - s: Some("1".to_owned()), - ..Default::default() - } - )]), + values: HashMap::from([(attr_val_ref(0), AttributeValue::S("1".to_owned()))]), } ); @@ -2532,13 +2476,7 @@ mod tests { ExpressionResult { exp: format!("{}>{}", attr_name_ref(0), attr_val_ref(0)), names: HashMap::from([(attr_name_ref(0), "id".to_owned())]), - values: HashMap::from([( - attr_val_ref(0), - AttributeValue { - s: Some("1".to_owned()), - ..Default::default() - } - )]), + values: HashMap::from([(attr_val_ref(0), AttributeValue::S("1".to_owned()))]), } ); @@ -2554,13 +2492,7 @@ mod tests { ExpressionResult { exp: format!("{}>={}", attr_name_ref(0), attr_val_ref(0)), names: HashMap::from([(attr_name_ref(0), "id".to_owned())]), - values: HashMap::from([( - attr_val_ref(0), - AttributeValue { - n: Some("1".to_owned()), - ..Default::default() - } - )]), + values: HashMap::from([(attr_val_ref(0), AttributeValue::N("1".to_owned()))]), } ); @@ -2576,13 +2508,7 @@ mod tests { ExpressionResult { exp: format!("{}<{}", attr_name_ref(0), attr_val_ref(0)), names: HashMap::from([(attr_name_ref(0), "id".to_owned())]), - values: HashMap::from([( - attr_val_ref(0), - AttributeValue { - s: Some("1 2".to_owned()), - ..Default::default() - } - )]), + values: HashMap::from([(attr_val_ref(0), AttributeValue::S("1 2".to_owned()))]), } ); @@ -2598,13 +2524,7 @@ mod tests { ExpressionResult { exp: format!("{}<={}", attr_name_ref(0), attr_val_ref(0)), names: HashMap::from([(attr_name_ref(0), "id".to_owned())]), - values: HashMap::from([( - attr_val_ref(0), - AttributeValue { - n: Some("-1e5".to_owned()), - ..Default::default() - } - )]), + values: HashMap::from([(attr_val_ref(0), AttributeValue::N("-1e5".to_owned()))]), } ); @@ -2628,17 +2548,11 @@ mod tests { values: HashMap::from([ ( attr_val_ref(0), - AttributeValue { - b: Some(Bytes::from_static(b"1")), - ..Default::default() - } + AttributeValue::B(Blob::new(Bytes::from_static(b"1"))) ), ( attr_val_ref(1), - AttributeValue { - b: Some(Bytes::from_static(b"2")), - ..Default::default() - } + AttributeValue::B(Blob::new(Bytes::from_static(b"2"))) ) ]), } @@ -2658,10 +2572,7 @@ mod tests { names: HashMap::from([(attr_name_ref(0), "id".to_owned())]), values: HashMap::from([( attr_val_ref(0), - AttributeValue { - s: Some("id1234#e1234".to_owned()), - ..Default::default() - } + 
AttributeValue::S("id1234#e1234".to_owned()) )]), } ); @@ -2684,10 +2595,7 @@ mod tests { names: HashMap::from([(attr_name_ref(0), "id".to_owned())]), values: HashMap::from([( attr_val_ref(0), - AttributeValue { - s: Some(expected_val[i].to_owned()), - ..Default::default() - } + AttributeValue::S(expected_val[i].to_owned()) )]), } ); @@ -2738,20 +2646,8 @@ mod tests { ), names: HashMap::from([(attr_name_ref(0), "id".to_owned())]), values: HashMap::from([ - ( - attr_val_ref(0), - AttributeValue { - s: Some("1".to_owned()), - ..Default::default() - } - ), - ( - attr_val_ref(1), - AttributeValue { - s: Some("2".to_owned()), - ..Default::default() - } - ) + (attr_val_ref(0), AttributeValue::S("1".to_owned())), + (attr_val_ref(1), AttributeValue::S("2".to_owned())) ]), } ); @@ -2770,10 +2666,7 @@ mod tests { names: HashMap::from([(attr_name_ref(0), "id".to_owned())]), values: HashMap::from([( attr_val_ref(0), - AttributeValue { - s: Some("id12#i-12@i-12/i-12".to_owned()), - ..Default::default() - } + AttributeValue::S("id12#i-12@i-12/i-12".to_owned()) )]), } ); @@ -2790,13 +2683,7 @@ mod tests { ExpressionResult { exp: format!("{}={}", attr_name_ref(0), attr_val_ref(0),), names: HashMap::from([(attr_name_ref(0), "id".to_owned())]), - values: HashMap::from([( - attr_val_ref(0), - AttributeValue { - s: Some("123".to_owned()), - ..Default::default() - } - )]), + values: HashMap::from([(attr_val_ref(0), AttributeValue::S("123".to_owned()))]), } ); @@ -2812,13 +2699,7 @@ mod tests { ExpressionResult { exp: format!("{}={}", attr_name_ref(0), attr_val_ref(0),), names: HashMap::from([(attr_name_ref(0), "id".to_owned())]), - values: HashMap::from([( - attr_val_ref(0), - AttributeValue { - n: Some("123".to_owned()), - ..Default::default() - } - )]), + values: HashMap::from([(attr_val_ref(0), AttributeValue::N("123".to_owned()))]), } ); } @@ -2845,90 +2726,42 @@ mod tests { ) .unwrap(), HashMap::from([ - ( - "k0".to_owned(), - AttributeValue { - null: Some(true), - ..Default::default() - } - ), + ("k0".to_owned(), AttributeValue::Null(true)), ( "k1".to_owned(), - AttributeValue { - l: Some(Vec::from([ - AttributeValue { - n: Some("1".to_owned()), - ..Default::default() - }, - AttributeValue { - n: Some("2".to_owned()), - ..Default::default() - }, - AttributeValue { - n: Some("3".to_owned()), - ..Default::default() - }, - AttributeValue { - s: Some("str".to_owned()), - ..Default::default() - }, - ])), - ..Default::default() - } - ), - ( - "k2".to_owned(), - AttributeValue { - s: Some("str".to_owned()), - ..Default::default() - } + AttributeValue::L(vec![ + AttributeValue::N("1".to_owned()), + AttributeValue::N("2".to_owned()), + AttributeValue::N("3".to_owned()), + AttributeValue::S("str".to_owned()), + ]) ), + ("k2".to_owned(), AttributeValue::S("str".to_owned())), ( "k3".to_owned(), - AttributeValue { - m: Some(HashMap::from([ - ( - "l0".to_owned(), - AttributeValue { - ns: Some(vec!["1".to_owned(), "2".to_owned()]), - ..Default::default() - } - ), - ( - "l1".to_owned(), - AttributeValue { - ss: Some(vec!["str1".to_owned(), "str2".to_owned()]), - ..Default::default() - } - ), - ( - "l2".to_owned(), - AttributeValue { - bool: Some(true), - ..Default::default() - } - ) - ])), - ..Default::default() - } + AttributeValue::M(HashMap::from([ + ( + "l0".to_owned(), + AttributeValue::Ns(vec!["1".to_owned(), "2".to_owned()]) + ), + ( + "l1".to_owned(), + AttributeValue::Ss(vec!["str1".to_owned(), "str2".to_owned()]) + ), + ("l2".to_owned(), AttributeValue::Bool(true)) + ])) ), ( "k4".to_owned(), - AttributeValue 
{ - b: Some(Bytes::from_static(b"\x20")), - ..Default::default() - } + AttributeValue::B(Blob::new(Bytes::from_static(b"\x20"))) ), ( "k5".to_owned(), - AttributeValue { - bs: Some(vec!( - Bytes::from_static(b"This"), - Bytes::from_static(b"bin"), - Bytes::from_static(b"file"), - )), - ..Default::default() - } + AttributeValue::Bs(vec![ + Blob::new(Bytes::from_static(b"This")), + Blob::new(Bytes::from_static(b"bin")), + Blob::new(Bytes::from_static(b"file")) + ]) ) ]) ) @@ -2942,13 +2775,7 @@ mod tests { ExpressionResult { exp: format!("{}={}", attr_name_ref(0), attr_val_ref(0)), names: HashMap::from([(attr_name_ref(0), "id".to_owned())]), - values: HashMap::from([( - attr_val_ref(0), - AttributeValue { - s: Some("string".to_owned()), - ..Default::default() - } - )]), + values: HashMap::from([(attr_val_ref(0), AttributeValue::S("string".to_owned()))]), } ); } @@ -2984,13 +2811,7 @@ mod tests { fn test_set_and_remove_action() { let mut parser = DyneinParser::new(); let names = HashMap::from([(attr_name_ref(0), "p0".to_owned())]); - let values = HashMap::from([( - attr_val_ref(0), - AttributeValue { - s: Some("string".to_owned()), - ..Default::default() - }, - )]); + let values = HashMap::from([(attr_val_ref(0), AttributeValue::S("string".to_owned()))]); assert_eq!( parser.parse_set_action("p0 = \"string\"").unwrap(), ExpressionResult { diff --git a/src/util.rs b/src/util.rs index af86e03..9cb1cdc 100644 --- a/src/util.rs +++ b/src/util.rs @@ -57,9 +57,9 @@ pub enum Mode { OnDemand, } -impl Into for Mode { - fn into(self) -> BillingMode { - match self { +impl From for BillingMode { + fn from(mode: Mode) -> Self { + match mode { Mode::Provisioned => BillingMode::Provisioned, Mode::OnDemand => BillingMode::PayPerRequest, } @@ -142,7 +142,8 @@ pub fn generate_essential_key_definitions( } else { KeyType::Range }) - .build().unwrap(), + .build() + .unwrap(), ); // If data type of key is omitted, dynein assumes it as String (S). @@ -154,7 +155,8 @@ pub fn generate_essential_key_definitions( } else { ScalarAttributeType::S }) - .build().unwrap(), + .build() + .unwrap(), ) } (key_schema, attribute_definitions) diff --git a/tests/backup.rs b/tests/backup.rs index 148ac36..d056683 100644 --- a/tests/backup.rs +++ b/tests/backup.rs @@ -38,7 +38,7 @@ async fn test_backup() -> Result<(), Box> { .failure() .stdout(predicate::str::contains( // This error message only happens on DynamoDB Local which does not support backup feature. - "com.amazonaws.dynamodb.v20120810#UnknownOperationException", + "unhandled error (UnknownOperationException)", )); Ok(()) diff --git a/tests/export.rs b/tests/export.rs index 8ea97e5..1f39fa7 100644 --- a/tests/export.rs +++ b/tests/export.rs @@ -62,7 +62,7 @@ async fn test_export_empty_table() -> Result<(), Box> { // TODO: this behavior should be fixed by the issue // https://github.com/awslabs/dynein/issues/152 cmd.assert().failure().stderr(predicate::str::contains( - "thread 'main' panicked at src/transfer.rs:478:20:\nattempt to subtract with overflow", + "thread 'main' panicked at src/transfer.rs:481:20:\nattempt to subtract with overflow", )); Ok(()) } diff --git a/tests/restore.rs b/tests/restore.rs index e73fa85..3cef50a 100644 --- a/tests/restore.rs +++ b/tests/restore.rs @@ -38,7 +38,7 @@ async fn test_restore() -> Result<(), Box> { .failure() .stdout(predicate::str::contains( // This error message only happens on DynamoDB Local which does not support backup feature. 
- "com.amazonaws.dynamodb.v20120810#UnknownOperationException", + "unhandled error (UnknownOperationException)", )); Ok(()) diff --git a/tests/util/mod.rs b/tests/util/mod.rs index 76efa3c..f3a1db4 100644 --- a/tests/util/mod.rs +++ b/tests/util/mod.rs @@ -14,16 +14,17 @@ * limitations under the License. */ -use assert_cmd::prelude::*; // Add methods on commands +use assert_cmd::prelude::*; use std::env; use std::process::Command; // Run programs // use assert_cmd::cmd::Command; // Run programs - it seems to be equal to "use assert_cmd::prelude::* + use std::process::Command" +use aws_config::SdkConfig; +use aws_sdk_dynamodb::Client as DynamoDbSdkClient; +use aws_types::region::Region; use once_cell::sync::Lazy; use rand::{distributions::Alphanumeric, Rng}; use regex::bytes::Regex; -use rusoto_core::Region; -use rusoto_dynamodb::{DynamoDb, DynamoDbClient}; use serde_json::Value; use std::io::{self, Write}; // Used when check results by printing to stdout use std::path::{Path, PathBuf}; @@ -339,14 +340,15 @@ async fn setup_container(port: i32) -> Result<(), Box> { // Wait dynamodb-local let health_check_url = format!("http://localhost:{}", port); - let ddb = DynamoDbClient::new(Region::Custom { - name: "local".to_owned(), - endpoint: health_check_url, - }); + // let ddb = DynamoDbClient::new(Region::Custom { + // name: "local".to_owned(), + // endpoint: health_check_url, + // }); + let ddb = DynamoDbSdkClient::new(&SdkConfig::builder().region(Region::new("local")).build()); let max_retries = 5; let mut attempts = 0; loop { - match ddb.list_tables(Default::default()).await { + match ddb.list_tables().send().await { Ok(_result) => { println!("ListTables API succeeded."); break; From 6b79381b6d9498c2501202c310f3aadcd344601b Mon Sep 17 00:00:00 2001 From: Ryota Sakamoto Date: Mon, 20 May 2024 22:22:12 +0900 Subject: [PATCH 13/21] chore: remove cargo audit ignore Signed-off-by: Ryota Sakamoto --- .github/workflows/audit.yml | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/.github/workflows/audit.yml b/.github/workflows/audit.yml index 531b4c4..e5d85bd 100644 --- a/.github/workflows/audit.yml +++ b/.github/workflows/audit.yml @@ -8,12 +8,7 @@ env: # We treat all findings as error to notify its status for maintainers. # If we need to temporarily suppress the error, we use `--ignore` option with justification. CARGO_AUDIT_BASE_FLAGS: --quiet -D warnings -D unmaintained -D unsound -D yanked - - # RUSTSEC-2022-0071 - # We are working to migrate from Rusoto to AWS SDK for Rust. - # To emphasize other issues and because of not affecting the customer immediately, we disable this error. - # See: https://github.com/awslabs/dynein/pull/126 - CARGO_AUDIT_IGNORE_FLAGS: --ignore RUSTSEC-2022-0071 + CARGO_AUDIT_IGNORE_FLAGS: permissions: {} From f486f577575858b50667a64482783aa7e3fe3181 Mon Sep 17 00:00:00 2001 From: Ryota Sakamoto Date: Mon, 20 May 2024 23:05:46 +0900 Subject: [PATCH 14/21] chore: remove rusoto doc link Signed-off-by: Ryota Sakamoto --- README.md | 2 +- src/app.rs | 1 - src/batch.rs | 2 +- src/data.rs | 2 -- src/key.rs | 1 - src/util.rs | 1 - 6 files changed, 2 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index e60e7d3..5363d31 100644 --- a/README.md +++ b/README.md @@ -120,7 +120,7 @@ You can move the binary file named "dy" to anywhere under your `$PATH`. ## Prerequisites - AWS Credentials -First of all, please make sure you've already configured AWS Credentials in your environment. 
dynein depends on [rusoto](https://github.com/rusoto/rusoto) and rusoto [can utilize standard AWS credential toolchains](https://github.com/rusoto/rusoto/blob/master/AWS-CREDENTIALS.md) - for example `~/.aws/credentials` file, [IAM EC2 Instance Profile](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html), or environment variables such as `AWS_DEFAULT_REGION / AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY / AWS_PROFILE`. +First of all, please make sure you've already configured AWS Credentials in your environment. dynein depends on [aws-sdk-rust](https://github.com/awslabs/aws-sdk-rust) - for example `~/.aws/credentials` file, [IAM EC2 Instance Profile](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html), or environment variables such as `AWS_DEFAULT_REGION / AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY / AWS_PROFILE`. One convenient way to check if your AWS credential configuration is ok to use dynein is to install and try to execute [AWS CLI](https://aws.amazon.com/cli/) in your environment (e.g. `$ aws dynamodb list-tables`). Once you've [configured AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html), you should be ready to use dynein. diff --git a/src/app.rs b/src/app.rs index 9141a0b..b3932e4 100644 --- a/src/app.rs +++ b/src/app.rs @@ -286,7 +286,6 @@ impl Context { // otherwise, come down to "default region" of your environment. // e.g. region set via AWS CLI (check: $ aws configure get region), or environment variable `AWS_DEFAULT_REGION`. - // ref: https://docs.rs/rusoto_signature/0.42.0/src/rusoto_signature/region.rs.html#282-290 // ref: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html // TODO: fix Region::from_static("us-east-1") diff --git a/src/batch.rs b/src/batch.rs index 60b8bcd..0fab925 100644 --- a/src/batch.rs +++ b/src/batch.rs @@ -104,7 +104,7 @@ Public functions /// HashMap< /// String, ... table name. Batch requests can contain multiple tables as targets. /// Vec< ... requests for one table should be gathered. -/// WriteRequest ... https://docs.rs/rusoto_dynamodb/0.42.0/rusoto_dynamodb/struct.WriteRequest.html +/// WriteRequest ... https://docs.rs/aws-sdk-dynamodb/1.28.0/aws_sdk_dynamodb/types/struct.WriteRequest.html /// either: /// - put_request (Option), where PutRequest { item: HashMap } /// ... it should be same as "item" parameter used in PutItem. diff --git a/src/data.rs b/src/data.rs index 2644a05..30dda97 100644 --- a/src/data.rs +++ b/src/data.rs @@ -617,7 +617,6 @@ fn identify_target( // top 3 scalar types that can be used for primary keys. // ref: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.CoreComponents.html // https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes -// https://rusoto.github.io/rusoto/rusoto_dynamodb/struct.AttributeValue.html fn build_attrval_scalar(_ktype: &str, _kval: &str) -> AttributeValue { debug!( "Constructing an AttributeValue for (type: {:?}, val: {:?})", @@ -653,7 +652,6 @@ fn build_attrval_set(ktype: &str, kval: &[JsonValue]) -> AttributeValue { .collect(), ), // NOTE: Currently BS is not supported. 
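The README hunk above now points readers at aws-sdk-rust, which resolves credentials and region from the standard chain (shared credentials file, AWS_* environment variables, instance profile). As a hedged illustration only, not part of the patch, the sketch below shows the equivalent of the suggested `aws dynamodb list-tables` smoke test using only calls this series already introduces (`aws_config::defaults`, `BehaviorVersion::v2024_03_28`, `Client::new`); it assumes a Tokio runtime, which dynein already uses:

    use aws_config::BehaviorVersion;
    use aws_sdk_dynamodb::Client;

    #[tokio::main]
    async fn main() -> Result<(), Box<dyn std::error::Error>> {
        // Credentials and region come from the standard chain:
        // ~/.aws/credentials, AWS_* environment variables, or an instance profile.
        let config = aws_config::defaults(BehaviorVersion::v2024_03_28())
            .load()
            .await;
        let ddb = Client::new(&config);

        // Connectivity check, equivalent to `aws dynamodb list-tables`.
        let out = ddb.list_tables().send().await?;
        println!("{:?}", out.table_names());
        Ok(())
    }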
- // "BS": Vec (serialize_with = "::rusoto_core::serialization::SerdeBlobList::serialize_blob_list") _ => panic!("ERROR: Unknown DynamoDB Data Type: {}", ktype), } } diff --git a/src/key.rs b/src/key.rs index 5c907d0..48cdc03 100644 --- a/src/key.rs +++ b/src/key.rs @@ -34,7 +34,6 @@ impl Key { } /// Restrict acceptable DynamoDB data types for primary keys. -/// enum witn methods/FromStr ref: https://docs.rs/rusoto_signature/0.42.0/src/rusoto_signature/region.rs.html#226-258 #[derive(Serialize, Deserialize, PartialEq, Debug, Clone)] pub enum KeyType { S, diff --git a/src/util.rs b/src/util.rs index 9cb1cdc..8511a52 100644 --- a/src/util.rs +++ b/src/util.rs @@ -30,7 +30,6 @@ struct / enum / const ================================================= */ // TableDescription doesn't implement Serialize -// https://docs.rs/rusoto_dynamodb/0.42.0/rusoto_dynamodb/struct.TableDescription.html #[derive(Serialize, Deserialize, Debug)] struct PrintDescribeTable { name: String, From 8906d956615ad7ca23dfbe75f4248a0ee1703734 Mon Sep 17 00:00:00 2001 From: Ryota Sakamoto Date: Mon, 20 May 2024 23:16:11 +0900 Subject: [PATCH 15/21] fix: fix setup_container on local Signed-off-by: Ryota Sakamoto --- tests/util/mod.rs | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/tests/util/mod.rs b/tests/util/mod.rs index f3a1db4..eea0454 100644 --- a/tests/util/mod.rs +++ b/tests/util/mod.rs @@ -339,12 +339,13 @@ async fn setup_container(port: i32) -> Result<(), Box> { io::stderr().write_all(&output.stderr).unwrap(); // Wait dynamodb-local - let health_check_url = format!("http://localhost:{}", port); - // let ddb = DynamoDbClient::new(Region::Custom { - // name: "local".to_owned(), - // endpoint: health_check_url, - // }); - let ddb = DynamoDbSdkClient::new(&SdkConfig::builder().region(Region::new("local")).build()); + // https://docs.aws.amazon.com/sdk-for-rust/latest/dg/dynamodb-local.html + let config = aws_sdk_dynamodb::config::Builder::from( + &SdkConfig::builder().region(Region::new("local")).build(), + ) + .endpoint_url(format!("http://localhost:{}", port)) + .build(); + let ddb = DynamoDbSdkClient::from_conf(config); let max_retries = 5; let mut attempts = 0; loop { From caa20a2ffb8f367e7e44b11a2cfea6123c7d2325 Mon Sep 17 00:00:00 2001 From: Ryota Sakamoto Date: Mon, 20 May 2024 23:40:58 +0900 Subject: [PATCH 16/21] chore: remove aws-types dependency Signed-off-by: Ryota Sakamoto --- Cargo.lock | 1 - Cargo.toml | 1 - src/app.rs | 3 +-- tests/util/mod.rs | 3 +-- 4 files changed, 2 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0f7637a..14ae975 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -949,7 +949,6 @@ dependencies = [ "aws-sdk-ec2", "aws-smithy-runtime-api", "aws-smithy-types", - "aws-types", "backon", "base64 0.22.0", "brotli", diff --git a/Cargo.toml b/Cargo.toml index 44eb54d..5968666 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -35,7 +35,6 @@ aws-sdk-dynamodb = "1.28.0" aws-sdk-ec2 = "1.42.0" aws-smithy-runtime-api = "1.6.0" aws-smithy-types = "1.1.9" -aws-types = "1.2.1" chrono = "0.4" clap = { version = "4.5.4", features = ["derive"] } dialoguer = "0.11.0" diff --git a/src/app.rs b/src/app.rs index b3932e4..dddc006 100644 --- a/src/app.rs +++ b/src/app.rs @@ -15,9 +15,8 @@ */ use ::serde::{Deserialize, Serialize}; -use aws_config::{meta::region::RegionProviderChain, BehaviorVersion, SdkConfig}; +use aws_config::{meta::region::RegionProviderChain, BehaviorVersion, Region, SdkConfig}; use aws_sdk_dynamodb::types::{AttributeDefinition, TableDescription}; 
-use aws_types::region::Region; use backon::ExponentialBuilder; use log::{debug, error, info}; use serde_yaml::Error as SerdeYAMLError; diff --git a/tests/util/mod.rs b/tests/util/mod.rs index eea0454..b6a9f04 100644 --- a/tests/util/mod.rs +++ b/tests/util/mod.rs @@ -19,9 +19,8 @@ use std::env; use std::process::Command; // Run programs // use assert_cmd::cmd::Command; // Run programs - it seems to be equal to "use assert_cmd::prelude::* + use std::process::Command" -use aws_config::SdkConfig; +use aws_config::{Region, SdkConfig}; use aws_sdk_dynamodb::Client as DynamoDbSdkClient; -use aws_types::region::Region; use once_cell::sync::Lazy; use rand::{distributions::Alphanumeric, Rng}; use regex::bytes::Regex; From 704cb0c7841743f8fc394af6a24caca8c28a33d8 Mon Sep 17 00:00:00 2001 From: Ryota Sakamoto Date: Tue, 21 May 2024 00:25:53 +0900 Subject: [PATCH 17/21] fix: get default region from RegionProviderChain Signed-off-by: Ryota Sakamoto --- src/app.rs | 67 +++++++++++++++++++++++++++++------------------- src/bootstrap.rs | 26 ++++++++----------- src/control.rs | 31 ++++++++++------------ 3 files changed, 64 insertions(+), 60 deletions(-) diff --git a/src/app.rs b/src/app.rs index dddc006..75a21d4 100644 --- a/src/app.rs +++ b/src/app.rs @@ -250,7 +250,7 @@ impl Context { } pub async fn effective_sdk_config(&self) -> SdkConfig { - let region = self.effective_region(); + let region = self.effective_region().await; let region_name = region.as_ref(); self.effective_sdk_config_with_region(region_name).await @@ -266,7 +266,7 @@ impl Context { .await } - pub fn effective_region(&self) -> Region { + pub async fn effective_region(&self) -> Region { // if region is overwritten by --region comamnd, use it. if let Some(ow_region) = &self.overwritten_region { return ow_region.to_owned(); @@ -286,8 +286,11 @@ impl Context { // otherwise, come down to "default region" of your environment. // e.g. region set via AWS CLI (check: $ aws configure get region), or environment variable `AWS_DEFAULT_REGION`. // ref: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html - // TODO: fix - Region::from_static("us-east-1") + let region_provider = RegionProviderChain::default_provider(); + region_provider + .region() + .await + .unwrap_or(Region::from_static("us-east-1")) } pub fn effective_table_name(&self) -> String { @@ -318,15 +321,15 @@ impl Context { 8000 } - pub fn effective_cache_key(&self) -> String { + pub async fn effective_cache_key(&self) -> String { format!( "{}/{}", - &self.effective_region().as_ref(), + &self.effective_region().await.as_ref(), &self.effective_table_name() ) } - pub fn cached_using_table_schema(&self) -> Option { + pub async fn cached_using_table_schema(&self) -> Option { // return None if table name is not specified in both config and option. 
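The `effective_region` change above delegates the final fallback to the SDK's `RegionProviderChain`. Read in isolation, that fallback amounts to the sketch below; the free-standing `default_region` helper is made up for illustration and is not part of dynein:

    use aws_config::meta::region::RegionProviderChain;
    use aws_config::Region;

    // Ask the default provider chain (env vars, profile, IMDS, ...) for a
    // region; fall back to us-east-1 if nothing is configured anywhere.
    async fn default_region() -> Region {
        RegionProviderChain::default_provider()
            .region()
            .await
            .unwrap_or(Region::from_static("us-east-1"))
    }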
if self.overwritten_table_name.is_none() { match self.config.to_owned() { @@ -341,7 +344,7 @@ impl Context { None => return None, // return None for this "cached_using_table_schema" function }; let found_table_schema: Option<&TableSchema> = - cached_tables.get(&self.effective_cache_key()); + cached_tables.get(&self.effective_cache_key().await); // NOTE: HashMap's `get` returns a reference to the value / (&self, k: &Q) -> Option<&V> found_table_schema.map(|schema| schema.to_owned()) } @@ -361,9 +364,9 @@ impl Context { .unwrap_or_else(|| self.config.as_ref().map_or(false, |c| c.query.strict_mode)) } - pub fn is_local(&self) -> bool { + pub async fn is_local(&self) -> bool { let region = self.effective_region(); - region.as_ref() == LOCAL_REGION + region.await.as_ref() == LOCAL_REGION } } @@ -513,8 +516,8 @@ pub async fn use_table( debug!("describing the table: {}", tbl); let tbl = tbl.clone(); let desc: TableDescription = control::describe_table_api(cx, tbl.clone()).await; - save_using_target(cx, desc)?; - println!("Now you're using the table '{}' ({}).", tbl, &cx.effective_region().as_ref()); + save_using_target(cx, desc).await?; + println!("Now you're using the table '{}' ({}).", tbl, &cx.effective_region().await.as_ref()); }, None => bye(1, "You have to specify a table. How to use (1). 'dy use --table mytable', or (2) 'dy use mytable'."), }; @@ -523,7 +526,7 @@ pub async fn use_table( } /// Inserts specified table description into cache file. -pub fn insert_to_table_cache( +pub async fn insert_to_table_cache( cx: &Context, desc: TableDescription, ) -> Result<(), DyneinConfigError> { @@ -531,7 +534,7 @@ pub fn insert_to_table_cache( .table_name .clone() .expect("desc should have table name"); - let region: Region = cx.effective_region(); + let region: Region = cx.effective_region().await; debug!( "Under the region '{}', trying to save table schema of '{}'", ®ion.as_ref(), @@ -597,7 +600,7 @@ pub async fn table_schema(cx: &Context) -> TableSchema { .await; TableSchema { - region: String::from(cx.effective_region().as_ref()), + region: String::from(cx.effective_region().await.as_ref()), name: desc.clone().table_name.unwrap(), pk: key::typed_key("HASH", &desc).expect("pk should exist"), sk: key::typed_key("RANGE", &desc), @@ -614,7 +617,7 @@ pub async fn table_schema(cx: &Context) -> TableSchema { std::process::exit(1) }); let schema_from_cache: Option = cached_tables - .get(&cx.effective_cache_key()) + .get(&cx.effective_cache_key().await) .map(|x| x.to_owned()); schema_from_cache.unwrap_or_else(|| { error!("{}", Messages::NoEffectiveTable); @@ -709,7 +712,10 @@ fn retrieve_or_create_dynein_dir() -> Result { /// This function updates `using_region` and `using_table` in config.yml, /// and at the same time inserts TableDescription of the target table into cache.yml. -fn save_using_target(cx: &mut Context, desc: TableDescription) -> Result<(), DyneinConfigError> { +async fn save_using_target( + cx: &mut Context, + desc: TableDescription, +) -> Result<(), DyneinConfigError> { let table_name: String = desc .table_name .clone() @@ -718,7 +724,7 @@ fn save_using_target(cx: &mut Context, desc: TableDescription) -> Result<(), Dyn let port: u32 = cx.effective_port(); // retrieve current config from Context and update "using target". 
- let region = Some(String::from(cx.effective_region().as_ref())); + let region = Some(String::from(cx.effective_region().await.as_ref())); let config = cx.config.as_mut().expect("cx should have config"); config.using_region = region; config.using_table = Some(table_name); @@ -730,7 +736,7 @@ fn save_using_target(cx: &mut Context, desc: TableDescription) -> Result<(), Dyn write_dynein_file(DyneinFileType::ConfigFile, config_yaml_string)?; // save target table info into cache. - insert_to_table_cache(cx, desc)?; + insert_to_table_cache(cx, desc).await?; Ok(()) } @@ -755,8 +761,8 @@ mod tests { use std::convert::TryInto; use std::error::Error; - #[test] - fn test_context_functions() -> Result<(), Box> { + #[tokio::test] + async fn test_context_functions() -> Result<(), Box> { let cx1 = Context { config: None, cache: None, @@ -767,7 +773,10 @@ mod tests { should_strict_for_query: None, retry: None, }; - assert_eq!(cx1.effective_region(), Region::from_static("us-east-1")); + assert_eq!( + cx1.effective_region().await, + Region::from_static("us-east-1") + ); // cx1.effective_table_name(); ... exit(1) let cx2 = Context { @@ -787,7 +796,7 @@ mod tests { retry: Some(RetryConfig::default().try_into()?), }; assert_eq!( - cx2.effective_region(), + cx2.effective_region().await, Region::from_static("ap-northeast-1") ); assert_eq!(cx2.effective_table_name(), String::from("cfgtbl")); @@ -797,14 +806,20 @@ mod tests { overwritten_table_name: Some(String::from("argtbl")), // --table argtbl ..cx2.clone() }; - assert_eq!(cx3.effective_region(), Region::from_static("us-east-1")); + assert_eq!( + cx3.effective_region().await, + Region::from_static("us-east-1") + ); assert_eq!(cx3.effective_table_name(), String::from("argtbl")); let cx4 = Context { overwritten_region: Some(Region::from_static("us-east-1")), // --region us-east-1 ..cx2.clone() }; - assert_eq!(cx4.effective_region(), Region::from_static("us-east-1")); + assert_eq!( + cx4.effective_region().await, + Region::from_static("us-east-1") + ); assert_eq!(cx4.effective_table_name(), String::from("cfgtbl")); let cx5 = Context { @@ -812,7 +827,7 @@ mod tests { ..cx2.clone() }; assert_eq!( - cx5.effective_region(), + cx5.effective_region().await, Region::from_static("ap-northeast-1") ); assert_eq!(cx5.effective_table_name(), String::from("argtbl")); diff --git a/src/bootstrap.rs b/src/bootstrap.rs index 4b8748b..f5fa018 100644 --- a/src/bootstrap.rs +++ b/src/bootstrap.rs @@ -279,27 +279,21 @@ https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/AppendixSampleT let request_items = batch::build_batch_request_items_from_json(content.to_string())?; batch::batch_write_untill_processed(cx.clone(), request_items).await?; } + + let region = cx.effective_region().await.to_string(); + println!( "\n\nNow all tables have sample data. Try following commands to play with dynein. Enjoy!" 
); - println!(" $ dy --region {} ls", &cx.effective_region().as_ref()); - println!( - " $ dy --region {} desc --table Thread", - &cx.effective_region().as_ref() - ); - println!( - " $ dy --region {} scan --table Thread", - &cx.effective_region().as_ref() - ); - println!( - " $ dy --region {} use --table Thread", - &cx.effective_region().as_ref() - ); + println!(" $ dy --region {} ls", region); + println!(" $ dy --region {} desc --table Thread", region); + println!(" $ dy --region {} scan --table Thread", region); + println!(" $ dy --region {} use --table Thread", region); println!(" $ dy scan"); println!("\nAfter you 'use' a table like above, dynein assume you're using the same region & table, which info is stored at ~/.dynein/config.yml and ~/.dynein/cache.yml"); println!( "Let's move on with the '{}' region you've just 'use'd...", - &cx.effective_region().as_ref() + region ); println!(" $ dy scan --table Forum"); println!(" $ dy scan -t ProductCatalog"); @@ -321,7 +315,7 @@ async fn prepare_table(cx: &app::Context, table_name: &str, keys: &[&str]) { println!( "Started to create table '{}' in {} region. status: {}", &table_name, - &cx.effective_region().as_ref(), + &cx.effective_region().await.as_ref(), desc.table_status.unwrap() ); } @@ -329,7 +323,7 @@ async fn prepare_table(cx: &app::Context, table_name: &str, keys: &[&str]) { CreateTableError::ResourceInUseException(_) => println!( "[skip] Table '{}' already exists in {} region, skipping to create new one.", &table_name, - &cx.effective_region().as_ref() + &cx.effective_region().await.as_ref() ), e => { debug!("CreateTable API call got an error -- {:#?}", e); diff --git a/src/control.rs b/src/control.rs index ffe2b36..069cf31 100644 --- a/src/control.rs +++ b/src/control.rs @@ -63,7 +63,7 @@ pub async fn list_tables_all_regions(cx: app::Context) { ) .await; - if cx.is_local() { + if cx.is_local().await { list_tables(cx.clone()).await; } } @@ -72,21 +72,16 @@ pub async fn list_tables_all_regions(cx: app::Context) { pub async fn list_tables(cx: app::Context) { let table_names = list_tables_api(cx.clone()).await; + let region = cx.effective_region().await.to_string(); - println!( - "DynamoDB tables in region: {}", - cx.effective_region().as_ref() - ); + println!("DynamoDB tables in region: {}", region); if table_names.is_empty() { return println!(" No table in this region."); } - // if let Some(table_in_config) = cx.clone().config.and_then(|x| x.table) { - if let Some(table_in_config) = cx.clone().cached_using_table_schema() { + if let Some(table_in_config) = cx.clone().cached_using_table_schema().await { for table_name in table_names { - if cx.clone().effective_region().as_ref() == table_in_config.region - && table_name == table_in_config.name - { + if region == table_in_config.region && table_name == table_in_config.name { println!("* {}", table_name); } else { println!(" {}", table_name); @@ -128,12 +123,12 @@ pub async fn describe_table(cx: app::Context, target_table_to_desc: Option debug!("Described table schema was written to the cache file."), Err(e) => println!( "Failed to write table schema to the cache with follwoing error: {:?}", @@ -143,7 +138,7 @@ pub async fn describe_table(cx: app::Context, target_table_to_desc: Option { - util::print_table_description(new_context.effective_region().as_ref(), desc) + util::print_table_description(new_context.effective_region().await.as_ref(), desc) } // Some("raw") => println!("{:#?}", desc), Some(_) => { @@ -156,7 +151,7 @@ pub async fn describe_table(cx: app::Context, 
target_table_to_desc: Option TableDescription { - let region = cx.effective_region(); + let region = cx.effective_region().await; let config = cx.effective_sdk_config_with_region(region.as_ref()).await; let ddb = DynamoDbSdkClient::new(&config); @@ -183,7 +178,7 @@ pub async fn create_table(cx: app::Context, name: String, given_keys: Vec util::print_table_description(cx.effective_region().as_ref(), desc), + Ok(desc) => util::print_table_description(cx.effective_region().await.as_ref(), desc), Err(e) => { debug!("CreateTable API call got an error -- {:#?}", e); error!("{}", e.into_service_error()); @@ -272,7 +267,7 @@ pub async fn create_index(cx: app::Context, index_name: String, given_keys: Vec< Ok(res) => { debug!("Returned result: {:#?}", res); util::print_table_description( - cx.effective_region().as_ref(), + cx.effective_region().await.as_ref(), res.table_description.unwrap(), ); } @@ -367,7 +362,7 @@ pub async fn update_table( ) .await { - Ok(desc) => util::print_table_description(cx.effective_region().as_ref(), desc), + Ok(desc) => util::print_table_description(cx.effective_region().await.as_ref(), desc), Err(e) => { debug!("UpdateTable API call got an error -- {:#?}", e); error!("{}", e.to_string()); @@ -599,7 +594,7 @@ pub async fn restore(cx: app::Context, backup_name: Option, restore_name debug!("Returned result: {:#?}", res); println!("Table restoration from: '{}' has been started", &backup_arn); let desc = res.table_description.unwrap(); - util::print_table_description(cx.effective_region().as_ref(), desc); + util::print_table_description(cx.effective_region().await.as_ref(), desc); } } } From 91ef6db856c79de1f327dd31107c47a722854a84 Mon Sep 17 00:00:00 2001 From: Ryota Sakamoto Date: Tue, 21 May 2024 01:19:11 +0900 Subject: [PATCH 18/21] fix: overwrite endpoint_url when region is local Signed-off-by: Ryota Sakamoto --- src/app.rs | 34 +++++++++++----------------------- 1 file changed, 11 insertions(+), 23 deletions(-) diff --git a/src/app.rs b/src/app.rs index 75a21d4..8cbd611 100644 --- a/src/app.rs +++ b/src/app.rs @@ -240,7 +240,7 @@ impl Context { Ok(Context { config: Some(config), cache: Some(load_or_touch_cache_file(true)?), - overwritten_region: region_from_str(region, port), + overwritten_region: region_from_str(region), overwritten_table_name: table, overwritten_port: port, output: None, @@ -260,10 +260,12 @@ impl Context { let sdk_region = Region::new(region_name.to_owned()); let provider = RegionProviderChain::first_try(sdk_region); - aws_config::defaults(BehaviorVersion::v2024_03_28()) - .region(provider) - .load() - .await + let mut config = aws_config::defaults(BehaviorVersion::v2024_03_28()).region(provider); + if self.is_local().await { + config = config.endpoint_url(format!("http://localhost:{}", self.effective_port())); + } + + config.load().await } pub async fn effective_region(&self) -> Region { @@ -276,11 +278,8 @@ impl Context { if let Some(using_region_name_in_config) = &self.config.to_owned().and_then(|x| x.using_region) { - return region_from_str( - Some(using_region_name_in_config.to_owned()), - Some(self.effective_port()), - ) // Option - .expect("Region name in the config file is invalid."); + return region_from_str(Some(using_region_name_in_config.to_owned())) // Option + .expect("Region name in the config file is invalid."); }; // otherwise, come down to "default region" of your environment. 
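The `endpoint_url` override introduced in the hunk above is the whole mechanism for talking to DynamoDB Local with the new SDK. A standalone, hedged sketch follows; the `local_client` helper, the "local" region name, and the port handling are illustrative, while dynein derives them from its config and `effective_port`:

    use aws_config::{BehaviorVersion, Region};
    use aws_sdk_dynamodb::Client;

    // Build a client whose requests all go to a local endpoint instead of the
    // regional AWS endpoint.
    async fn local_client(port: u32) -> Client {
        let config = aws_config::defaults(BehaviorVersion::v2024_03_28())
            .region(Region::from_static("local"))
            .endpoint_url(format!("http://localhost:{}", port))
            .load()
            .await;
        Client::new(&config)
    }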
@@ -427,10 +426,9 @@ Public functions ================================================= */ // Receives given --region option string, including "local", return Region struct. -pub fn region_from_str(s: Option, p: Option) -> Option { - let port = p.unwrap_or(8000); +pub fn region_from_str(s: Option) -> Option { match s.as_deref() { - Some(LOCAL_REGION) => Some(region_dynamodb_local(port)), + Some(LOCAL_REGION) => Some(Region::from_static(LOCAL_REGION)), Some(x) => Some(Region::new(x.to_owned())), // convert Result into Option None => None, } @@ -672,16 +670,6 @@ pub fn bye(code: i32, msg: &str) -> ! { Private functions ================================================= */ -fn region_dynamodb_local(port: u32) -> Region { - let endpoint_url = format!("http://localhost:{}", port); - debug!( - "setting DynamoDB Local '{}' as target region.", - &endpoint_url - ); - // TODO: fix - Region::from_static(LOCAL_REGION) -} - fn retrieve_dynein_file_path(file_type: DyneinFileType) -> Result { let filename = match file_type { DyneinFileType::ConfigFile => CONFIG_FILE_NAME, From 4cfcd3fc88760678f7db8b0ec53ef6e2fb2cc1fb Mon Sep 17 00:00:00 2001 From: Ryota Sakamoto Date: Tue, 21 May 2024 01:38:03 +0900 Subject: [PATCH 19/21] fix: fix review points Signed-off-by: Ryota Sakamoto --- src/app.rs | 14 +++++------ src/bootstrap.rs | 8 +++--- src/control.rs | 46 ++++++++++++++++++----------------- src/ddb/mod.rs | 1 + src/{util.rs => ddb/table.rs} | 2 +- src/main.rs | 2 +- src/parser.rs | 1 + src/transfer.rs | 6 ++--- tests/util/mod.rs | 7 ++++-- 9 files changed, 47 insertions(+), 40 deletions(-) create mode 100644 src/ddb/mod.rs rename src/{util.rs => ddb/table.rs} (99%) diff --git a/src/app.rs b/src/app.rs index 8cbd611..452a864 100644 --- a/src/app.rs +++ b/src/app.rs @@ -34,8 +34,8 @@ use tempfile::NamedTempFile; use thiserror::Error; use super::control; +use super::ddb::table; use super::key; -use super::util; /* ================================================= struct / enum / const @@ -59,7 +59,7 @@ pub struct TableSchema { pub pk: key::Key, pub sk: Option, pub indexes: Option>, - pub mode: util::Mode, + pub mode: table::Mode, } #[derive(Serialize, Deserialize, Debug, Clone)] @@ -323,8 +323,8 @@ impl Context { pub async fn effective_cache_key(&self) -> String { format!( "{}/{}", - &self.effective_region().await.as_ref(), - &self.effective_table_name() + self.effective_region().await.as_ref(), + self.effective_table_name() ) } @@ -515,7 +515,7 @@ pub async fn use_table( let tbl = tbl.clone(); let desc: TableDescription = control::describe_table_api(cx, tbl.clone()).await; save_using_target(cx, desc).await?; - println!("Now you're using the table '{}' ({}).", tbl, &cx.effective_region().await.as_ref()); + println!("Now you're using the table '{}' ({}).", tbl, cx.effective_region().await.as_ref()); }, None => bye(1, "You have to specify a table. How to use (1). 
'dy use --table mytable', or (2) 'dy use mytable'."), }; @@ -561,7 +561,7 @@ pub async fn insert_to_table_cache( pk: key::typed_key("HASH", &desc).expect("pk should exist"), sk: key::typed_key("RANGE", &desc), indexes: index_schemas(&desc), - mode: util::extract_mode(&desc.billing_mode_summary), + mode: table::extract_mode(&desc.billing_mode_summary), }, ); cache.tables = Some(table_schema_hashmap); @@ -603,7 +603,7 @@ pub async fn table_schema(cx: &Context) -> TableSchema { pk: key::typed_key("HASH", &desc).expect("pk should exist"), sk: key::typed_key("RANGE", &desc), indexes: index_schemas(&desc), - mode: util::extract_mode(&desc.billing_mode_summary), + mode: table::extract_mode(&desc.billing_mode_summary), } } None => { diff --git a/src/bootstrap.rs b/src/bootstrap.rs index f5fa018..8867337 100644 --- a/src/bootstrap.rs +++ b/src/bootstrap.rs @@ -314,16 +314,16 @@ async fn prepare_table(cx: &app::Context, table_name: &str, keys: &[&str]) { Ok(desc) => { println!( "Started to create table '{}' in {} region. status: {}", - &table_name, - &cx.effective_region().await.as_ref(), + table_name, + cx.effective_region().await.as_ref(), desc.table_status.unwrap() ); } Err(e) => match e.into_service_error() { CreateTableError::ResourceInUseException(_) => println!( "[skip] Table '{}' already exists in {} region, skipping to create new one.", - &table_name, - &cx.effective_region().await.as_ref() + table_name, + cx.effective_region().await.as_ref() ), e => { debug!("CreateTable API call got an error -- {:#?}", e); diff --git a/src/control.rs b/src/control.rs index 069cf31..eed1bb6 100644 --- a/src/control.rs +++ b/src/control.rs @@ -35,7 +35,7 @@ use dialoguer::{theme::ColorfulTheme, Confirm, Select}; use tabwriter::TabWriter; use super::app; -use super::util; +use super::ddb::table; /* ================================================= Public functions @@ -108,7 +108,7 @@ pub async fn describe_all_tables(cx: app::Context) { } /// Executed when you call `$ dy desc (table)`. Retrieve TableDescription via describe_table_api function, -/// then print them in convenient way using util::print_table_description function (default/yaml). +/// then print them in convenient way using table::print_table_description function (default/yaml). 
pub async fn describe_table(cx: app::Context, target_table_to_desc: Option) { debug!("context: {:#?}", &cx); debug!("positional arg table name: {:?}", &target_table_to_desc); @@ -122,8 +122,8 @@ pub async fn describe_table(cx: app::Context, target_table_to_desc: Option { - util::print_table_description(new_context.effective_region().await.as_ref(), desc) + table::print_table_description(new_context.effective_region().await.as_ref(), desc) } // Some("raw") => println!("{:#?}", desc), Some(_) => { @@ -178,7 +178,7 @@ pub async fn create_table(cx: app::Context, name: String, given_keys: Vec util::print_table_description(cx.effective_region().await.as_ref(), desc), + Ok(desc) => table::print_table_description(cx.effective_region().await.as_ref(), desc), Err(e) => { debug!("CreateTable API call got an error -- {:#?}", e); error!("{}", e.into_service_error()); @@ -200,7 +200,8 @@ pub async fn create_table_api( &name, &given_keys ); - let (key_schema, attribute_definitions) = util::generate_essential_key_definitions(&given_keys); + let (key_schema, attribute_definitions) = + table::generate_essential_key_definitions(&given_keys); let config = cx.effective_sdk_config().await; let ddb = DynamoDbSdkClient::new(&config); @@ -230,7 +231,8 @@ pub async fn create_index(cx: app::Context, index_name: String, given_keys: Vec< &cx.effective_table_name() ); - let (key_schema, attribute_definitions) = util::generate_essential_key_definitions(&given_keys); + let (key_schema, attribute_definitions) = + table::generate_essential_key_definitions(&given_keys); let config = cx.effective_sdk_config().await; let ddb = DynamoDbSdkClient::new(&config); @@ -266,7 +268,7 @@ pub async fn create_index(cx: app::Context, index_name: String, given_keys: Vec< } Ok(res) => { debug!("Returned result: {:#?}", res); - util::print_table_description( + table::print_table_description( cx.effective_region().await.as_ref(), res.table_description.unwrap(), ); @@ -285,11 +287,11 @@ pub async fn update_table( let desc: TableDescription = describe_table_api(&cx, table_name_to_update.clone()).await; // Map given string into "Mode" enum. Note that in cmd.rs clap already limits acceptable values. - let switching_to_mode: Option = match mode_string { + let switching_to_mode: Option = match mode_string { None => None, Some(ms) => match ms.as_str() { - "provisioned" => Some(util::Mode::Provisioned), - "ondemand" => Some(util::Mode::OnDemand), + "provisioned" => Some(table::Mode::Provisioned), + "ondemand" => Some(table::Mode::OnDemand), _ => panic!("You shouldn't see this message as --mode can takes only 'provisioned' or 'ondemand'."), }, }; @@ -298,9 +300,9 @@ pub async fn update_table( let provisioned_throughput: Option = match &switching_to_mode { // when --mode is not given, no mode switch happens. Check the table's current mode. None => { - match util::extract_mode(&desc.clone().billing_mode_summary) { + match table::extract_mode(&desc.clone().billing_mode_summary) { // When currently OnDemand mode and you're not going to change the it, set None for CU. - util::Mode::OnDemand => { + table::Mode::OnDemand => { if wcu.is_some() || rcu.is_some() { println!("Ignoring --rcu/--wcu options as the table mode is OnDemand."); }; @@ -308,7 +310,7 @@ pub async fn update_table( } // When currently Provisioned mode and you're not going to change the it, // pass given rcu/wcu, and use current values if missing. Provisioned table should have valid capacity units so unwrap() here. 
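The mode and throughput branching in this hunk decides whether the eventual `UpdateTable` call needs a `ProvisionedThroughput`. For readers who want the underlying API call in one place, here is a hedged sketch of switching a table to provisioned mode; `switch_to_provisioned` and its parameters are illustrative and do not match dynein's `update_table_api` signature:

    use aws_sdk_dynamodb::types::{BillingMode, ProvisionedThroughput};
    use aws_sdk_dynamodb::Client;

    async fn switch_to_provisioned(
        ddb: &Client,
        table: &str,
        rcu: i64,
        wcu: i64,
    ) -> Result<(), Box<dyn std::error::Error>> {
        ddb.update_table()
            .table_name(table)
            .billing_mode(BillingMode::Provisioned)
            .provisioned_throughput(
                // build() is fallible because both capacity units are required.
                ProvisionedThroughput::builder()
                    .read_capacity_units(rcu)
                    .write_capacity_units(wcu)
                    .build()?,
            )
            .send()
            .await?;
        Ok(())
    }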
- util::Mode::Provisioned => Some( + table::Mode::Provisioned => Some( ProvisionedThroughput::builder() .read_capacity_units(rcu.unwrap_or_else(|| { desc.clone() @@ -332,14 +334,14 @@ pub async fn update_table( // When the user trying to switch mode. Some(target_mode) => match target_mode { // when switching Provisioned->OnDemand mode, ProvisionedThroughput can be None. - util::Mode::OnDemand => { + table::Mode::OnDemand => { if wcu.is_some() || rcu.is_some() { println!("Ignoring --rcu/--wcu options as --mode ondemand."); }; None } // when switching OnDemand->Provisioned mode, set given wcu/rcu, fill with "5" as a default if not given. - util::Mode::Provisioned => Some( + table::Mode::Provisioned => Some( ProvisionedThroughput::builder() .read_capacity_units(rcu.unwrap_or(5)) .write_capacity_units(wcu.unwrap_or(5)) @@ -362,7 +364,7 @@ pub async fn update_table( ) .await { - Ok(desc) => util::print_table_description(cx.effective_region().await.as_ref(), desc), + Ok(desc) => table::print_table_description(cx.effective_region().await.as_ref(), desc), Err(e) => { debug!("UpdateTable API call got an error -- {:#?}", e); error!("{}", e.to_string()); @@ -385,7 +387,7 @@ pub async fn update_table( async fn update_table_api( cx: app::Context, table_name_to_update: String, - switching_to_mode: Option, + switching_to_mode: Option, provisioned_throughput: Option, ) -> Result< TableDescription, @@ -500,7 +502,7 @@ pub async fn list_backups(cx: app::Context, all_tables: bool) -> Result<(), IOEr .expect("status should exist") .as_str() .to_string(), - util::epoch_to_rfc3339( + table::epoch_to_rfc3339( backup .backup_creation_date_time .expect("creation date should exist") @@ -544,7 +546,7 @@ pub async fn restore(cx: app::Context, backup_name: Option, restore_name format!( "{} ({}, {} bytes)", b.to_owned().backup_name.unwrap(), - util::epoch_to_rfc3339(b.backup_creation_date_time.unwrap().as_secs_f64()), + table::epoch_to_rfc3339(b.backup_creation_date_time.unwrap().as_secs_f64()), b.backup_size_bytes.unwrap() ) }) @@ -594,7 +596,7 @@ pub async fn restore(cx: app::Context, backup_name: Option, restore_name debug!("Returned result: {:#?}", res); println!("Table restoration from: '{}' has been started", &backup_arn); let desc = res.table_description.unwrap(); - util::print_table_description(cx.effective_region().await.as_ref(), desc); + table::print_table_description(cx.effective_region().await.as_ref(), desc); } } } diff --git a/src/ddb/mod.rs b/src/ddb/mod.rs new file mode 100644 index 0000000..13971b0 --- /dev/null +++ b/src/ddb/mod.rs @@ -0,0 +1 @@ +pub mod table; diff --git a/src/util.rs b/src/ddb/table.rs similarity index 99% rename from src/util.rs rename to src/ddb/table.rs index 8511a52..8428c80 100644 --- a/src/util.rs +++ b/src/ddb/table.rs @@ -23,7 +23,7 @@ use aws_sdk_dynamodb::types::{ use chrono::DateTime; use log::error; -use super::key; +use crate::key; /* ================================================= struct / enum / const diff --git a/src/main.rs b/src/main.rs index 714e1ee..999f9d9 100644 --- a/src/main.rs +++ b/src/main.rs @@ -32,11 +32,11 @@ mod bootstrap; mod cmd; mod control; mod data; +mod ddb; mod key; mod parser; mod shell; mod transfer; -mod util; /* ================================================= helper functions diff --git a/src/parser.rs b/src/parser.rs index febd82f..893a3f7 100644 --- a/src/parser.rs +++ b/src/parser.rs @@ -1444,6 +1444,7 @@ impl DyneinParser { let result = GeneratedParser::parse(Rule::map_literal, exp); match result { Ok(mut pair) => { + // pair is 
parsed through Rule::map_literal so we expect that pair should be HashMap let item = parse_literal(pair.next().unwrap())? .convert_attribute_value() .as_m() diff --git a/src/transfer.rs b/src/transfer.rs index 0c71aa9..593a82c 100644 --- a/src/transfer.rs +++ b/src/transfer.rs @@ -37,7 +37,7 @@ use thiserror::Error; use super::app; use super::batch; use super::data; -use super::util; +use super::ddb::table; #[derive(Error, Debug)] pub enum DyneinExportError { @@ -151,7 +151,7 @@ pub async fn export( let ts: app::TableSchema = app::table_schema(&cx).await; let format_str: Option<&str> = format.as_deref(); - if ts.mode == util::Mode::Provisioned { + if ts.mode == table::Mode::Provisioned { let msg = "WARN: For the best performance on import/export, dynein recommends OnDemand mode. However the target table is Provisioned mode now. Proceed anyway?"; if !Confirm::new().with_prompt(msg).interact()? { app::bye(0, "Operation has been cancelled."); @@ -297,7 +297,7 @@ pub async fn import( let format_str: Option<&str> = format.as_deref(); let ts: app::TableSchema = app::table_schema(&cx).await; - if ts.mode == util::Mode::Provisioned { + if ts.mode == table::Mode::Provisioned { let msg = "WARN: For the best performance on import/export, dynein recommends OnDemand mode. However the target table is Provisioned mode now. Proceed anyway?"; if !Confirm::new().with_prompt(msg).interact()? { println!("Operation has been cancelled."); diff --git a/tests/util/mod.rs b/tests/util/mod.rs index b6a9f04..2395ee6 100644 --- a/tests/util/mod.rs +++ b/tests/util/mod.rs @@ -19,7 +19,7 @@ use std::env; use std::process::Command; // Run programs // use assert_cmd::cmd::Command; // Run programs - it seems to be equal to "use assert_cmd::prelude::* + use std::process::Command" -use aws_config::{Region, SdkConfig}; +use aws_config::{BehaviorVersion, Region, SdkConfig}; use aws_sdk_dynamodb::Client as DynamoDbSdkClient; use once_cell::sync::Lazy; use rand::{distributions::Alphanumeric, Rng}; @@ -340,7 +340,10 @@ async fn setup_container(port: i32) -> Result<(), Box> { // Wait dynamodb-local // https://docs.aws.amazon.com/sdk-for-rust/latest/dg/dynamodb-local.html let config = aws_sdk_dynamodb::config::Builder::from( - &SdkConfig::builder().region(Region::new("local")).build(), + &SdkConfig::builder() + .region(Region::new("local")) + .behavior_version(BehaviorVersion::v2024_03_28()) + .build(), ) .endpoint_url(format!("http://localhost:{}", port)) .build(); From d10edfd99f67550a2f5f423118924861c15ddeb7 Mon Sep 17 00:00:00 2001 From: Ryota Sakamoto Date: Tue, 21 May 2024 04:56:36 +0900 Subject: [PATCH 20/21] feat: use RetryConfig to define retry behavior Signed-off-by: Ryota Sakamoto --- Cargo.lock | 13 --------- Cargo.toml | 1 - src/app.rs | 82 +++++++++++++++++++++++++++++++--------------------- src/batch.rs | 64 ++++++++-------------------------------- 4 files changed, 61 insertions(+), 99 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 14ae975..90a5250 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -491,18 +491,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "backon" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d67782c3f868daa71d3533538e98a8e13713231969def7536e8039606fc46bf0" -dependencies = [ - "fastrand", - "futures-core", - "pin-project", - "tokio", -] - [[package]] name = "backtrace" version = "0.3.71" @@ -949,7 +937,6 @@ dependencies = [ "aws-sdk-ec2", "aws-smithy-runtime-api", "aws-smithy-types", - "backon", "base64 0.22.0", "brotli", "bytes", 
diff --git a/Cargo.toml b/Cargo.toml index 5968666..e261d43 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -60,7 +60,6 @@ console = "0.15.8" brotli = "5.0.0" base64 = "0.22.0" thiserror = "1.0.59" -backon = "0.4" [dev-dependencies] assert_cmd = "2.0.14" # contains helpers make executing the main binary on integration tests easier. diff --git a/src/app.rs b/src/app.rs index 452a864..ce9f59d 100644 --- a/src/app.rs +++ b/src/app.rs @@ -15,9 +15,10 @@ */ use ::serde::{Deserialize, Serialize}; -use aws_config::{meta::region::RegionProviderChain, BehaviorVersion, Region, SdkConfig}; +use aws_config::{ + meta::region::RegionProviderChain, retry::RetryConfig, BehaviorVersion, Region, SdkConfig, +}; use aws_sdk_dynamodb::types::{AttributeDefinition, TableDescription}; -use backon::ExponentialBuilder; use log::{debug, error, info}; use serde_yaml::Error as SerdeYAMLError; use std::convert::{TryFrom, TryInto}; @@ -105,19 +106,19 @@ pub struct Config { #[serde(default)] pub query: QueryConfig, // pub cache_expiration_time: Option, // in second. default 300 (= 5 minutes) - pub retry: Option, + pub retry: Option, } #[derive(Serialize, Deserialize, Debug, Clone, Default)] -pub struct RetryConfig { +pub struct RetrySettingGlobal { pub default: RetrySetting, pub batch_write_item: Option, } -impl TryFrom for Retry { +impl TryFrom for Retry { type Error = RetryConfigError; - fn try_from(value: RetryConfig) -> Result { + fn try_from(value: RetrySettingGlobal) -> Result { let default = value.default.try_into()?; let batch_write_item = match value.batch_write_item { Some(v) => Some(v.try_into()?), @@ -154,29 +155,26 @@ pub enum RetryConfigError { #[error("max_backoff should be greater than zero")] MaxBackoff, } -impl TryFrom for ExponentialBuilder { +impl TryFrom for RetryConfig { type Error = RetryConfigError; fn try_from(value: RetrySetting) -> Result { - let mut builder = Self::default() - .with_jitter() - .with_factor(2.0) - .with_min_delay(Duration::from_secs(1)); + let mut builder = Self::standard(); if let Some(max_attempts) = value.max_attempts { if max_attempts == 0 { return Err(RetryConfigError::MaxAttempts); } - builder = builder.with_max_times(max_attempts as usize - 1); + builder = builder.with_max_attempts(max_attempts - 1); } if let Some(max_backoff) = value.max_backoff { if max_backoff.is_zero() { return Err(RetryConfigError::MaxBackoff); } - builder = builder.with_max_delay(max_backoff); + builder = builder.with_max_backoff(max_backoff); } if let Some(initial_backoff) = value.initial_backoff { - builder = builder.with_min_delay(initial_backoff); + builder = builder.with_initial_backoff(initial_backoff); } Ok(builder) } @@ -203,8 +201,8 @@ pub struct Cache { #[derive(Debug, Clone)] pub struct Retry { - pub default: ExponentialBuilder, - pub batch_write_item: Option, + pub default: RetryConfig, + pub batch_write_item: Option, } #[derive(Debug, Clone)] @@ -257,6 +255,24 @@ impl Context { } pub async fn effective_sdk_config_with_region(&self, region_name: &str) -> SdkConfig { + self.build_sdk_config(region_name, None).await + } + + pub async fn effective_sdk_config_with_retry( + &self, + retry_config: Option, + ) -> SdkConfig { + let region = self.effective_region().await; + let region_name = region.as_ref(); + + self.build_sdk_config(region_name, retry_config).await + } + + async fn build_sdk_config( + &self, + region_name: &str, + retry_config: Option, + ) -> SdkConfig { let sdk_region = Region::new(region_name.to_owned()); let provider = RegionProviderChain::first_try(sdk_region); @@ -265,6 
+281,10 @@ impl Context { config = config.endpoint_url(format!("http://localhost:{}", self.effective_port())); } + if let Some(retry_config) = retry_config { + config = config.retry_config(retry_config); + } + config.load().await } @@ -773,7 +793,7 @@ mod tests { using_table: Some(String::from("cfgtbl")), using_port: Some(8000), query: QueryConfig { strict_mode: false }, - retry: Some(RetryConfig::default()), + retry: Some(RetrySettingGlobal::default()), }), cache: None, overwritten_region: None, @@ -781,7 +801,7 @@ mod tests { overwritten_port: None, output: None, should_strict_for_query: None, - retry: Some(RetryConfig::default().try_into()?), + retry: Some(RetrySettingGlobal::default().try_into()?), }; assert_eq!( cx2.effective_region().await, @@ -826,12 +846,10 @@ mod tests { #[test] fn test_retry_setting_success() { let config1 = RetrySetting::default(); - let actual = ExponentialBuilder::try_from(config1).unwrap(); - let expected = ExponentialBuilder::default() - .with_min_delay(Duration::from_secs(1)) - .with_jitter() - .with_factor(2.0) - .with_max_times(9); + let actual = RetryConfig::try_from(config1).unwrap(); + let expected = RetryConfig::standard() + .with_initial_backoff(Duration::from_secs(1)) + .with_max_attempts(9); assert_eq!(format!("{:?}", actual), format!("{:?}", expected)); let config2 = RetrySetting { @@ -839,13 +857,11 @@ mod tests { max_backoff: Some(Duration::from_secs(100)), max_attempts: Some(20), }; - let actual = ExponentialBuilder::try_from(config2).unwrap(); - let expected = ExponentialBuilder::default() - .with_jitter() - .with_factor(2.0) - .with_min_delay(Duration::from_secs(1)) - .with_max_delay(Duration::from_secs(100)) - .with_max_times(19); + let actual = RetryConfig::try_from(config2).unwrap(); + let expected = RetryConfig::standard() + .with_initial_backoff(Duration::from_secs(1)) + .with_max_backoff(Duration::from_secs(100)) + .with_max_attempts(19); assert_eq!(format!("{:?}", actual), format!("{:?}", expected)); } @@ -855,7 +871,7 @@ mod tests { max_attempts: Some(0), ..Default::default() }; - match ExponentialBuilder::try_from(config).unwrap_err() { + match RetryConfig::try_from(config).unwrap_err() { RetryConfigError::MaxAttempts => {} _ => unreachable!("unexpected error"), } @@ -864,7 +880,7 @@ mod tests { max_backoff: Some(Duration::new(0, 0)), ..Default::default() }; - match ExponentialBuilder::try_from(config).unwrap_err() { + match RetryConfig::try_from(config).unwrap_err() { RetryConfigError::MaxBackoff => {} _ => unreachable!("unexpected error"), } diff --git a/src/batch.rs b/src/batch.rs index 0fab925..209a85e 100644 --- a/src/batch.rs +++ b/src/batch.rs @@ -20,10 +20,9 @@ use aws_sdk_dynamodb::{ types::{AttributeValue, DeleteRequest, PutRequest, WriteRequest}, Client as DynamoDbSdkClient, }; -use backon::Retryable; use base64::{engine::general_purpose, Engine as _}; use bytes::Bytes; -use log::{debug, error, warn}; +use log::{debug, error}; use serde_json::Value as JsonValue; use std::{collections::HashMap, error, fmt, fs, future::Future, io::Error as IOError, pin::Pin}; @@ -231,58 +230,19 @@ async fn batch_write_item_api( &request_items ); - let config = cx.effective_sdk_config().await; - let ddb = DynamoDbSdkClient::new(&config); - - let retry_setting = cx + let retry_config = cx .retry + .clone() .map(|v| v.batch_write_item.to_owned().unwrap_or(v.default)); - let res = match retry_setting { - Some(backoff) => { - let f = || async { - ddb.batch_write_item() - .set_request_items(Some(request_items.clone())) - .send() - .await - }; - 
f.retry(&backoff) - .when(|err| match err.as_service_error() { - Some(BatchWriteItemError::ProvisionedThroughputExceededException(e)) => { - warn!("Retry batch_write_item : {}", e); - true - } - Some(BatchWriteItemError::InternalServerError(e)) => { - warn!("Retry batch_write_item : {}", e); - true - } - Some(BatchWriteItemError::RequestLimitExceeded(e)) => { - warn!("Retry batch_write_item : {}", e); - true - } - // aws_sdk_dynamodb::error::SdkError::DispatchFailure(e) => { - // warn!("Retry batch_write_item : {}", &e); - // true - // } - // aws_sdk_dynamodb::error::SdkError::a(response) => { - // if response.body_as_str().contains("ThrottlingException") { - // warn!("Retry batch_write_item : {}", err); - // true - // } else { - // false - // } - // } - _ => false, - }) - .await - } - None => { - ddb.batch_write_item() - .set_request_items(Some(request_items)) - .send() - .await - } - }; - match res { + let config = cx.effective_sdk_config_with_retry(retry_config).await; + let ddb = DynamoDbSdkClient::new(&config); + + match ddb + .batch_write_item() + .set_request_items(Some(request_items)) + .send() + .await + { Ok(res) => Ok(res.unprocessed_items), Err(e) => Err(e), } From 74e68bd63dbc2e99ff40684e64dce07a5e2b62da Mon Sep 17 00:00:00 2001 From: Ryota Sakamoto Date: Tue, 21 May 2024 10:47:49 +0900 Subject: [PATCH 21/21] fix: fix review points Signed-off-by: Ryota Sakamoto --- src/app.rs | 20 +++++++++++--------- src/batch.rs | 2 +- src/data.rs | 2 +- src/{ => ddb}/key.rs | 0 src/ddb/mod.rs | 17 +++++++++++++++++ src/ddb/table.rs | 2 +- src/main.rs | 1 - 7 files changed, 31 insertions(+), 13 deletions(-) rename src/{ => ddb}/key.rs (100%) diff --git a/src/app.rs b/src/app.rs index ce9f59d..aa34ce1 100644 --- a/src/app.rs +++ b/src/app.rs @@ -35,8 +35,7 @@ use tempfile::NamedTempFile; use thiserror::Error; use super::control; -use super::ddb::table; -use super::key; +use super::ddb::{key, table}; /* ================================================= struct / enum / const @@ -165,7 +164,7 @@ impl TryFrom for RetryConfig { if max_attempts == 0 { return Err(RetryConfigError::MaxAttempts); } - builder = builder.with_max_attempts(max_attempts - 1); + builder = builder.with_max_attempts(max_attempts); } if let Some(max_backoff) = value.max_backoff { if max_backoff.is_zero() { @@ -555,8 +554,8 @@ pub async fn insert_to_table_cache( let region: Region = cx.effective_region().await; debug!( "Under the region '{}', trying to save table schema of '{}'", - ®ion.as_ref(), - &table_name + region.as_ref(), + table_name ); // retrieve current cache from Context and update target table desc. @@ -782,8 +781,11 @@ mod tests { retry: None, }; assert_eq!( - cx1.effective_region().await, - Region::from_static("us-east-1") + &cx1.effective_region().await, + aws_config::load_defaults(BehaviorVersion::v2024_03_28()) + .await + .region() + .unwrap_or(&Region::from_static("us-east-1")) ); // cx1.effective_table_name(); ... 
exit(1) @@ -849,7 +851,7 @@ mod tests { let actual = RetryConfig::try_from(config1).unwrap(); let expected = RetryConfig::standard() .with_initial_backoff(Duration::from_secs(1)) - .with_max_attempts(9); + .with_max_attempts(10); assert_eq!(format!("{:?}", actual), format!("{:?}", expected)); let config2 = RetrySetting { @@ -861,7 +863,7 @@ mod tests { let expected = RetryConfig::standard() .with_initial_backoff(Duration::from_secs(1)) .with_max_backoff(Duration::from_secs(100)) - .with_max_attempts(19); + .with_max_attempts(20); assert_eq!(format!("{:?}", actual), format!("{:?}", expected)); } diff --git a/src/batch.rs b/src/batch.rs index 209a85e..3cb9ea5 100644 --- a/src/batch.rs +++ b/src/batch.rs @@ -28,7 +28,7 @@ use std::{collections::HashMap, error, fmt, fs, future::Future, io::Error as IOE use super::app; use super::data; -use super::key; +use super::ddb::key; /* ================================================= struct / enum / const diff --git a/src/data.rs b/src/data.rs index 30dda97..f10da89 100644 --- a/src/data.rs +++ b/src/data.rs @@ -36,7 +36,7 @@ use tabwriter::TabWriter; // use bytes::Bytes; use super::app; -use super::key; +use super::ddb::key; /* ================================================= struct / enum / const diff --git a/src/key.rs b/src/ddb/key.rs similarity index 100% rename from src/key.rs rename to src/ddb/key.rs diff --git a/src/ddb/mod.rs b/src/ddb/mod.rs index 13971b0..9880958 100644 --- a/src/ddb/mod.rs +++ b/src/ddb/mod.rs @@ -1 +1,18 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +pub mod key; pub mod table; diff --git a/src/ddb/table.rs b/src/ddb/table.rs index 8428c80..6f86163 100644 --- a/src/ddb/table.rs +++ b/src/ddb/table.rs @@ -23,7 +23,7 @@ use aws_sdk_dynamodb::types::{ use chrono::DateTime; use log::error; -use crate::key; +use crate::ddb::key; /* ================================================= struct / enum / const diff --git a/src/main.rs b/src/main.rs index 999f9d9..dd258c0 100644 --- a/src/main.rs +++ b/src/main.rs @@ -33,7 +33,6 @@ mod cmd; mod control; mod data; mod ddb; -mod key; mod parser; mod shell; mod transfer;
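
Note on the retry changes above: the series drops the hand-rolled backon loop in batch_write_item_api and instead attaches aws_config::retry::RetryConfig to the shared SdkConfig, so every client built from that config retries throttling and server errors through the SDK itself. Below is a minimal, self-contained sketch of that usage pattern; the region, the attempt/backoff values, and the final list_tables call are illustrative assumptions, not taken from the patches.

    use std::time::Duration;

    use aws_config::{retry::RetryConfig, BehaviorVersion, Region};
    use aws_sdk_dynamodb::Client as DynamoDbSdkClient;

    #[tokio::main]
    async fn main() {
        // Roughly what dynein's TryFrom<RetrySetting> conversion produces for
        // max_attempts = 20, initial_backoff = 1s, max_backoff = 100s.
        let retry_config = RetryConfig::standard()
            .with_max_attempts(20)
            .with_initial_backoff(Duration::from_secs(1))
            .with_max_backoff(Duration::from_secs(100));

        // The retry policy is applied while loading the shared SdkConfig, so the
        // DynamoDB client built from it handles retries without extra code at call sites.
        let sdk_config = aws_config::defaults(BehaviorVersion::v2024_03_28())
            .region(Region::new("us-east-1")) // illustrative region
            .retry_config(retry_config)
            .load()
            .await;

        let ddb = DynamoDbSdkClient::new(&sdk_config);
        let tables = ddb.list_tables().send().await;
        println!("{:?}", tables);
    }

One behavioral detail worth keeping in mind: RetryConfig::standard() counts the initial request toward max_attempts, which is why the final commit replaces with_max_attempts(max_attempts - 1) with with_max_attempts(max_attempts) and bumps the expected values in the tests from 9/19 to 10/20.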