From 234f88b013adcc2ce0dc5d4e23ac12a15d58a1af Mon Sep 17 00:00:00 2001 From: Emily Matheys Date: Thu, 13 Jun 2024 17:58:21 +0300 Subject: [PATCH 01/13] Midway there --- .github/workflows/main.yml | 2 +- .github/workflows/pr-checks.yml | 2 +- Cargo.lock | 254 ++++++++++++- Cargo.toml | 12 +- README.md | 33 ++ deny.toml | 2 +- examples/async_api.rs | 70 ++++ src/client/asynchronous.rs | 451 +++++++++++++++++++++++ src/client/blocking.rs | 134 +------ src/client/builder.rs | 50 ++- src/client/mod.rs | 124 +++++++ src/connection/asynchronous.rs | 135 +++++++ src/connection/blocking.rs | 36 +- src/connection/mod.rs | 15 + src/graph/asynchronous.rs | 613 ++++++++++++++++++++++++++++++++ src/graph/blocking.rs | 118 +++--- src/graph/mod.rs | 81 +++++ src/graph/query_builder.rs | 335 +++++++++++------ src/graph_schema/mod.rs | 41 ++- src/lib.rs | 47 +++ src/parser/mod.rs | 103 +++++- src/response/constraint.rs | 16 +- src/response/index.rs | 109 +++--- 23 files changed, 2337 insertions(+), 446 deletions(-) create mode 100644 examples/async_api.rs create mode 100644 src/client/asynchronous.rs create mode 100644 src/connection/asynchronous.rs create mode 100644 src/graph/asynchronous.rs diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 1a1dc85..e3f1d43 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -41,7 +41,7 @@ jobs: - uses: taiki-e/install-action@cargo-llvm-cov - uses: taiki-e/install-action@nextest - name: Generate Code Coverage - run: cargo llvm-cov nextest --all --codecov --output-path codecov.json + run: cargo llvm-cov nextest --all --test-threads 8 --codecov --output-path codecov.json - name: Upload coverage reports to Codecov uses: codecov/codecov-action@v4 with: diff --git a/.github/workflows/pr-checks.yml b/.github/workflows/pr-checks.yml index 8d2ea94..6171a29 100644 --- a/.github/workflows/pr-checks.yml +++ b/.github/workflows/pr-checks.yml @@ -52,7 +52,7 @@ jobs: - name: Populate test graph run: pip install 
falkordb && ./resources/populate_graph.py - name: Test - run: cargo nextest run --all + run: cargo nextest run --all test-threads 8 services: falkordb: image: falkordb/falkordb:edge diff --git a/Cargo.lock b/Cargo.lock index d6f927d..444578f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -26,6 +26,17 @@ dependencies = [ "memchr", ] +[[package]] +name = "async-trait" +version = "0.1.80" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "autocfg" version = "1.3.0" @@ -84,7 +95,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" dependencies = [ "bytes", + "futures-core", "memchr", + "pin-project-lite", + "tokio", + "tokio-util", ] [[package]] @@ -116,7 +131,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" dependencies = [ "libc", - "windows-sys", + "windows-sys 0.52.0", ] [[package]] @@ -129,6 +144,7 @@ dependencies = [ "regex", "strum", "thiserror", + "tokio", "tracing", ] @@ -168,6 +184,37 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "futures-core" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" + +[[package]] +name = "futures-sink" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" + +[[package]] +name = "futures-task" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" + +[[package]] +name = "futures-util" +version = "0.3.30" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +dependencies = [ + "futures-core", + "futures-sink", + "futures-task", + "pin-project-lite", + "pin-utils", +] + [[package]] name = "getrandom" version = "0.2.15" @@ -197,6 +244,12 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" +[[package]] +name = "hermit-abi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" + [[package]] name = "idna" version = "0.5.0" @@ -272,6 +325,17 @@ dependencies = [ "adler", ] +[[package]] +name = "mio" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" +dependencies = [ + "libc", + "wasi", + "windows-sys 0.48.0", +] + [[package]] name = "native-tls" version = "0.2.11" @@ -290,6 +354,16 @@ dependencies = [ "tempfile", ] +[[package]] +name = "num_cpus" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +dependencies = [ + "hermit-abi", + "libc", +] + [[package]] name = "object" version = "0.32.2" @@ -372,7 +446,7 @@ dependencies = [ "redox_syscall", "smallvec", "thread-id", - "windows-targets", + "windows-targets 0.52.5", ] [[package]] @@ -397,6 +471,12 @@ version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + [[package]] 
name = "pkg-config" version = "0.3.30" @@ -463,16 +543,24 @@ version = "0.25.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e0d7a6955c7511f60f3ba9e86c6d02b3c3f144f8c24b288d1f4e18074ab8bbec" dependencies = [ + "async-trait", + "bytes", "combine", + "futures-util", "itoa", "native-tls", "percent-encoding", + "pin-project-lite", "rand", "rustls", "rustls-native-certs", "rustls-pemfile", "rustls-pki-types", "ryu", + "tokio", + "tokio-native-tls", + "tokio-rustls", + "tokio-util", "url", ] @@ -526,7 +614,7 @@ dependencies = [ "libc", "spin", "untrusted", - "windows-sys", + "windows-sys 0.52.0", ] [[package]] @@ -545,7 +633,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys", - "windows-sys", + "windows-sys 0.52.0", ] [[package]] @@ -620,7 +708,7 @@ version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" dependencies = [ - "windows-sys", + "windows-sys 0.52.0", ] [[package]] @@ -658,6 +746,16 @@ version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" +[[package]] +name = "socket2" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + [[package]] name = "spin" version = "0.9.8" @@ -712,7 +810,7 @@ dependencies = [ "cfg-if", "fastrand", "rustix", - "windows-sys", + "windows-sys 0.52.0", ] [[package]] @@ -760,6 +858,68 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" +[[package]] +name = "tokio" +version = "1.38.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ba4f4a02a7a80d6f274636f0aa95c7e383b912d41fe721a31f29e29698585a4a" +dependencies = [ + "backtrace", + "bytes", + "libc", + "mio", + "num_cpus", + "pin-project-lite", + "socket2", + "tokio-macros", + "windows-sys 0.48.0", +] + +[[package]] +name = "tokio-macros" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tokio-native-tls" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +dependencies = [ + "native-tls", + "tokio", +] + +[[package]] +name = "tokio-rustls" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" +dependencies = [ + "rustls", + "rustls-pki-types", + "tokio", +] + +[[package]] +name = "tokio-util" +version = "0.7.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", +] + [[package]] name = "tracing" version = "0.1.40" @@ -863,13 +1023,37 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + [[package]] name = "windows-sys" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets", + "windows-targets 0.52.5", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", ] [[package]] @@ -878,28 +1062,46 @@ version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", + "windows_aarch64_gnullvm 0.52.5", + "windows_aarch64_msvc 0.52.5", + "windows_i686_gnu 0.52.5", "windows_i686_gnullvm", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", + "windows_i686_msvc 0.52.5", + "windows_x86_64_gnu 0.52.5", + "windows_x86_64_gnullvm 0.52.5", + "windows_x86_64_msvc 0.52.5", ] +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + [[package]] name = "windows_aarch64_gnullvm" version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + [[package]] name = "windows_aarch64_msvc" version = "0.52.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + [[package]] name = "windows_i686_gnu" version = "0.52.5" @@ -912,24 +1114,48 @@ version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + [[package]] name = "windows_i686_msvc" version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + [[package]] name = "windows_x86_64_gnu" version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + [[package]] name = "windows_x86_64_gnullvm" version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + [[package]] name = "windows_x86_64_msvc" version = "0.52.5" diff --git a/Cargo.toml b/Cargo.toml index a73da46..ce34e7f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,12 +13,22 @@ redis = { version = "0.25.4", default-features = false, features = ["sentinel"] regex = { version = "1.10.5", default-features = false, features = ["std", "perf", "unicode-bool", "unicode-perl"] } strum = { version = "0.26.2", default-features = false, features = ["std", "derive"] } thiserror = "1.0.61" +tokio = { version = "1.38.0", default-features = false, features = ["macros", "sync", "rt-multi-thread"], optional = true } tracing = { version = "0.1.40", default-features = false, features = ["std", "attributes"], optional = true } [features] -default = [] +default = ["tokio"] native-tls = ["redis/tls-native-tls"] rustls = ["redis/tls-rustls"] +tokio = ["dep:tokio", "redis/tokio-comp"] +tokio-native-tls = ["tokio", "redis/tokio-native-tls-comp"] +tokio-rustls = ["tokio", "redis/tokio-rustls-comp"] + tracing = ["dep:tracing"] + + +[[example]] +name = "async_api" +required-features = ["tokio"] \ No newline at end of file diff --git a/README.md b/README.md index 55e21dd..e597e27 100644 --- a/README.md +++ b/README.md @@ -58,6 +58,39 @@ for n in nodes.data { ## Features +### `tokio` support + +This client supports nonblocking API using the [`tokio`](https://tokio.rs/) runtime. +The `tokio` features is enabled by default. +Currently, this API requires running within a [`multi_threaded tokio scheduler`](https://docs.rs/tokio/latest/tokio/runtime/index.html#multi-thread-scheduler), and does not support the `current_thread` one, but this will probably be supported in the future. 
+ +The API uses an almost identical API, but the various functions need to be awaited: +```rust +use falkordb::{FalkorClientBuilder, FalkorConnectionInfo}; + +// Connect to FalkorDB +let connection_info: FalkorConnectionInfo = "falkor://127.0.0.1:6379".try_into() + .expect("Invalid connection info"); + +let client = FalkorClientBuilder::new_async() + .with_connection_info(connection_info) + .build().await.expect("Failed to build client"); + +// Select the social graph +let mut graph = client.select_graph("social"); + +// Create 100 nodes and return a handful +let nodes = graph.query("UNWIND range(0, 100) AS i CREATE (n { v:1 }) RETURN n LIMIT 10") + .with_timeout(5000).execute().await.expect("Failed executing query"); + +for n in nodes.data { + println!("{:?}", n[0]); +} +``` + +Note that thread safety is still up to the user to ensure, I.e. an `AsyncGraph` cannot simply be sent to a task spawned by tokio and expected to be used later, +it must be wrapped in an Arc> or something similar. + ### SSL/TLS Support This client is currently built upon the [`redis`](https://docs.rs/redis/latest/redis/) crate, and therefore supports TLS using diff --git a/deny.toml b/deny.toml index 2e966cc..e78e594 100644 --- a/deny.toml +++ b/deny.toml @@ -5,7 +5,7 @@ multiple-versions = "deny" skip = ["windows_x86_64_msvc", "windows_x86_64_gnu", "windows_x86_64_gnullvm", "windows_i686_msvc", "windows_i686_gnu", "windows_i686_gnullvm", "windows_aarch64_msvc", "windows_aarch64_gnullvm", - "windows-targets"] # Windows crates are all locked in `mio`, but should be fine + "windows-targets", "windows-sys"] # Windows crates are all using various versions in `mio`, but should be fine [sources] unknown-registry = "deny" diff --git a/examples/async_api.rs b/examples/async_api.rs new file mode 100644 index 0000000..f255601 --- /dev/null +++ b/examples/async_api.rs @@ -0,0 +1,70 @@ +/* + * Copyright FalkorDB Ltd. 2023 - present + * Licensed under the Server Side Public License v1 (SSPLv1). 
+ */ + +use falkordb::{FalkorClientBuilder, FalkorResult}; +use std::sync::Arc; +use tokio::{sync::Mutex, task::JoinSet}; + +// Usage of the asynchronous client REQUIRES the multi-threaded rt +#[tokio::main] +async fn main() -> FalkorResult<()> { + let client = FalkorClientBuilder::new_async() + .with_connection_info("falkor://127.0.0.1:6379".try_into()?) + .build() + .await?; + + let mut graph = client.select_graph("imdb"); + let mut res = graph.query("MATCH (a:actor) return a").execute().await?; + assert_eq!(res.data.len(), 1317); + + // Note that parsing is sync, even if a refresh of the graph schema was required, that refresh will happen in a blocking fashion + // The alternative is writing all the parsing functions to be async, all the way down + assert!(res.data.next().is_some()); + let collected = res.data.collect::>(); + + // One was already taken, so we should have one less now + assert_eq!(collected.len(), 1316); + + // And now for something completely different: + // Add synchronization, if we want to reuse the graph later, + // Otherwise we can just move it into the scope + let graph_a = Arc::new(Mutex::new(client.copy_graph("imdb", "imdb_a").await?)); + let graph_b = Arc::new(Mutex::new(client.copy_graph("imdb", "imdb_b").await?)); + let graph_c = Arc::new(Mutex::new(client.copy_graph("imdb", "imdb_c").await?)); + let graph_d = Arc::new(Mutex::new(client.copy_graph("imdb", "imdb_d").await?)); + + // Note that in each of the tasks, we have to consume the LazyResultSet somehow, and not return it, because it maintains a mutable reference to graph, and requires the lock guard to be alive + // By collecting it into a vec, we no longer need to maintain the lifetime, so we just get back our results + let mut join_set = JoinSet::new(); + join_set.spawn({ + let graph_a = graph_a.clone(); + async move { graph_a.lock().await.query("MATCH (a:actor) WITH a MATCH (b:actor) WHERE a.age = b.age AND a <> b RETURN a, collect(b) LIMIT 100").execute().await.map(|res| 
res.data.collect::>()) } + }); + join_set.spawn({ + let graph_b = graph_b.clone(); + async move { graph_b.lock().await.query("MATCH (a:actor) WITH a MATCH (b:actor) WHERE a.age = b.age AND a <> b RETURN a, collect(b) LIMIT 100").execute().await.map(|res| res.data.collect::>()) } + }); + join_set.spawn({ + let graph_c = graph_c.clone(); + async move { graph_c.lock().await.query("MATCH (a:actor) WITH a MATCH (b:actor) WHERE a.age = b.age AND a <> b RETURN a, collect(b) LIMIT 100").execute().await.map(|res| res.data.collect::>()) } + }); + join_set.spawn({ + let graph_d = graph_d.clone(); + async move { graph_d.lock().await.query("MATCH (a:actor) WITH a MATCH (b:actor) WHERE a.age = b.age AND a <> b RETURN a, collect(b) LIMIT 100").execute().await.map(|res| res.data.collect::>()) } + }); + + // Order is no longer guaranteed, as all these tasks were nonblocking + while let Some(Ok(res)) = join_set.join_next().await { + let actual_res = res?; + println!("{:?}", actual_res[0]) + } + + graph_a.lock().await.delete().await.ok(); + graph_b.lock().await.delete().await.ok(); + graph_c.lock().await.delete().await.ok(); + graph_d.lock().await.delete().await.ok(); + + Ok(()) +} diff --git a/src/client/asynchronous.rs b/src/client/asynchronous.rs new file mode 100644 index 0000000..d9f9d9a --- /dev/null +++ b/src/client/asynchronous.rs @@ -0,0 +1,451 @@ +/* + * Copyright FalkorDB Ltd. 2023 - present + * Licensed under the Server Side Public License v1 (SSPLv1). 
+ */ + +use crate::{ + client::{FalkorClientProvider, ProvidesSyncConnections}, + connection::{ + asynchronous::{BorrowedAsyncConnection, FalkorAsyncConnection}, + blocking::FalkorSyncConnection, + }, + parser::{parse_config_hashmap, redis_value_as_untyped_string_vec}, + AsyncGraph, ConfigValue, FalkorConnectionInfo, FalkorDBError, FalkorResult, +}; +use std::{collections::HashMap, sync::Arc}; +use tokio::{ + runtime::Handle, + sync::{mpsc, Mutex, RwLock}, + task, +}; + +/// A user-opaque inner struct, containing the actual implementation of the asynchronous client +/// The idea is that each member here is either Copy, or locked in some form, and the public struct only has an Arc to this struct +/// allowing thread safe operations and cloning +pub struct FalkorAsyncClientInner { + _inner: Mutex, + + connection_pool_size: u8, + connection_pool_tx: RwLock>, + connection_pool_rx: Mutex>, +} + +impl FalkorAsyncClientInner { + #[cfg_attr( + feature = "tracing", + tracing::instrument( + name = "Borrow Connection From Connection Pool", + skip_all, + level = "debug" + ) + )] + pub(crate) async fn borrow_connection( + &self, + pool_owner: Arc, + ) -> FalkorResult { + Ok(BorrowedAsyncConnection::new( + self.connection_pool_rx + .lock() + .await + .recv() + .await + .ok_or(FalkorDBError::EmptyConnection)?, + self.connection_pool_tx.read().await.clone(), + pool_owner, + )) + } + + #[cfg_attr( + feature = "tracing", + tracing::instrument( + name = "Get New Async Connection From Client", + skip_all, + level = "info" + ) + )] + pub(crate) async fn get_async_connection(&self) -> FalkorResult { + self._inner.lock().await.get_async_connection().await + } +} + +impl ProvidesSyncConnections for FalkorAsyncClientInner { + #[cfg_attr( + feature = "tracing", + tracing::instrument( + name = "Get New Sync Connection From Client", + skip_all, + level = "info" + ) + )] + fn get_connection(&self) -> FalkorResult { + task::block_in_place(|| 
Handle::current().block_on(self._inner.lock())).get_connection() + } +} + +/// This is the publicly exposed API of the asynchronous Falkor Client +/// It makes no assumptions in regard to which database the Falkor module is running on, +/// and will select it based on enabled features and url connection +/// +/// # Thread Safety +/// This struct is fully thread safe, it can be cloned and passed between threads without constraints, +/// Its API uses only immutable references +pub struct FalkorAsyncClient { + inner: Arc, + _connection_info: FalkorConnectionInfo, +} + +impl FalkorAsyncClient { + pub(crate) async fn create( + mut client: FalkorClientProvider, + connection_info: FalkorConnectionInfo, + num_connections: u8, + ) -> FalkorResult { + let (connection_pool_tx, connection_pool_rx) = mpsc::channel(num_connections as usize); + + // One already exists + for _ in 0..num_connections { + let new_conn = client + .get_async_connection() + .await + .map_err(|err| FalkorDBError::RedisError(err.to_string()))?; + + connection_pool_tx + .send(new_conn) + .await + .map_err(|_| FalkorDBError::EmptyConnection)?; + } + + Ok(Self { + inner: Arc::new(FalkorAsyncClientInner { + _inner: client.into(), + + connection_pool_size: num_connections, + connection_pool_tx: RwLock::new(connection_pool_tx), + connection_pool_rx: Mutex::new(connection_pool_rx), + }), + _connection_info: connection_info, + }) + } + + /// Get the max number of connections in the client's connection pool + pub fn connection_pool_size(&self) -> u8 { + self.inner.connection_pool_size + } + + pub(crate) async fn borrow_connection(&self) -> FalkorResult { + self.inner.borrow_connection(self.inner.clone()).await + } + + /// Return a list of graphs currently residing in the database + /// + /// # Returns + /// A [`Vec`] of [`String`]s, containing the names of available graphs + #[cfg_attr( + feature = "tracing", + tracing::instrument(name = "List Graphs", skip_all, level = "info") + )] + pub async fn 
list_graphs(&self) -> FalkorResult> { + self.borrow_connection() + .await? + .execute_command(None, "GRAPH.LIST", None, None) + .await + .and_then(redis_value_as_untyped_string_vec) + } + + /// Return the current value of a configuration option in the database. + /// + /// # Arguments + /// * `config_Key`: A [`String`] representation of a configuration's key. + /// The config key can also be "*", which will return ALL the configuration options. + /// + /// # Returns + /// A [`HashMap`] comprised of [`String`] keys, and [`ConfigValue`] values. + #[cfg_attr( + feature = "tracing", + tracing::instrument(name = "Get Config Value", skip_all, level = "info") + )] + pub async fn config_get( + &self, + config_key: &str, + ) -> FalkorResult> { + self.borrow_connection() + .await? + .execute_command(None, "GRAPH.CONFIG", Some("GET"), Some(&[config_key])) + .await + .and_then(parse_config_hashmap) + } + + /// Return the current value of a configuration option in the database. + /// + /// # Arguments + /// * `config_Key`: A [`String`] representation of a configuration's key. + /// The config key can also be "*", which will return ALL the configuration options. + /// * `value`: The new value to set, which is anything that can be converted into a [`ConfigValue`], namely string types and i64. + #[cfg_attr( + feature = "tracing", + tracing::instrument(name = "Set Config Value", skip_all, level = "info") + )] + pub async fn config_set>( + &self, + config_key: &str, + value: C, + ) -> FalkorResult { + self.borrow_connection() + .await? + .execute_command( + None, + "GRAPH.CONFIG", + Some("SET"), + Some(&[config_key, value.into().to_string().as_str()]), + ) + .await + } + + /// Opens a graph context for queries and operations + /// + /// # Arguments + /// * `graph_name`: A string identifier of the graph to open. + /// + /// # Returns + /// a [`AsyncGraph`] object, allowing various graph operations. 
+ pub fn select_graph( + &self, + graph_name: T, + ) -> AsyncGraph { + AsyncGraph::new(self.inner.clone(), graph_name) + } + + /// Copies an entire graph and returns the [`AsyncGraph`] for the new copied graph. + /// + /// # Arguments + /// * `graph_to_clone`: A string identifier of the graph to copy. + /// * `new_graph_name`: The name to give the new graph. + /// + /// # Returns + /// If successful, will return the new [`AsyncGraph`] object. + #[cfg_attr( + feature = "tracing", + tracing::instrument(name = "Copy Graph", skip_all, level = "info") + )] + pub async fn copy_graph( + &self, + graph_to_clone: &str, + new_graph_name: &str, + ) -> FalkorResult { + self.borrow_connection() + .await? + .execute_command( + Some(graph_to_clone), + "GRAPH.COPY", + None, + Some(&[new_graph_name]), + ) + .await?; + Ok(self.select_graph(new_graph_name)) + } + + /// Retrieves redis information + #[cfg_attr( + feature = "tracing", + tracing::instrument(name = "Client Get Redis Info", skip_all, level = "info") + )] + pub async fn redis_info( + &self, + section: Option<&str>, + ) -> FalkorResult> { + self.borrow_connection() + .await? + .as_inner()? 
+ .get_redis_info(section) + .await + } +} + +#[cfg(test)] +pub(crate) async fn create_empty_inner_async_client() -> Arc { + let (tx, rx) = mpsc::channel(1); + tx.send(FalkorAsyncConnection::None).await.ok(); + Arc::new(FalkorAsyncClientInner { + _inner: Mutex::new(FalkorClientProvider::None), + connection_pool_size: 0, + connection_pool_tx: RwLock::new(tx), + connection_pool_rx: Mutex::new(rx), + }) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + test_utils::{create_async_test_client, TestAsyncGraphHandle}, + FalkorClientBuilder, + }; + use std::{mem, num::NonZeroU8, thread}; + use tokio::sync::mpsc::error::TryRecvError; + + #[tokio::test(flavor = "multi_thread")] + async fn test_borrow_connection() { + let client = FalkorClientBuilder::new_async() + .with_num_connections(NonZeroU8::new(6).expect("Could not create a perfectly valid u8")) + .build() + .await + .expect("Could not create client for this test"); + + // Client was created with 6 connections + let mut conn_vec = Vec::with_capacity(6); + for _ in 0..6 { + let conn = client.borrow_connection().await; + assert!(conn.is_ok()); + conn_vec.push(conn); + } + + let non_existing_conn = client.inner.connection_pool_rx.lock().await.try_recv(); + assert!(non_existing_conn.is_err()); + + let Err(TryRecvError::Empty) = non_existing_conn else { + panic!("Got error, but not a TryRecvError::Empty, as expected"); + }; + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_list_graphs() { + let client = create_async_test_client().await; + let res = client.list_graphs().await; + assert!(res.is_ok()); + + let graphs = res.unwrap(); + assert_eq!(graphs[0], "imdb"); + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_select_graph_and_query() { + let client = create_async_test_client().await; + + let mut graph = client.select_graph("imdb"); + assert_eq!(graph.graph_name(), "imdb".to_string()); + + let res = graph + .query("MATCH (a:actor) return a") + .execute() + .await + .expect("Could 
not get actors from unmodified graph"); + + assert_eq!(res.data.collect::>().len(), 1317); + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_copy_graph() { + let client = create_async_test_client().await; + + client + .select_graph("imdb_ro_copy_async") + .delete() + .await + .ok(); + + let graph = client.copy_graph("imdb", "imdb_ro_copy_async").await; + assert!(graph.is_ok()); + + let mut graph = TestAsyncGraphHandle { + inner: graph.unwrap(), + }; + + let mut original_graph = client.select_graph("imdb"); + + assert_eq!( + graph + .inner + .query("MATCH (a:actor) RETURN a") + .execute() + .await + .expect("Could not get actors from unmodified graph") + .data + .collect::>(), + original_graph + .query("MATCH (a:actor) RETURN a") + .execute() + .await + .expect("Could not get actors from unmodified graph") + .data + .collect::>() + ) + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_get_config() { + let client = create_async_test_client().await; + + let config = client + .config_get("QUERY_MEM_CAPACITY") + .await + .expect("Could not get configuration"); + + assert_eq!(config.len(), 1); + assert!(config.contains_key("QUERY_MEM_CAPACITY")); + assert_eq!( + mem::discriminant(config.get("QUERY_MEM_CAPACITY").unwrap()), + mem::discriminant(&ConfigValue::Int64(0)) + ); + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_get_config_all() { + let client = create_async_test_client().await; + let configuration = client + .config_get("*") + .await + .expect("Could not get configuration"); + assert_eq!( + configuration.get("THREAD_COUNT").cloned().unwrap(), + ConfigValue::Int64(thread::available_parallelism().unwrap().get() as i64) + ); + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_set_config() { + let client = create_async_test_client().await; + + let config = client + .config_get("MAX_QUEUED_QUERIES") + .await + .expect("Could not get configuration"); + + let current_val = config + .get("MAX_QUEUED_QUERIES") + 
.cloned() + .unwrap() + .as_i64() + .unwrap(); + + let desired_val = if current_val == 4294967295 { + 4294967295 / 2 + } else { + 4294967295 + }; + + client + .config_set("MAX_QUEUED_QUERIES", desired_val) + .await + .expect("Could not set config value"); + + let new_config = client + .config_get("MAX_QUEUED_QUERIES") + .await + .expect("Could not get configuration"); + + assert_eq!( + new_config + .get("MAX_QUEUED_QUERIES") + .cloned() + .unwrap() + .as_i64() + .unwrap(), + desired_val + ); + + client + .config_set("MAX_QUEUED_QUERIES", current_val) + .await + .ok(); + } +} diff --git a/src/client/blocking.rs b/src/client/blocking.rs index fc31ec9..389c9b3 100644 --- a/src/client/blocking.rs +++ b/src/client/blocking.rs @@ -4,9 +4,9 @@ */ use crate::{ - client::FalkorClientProvider, + client::{FalkorClientProvider, ProvidesSyncConnections}, connection::blocking::{BorrowedSyncConnection, FalkorSyncConnection}, - parser::{redis_value_as_string, redis_value_as_untyped_string_vec, redis_value_as_vec}, + parser::{parse_config_hashmap, redis_value_as_untyped_string_vec}, ConfigValue, FalkorConnectionInfo, FalkorDBError, FalkorResult, SyncGraph, }; use parking_lot::{Mutex, RwLock}; @@ -15,6 +15,9 @@ use std::{ sync::{mpsc, Arc}, }; +/// A user-opaque inner struct, containing the actual implementation of the blocking client +/// The idea is that each member here is either Copy, or locked in some form, and the public struct only has an Arc to this struct +/// allowing thread safe operations and cloning pub(crate) struct FalkorSyncClientInner { _inner: Mutex, @@ -45,7 +48,9 @@ impl FalkorSyncClientInner { pool_owner, )) } +} +impl ProvidesSyncConnections for FalkorSyncClientInner { #[cfg_attr( feature = "tracing", tracing::instrument( @@ -54,83 +59,10 @@ impl FalkorSyncClientInner { level = "info" ) )] - pub(crate) fn get_connection(&self) -> FalkorResult { + fn get_connection(&self) -> FalkorResult { self._inner.lock().get_connection() } } -#[cfg_attr( - feature = 
"tracing", - tracing::instrument(name = "Check Is Sentinel", skip_all, level = "info") -)] -fn is_sentinel(conn: &mut FalkorSyncConnection) -> FalkorResult { - let info_map = conn.get_redis_info(Some("server"))?; - Ok(info_map - .get("redis_mode") - .map(|redis_mode| redis_mode == "sentinel") - .unwrap_or_default()) -} - -#[cfg_attr( - feature = "tracing", - tracing::instrument(name = "Get Sentinel Client", skip_all, level = "info") -)] -pub(crate) fn get_sentinel_client( - client: &mut FalkorClientProvider, - connection_info: &redis::ConnectionInfo, -) -> FalkorResult> { - let mut conn = client.get_connection()?; - if !is_sentinel(&mut conn)? { - return Ok(None); - } - - // This could have been so simple using the Sentinel API, but it requires a service name - // Perhaps in the future we can use it if we only support the master instance to be called 'master'? - let sentinel_masters = conn - .execute_command(None, "SENTINEL", Some("MASTERS"), None) - .and_then(redis_value_as_vec)?; - - if sentinel_masters.len() != 1 { - return Err(FalkorDBError::SentinelMastersCount); - } - - let sentinel_master: HashMap<_, _> = sentinel_masters - .into_iter() - .next() - .and_then(|master| master.into_sequence().ok()) - .ok_or(FalkorDBError::SentinelMastersCount)? - .chunks_exact(2) - .flat_map(TryInto::<&[redis::Value; 2]>::try_into) // TODO: In the future, check if this can be done with no copying, but this should be a rare function call tbh - .flat_map(|[key, val]| { - redis_value_as_string(key.to_owned()) - .and_then(|key| redis_value_as_string(val.to_owned()).map(|val| (key, val))) - }) - .collect(); - - let name = sentinel_master - .get("name") - .ok_or(FalkorDBError::SentinelMastersCount)?; - - Ok(Some( - redis::sentinel::SentinelClient::build( - vec![connection_info.to_owned()], - name.to_string(), - Some(redis::sentinel::SentinelNodeConnectionInfo { - tls_mode: match connection_info.addr { - redis::ConnectionAddr::TcpTls { insecure: true, .. 
} => { - Some(redis::TlsMode::Insecure) - } - redis::ConnectionAddr::TcpTls { - insecure: false, .. - } => Some(redis::TlsMode::Secure), - _ => None, - }, - redis_connection_info: Some(connection_info.redis.clone()), - }), - redis::sentinel::SentinelServerType::Master, - ) - .map_err(|err| FalkorDBError::SentinelConnection(err.to_string()))?, - )) -} /// This is the publicly exposed API of the sync Falkor Client /// It makes no assumptions in regard to which database the Falkor module is running on, @@ -218,41 +150,11 @@ impl FalkorSyncClient { &self, config_key: &str, ) -> FalkorResult> { - let config = self - .borrow_connection() + self.borrow_connection() .and_then(|mut conn| { conn.execute_command(None, "GRAPH.CONFIG", Some("GET"), Some(&[config_key])) }) - .and_then(redis_value_as_vec)?; - - if config.len() == 2 { - let [key, val]: [redis::Value; 2] = config.try_into().map_err(|_| { - FalkorDBError::ParsingArrayToStructElementCount( - "Expected exactly 2 elements for configuration option", - ) - })?; - - return redis_value_as_string(key) - .and_then(|key| ConfigValue::try_from(val).map(|val| HashMap::from([(key, val)]))); - } - - Ok(config - .into_iter() - .flat_map(|config| { - redis_value_as_vec(config).and_then(|as_vec| { - let [key, val]: [redis::Value; 2] = as_vec.try_into().map_err(|_| { - FalkorDBError::ParsingArrayToStructElementCount( - "Expected exactly 2 elements for configuration option", - ) - })?; - - Result::<_, FalkorDBError>::Ok(( - redis_value_as_string(key)?, - ConfigValue::try_from(val)?, - )) - }) - }) - .collect::>()) + .and_then(parse_config_hashmap) } /// Return the current value of a configuration option in the database. 
@@ -270,12 +172,14 @@ impl FalkorSyncClient { config_key: &str, value: C, ) -> FalkorResult { - self.borrow_connection()?.execute_command( - None, - "GRAPH.CONFIG", - Some("SET"), - Some(&[config_key, value.into().to_string().as_str()]), - ) + self.borrow_connection().and_then(|mut conn| { + conn.execute_command( + None, + "GRAPH.CONFIG", + Some("SET"), + Some(&[config_key, value.into().to_string().as_str()]), + ) + }) } /// Opens a graph context for queries and operations @@ -334,7 +238,7 @@ impl FalkorSyncClient { } #[cfg(test)] -pub(crate) fn create_empty_inner_client() -> Arc { +pub(crate) fn create_empty_inner_sync_client() -> Arc { let (tx, rx) = mpsc::sync_channel(1); tx.send(FalkorSyncConnection::None).ok(); Arc::new(FalkorSyncClientInner { diff --git a/src/client/builder.rs b/src/client/builder.rs index 340bb7e..b279245 100644 --- a/src/client/builder.rs +++ b/src/client/builder.rs @@ -9,6 +9,9 @@ use crate::{ }; use std::num::NonZeroU8; +#[cfg(feature = "tokio")] +use crate::FalkorAsyncClient; + /// A Builder-pattern implementation struct for creating a new Falkor client. pub struct FalkorClientBuilder { connection_info: Option, @@ -93,9 +96,7 @@ impl FalkorClientBuilder<'S'> { #[allow(irrefutable_let_patterns)] if let FalkorConnectionInfo::Redis(redis_conn_info) = &connection_info { - if let Some(sentinel) = - super::blocking::get_sentinel_client(&mut client, redis_conn_info)? - { + if let Some(sentinel) = client.get_sentinel_client(redis_conn_info)? { client.set_sentinel(sentinel); } } @@ -103,6 +104,40 @@ impl FalkorClientBuilder<'S'> { } } +#[cfg(feature = "tokio")] +impl FalkorClientBuilder<'A'> { + /// Creates a new [`FalkorClientBuilder`] for an asynchronous client. 
+ /// + /// # Returns + /// The new [`FalkorClientBuilder`] + pub fn new_async() -> Self { + FalkorClientBuilder { + connection_info: None, + num_connections: NonZeroU8::new(8).expect("Error creating perfectly valid u8"), + } + } + + /// Consume the builder, returning the newly constructed async client + /// + /// # Returns + /// a new [`FalkorAsyncClient`] + pub async fn build(self) -> FalkorResult { + let connection_info = self + .connection_info + .unwrap_or("falkor://127.0.0.1:6379".try_into()?); + + let mut client = Self::get_client(connection_info.clone())?; + + #[allow(irrefutable_let_patterns)] + if let FalkorConnectionInfo::Redis(redis_conn_info) = &connection_info { + if let Some(sentinel) = client.get_sentinel_client_async(redis_conn_info).await? { + client.set_sentinel(sentinel); + } + } + FalkorAsyncClient::create(client, connection_info, self.num_connections.get()).await + } +} + #[cfg(test)] mod tests { use super::*; @@ -118,15 +153,6 @@ mod tests { .is_ok()); } - #[test] - fn test_sync_builder_redis_fallback() { - let client = FalkorClientBuilder::new().build(); - assert!(client.is_ok()); - - let FalkorConnectionInfo::Redis(redis_info) = client.unwrap()._connection_info; - assert_eq!(redis_info.addr.to_string().as_str(), "127.0.0.1:6379"); - } - #[test] fn test_connection_pool_size() { let client = FalkorClientBuilder::new() diff --git a/src/client/mod.rs b/src/client/mod.rs index 84e49f0..6337696 100644 --- a/src/client/mod.rs +++ b/src/client/mod.rs @@ -4,10 +4,18 @@ */ use crate::{connection::blocking::FalkorSyncConnection, FalkorDBError, FalkorResult}; +use std::collections::HashMap; + +#[cfg(feature = "tokio")] +use crate::connection::asynchronous::FalkorAsyncConnection; +use crate::parser::{redis_value_as_string, redis_value_as_vec}; pub(crate) mod blocking; pub(crate) mod builder; +#[cfg(feature = "tokio")] +pub(crate) mod asynchronous; + #[allow(clippy::large_enum_variant)] pub(crate) enum FalkorClientProvider { #[allow(unused)] @@ -40,6 
+48,28 @@ impl FalkorClientProvider { }) } + #[cfg(feature = "tokio")] + pub(crate) async fn get_async_connection(&mut self) -> FalkorResult { + Ok(match self { + FalkorClientProvider::Redis { + sentinel: Some(sentinel), + .. + } => FalkorAsyncConnection::Redis( + sentinel + .get_async_connection() + .await + .map_err(|err| FalkorDBError::RedisError(err.to_string()))?, + ), + FalkorClientProvider::Redis { client, .. } => FalkorAsyncConnection::Redis( + client + .get_multiplexed_async_connection() + .await + .map_err(|err| FalkorDBError::RedisError(err.to_string()))?, + ), + FalkorClientProvider::None => Err(FalkorDBError::UnavailableProvider)?, + }) + } + pub(crate) fn set_sentinel( &mut self, sentinel_client: redis::sentinel::SentinelClient, @@ -49,4 +79,98 @@ impl FalkorClientProvider { FalkorClientProvider::None => {} } } + + pub(crate) fn get_sentinel_client_common( + &self, + connection_info: &redis::ConnectionInfo, + sentinel_masters: Vec, + ) -> FalkorResult> { + if sentinel_masters.len() != 1 { + return Err(FalkorDBError::SentinelMastersCount); + } + + let sentinel_master: HashMap<_, _> = sentinel_masters + .into_iter() + .next() + .and_then(|master| master.into_sequence().ok()) + .ok_or(FalkorDBError::SentinelMastersCount)? + .chunks_exact(2) + .flat_map(TryInto::<&[redis::Value; 2]>::try_into) // TODO: In the future, check if this can be done with no copying, but this should be a rare function call tbh + .flat_map(|[key, val]| { + redis_value_as_string(key.to_owned()) + .and_then(|key| redis_value_as_string(val.to_owned()).map(|val| (key, val))) + }) + .collect(); + + let name = sentinel_master + .get("name") + .ok_or(FalkorDBError::SentinelMastersCount)?; + + Ok(Some( + redis::sentinel::SentinelClient::build( + vec![connection_info.to_owned()], + name.to_string(), + Some(redis::sentinel::SentinelNodeConnectionInfo { + tls_mode: match connection_info.addr { + redis::ConnectionAddr::TcpTls { insecure: true, .. 
} => { + Some(redis::TlsMode::Insecure) + } + redis::ConnectionAddr::TcpTls { + insecure: false, .. + } => Some(redis::TlsMode::Secure), + _ => None, + }, + redis_connection_info: Some(connection_info.redis.clone()), + }), + redis::sentinel::SentinelServerType::Master, + ) + .map_err(|err| FalkorDBError::SentinelConnection(err.to_string()))?, + )) + } + + #[cfg_attr( + feature = "tracing", + tracing::instrument(name = "Get Sentinel Client", skip_all, level = "info") + )] + pub(crate) fn get_sentinel_client( + &mut self, + connection_info: &redis::ConnectionInfo, + ) -> FalkorResult> { + let mut conn = self.get_connection()?; + if !conn.check_is_redis_sentinel()? { + return Ok(None); + } + + conn.execute_command(None, "SENTINEL", Some("MASTERS"), None) + .and_then(redis_value_as_vec) + .and_then(|sentinel_masters| { + self.get_sentinel_client_common(connection_info, sentinel_masters) + }) + } + + #[cfg(feature = "tokio")] + #[cfg_attr( + feature = "tracing", + tracing::instrument(name = "Get Sentinel Client", skip_all, level = "info") + )] + pub(crate) async fn get_sentinel_client_async( + &mut self, + connection_info: &redis::ConnectionInfo, + ) -> FalkorResult> { + let mut conn = self.get_async_connection().await?; + if !conn.check_is_redis_sentinel().await? { + return Ok(None); + } + + conn.execute_command(None, "SENTINEL", Some("MASTERS"), None) + .await + .and_then(redis_value_as_vec) + .and_then(|sentinel_masters| { + self.get_sentinel_client_common(connection_info, sentinel_masters) + }) + } +} + +pub(crate) trait ProvidesSyncConnections { + fn get_connection(&self) -> FalkorResult; } diff --git a/src/connection/asynchronous.rs b/src/connection/asynchronous.rs new file mode 100644 index 0000000..a9c4230 --- /dev/null +++ b/src/connection/asynchronous.rs @@ -0,0 +1,135 @@ +/* + * Copyright FalkorDB Ltd. 2023 - present + * Licensed under the Server Side Public License v1 (SSPLv1). 
+ */ + +use crate::{ + client::asynchronous::FalkorAsyncClientInner, connection::map_redis_err, + parser::parse_redis_info, FalkorDBError, FalkorResult, +}; +use std::{collections::HashMap, sync::Arc}; +use tokio::sync::mpsc; + +pub(crate) enum FalkorAsyncConnection { + #[allow(unused)] + None, + Redis(redis::aio::MultiplexedConnection), +} + +impl FalkorAsyncConnection { + #[cfg_attr( + feature = "tracing", + tracing::instrument(name = "Connection Inner Execute Command", skip_all, level = "debug") + )] + pub(crate) async fn execute_command( + &mut self, + graph_name: Option<&str>, + command: &str, + subcommand: Option<&str>, + params: Option<&[&str]>, + ) -> FalkorResult { + match self { + FalkorAsyncConnection::Redis(redis_conn) => { + let mut cmd = redis::cmd(command); + cmd.arg(subcommand); + cmd.arg(graph_name); + if let Some(params) = params { + for param in params { + cmd.arg(param.to_string()); + } + } + redis_conn + .send_packed_command(&cmd) + .await + .map_err(map_redis_err) + } + FalkorAsyncConnection::None => Ok(redis::Value::Nil), + } + } + + #[cfg_attr( + feature = "tracing", + tracing::instrument(name = "Connection Get Redis Info", skip_all, level = "info") + )] + pub(crate) async fn get_redis_info( + &mut self, + section: Option<&str>, + ) -> FalkorResult> { + self.execute_command(None, "INFO", section, None) + .await + .and_then(parse_redis_info) + } + + pub(crate) async fn check_is_redis_sentinel(&mut self) -> FalkorResult { + let info_map = self.get_redis_info(Some("server")).await?; + Ok(info_map + .get("redis_mode") + .map(|redis_mode| redis_mode == "sentinel") + .unwrap_or_default()) + } +} + +/// A container for a connection that is borrowed from the pool. +/// Upon going out of scope, it will return the connection to the pool. 
+/// +/// This is publicly exposed for user-implementations of [`FalkorParsable`](crate::FalkorParsable) +pub struct BorrowedAsyncConnection { + conn: Option, + return_tx: mpsc::Sender, + client: Arc, +} + +impl BorrowedAsyncConnection { + pub(crate) fn new( + conn: FalkorAsyncConnection, + return_tx: mpsc::Sender, + client: Arc, + ) -> Self { + Self { + conn: Some(conn), + return_tx, + client, + } + } + + pub(crate) fn as_inner(&mut self) -> FalkorResult<&mut FalkorAsyncConnection> { + self.conn.as_mut().ok_or(FalkorDBError::EmptyConnection) + } + + #[cfg_attr( + feature = "tracing", + tracing::instrument( + name = "Borrowed Connection Execute Command", + skip_all, + level = "trace" + ) + )] + pub(crate) async fn execute_command( + mut self, + graph_name: Option<&str>, + command: &str, + subcommand: Option<&str>, + params: Option<&[&str]>, + ) -> FalkorResult { + match self + .as_inner()? + .execute_command(graph_name, command, subcommand, params) + .await + { + Err(FalkorDBError::ConnectionDown) => { + if let Ok(new_conn) = self.client.get_async_connection().await { + self.conn = Some(new_conn); + return Err(FalkorDBError::ConnectionDown); + } + Err(FalkorDBError::NoConnection) + } + res => res, + } + } + + pub(crate) async fn return_to_pool(self) { + if let Some(conn) = self.conn { + self.return_tx.send(conn).await.ok(); + } + } +} diff --git a/src/connection/blocking.rs b/src/connection/blocking.rs index 1f6be49..0396e95 100644 --- a/src/connection/blocking.rs +++ b/src/connection/blocking.rs @@ -4,8 +4,10 @@ */ use crate::{ - client::blocking::FalkorSyncClientInner, parser::redis_value_as_string, FalkorDBError, - FalkorResult, + client::{blocking::FalkorSyncClientInner, ProvidesSyncConnections}, + connection::map_redis_err, + parser::parse_redis_info, + FalkorDBError, FalkorResult, }; use std::{ collections::HashMap, @@ -41,15 +43,7 @@ impl FalkorSyncConnection { cmd.arg(param.to_string()); } } - redis_conn - .req_command(&cmd) - .map_err(|err| match 
err.kind() { - redis::ErrorKind::IoError - | redis::ErrorKind::ClusterConnectionNotFound - | redis::ErrorKind::ClusterDown - | redis::ErrorKind::MasterDown => FalkorDBError::ConnectionDown, - _ => FalkorDBError::RedisError(err.to_string()), - }) + redis_conn.req_command(&cmd).map_err(map_redis_err) } FalkorSyncConnection::None => Ok(redis::Value::Nil), } @@ -64,17 +58,15 @@ impl FalkorSyncConnection { section: Option<&str>, ) -> FalkorResult> { self.execute_command(None, "INFO", section, None) - .and_then(|res| { - redis_value_as_string(res) - .map(|info| { - info.split("\r\n") - .map(|info_item| info_item.split(':').collect::>()) - .flat_map(TryInto::<[&str; 2]>::try_into) - .map(|[key, val]| (key.to_string(), val.to_string())) - .collect() - }) - .map_err(|_| FalkorDBError::ParsingString) - }) + .and_then(parse_redis_info) + } + + pub(crate) fn check_is_redis_sentinel(&mut self) -> FalkorResult { + let info_map = self.get_redis_info(Some("server"))?; + Ok(info_map + .get("redis_mode") + .map(|redis_mode| redis_mode == "sentinel") + .unwrap_or_default()) } } diff --git a/src/connection/mod.rs b/src/connection/mod.rs index 2d11cf3..a86a020 100644 --- a/src/connection/mod.rs +++ b/src/connection/mod.rs @@ -3,4 +3,19 @@ * Licensed under the Server Side Public License v1 (SSPLv1). */ +use crate::FalkorDBError; + pub(crate) mod blocking; + +#[cfg(feature = "tokio")] +pub(crate) mod asynchronous; + +fn map_redis_err(error: redis::RedisError) -> FalkorDBError { + match error.kind() { + redis::ErrorKind::IoError + | redis::ErrorKind::ClusterConnectionNotFound + | redis::ErrorKind::ClusterDown + | redis::ErrorKind::MasterDown => FalkorDBError::ConnectionDown, + _ => FalkorDBError::RedisError(error.to_string()), + } +} diff --git a/src/graph/asynchronous.rs b/src/graph/asynchronous.rs new file mode 100644 index 0000000..fb14f9b --- /dev/null +++ b/src/graph/asynchronous.rs @@ -0,0 +1,613 @@ +/* + * Copyright FalkorDB Ltd. 
2023 - present + * Licensed under the Server Side Public License v1 (SSPLv1). + */ + +use crate::{ + client::asynchronous::FalkorAsyncClientInner, + graph::HasGraphSchema, + graph::{generate_create_index_query, generate_drop_index_query}, + parser::redis_value_as_vec, + Constraint, ConstraintType, EntityType, ExecutionPlan, FalkorIndex, FalkorResult, GraphSchema, + IndexType, LazyResultSet, ProcedureQueryBuilder, QueryBuilder, QueryResult, SlowlogEntry, +}; +use std::{collections::HashMap, fmt::Display, sync::Arc}; + +/// The main graph API, this allows the user to perform graph operations while exposing as little details as possible. +/// # Thread Safety +/// This struct is NOT thread safe, and synchronization is up to the user. +/// It does, however, allow the user to perform nonblocking operations +/// Graph schema is not shared between instances of AsyncGraph, even with the same name, but cloning will maintain the current schema +#[derive(Clone)] +pub struct AsyncGraph { + client: Arc, + graph_name: String, + graph_schema: GraphSchema, +} + +impl AsyncGraph { + pub(crate) fn new( + client: Arc, + graph_name: T, + ) -> Self { + Self { + graph_name: graph_name.to_string(), + graph_schema: GraphSchema::new(graph_name, client.clone()), // Required for requesting refreshes + client, + } + } + + /// Returns the name of the graph for which this API performs operations. + /// + /// # Returns + /// The graph name as a string slice, without cloning. + pub fn graph_name(&self) -> &str { + self.graph_name.as_str() + } + + pub(crate) fn get_client(&self) -> &Arc { + &self.client + } + + #[cfg_attr( + feature = "tracing", + tracing::instrument(name = "Graph Execute Command", skip_all, level = "info") + )] + async fn execute_command( + &self, + command: &str, + subcommand: Option<&str>, + params: Option<&[&str]>, + ) -> FalkorResult { + self.client + .borrow_connection(self.client.clone()) + .await? 
+ .execute_command(Some(self.graph_name.as_str()), command, subcommand, params) + .await + } + + /// Deletes the graph stored in the database, and drop all the schema caches. + /// NOTE: This still maintains the graph API, operations are still viable. + #[cfg_attr( + feature = "tracing", + tracing::instrument(name = "Delete Graph", skip_all, level = "info") + )] + pub async fn delete(&mut self) -> FalkorResult<()> { + self.execute_command("GRAPH.DELETE", None, None).await?; + self.graph_schema.clear(); + Ok(()) + } + + /// Retrieves the slowlog data, which contains info about the N slowest queries. + /// + /// # Returns + /// A [`Vec`] of [`SlowlogEntry`], providing information about each query. + #[cfg_attr( + feature = "tracing", + tracing::instrument(name = "Get Graph Slowlog", skip_all, level = "info") + )] + pub async fn slowlog(&self) -> FalkorResult> { + self.execute_command("GRAPH.SLOWLOG", None, None) + .await + .and_then(|res| { + redis_value_as_vec(res) + .map(|as_vec| as_vec.into_iter().flat_map(SlowlogEntry::parse).collect()) + }) + } + + /// Resets the slowlog, all query time data will be cleared. 
+ #[cfg_attr( + feature = "tracing", + tracing::instrument(name = "Reset Graph Slowlog", skip_all, level = "info") + )] + pub async fn slowlog_reset(&self) -> FalkorResult { + self.execute_command("GRAPH.SLOWLOG", None, Some(&["RESET"])) + .await + } + + /// Creates a [`QueryBuilder`] for this graph, in an attempt to profile a specific query + /// This [`QueryBuilder`] has to be dropped or ran using [`QueryBuilder::execute`], before reusing the graph, as it takes a mutable reference to the graph for as long as it exists + /// + /// # Arguments + /// * `query_string`: The query to profile + /// + /// # Returns + /// A [`QueryBuilder`] object, which when performed will return an [`ExecutionPlan`] + pub fn profile<'a>( + &'a mut self, + query_string: &'a str, + ) -> QueryBuilder { + QueryBuilder::<'a>::new(self, "GRAPH.PROFILE", query_string) + } + + /// Creates a [`QueryBuilder`] for this graph, in an attempt to explain a specific query + /// This [`QueryBuilder`] has to be dropped or ran using [`QueryBuilder::execute`], before reusing the graph, as it takes a mutable reference to the graph for as long as it exists + /// + /// # Arguments + /// * `query_string`: The query to explain the process for + /// + /// # Returns + /// A [`QueryBuilder`] object, which when performed will return an [`ExecutionPlan`] + pub fn explain<'a>( + &'a mut self, + query_string: &'a str, + ) -> QueryBuilder { + QueryBuilder::new(self, "GRAPH.EXPLAIN", query_string) + } + + /// Creates a [`QueryBuilder`] for this graph + /// This [`QueryBuilder`] has to be dropped or ran using [`QueryBuilder::execute`], before reusing the graph, as it takes a mutable reference to the graph for as long as it exists + /// + /// # Arguments + /// * `query_string`: The query to run + /// + /// # Returns + /// A [`QueryBuilder`] object, which when performed will return a [`QueryResult`] + pub fn query( + &mut self, + query_string: T, + ) -> QueryBuilder, T, Self> { + QueryBuilder::new(self, "GRAPH.QUERY", 
query_string) + } + + /// Creates a [`QueryBuilder`] for this graph, for a readonly query + /// This [`QueryBuilder`] has to be dropped or ran using [`QueryBuilder::execute`], before reusing the graph, as it takes a mutable reference to the graph for as long as it exists + /// Read-only queries are more limited with the operations they are allowed to perform. + /// + /// # Arguments + /// * `query_string`: The query to run + /// + /// # Returns + /// A [`QueryBuilder`] object + pub fn ro_query<'a>( + &'a mut self, + query_string: &'a str, + ) -> QueryBuilder, &str, Self> { + QueryBuilder::new(self, "GRAPH.QUERY_RO", query_string) + } + + /// Creates a [`ProcedureQueryBuilder`] for this graph + /// This [`ProcedureQueryBuilder`] has to be dropped or ran using [`ProcedureQueryBuilder::execute`], before reusing the graph, as it takes a mutable reference to the graph for as long as it exists + /// Read-only queries are more limited with the operations they are allowed to perform. + /// + /// # Arguments + /// * `procedure_name`: The name of the procedure to call + /// + /// # Returns + /// A [`ProcedureQueryBuilder`] object + pub fn call_procedure<'a, P>( + &'a mut self, + procedure_name: &'a str, + ) -> ProcedureQueryBuilder { + ProcedureQueryBuilder::new(self, procedure_name) + } + + /// Creates a [`ProcedureQueryBuilder`] for this graph, for a readonly procedure + /// This [`ProcedureQueryBuilder`] has to be dropped or ran using [`ProcedureQueryBuilder::execute`], before reusing the graph, as it takes a mutable reference to the graph for as long as it exists + /// Read-only procedures are more limited with the operations they are allowed to perform. 
+ /// + /// # Arguments + /// * `procedure_name`: The name of the procedure to call + /// + /// # Returns + /// A [`ProcedureQueryBuilder`] object + pub fn call_procedure_ro<'a, P>( + &'a mut self, + procedure_name: &'a str, + ) -> ProcedureQueryBuilder { + ProcedureQueryBuilder::new_readonly(self, procedure_name) + } + + /// Calls the DB.INDICES procedure on the graph, returning all the indexing methods currently used + /// + /// # Returns + /// A [`Vec`] of [`FalkorIndex`] + #[cfg_attr( + feature = "tracing", + tracing::instrument(name = "List Graph Indices", skip_all, level = "info") + )] + pub async fn list_indices(&mut self) -> FalkorResult>> { + ProcedureQueryBuilder::>, Self>::new(self, "DB.INDEXES") + .execute() + .await + } + + /// Creates a new index in the graph, for the selected entity type(Node/Edge), selected label, and properties + /// + /// # Arguments + /// * `index_field_type`: + /// * `entity_type`: + /// * `label`: + /// * `properties`: + /// * `options`: + /// + /// # Returns + /// A [`LazyResultSet`] containing information on the created index + #[cfg_attr( + feature = "tracing", + tracing::instrument(name = "Graph Create Index", skip_all, level = "info") + )] + pub async fn create_index( + &mut self, + index_field_type: IndexType, + entity_type: EntityType, + label: &str, + properties: &[P], + options: Option<&HashMap>, + ) -> FalkorResult> { + // Create index from these properties + let query_str = + generate_create_index_query(index_field_type, entity_type, label, properties, options); + + QueryBuilder::, String, Self>::new( + self, + "GRAPH.QUERY", + query_str, + ) + .execute() + .await + } + + /// Drop an existing index, by specifying its type, entity, label and specific properties + /// + /// # Arguments + /// * `index_field_type` + #[cfg_attr( + feature = "tracing", + tracing::instrument(name = "Graph Drop Index", skip_all, level = "info") + )] + pub async fn drop_index( + &mut self, + index_field_type: IndexType, + entity_type: 
EntityType, + label: &str, + properties: &[P], + ) -> FalkorResult> { + let query_str = generate_drop_index_query(index_field_type, entity_type, label, properties); + self.query(query_str).execute().await + } + + /// Calls the DB.CONSTRAINTS procedure on the graph, returning an array of the graph's constraints + /// + /// # Returns + /// A tuple where the first element is a [`Vec`] of [`Constraint`]s, and the second element is a [`Vec`] of stats as [`String`]s + #[cfg_attr( + feature = "tracing", + tracing::instrument(name = "List Graph Constraints", skip_all, level = "info") + )] + pub async fn list_constraints(&mut self) -> FalkorResult>> { + ProcedureQueryBuilder::>, Self>::new(self, "DB.CONSTRAINTS") + .execute() + .await + } + + /// Creates a new constraint for this graph, making the provided properties mandatory + /// + /// # Arguments + /// * `entity_type`: Whether to apply this constraint on nodes or relationships. + /// * `label`: Entities with this label will have this constraint applied to them. + /// * `properties`: A slice of the names of properties this constraint will apply to. + #[cfg_attr( + feature = "tracing", + tracing::instrument(name = "Create Graph Mandatory Constraint", skip_all, level = "info") + )] + pub async fn create_mandatory_constraint( + &self, + entity_type: EntityType, + label: &str, + properties: &[&str], + ) -> FalkorResult { + let entity_type = entity_type.to_string(); + let properties_count = properties.len().to_string(); + + let mut params = Vec::with_capacity(5 + properties.len()); + params.extend([ + "MANDATORY", + entity_type.as_str(), + label, + "PROPERTIES", + properties_count.as_str(), + ]); + params.extend(properties); + + self.execute_command("GRAPH.CONSTRAINT", Some("CREATE"), Some(params.as_slice())) + .await + } + + /// Creates a new constraint for this graph, making the provided properties unique + /// + /// # Arguments + /// * `entity_type`: Whether to apply this constraint on nodes or relationships. 
+ /// * `label`: Entities with this label will have this constraint applied to them. + /// * `properties`: A slice of the names of properties this constraint will apply to. + #[cfg_attr( + feature = "tracing", + tracing::instrument(name = "Create Graph Unique Constraint", skip_all, level = "info") + )] + pub async fn create_unique_constraint( + &mut self, + entity_type: EntityType, + label: String, + properties: &[&str], + ) -> FalkorResult { + self.create_index( + IndexType::Range, + entity_type, + label.as_str(), + properties, + None, + ) + .await?; + + let entity_type = entity_type.to_string(); + let properties_count = properties.len().to_string(); + let mut params: Vec<&str> = Vec::with_capacity(5 + properties.len()); + params.extend([ + "UNIQUE", + entity_type.as_str(), + label.as_str(), + "PROPERTIES", + properties_count.as_str(), + ]); + params.extend(properties); + + // create constraint using index + self.execute_command("GRAPH.CONSTRAINT", Some("CREATE"), Some(params.as_slice())) + .await + } + + /// Drop an existing constraint from the graph + /// + /// # Arguments + /// * `constraint_type`: Which type of constraint to remove. + /// * `entity_type`: Whether this constraint exists on nodes or relationships. + /// * `label`: Remove the constraint from entities with this label. + /// * `properties`: A slice of the names of properties to remove the constraint from. 
+ #[cfg_attr( + feature = "tracing", + tracing::instrument(name = "Drop Graph Constraint", skip_all, level = "info") + )] + pub async fn drop_constraint( + &self, + constraint_type: ConstraintType, + entity_type: EntityType, + label: &str, + properties: &[&str], + ) -> FalkorResult { + let constraint_type = constraint_type.to_string(); + let entity_type = entity_type.to_string(); + let properties_count = properties.len().to_string(); + + let mut params = Vec::with_capacity(5 + properties.len()); + params.extend([ + constraint_type.as_str(), + entity_type.as_str(), + label, + "PROPERTIES", + properties_count.as_str(), + ]); + params.extend(properties); + + self.execute_command("GRAPH.CONSTRAINT", Some("DROP"), Some(params.as_slice())) + .await + } +} + +impl HasGraphSchema for AsyncGraph { + fn get_graph_schema_mut(&mut self) -> &mut GraphSchema { + &mut self.graph_schema + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{test_utils::open_async_test_graph, IndexType}; + + #[tokio::test(flavor = "multi_thread")] + async fn test_create_drop_index() { + let mut graph = open_async_test_graph("test_create_drop_index_async").await; + graph + .inner + .create_index( + IndexType::Fulltext, + EntityType::Node, + "actor", + &["Hello"], + None, + ) + .await + .expect("Could not create index"); + + let indices = graph + .inner + .list_indices() + .await + .expect("Could not list indices"); + + assert_eq!(indices.data.len(), 2); + assert_eq!( + indices.data[0].field_types["Hello"], + vec![IndexType::Fulltext] + ); + + graph + .inner + .drop_index(IndexType::Fulltext, EntityType::Node, "actor", &["Hello"]) + .await + .expect("Could not drop index"); + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_list_indices() { + let mut graph = open_async_test_graph("test_list_indices_async").await; + let indices = graph + .inner + .list_indices() + .await + .expect("Could not list indices"); + + assert_eq!(indices.data.len(), 1); + 
assert_eq!(indices.data[0].entity_type, EntityType::Node); + assert_eq!(indices.data[0].index_label, "actor".to_string()); + assert_eq!(indices.data[0].field_types.len(), 1); + assert_eq!( + indices.data[0].field_types, + HashMap::from([("name".to_string(), vec![IndexType::Fulltext])]) + ); + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_create_drop_mandatory_constraint() { + let graph = open_async_test_graph("test_mandatory_constraint_async").await; + + graph + .inner + .create_mandatory_constraint(EntityType::Edge, "act", &["hello", "goodbye"]) + .await + .expect("Could not create constraint"); + + graph + .inner + .drop_constraint( + ConstraintType::Mandatory, + EntityType::Edge, + "act", + &["hello", "goodbye"], + ) + .await + .expect("Could not drop constraint"); + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_create_drop_unique_constraint() { + let mut graph = open_async_test_graph("test_unique_constraint_async").await; + + graph + .inner + .create_unique_constraint( + EntityType::Node, + "actor".to_string(), + &["first_name", "last_name"], + ) + .await + .expect("Could not create constraint"); + + graph + .inner + .drop_constraint( + ConstraintType::Unique, + EntityType::Node, + "actor", + &["first_name", "last_name"], + ) + .await + .expect("Could not drop constraint"); + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_list_constraints() { + let mut graph = open_async_test_graph("test_list_constraint_async").await; + + graph + .inner + .create_unique_constraint( + EntityType::Node, + "actor".to_string(), + &["first_name", "last_name"], + ) + .await + .expect("Could not create constraint"); + + let res = graph + .inner + .list_constraints() + .await + .expect("Could not list constraints"); + assert_eq!(res.data.len(), 1); + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_slowlog() { + let mut graph = open_async_test_graph("test_slowlog_async").await; + + graph + .inner + .query("UNWIND range(0, 500) 
AS x RETURN x") + .execute() + .await + .expect("Could not generate the fast query"); + graph + .inner + .query("UNWIND range(0, 100000) AS x RETURN x") + .execute() + .await + .expect("Could not generate the slow query"); + + let slowlog = graph + .inner + .slowlog() + .await + .expect("Could not get slowlog entries"); + + assert_eq!(slowlog.len(), 2); + assert_eq!( + slowlog[0].arguments, + "UNWIND range(0, 500) AS x RETURN x".to_string() + ); + assert_eq!( + slowlog[1].arguments, + "UNWIND range(0, 100000) AS x RETURN x".to_string() + ); + + graph + .inner + .slowlog_reset() + .await + .expect("Could not reset slowlog memory"); + let slowlog_after_reset = graph + .inner + .slowlog() + .await + .expect("Could not get slowlog entries after reset"); + assert!(slowlog_after_reset.is_empty()); + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_explain() { + let mut graph = open_async_test_graph("test_explain_async").await; + + let execution_plan = graph.inner.explain("MATCH (a:actor) WITH a MATCH (b:actor) WHERE a.age = b.age AND a <> b RETURN a, collect(b) LIMIT 100").execute().await.expect("Could not create execution plan"); + assert_eq!(execution_plan.plan().len(), 7); + assert!(execution_plan.operations().get("Aggregate").is_some()); + assert_eq!(execution_plan.operations()["Aggregate"].len(), 1); + + assert_eq!( + execution_plan.string_representation(), + "\nResults\n Limit\n Aggregate\n Filter\n Node By Label Scan | (b:actor)\n Project\n Node By Label Scan | (a:actor)" + ); + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_profile() { + let mut graph = open_async_test_graph("test_profile_async").await; + + let execution_plan = graph + .inner + .profile("UNWIND range(0, 1000) AS x RETURN x") + .execute() + .await + .expect("Could not generate the query"); + + assert_eq!(execution_plan.plan().len(), 3); + + let expected = vec!["Results", "Project", "Unwind"]; + let mut current_rc = execution_plan.operation_tree().clone(); + for step in 
expected { + assert_eq!(current_rc.name, step); + if step != "Unwind" { + current_rc = current_rc.children[0].clone(); + } + } + } +} diff --git a/src/graph/blocking.rs b/src/graph/blocking.rs index 0f113db..1f641c6 100644 --- a/src/graph/blocking.rs +++ b/src/graph/blocking.rs @@ -4,21 +4,23 @@ */ use crate::{ - client::blocking::FalkorSyncClientInner, parser::redis_value_as_vec, Constraint, - ConstraintType, EntityType, ExecutionPlan, FalkorIndex, FalkorResult, GraphSchema, IndexType, - LazyResultSet, ProcedureQueryBuilder, QueryBuilder, QueryResult, SlowlogEntry, + client::blocking::FalkorSyncClientInner, + graph::{generate_create_index_query, generate_drop_index_query, HasGraphSchema}, + parser::redis_value_as_vec, + Constraint, ConstraintType, EntityType, ExecutionPlan, FalkorIndex, FalkorResult, GraphSchema, + IndexType, LazyResultSet, ProcedureQueryBuilder, QueryBuilder, QueryResult, SlowlogEntry, }; use std::{collections::HashMap, fmt::Display, sync::Arc}; /// The main graph API, this allows the user to perform graph operations while exposing as little details as possible. /// # Thread Safety /// This struct is NOT thread safe, and synchronization is up to the user. 
-/// Also, graph schema is not shared between instances of SyncGraph, even with the same name +/// Graph schema is not shared between instances of SyncGraph, even with the same name, but cloning will maintain the current schema #[derive(Clone)] pub struct SyncGraph { - pub(crate) client: Arc, + client: Arc, graph_name: String, - pub(crate) graph_schema: GraphSchema, + graph_schema: GraphSchema, } impl SyncGraph { @@ -41,6 +43,10 @@ impl SyncGraph { self.graph_name.as_str() } + pub(crate) fn get_client(&self) -> &Arc { + &self.client + } + #[cfg_attr( feature = "tracing", tracing::instrument(name = "Graph Execute Command", skip_all, level = "info") @@ -104,7 +110,7 @@ impl SyncGraph { pub fn profile<'a>( &'a mut self, query_string: &'a str, - ) -> QueryBuilder { + ) -> QueryBuilder { QueryBuilder::<'a>::new(self, "GRAPH.PROFILE", query_string) } @@ -119,7 +125,7 @@ impl SyncGraph { pub fn explain<'a>( &'a mut self, query_string: &'a str, - ) -> QueryBuilder { + ) -> QueryBuilder { QueryBuilder::new(self, "GRAPH.EXPLAIN", query_string) } @@ -134,7 +140,7 @@ impl SyncGraph { pub fn query( &mut self, query_string: T, - ) -> QueryBuilder, T> { + ) -> QueryBuilder, T, Self> { QueryBuilder::new(self, "GRAPH.QUERY", query_string) } @@ -150,7 +156,7 @@ impl SyncGraph { pub fn ro_query<'a>( &'a mut self, query_string: &'a str, - ) -> QueryBuilder, &str> { + ) -> QueryBuilder, &str, Self> { QueryBuilder::new(self, "GRAPH.QUERY_RO", query_string) } @@ -166,7 +172,7 @@ impl SyncGraph { pub fn call_procedure<'a, P>( &'a mut self, procedure_name: &'a str, - ) -> ProcedureQueryBuilder

{ + ) -> ProcedureQueryBuilder { ProcedureQueryBuilder::new(self, procedure_name) } @@ -182,7 +188,7 @@ impl SyncGraph { pub fn call_procedure_ro<'a, P>( &'a mut self, procedure_name: &'a str, - ) -> ProcedureQueryBuilder

{ + ) -> ProcedureQueryBuilder { ProcedureQueryBuilder::new_readonly(self, procedure_name) } @@ -195,7 +201,8 @@ impl SyncGraph { tracing::instrument(name = "List Graph Indices", skip_all, level = "info") )] pub fn list_indices(&mut self) -> FalkorResult>> { - ProcedureQueryBuilder::>>::new(self, "DB.INDEXES").execute() + ProcedureQueryBuilder::>, Self>::new(self, "DB.INDEXES") + .execute() } /// Creates a new index in the graph, for the selected entity type(Node/Edge), selected label, and properties @@ -222,40 +229,16 @@ impl SyncGraph { options: Option<&HashMap>, ) -> FalkorResult> { // Create index from these properties - let properties_string = properties - .iter() - .map(|element| format!("l.{}", element)) - .collect::>() - .join(", "); - - let pattern = match entity_type { - EntityType::Node => format!("(l:{})", label), - EntityType::Edge => format!("()-[l:{}]->()", label), - }; - - let idx_type = match index_field_type { - IndexType::Range => "", - IndexType::Vector => "VECTOR ", - IndexType::Fulltext => "FULLTEXT ", - }; - - let options_string = options - .map(|hashmap| { - hashmap - .iter() - .map(|(key, val)| format!("'{key}':'{val}'")) - .collect::>() - .join(",") - }) - .map(|options_string| format!(" OPTIONS {{ {} }}", options_string)) - .unwrap_or_default(); - let query_str = format!( - "CREATE {idx_type}INDEX FOR {pattern} ON ({}){}", - properties_string, options_string - ); - QueryBuilder::, String>::new(self, "GRAPH.QUERY", query_str) - .execute() + let query_str = + generate_create_index_query(index_field_type, entity_type, label, properties, options); + + QueryBuilder::, String, Self>::new( + self, + "GRAPH.QUERY", + query_str, + ) + .execute() } /// Drop an existing index, by specifying its type, entity, label and specific properties @@ -266,35 +249,14 @@ impl SyncGraph { feature = "tracing", tracing::instrument(name = "Graph Drop Index", skip_all, level = "info") )] - pub fn drop_index( + pub fn drop_index( &mut self, index_field_type: 
IndexType, entity_type: EntityType, - label: L, + label: &str, properties: &[P], ) -> FalkorResult> { - let properties_string = properties - .iter() - .map(|element| format!("e.{}", element.to_string())) - .collect::>() - .join(", "); - - let pattern = match entity_type { - EntityType::Node => format!("(e:{})", label.to_string()), - EntityType::Edge => format!("()-[e:{}]->()", label.to_string()), - }; - - let idx_type = match index_field_type { - IndexType::Range => "", - IndexType::Vector => "VECTOR", - IndexType::Fulltext => "FULLTEXT", - } - .to_string(); - - let query_str = format!( - "DROP {idx_type} INDEX for {pattern} ON ({})", - properties_string - ); + let query_str = generate_drop_index_query(index_field_type, entity_type, label, properties); self.query(query_str).execute() } @@ -307,7 +269,8 @@ impl SyncGraph { tracing::instrument(name = "List Graph Constraints", skip_all, level = "info") )] pub fn list_constraints(&mut self) -> FalkorResult>> { - ProcedureQueryBuilder::>>::new(self, "DB.CONSTRAINTS").execute() + ProcedureQueryBuilder::>, Self>::new(self, "DB.CONSTRAINTS") + .execute() } /// Creates a new constraint for this graph, making the provided properties mandatory @@ -418,6 +381,12 @@ impl SyncGraph { } } +impl HasGraphSchema for SyncGraph { + fn get_graph_schema_mut(&mut self) -> &mut GraphSchema { + &mut self.graph_schema + } +} + #[cfg(test)] mod tests { use super::*; @@ -447,12 +416,7 @@ mod tests { let indices = graph .inner - .drop_index( - IndexType::Fulltext, - EntityType::Node, - "actor".to_string(), - &["Hello"], - ) + .drop_index(IndexType::Fulltext, EntityType::Node, "actor", &["Hello"]) .expect("Could not drop index"); assert_eq!(indices.get_indices_deleted(), Some(1)); } diff --git a/src/graph/mod.rs b/src/graph/mod.rs index eb20df3..99b0c66 100644 --- a/src/graph/mod.rs +++ b/src/graph/mod.rs @@ -3,5 +3,86 @@ * Licensed under the Server Side Public License v1 (SSPLv1). 
*/ +use crate::{EntityType, GraphSchema, IndexType}; +use std::{collections::HashMap, fmt::Display}; + pub(crate) mod blocking; pub(crate) mod query_builder; + +#[cfg(feature = "tokio")] +pub(crate) mod asynchronous; + +pub trait HasGraphSchema { + fn get_graph_schema_mut(&mut self) -> &mut GraphSchema; +} + +pub fn generate_create_index_query( + index_field_type: IndexType, + entity_type: EntityType, + label: &str, + properties: &[P], + options: Option<&HashMap>, +) -> String { + let properties_string = properties + .iter() + .map(|element| format!("l.{}", element)) + .collect::>() + .join(", "); + + let pattern = match entity_type { + EntityType::Node => format!("(l:{})", label), + EntityType::Edge => format!("()-[l:{}]->()", label), + }; + + let idx_type = match index_field_type { + IndexType::Range => "", + IndexType::Vector => "VECTOR ", + IndexType::Fulltext => "FULLTEXT ", + }; + + let options_string = options + .map(|hashmap| { + hashmap + .iter() + .map(|(key, val)| format!("'{key}':'{val}'")) + .collect::>() + .join(",") + }) + .map(|options_string| format!(" OPTIONS {{ {} }}", options_string)) + .unwrap_or_default(); + + format!( + "CREATE {idx_type}INDEX FOR {pattern} ON ({}){}", + properties_string, options_string + ) +} + +pub fn generate_drop_index_query( + index_field_type: IndexType, + entity_type: EntityType, + label: &str, + properties: &[P], +) -> String { + let properties_string = properties + .iter() + .map(|element| format!("e.{}", element)) + .collect::>() + .join(", "); + + let pattern = match entity_type { + EntityType::Node => format!("(e:{})", label), + EntityType::Edge => format!("()-[e:{}]->()", label), + }; + + let idx_type = match index_field_type { + IndexType::Range => "", + IndexType::Vector => "VECTOR", + IndexType::Fulltext => "FULLTEXT", + } + .to_string(); + + format!( + "DROP {idx_type} INDEX for {pattern} ON ({})", + properties_string + ) +} diff --git a/src/graph/query_builder.rs b/src/graph/query_builder.rs index 
fd387d8..040a84c 100644 --- a/src/graph/query_builder.rs +++ b/src/graph/query_builder.rs @@ -4,8 +4,8 @@ */ use crate::{ - connection::blocking::BorrowedSyncConnection, parser::redis_value_as_vec, Constraint, - ExecutionPlan, FalkorDBError, FalkorIndex, FalkorResult, LazyResultSet, QueryResult, SyncGraph, + graph::HasGraphSchema, parser::redis_value_as_vec, AsyncGraph, Constraint, ExecutionPlan, + FalkorDBError, FalkorIndex, FalkorResult, LazyResultSet, QueryResult, SyncGraph, }; use std::{collections::HashMap, fmt::Display, marker::PhantomData, ops::Not}; @@ -35,18 +35,18 @@ pub(crate) fn construct_query( } /// A Builder-pattern struct that allows creating and executing queries on a graph -pub struct QueryBuilder<'a, Output, T: Display> { +pub struct QueryBuilder<'a, Output, T: Display, G: HasGraphSchema> { _unused: PhantomData, - graph: &'a mut SyncGraph, + graph: &'a mut G, command: &'a str, query_string: T, params: Option<&'a HashMap>, timeout: Option, } -impl<'a, Output, T: Display> QueryBuilder<'a, Output, T> { +impl<'a, Output, T: Display, G: HasGraphSchema> QueryBuilder<'a, Output, T, G> { pub(crate) fn new( - graph: &'a mut SyncGraph, + graph: &'a mut G, command: &'a str, query_string: T, ) -> Self { @@ -88,38 +88,11 @@ impl<'a, Output, T: Display> QueryBuilder<'a, Output, T> { } } - #[cfg_attr( - feature = "tracing", - tracing::instrument(name = "Common Query Execution Steps", skip_all, level = "trace") - )] - fn common_execute_steps(&mut self) -> FalkorResult { - let mut conn = self - .graph - .client - .borrow_connection(self.graph.client.clone())?; - let query = construct_query(&self.query_string, self.params); - - let timeout = self.timeout.map(|timeout| format!("timeout {timeout}")); - let mut params = vec![query.as_str(), "--compact"]; - params.extend(timeout.as_deref()); - - conn.execute_command( - Some(self.graph.graph_name()), - self.command, - None, - Some(params.as_slice()), - ) - } -} - -impl<'a, T: Display> QueryBuilder<'a, QueryResult>, T> 
{ - /// Executes the query, retuning a [`QueryResult`], with a [`LazyResultSet`] as its `data` member - #[cfg_attr( - feature = "tracing", - tracing::instrument(name = "Execute Lazy Result Set Query", skip_all, level = "info") - )] - pub fn execute(mut self) -> FalkorResult>> { - let res = self.common_execute_steps().and_then(redis_value_as_vec)?; + fn generate_query_result_set( + self, + value: redis::Value, + ) -> FalkorResult>> { + let res = redis_value_as_vec(value)?; match res.len() { 1 => { @@ -131,7 +104,7 @@ impl<'a, T: Display> QueryBuilder<'a, QueryResult>, T> { QueryResult::from_response( None, - LazyResultSet::new(Default::default(), &mut self.graph.graph_schema), + LazyResultSet::new(Default::default(), self.graph.get_graph_schema_mut()), stats, ) } @@ -144,7 +117,7 @@ impl<'a, T: Display> QueryBuilder<'a, QueryResult>, T> { QueryResult::from_response( Some(header), - LazyResultSet::new(Default::default(), &mut self.graph.graph_schema), + LazyResultSet::new(Default::default(), self.graph.get_graph_schema_mut()), stats, ) } @@ -157,7 +130,10 @@ impl<'a, T: Display> QueryBuilder<'a, QueryResult>, T> { QueryResult::from_response( Some(header), - LazyResultSet::new(redis_value_as_vec(data)?, &mut self.graph.graph_schema), + LazyResultSet::new( + redis_value_as_vec(data)?, + self.graph.get_graph_schema_mut(), + ), stats, ) } @@ -168,12 +144,99 @@ impl<'a, T: Display> QueryBuilder<'a, QueryResult>, T> { } } -impl<'a, T: Display> QueryBuilder<'a, ExecutionPlan, T> { +impl<'a, Out, T: Display> QueryBuilder<'a, Out, T, SyncGraph> { + #[cfg_attr( + feature = "tracing", + tracing::instrument(name = "Common Query Execution Steps", skip_all, level = "trace") + )] + fn common_execute_steps(&mut self) -> FalkorResult { + let query = construct_query(&self.query_string, self.params); + + let timeout = self.timeout.map(|timeout| format!("timeout {timeout}")); + let mut params = vec![query.as_str(), "--compact"]; + params.extend(timeout.as_deref()); + + self.graph + 
.get_client() + .borrow_connection(self.graph.get_client().clone()) + .and_then(|mut conn| { + conn.execute_command( + Some(self.graph.graph_name()), + self.command, + None, + Some(params.as_slice()), + ) + }) + } +} + +#[cfg(feature = "tokio")] +impl<'a, Out, T: Display> QueryBuilder<'a, Out, T, AsyncGraph> { + #[cfg_attr( + feature = "tracing", + tracing::instrument(name = "Common Query Execution Steps", skip_all, level = "trace") + )] + async fn common_execute_steps(&mut self) -> FalkorResult { + let query = construct_query(&self.query_string, self.params); + + let timeout = self.timeout.map(|timeout| format!("timeout {timeout}")); + let mut params = vec![query.as_str(), "--compact"]; + params.extend(timeout.as_deref()); + + self.graph + .get_client() + .borrow_connection(self.graph.get_client().clone()) + .await? + .execute_command( + Some(self.graph.graph_name()), + self.command, + None, + Some(params.as_slice()), + ) + .await + } +} + +impl<'a, T: Display> QueryBuilder<'a, QueryResult>, T, SyncGraph> { + /// Executes the query, returning a [`QueryResult`], with a [`LazyResultSet`] as its `data` member + #[cfg_attr( + feature = "tracing", + tracing::instrument(name = "Execute Lazy Result Set Query", skip_all, level = "info") + )] + pub fn execute(mut self) -> FalkorResult>> { + self.common_execute_steps() + .and_then(|res| self.generate_query_result_set(res)) + } +} + +#[cfg(feature = "tokio")] +impl<'a, T: Display> QueryBuilder<'a, QueryResult>, T, AsyncGraph> { + /// Executes the query, returning a [`QueryResult`], with a [`LazyResultSet`] as its `data` member + #[cfg_attr( + feature = "tracing", + tracing::instrument(name = "Execute Lazy Result Set Query", skip_all, level = "info") + )] + pub async fn execute(mut self) -> FalkorResult>> { + self.common_execute_steps() + .await + .and_then(|res| self.generate_query_result_set(res)) + } +} + +impl<'a, T: Display> QueryBuilder<'a, ExecutionPlan, T, SyncGraph> { /// Executes the query, returning an 
[`ExecutionPlan`] from the data returned pub fn execute(mut self) -> FalkorResult { - let res = self.common_execute_steps()?; + self.common_execute_steps().and_then(ExecutionPlan::parse) + } +} - ExecutionPlan::parse(res) +#[cfg(feature = "tokio")] +impl<'a, T: Display> QueryBuilder<'a, ExecutionPlan, T, AsyncGraph> { + /// Executes the query, returning an [`ExecutionPlan`] from the data returned + pub async fn execute(mut self) -> FalkorResult { + self.common_execute_steps() + .await + .and_then(ExecutionPlan::parse) } } @@ -219,22 +282,23 @@ pub(crate) fn generate_procedure_call( } /// A Builder-pattern struct that allows creating and executing procedure call on a graph -pub struct ProcedureQueryBuilder<'a, Output> { +pub struct ProcedureQueryBuilder<'a, Output, G: HasGraphSchema> { _unused: PhantomData, - graph: &'a mut SyncGraph, + graph: &'a mut G, readonly: bool, procedure_name: &'a str, args: Option<&'a [&'a str]>, yields: Option<&'a [&'a str]>, } -impl<'a, Output> ProcedureQueryBuilder<'a, Output> { +impl<'a, Out, G: HasGraphSchema> ProcedureQueryBuilder<'a, Out, G> { pub(crate) fn new( - graph: &'a mut SyncGraph, + graph: &'a mut G, procedure_name: &'a str, ) -> Self { Self { _unused: PhantomData, + graph, readonly: false, procedure_name, @@ -244,7 +308,7 @@ impl<'a, Output> ProcedureQueryBuilder<'a, Output> { } pub(crate) fn new_readonly( - graph: &'a mut SyncGraph, + graph: &'a mut G, procedure_name: &'a str, ) -> Self { Self { @@ -285,10 +349,37 @@ impl<'a, Output> ProcedureQueryBuilder<'a, Output> { } } - fn common_execute_steps( - &mut self, - conn: &mut BorrowedSyncConnection, - ) -> FalkorResult { + fn parse_query_result_of_type>( + res: redis::Value + ) -> FalkorResult>> { + let [header, indices, stats]: [redis::Value; 3] = + redis_value_as_vec(res).and_then(|res_vec| { + res_vec.try_into().map_err(|_| { + FalkorDBError::ParsingArrayToStructElementCount( + "Expected exactly 3 elements in query response", + ) + }) + })?; + + 
QueryResult::from_response( + Some(header), + redis_value_as_vec(indices) + .map(|indices| indices.into_iter().flat_map(T::try_from).collect())?, + stats, + ) + } +} + +impl<'a, Out> ProcedureQueryBuilder<'a, Out, SyncGraph> { + #[cfg_attr( + feature = "tracing", + tracing::instrument( + name = "Common Procedure Call Execution Steps", + skip_all, + level = "trace" + ) + )] + fn common_execute_steps(&mut self) -> FalkorResult { let command = match self.readonly { true => "GRAPH.QUERY_RO", false => "GRAPH.QUERY", @@ -298,16 +389,55 @@ impl<'a, Output> ProcedureQueryBuilder<'a, Output> { generate_procedure_call(self.procedure_name, self.args, self.yields); let query = construct_query(query_string, params.as_ref()); - conn.execute_command( - Some(self.graph.graph_name()), - command, - None, - Some(&[query.as_str(), "--compact"]), + self.graph + .get_client() + .borrow_connection(self.graph.get_client().clone()) + .and_then(|mut conn| { + conn.execute_command( + Some(self.graph.graph_name()), + command, + None, + Some(&[query.as_str(), "--compact"]), + ) + }) + } +} + +#[cfg(feature = "tokio")] +impl<'a, Out> ProcedureQueryBuilder<'a, Out, AsyncGraph> { + #[cfg_attr( + feature = "tracing", + tracing::instrument( + name = "Common Procedure Call Execution Steps", + skip_all, + level = "trace" ) + )] + async fn common_execute_steps(&mut self) -> FalkorResult { + let command = match self.readonly { + true => "GRAPH.QUERY_RO", + false => "GRAPH.QUERY", + }; + + let (query_string, params) = + generate_procedure_call(self.procedure_name, self.args, self.yields); + let query = construct_query(query_string, params.as_ref()); + + self.graph + .get_client() + .borrow_connection(self.graph.get_client().clone()) + .await? 
+ .execute_command( + Some(self.graph.graph_name()), + command, + None, + Some(&[query.as_str(), "--compact"]), + ) + .await } } -impl<'a> ProcedureQueryBuilder<'a, QueryResult>> { +impl<'a> ProcedureQueryBuilder<'a, QueryResult>, SyncGraph> { /// Executes the procedure call and return a [`QueryResult`] type containing a result set of [`FalkorIndex`]s /// This functions consumes self #[cfg_attr( @@ -315,37 +445,27 @@ impl<'a> ProcedureQueryBuilder<'a, QueryResult>> { tracing::instrument(name = "Execute FalkorIndex Query", skip_all, level = "info") )] pub fn execute(mut self) -> FalkorResult>> { - self.common_execute_steps( - &mut self - .graph - .client - .borrow_connection(self.graph.client.clone())?, - ) - .and_then(|res| { - let [header, indices, stats]: [redis::Value; 3] = - redis_value_as_vec(res).and_then(|res_vec| { - res_vec.try_into().map_err(|_| { - FalkorDBError::ParsingArrayToStructElementCount( - "Expected exactly 3 elements in query response", - ) - }) - })?; + self.common_execute_steps() + .and_then(Self::parse_query_result_of_type) + } +} - QueryResult::from_response( - Some(header), - redis_value_as_vec(indices).map(|indices| { - indices - .into_iter() - .flat_map(|index| FalkorIndex::parse(index, &mut self.graph.graph_schema)) - .collect() - })?, - stats, - ) - }) +#[cfg(feature = "tokio")] +impl<'a> ProcedureQueryBuilder<'a, QueryResult>, AsyncGraph> { + /// Executes the procedure call and return a [`QueryResult`] type containing a result set of [`FalkorIndex`]s + /// This functions consumes self + #[cfg_attr( + feature = "tracing", + tracing::instrument(name = "Execute FalkorIndex Query", skip_all, level = "info") + )] + pub async fn execute(mut self) -> FalkorResult>> { + self.common_execute_steps() + .await + .and_then(Self::parse_query_result_of_type) } } -impl<'a> ProcedureQueryBuilder<'a, QueryResult>> { +impl<'a> ProcedureQueryBuilder<'a, QueryResult>, SyncGraph> { /// Executes the procedure call and return a [`QueryResult`] type 
 containing a result set of [`Constraint`]s /// This functions consumes self #[cfg_attr( @@ -353,35 +473,23 @@ tracing::instrument(name = "Execute Constraint Procedure Call", skip_all, level = "info") )] pub fn execute(mut self) -> FalkorResult>> { - self.common_execute_steps( - &mut self - .graph - .client - .borrow_connection(self.graph.client.clone())?, - ) - .and_then(|res| { - let [header, constraints, stats]: [redis::Value; 3] = redis_value_as_vec(res) - .and_then(|res_vec| { - res_vec.try_into().map_err(|_| { - FalkorDBError::ParsingArrayToStructElementCount( - "Expected exactly 3 elements in query response", - ) - }) - })?; + self.common_execute_steps() + .and_then(Self::parse_query_result_of_type) + } +} - QueryResult::from_response( - Some(header), - redis_value_as_vec(constraints).map(|constraints| { - constraints - .into_iter() - .flat_map(|constraint| { - Constraint::parse(constraint, &mut self.graph.graph_schema) - }) - .collect() - })?, - stats, - ) - }) +#[cfg(feature = "tokio")] +impl<'a> ProcedureQueryBuilder<'a, QueryResult>, AsyncGraph> { + /// Executes the procedure call and return a [`QueryResult`] type containing a result set of [`Constraint`]s + /// This functions consumes self + #[cfg_attr( + feature = "tracing", + tracing::instrument(name = "Execute Constraint Procedure Call", skip_all, level = "info") + )] + pub async fn execute(mut self) -> FalkorResult>> { + self.common_execute_steps() + .await + .and_then(Self::parse_query_result_of_type) } } diff --git a/src/graph_schema/mod.rs b/src/graph_schema/mod.rs index 7b5a589..8d311fd 100644 --- a/src/graph_schema/mod.rs +++ b/src/graph_schema/mod.rs @@ -4,7 +4,7 @@ */ use crate::{ - client::blocking::FalkorSyncClientInner, + client::ProvidesSyncConnections, parser::{parse_type, redis_value_as_int, redis_value_as_string, redis_value_as_vec}, FalkorDBError, FalkorResult, FalkorValue, }; @@ -71,7 +71,7 @@ pub(crate) type IdMap = HashMap; /// A struct containing the various 
schema maps, allowing conversions between ids and their string representations. #[derive(Clone)] pub struct GraphSchema { - client: Arc, + client: Arc, graph_name: String, version: i64, labels: IdMap, @@ -82,7 +82,7 @@ pub struct GraphSchema { impl GraphSchema { pub(crate) fn new( graph_name: T, - client: Arc, + client: Arc, ) -> Self { Self { client, @@ -149,7 +149,7 @@ impl GraphSchema { // This is essentially the call_procedure(), but can be done here without access to the graph(which would cause ownership issues) let keys = self .client - .borrow_connection(self.client.clone()) + .get_connection() .and_then(|mut conn| { conn.execute_command( Some(self.graph_name.as_str()), @@ -261,7 +261,8 @@ impl GraphSchema { pub(crate) mod tests { use super::*; use crate::{ - client::blocking::create_empty_inner_client, test_utils::create_test_client, SyncGraph, + client::blocking::create_empty_inner_sync_client, graph::HasGraphSchema, + test_utils::create_test_client, SyncGraph, }; use std::collections::HashMap; @@ -269,25 +270,27 @@ pub(crate) mod tests { let client = create_test_client(); let mut graph = client.select_graph("imdb"); - graph.graph_schema.properties = HashMap::from([ - (0, "age".to_string()), - (1, "is_boring".to_string()), - (2, "something_else".to_string()), - (3, "secs_since_login".to_string()), - ]); + { + let schema = graph.get_graph_schema_mut(); + schema.properties = HashMap::from([ + (0, "age".to_string()), + (1, "is_boring".to_string()), + (2, "something_else".to_string()), + (3, "secs_since_login".to_string()), + ]); - graph.graph_schema.labels = - HashMap::from([(0, "much".to_string()), (1, "actor".to_string())]); + schema.labels = HashMap::from([(0, "much".to_string()), (1, "actor".to_string())]); - graph.graph_schema.relationships = - HashMap::from([(0, "very".to_string()), (1, "wow".to_string())]); + schema.relationships = HashMap::from([(0, "very".to_string()), (1, "wow".to_string())]); + } graph } #[test] fn test_label_not_exists() { - let 
mut parser = GraphSchema::new("graph_name".to_string(), create_empty_inner_client()); + let mut parser = + GraphSchema::new("graph_name".to_string(), create_empty_inner_sync_client()); let input_value = redis::Value::Bulk(vec![redis::Value::Bulk(vec![ redis::Value::Int(1), redis::Value::Int(2), @@ -300,7 +303,8 @@ pub(crate) mod tests { #[test] fn test_parse_properties_map() { - let mut parser = GraphSchema::new("graph_name".to_string(), create_empty_inner_client()); + let mut parser = + GraphSchema::new("graph_name".to_string(), create_empty_inner_sync_client()); parser.properties = HashMap::from([ (1, "property1".to_string()), (2, "property2".to_string()), @@ -341,7 +345,8 @@ pub(crate) mod tests { #[test] fn test_parse_id_vec() { - let mut parser = GraphSchema::new("graph_name".to_string(), create_empty_inner_client()); + let mut parser = + GraphSchema::new("graph_name".to_string(), create_empty_inner_sync_client()); parser.labels = HashMap::from([ (1, "property1".to_string()), diff --git a/src/lib.rs b/src/lib.rs index f74ae55..9b69eba 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -3,6 +3,8 @@ * Licensed under the Server Side Public License v1 (SSPLv1). 
*/ +#![allow(private_interfaces)] +#![allow(private_bounds)] #![deny(missing_docs)] #![deny(rustdoc::broken_intra_doc_links)] #![doc = include_str!("../README.md")] @@ -44,6 +46,11 @@ pub use value::{ FalkorValue, }; +#[cfg(feature = "tokio")] +pub use client::asynchronous::FalkorAsyncClient; +#[cfg(feature = "tokio")] +pub use graph::asynchronous::AsyncGraph; + #[cfg(test)] pub(crate) mod test_utils { use super::*; @@ -58,12 +65,39 @@ pub(crate) mod test_utils { } } + #[cfg(feature = "tokio")] + pub(crate) struct TestAsyncGraphHandle { + pub(crate) inner: AsyncGraph, + } + + #[cfg(feature = "tokio")] + impl Drop for TestAsyncGraphHandle { + fn drop(&mut self) { + tokio::task::block_in_place(|| { + // Avoid copying the schema each time + let mut graph_handle = + AsyncGraph::new(self.inner.get_client().clone(), self.inner.graph_name()); + tokio::runtime::Handle::current().block_on(async move { + graph_handle.delete().await.ok(); + }) + }) + } + } + pub(crate) fn create_test_client() -> FalkorSyncClient { FalkorClientBuilder::new() .build() .expect("Could not create client") } + #[cfg(feature = "tokio")] + pub(crate) async fn create_async_test_client() -> FalkorAsyncClient { + FalkorClientBuilder::new_async() + .build() + .await + .expect("Could not create client") + } + pub(crate) fn open_test_graph(graph_name: &str) -> TestSyncGraphHandle { let client = create_test_client(); @@ -75,4 +109,17 @@ pub(crate) mod test_utils { .expect("Could not copy graph for test"), } } + + pub(crate) async fn open_async_test_graph(graph_name: &str) -> TestAsyncGraphHandle { + let client = create_async_test_client().await; + + client.select_graph(graph_name).delete().await.ok(); + + TestAsyncGraphHandle { + inner: client + .copy_graph("imdb", graph_name) + .await + .expect("Could not copy graph for test"), + } + } } diff --git a/src/parser/mod.rs b/src/parser/mod.rs index aa60006..25193a8 100644 --- a/src/parser/mod.rs +++ b/src/parser/mod.rs @@ -3,7 +3,9 @@ * Licensed under the 
Server Side Public License v1 (SSPLv1). */ -use crate::{Edge, FalkorDBError, FalkorResult, FalkorValue, GraphSchema, Node, Path, Point}; +use crate::{ + ConfigValue, Edge, FalkorDBError, FalkorResult, FalkorValue, GraphSchema, Node, Path, Point, +}; use std::collections::HashMap; pub(crate) fn redis_value_as_string(value: redis::Value) -> FalkorResult { @@ -43,6 +45,61 @@ pub(crate) fn redis_value_as_vec(value: redis::Value) -> FalkorResult FalkorResult> { + redis_value_as_string(res) + .map(|info| { + info.split("\r\n") + .map(|info_item| info_item.split(':').collect::>()) + .flat_map(TryInto::<[&str; 2]>::try_into) + .map(|[key, val]| (key.to_string(), val.to_string())) + .collect() + }) + .map_err(|_| FalkorDBError::ParsingString) +} + +#[cfg_attr( + feature = "tracing", + tracing::instrument(name = "Parse Config Hashmap", skip_all, level = "info") +)] +pub(crate) fn parse_config_hashmap( + value: redis::Value +) -> FalkorResult> { + let config = redis_value_as_vec(value)?; + + if config.len() == 2 { + let [key, val]: [redis::Value; 2] = config.try_into().map_err(|_| { + FalkorDBError::ParsingArrayToStructElementCount( + "Expected exactly 2 elements for configuration option", + ) + })?; + + return redis_value_as_string(key) + .and_then(|key| ConfigValue::try_from(val).map(|val| HashMap::from([(key, val)]))); + } + + Ok(config + .into_iter() + .flat_map(|config| { + redis_value_as_vec(config).and_then(|as_vec| { + let [key, val]: [redis::Value; 2] = as_vec.try_into().map_err(|_| { + FalkorDBError::ParsingArrayToStructElementCount( + "Expected exactly 2 elements for configuration option", + ) + })?; + + Result::<_, FalkorDBError>::Ok(( + redis_value_as_string(key)?, + ConfigValue::try_from(val)?, + )) + }) + }) + .collect::>()) +} + #[cfg_attr( feature = "tracing", tracing::instrument(name = "Parse Falkor Enum", skip_all, level = "trace") @@ -64,6 +121,24 @@ pub(crate) fn parse_falkor_enum TryFrom<&'a str, Error = impl ToStrin }) } +#[cfg_attr( + feature = 
"tracing", + tracing::instrument( + name = "Falkor Typed String From Redis Value", + skip_all, + level = "trace" + ) +)] +pub(crate) fn redis_value_as_typed_string(value: redis::Value) -> FalkorResult { + type_val_from_value(value).and_then(|(type_marker, val)| { + if type_marker == 2 { + redis_value_as_string(val) + } else { + Err(FalkorDBError::ParsingString) + } + }) +} + #[cfg_attr( feature = "tracing", tracing::instrument(name = "String Vec From Redis Value", skip_all, level = "debug") @@ -226,7 +301,7 @@ pub(crate) fn parse_type( mod tests { use super::*; use crate::{ - client::blocking::create_empty_inner_client, + client::blocking::create_empty_inner_sync_client, graph::HasGraphSchema, graph_schema::tests::open_readonly_graph_with_modified_schema, FalkorDBError, }; @@ -312,7 +387,7 @@ mod tests { ]), ]), ]), - &mut graph.graph_schema, + graph.get_graph_schema_mut(), ); assert!(res.is_ok()); @@ -361,7 +436,7 @@ mod tests { ]), ]), ]), - &mut graph.graph_schema, + graph.get_graph_schema_mut(), ); assert!(res.is_ok()); @@ -425,7 +500,7 @@ mod tests { ]), ]), ]), - &mut graph.graph_schema, + graph.get_graph_schema_mut(), ); assert!(res.is_ok()); @@ -470,7 +545,7 @@ mod tests { redis::Value::Status("true".to_string()), ]), ]), - &mut graph.graph_schema, + graph.get_graph_schema_mut(), ); assert!(res.is_ok()); @@ -498,7 +573,7 @@ mod tests { redis::Value::Status("102.0".to_string()), redis::Value::Status("15.2".to_string()), ]), - &mut graph.graph_schema, + graph.get_graph_schema_mut(), ); assert!(res.is_ok()); @@ -512,7 +587,7 @@ mod tests { #[test] fn test_map_not_a_vec() { - let mut graph_schema = GraphSchema::new("test_graph", create_empty_inner_client()); + let mut graph_schema = GraphSchema::new("test_graph", create_empty_inner_sync_client()); let res = parse_regular_falkor_map(redis::Value::Status("Hello".to_string()), &mut graph_schema); @@ -522,7 +597,7 @@ mod tests { #[test] fn test_map_vec_odd_element_count() { - let mut graph_schema = 
GraphSchema::new("test_graph", create_empty_inner_client()); + let mut graph_schema = GraphSchema::new("test_graph", create_empty_inner_sync_client()); let res = parse_regular_falkor_map( redis::Value::Bulk(vec![redis::Value::Nil; 7]), @@ -534,7 +609,7 @@ mod tests { #[test] fn test_map_val_element_is_not_array() { - let mut graph_schema = GraphSchema::new("test_graph", create_empty_inner_client()); + let mut graph_schema = GraphSchema::new("test_graph", create_empty_inner_sync_client()); let res = parse_regular_falkor_map( redis::Value::Bulk(vec![ @@ -549,7 +624,7 @@ mod tests { #[test] fn test_map_val_element_has_only_1_element() { - let mut graph_schema = GraphSchema::new("test_graph", create_empty_inner_client()); + let mut graph_schema = GraphSchema::new("test_graph", create_empty_inner_sync_client()); let res = parse_regular_falkor_map( redis::Value::Bulk(vec![ @@ -564,7 +639,7 @@ mod tests { #[test] fn test_map_val_element_has_ge_2_elements() { - let mut graph_schema = GraphSchema::new("test_graph", create_empty_inner_client()); + let mut graph_schema = GraphSchema::new("test_graph", create_empty_inner_sync_client()); let res = parse_regular_falkor_map( redis::Value::Bulk(vec![ @@ -579,7 +654,7 @@ mod tests { #[test] fn test_map_val_element_mismatch_type_marker() { - let mut graph_schema = GraphSchema::new("test_graph", create_empty_inner_client()); + let mut graph_schema = GraphSchema::new("test_graph", create_empty_inner_sync_client()); let res = parse_regular_falkor_map( redis::Value::Bulk(vec![ @@ -597,7 +672,7 @@ mod tests { #[test] fn test_map_ok_values() { - let mut graph_schema = GraphSchema::new("test_graph", create_empty_inner_client()); + let mut graph_schema = GraphSchema::new("test_graph", create_empty_inner_sync_client()); let res = parse_regular_falkor_map( redis::Value::Bulk(vec![ diff --git a/src/response/constraint.rs b/src/response/constraint.rs index da03a05..07ed62a 100644 --- a/src/response/constraint.rs +++ b/src/response/constraint.rs 
@@ -5,10 +5,10 @@ use crate::{ parser::{ - parse_falkor_enum, parse_raw_redis_value, redis_value_as_typed_string_vec, + parse_falkor_enum, redis_value_as_typed_string, redis_value_as_typed_string_vec, redis_value_as_vec, }, - EntityType, FalkorDBError, FalkorResult, FalkorValue, GraphSchema, + EntityType, FalkorDBError, FalkorResult, }; /// The type of restriction to apply for the property @@ -50,23 +50,21 @@ pub struct Constraint { pub status: ConstraintStatus, } -impl Constraint { +impl TryFrom for Constraint { + type Error = FalkorDBError; + #[cfg_attr( feature = "tracing", tracing::instrument(name = "Parse Constraint", skip_all, level = "info") )] - pub(crate) fn parse( - value: redis::Value, - graph_schema: &mut GraphSchema, - ) -> FalkorResult { + fn try_from(value: redis::Value) -> FalkorResult { let [constraint_type_raw, label_raw, properties_raw, entity_type_raw, status_raw]: [redis::Value; 5] = redis_value_as_vec(value) .and_then(|res| res.try_into() .map_err(|_| FalkorDBError::ParsingArrayToStructElementCount("Expected exactly 5 elements in constraint object")))?; Ok(Self { constraint_type: parse_falkor_enum(constraint_type_raw)?, - label: parse_raw_redis_value(label_raw, graph_schema) - .and_then(FalkorValue::into_string)?, + label: redis_value_as_typed_string(label_raw)?, properties: redis_value_as_typed_string_vec(properties_raw)?, entity_type: parse_falkor_enum(entity_type_raw)?, status: parse_falkor_enum(status_raw)?, diff --git a/src/response/index.rs b/src/response/index.rs index ffdb7f8..2d6c492 100644 --- a/src/response/index.rs +++ b/src/response/index.rs @@ -5,10 +5,10 @@ use crate::{ parser::{ - parse_falkor_enum, parse_raw_redis_value, redis_value_as_typed_string_vec, - redis_value_as_vec, + parse_falkor_enum, redis_value_as_string, redis_value_as_typed_string, + redis_value_as_typed_string_vec, redis_value_as_vec, type_val_from_value, }, - EntityType, FalkorDBError, FalkorValue, GraphSchema, + EntityType, FalkorDBError, FalkorResult, }; use 
std::collections::HashMap; @@ -36,32 +36,57 @@ pub enum IndexType { Fulltext, } -fn parse_types_map( - value: redis::Value, - graph_schema: &mut GraphSchema, -) -> Result>, FalkorDBError> { - parse_raw_redis_value(value, graph_schema) - .and_then(|map_val| map_val.into_map()) - .map(|map_val| { - map_val - .into_iter() - .flat_map(|(key, val)| { - val.into_vec().map(|as_vec| { - ( - key, - as_vec - .into_iter() - .flat_map(|item| { - item.into_string().and_then(|as_str| { - IndexType::try_from(as_str.as_str()).map_err(Into::into) - }) - }) - .collect(), - ) - }) - }) - .collect() - }) +fn parse_types_map(value: redis::Value) -> Result>, FalkorDBError> { + type_val_from_value(value).and_then(|(type_marker, val)| { + if type_marker != 10 { + return Err(FalkorDBError::ParsingMap); + } + + let map_iter = val.into_map_iter().map_err(|_| FalkorDBError::ParsingMap)?; + + let result = map_iter + .into_iter() + .map(|(key, val)| { + let key_str = redis_value_as_string(key)?; + let (val_type_marker, val) = type_val_from_value(val)?; + + if val_type_marker != 6 { + return Err(FalkorDBError::ParsingString); + } + + let val_vec = redis_value_as_vec(val)?; + let parsed_values = val_vec + .into_iter() + .flat_map(parse_falkor_enum::) + .collect::>(); + + Ok((key_str, parsed_values)) + }) + .collect::, FalkorDBError>>()?; + + Ok(result) + }) +} + +fn parse_info_map(value: redis::Value) -> FalkorResult> { + type_val_from_value(value).and_then(|(type_marker, val)| { + if type_marker != 10 { + return Err(FalkorDBError::ParsingMap); + } + + let map_iter = val.into_map_iter().map_err(|_| FalkorDBError::ParsingMap)?; + + let result = map_iter + .into_iter() + .map(|(key, val)| { + let key_str = redis_value_as_typed_string(key)?; + let val_str = redis_value_as_typed_string(val)?; + Ok((key_str, val_str)) + }) + .collect::, FalkorDBError>>()?; + + Ok(result) + }) } /// Contains all the info regarding an index on the database @@ -85,15 +110,14 @@ pub struct FalkorIndex { pub info: HashMap, 
} -impl FalkorIndex { +impl TryFrom for FalkorIndex { + type Error = FalkorDBError; + #[cfg_attr( feature = "tracing", tracing::instrument(name = "Parse Index", skip_all, level = "info") )] - pub(crate) fn parse( - value: redis::Value, - graph_schema: &mut GraphSchema, - ) -> Result { + fn try_from(value: redis::Value) -> Result { let [label, fields, field_types, language, stopwords, entity_type, status, info] = redis_value_as_vec(value).and_then(|as_vec| { as_vec.try_into().map_err(|_| { @@ -106,21 +130,12 @@ impl FalkorIndex { Ok(Self { entity_type: parse_falkor_enum(entity_type)?, status: parse_falkor_enum(status)?, - index_label: parse_raw_redis_value(label, graph_schema) - .and_then(FalkorValue::into_string)?, + index_label: redis_value_as_typed_string(label)?, fields: redis_value_as_typed_string_vec(fields)?, - field_types: parse_types_map(field_types, graph_schema)?, - language: parse_raw_redis_value(language, graph_schema) - .and_then(FalkorValue::into_string)?, + field_types: parse_types_map(field_types)?, + language: redis_value_as_typed_string(language)?, stopwords: redis_value_as_typed_string_vec(stopwords)?, - info: parse_raw_redis_value(info, graph_schema) - .and_then(FalkorValue::into_map) - .map(|as_map| { - as_map - .into_iter() - .flat_map(|(key, val)| val.into_string().map(|val_str| (key, val_str))) - .collect() - })?, + info: parse_info_map(info)?, }) } } From e9a995e64f811aa01f8f3d4a89c3b0cdbd97d0c8 Mon Sep 17 00:00:00 2001 From: Emily Matheys Date: Thu, 13 Jun 2024 18:03:25 +0300 Subject: [PATCH 02/13] Midway there --- src/client/asynchronous.rs | 24 +++++++----------------- src/client/blocking.rs | 2 +- src/client/mod.rs | 11 +++++++---- src/connection/asynchronous.rs | 7 +++++-- 4 files changed, 20 insertions(+), 24 deletions(-) diff --git a/src/client/asynchronous.rs b/src/client/asynchronous.rs index d9f9d9a..0988425 100644 --- a/src/client/asynchronous.rs +++ b/src/client/asynchronous.rs @@ -253,24 +253,14 @@ impl FalkorAsyncClient { 
&self, section: Option<&str>, ) -> FalkorResult> { - self.borrow_connection() - .await? - .as_inner()? - .get_redis_info(section) - .await - } -} + let mut conn = self.borrow_connection().await?; -#[cfg(test)] -pub(crate) async fn create_empty_inner_async_client() -> Arc { - let (tx, rx) = mpsc::channel(1); - tx.send(FalkorAsyncConnection::None).await.ok(); - Arc::new(FalkorAsyncClientInner { - _inner: Mutex::new(FalkorClientProvider::None), - connection_pool_size: 0, - connection_pool_tx: RwLock::new(tx), - connection_pool_rx: Mutex::new(rx), - }) + let redis_info = conn.as_inner()?.get_redis_info(section).await; + + conn.return_to_pool().await; + + redis_info + } } #[cfg(test)] diff --git a/src/client/blocking.rs b/src/client/blocking.rs index 389c9b3..a4f177d 100644 --- a/src/client/blocking.rs +++ b/src/client/blocking.rs @@ -74,7 +74,7 @@ impl ProvidesSyncConnections for FalkorSyncClientInner { #[derive(Clone)] pub struct FalkorSyncClient { inner: Arc, - pub(crate) _connection_info: FalkorConnectionInfo, + _connection_info: FalkorConnectionInfo, } impl FalkorSyncClient { diff --git a/src/client/mod.rs b/src/client/mod.rs index 6337696..c1465e8 100644 --- a/src/client/mod.rs +++ b/src/client/mod.rs @@ -3,12 +3,15 @@ * Licensed under the Server Side Public License v1 (SSPLv1). */ -use crate::{connection::blocking::FalkorSyncConnection, FalkorDBError, FalkorResult}; +use crate::{ + connection::blocking::FalkorSyncConnection, + parser::{redis_value_as_string, redis_value_as_vec}, + FalkorDBError, FalkorResult, +}; use std::collections::HashMap; #[cfg(feature = "tokio")] use crate::connection::asynchronous::FalkorAsyncConnection; -use crate::parser::{redis_value_as_string, redis_value_as_vec}; pub(crate) mod blocking; pub(crate) mod builder; @@ -62,7 +65,7 @@ impl FalkorClientProvider { ), FalkorClientProvider::Redis { client, .. 
} => FalkorAsyncConnection::Redis( client - .get_multiplexed_async_connection() + .get_multiplexed_tokio_connection() .await .map_err(|err| FalkorDBError::RedisError(err.to_string()))?, ), @@ -171,6 +174,6 @@ impl FalkorClientProvider { } } -pub(crate) trait ProvidesSyncConnections { +pub(crate) trait ProvidesSyncConnections: Sync + Send { fn get_connection(&self) -> FalkorResult; } diff --git a/src/connection/asynchronous.rs b/src/connection/asynchronous.rs index a9c4230..2458e91 100644 --- a/src/connection/asynchronous.rs +++ b/src/connection/asynchronous.rs @@ -111,7 +111,7 @@ impl BorrowedAsyncConnection { subcommand: Option<&str>, params: Option<&[&str]>, ) -> FalkorResult { - match self + let res = match self .as_inner()? .execute_command(graph_name, command, subcommand, params) .await @@ -124,7 +124,10 @@ impl BorrowedAsyncConnection { Err(FalkorDBError::NoConnection) } res => res, - } + }; + + self.return_to_pool().await; + res } pub(crate) async fn return_to_pool(self) { From 8bb9c6c11fdcbaab105bc77ef816d8674d64294e Mon Sep 17 00:00:00 2001 From: Emily Matheys Date: Sun, 16 Jun 2024 11:29:08 +0300 Subject: [PATCH 03/13] Async works --- src/client/asynchronous.rs | 2 +- src/client/blocking.rs | 2 +- src/graph/asynchronous.rs | 32 +++++------ src/graph/blocking.rs | 28 ++++++---- src/graph/query_builder.rs | 27 +++++---- src/graph_schema/mod.rs | 43 +++++++++------ src/lib.rs | 18 ++---- src/parser/mod.rs | 10 +++- src/response/constraint.rs | 13 +++-- src/response/index.rs | 43 ++++----------- src/response/lazy_result_set.rs | 98 ++++++++++++++++++++++++++++++++- src/response/mod.rs | 4 +- src/value/graph_entities.rs | 8 +-- 13 files changed, 210 insertions(+), 118 deletions(-) diff --git a/src/client/asynchronous.rs b/src/client/asynchronous.rs index 0988425..aa050bd 100644 --- a/src/client/asynchronous.rs +++ b/src/client/asynchronous.rs @@ -304,7 +304,7 @@ mod tests { assert!(res.is_ok()); let graphs = res.unwrap(); - assert_eq!(graphs[0], "imdb"); + 
assert!(graphs.contains(&"imdb".to_string())); } #[tokio::test(flavor = "multi_thread")] diff --git a/src/client/blocking.rs b/src/client/blocking.rs index a4f177d..f210301 100644 --- a/src/client/blocking.rs +++ b/src/client/blocking.rs @@ -289,7 +289,7 @@ mod tests { assert!(res.is_ok()); let graphs = res.unwrap(); - assert_eq!(graphs[0], "imdb"); + assert!(graphs.contains(&"imdb".to_string())); } #[test] diff --git a/src/graph/asynchronous.rs b/src/graph/asynchronous.rs index fb14f9b..f0b66c3 100644 --- a/src/graph/asynchronous.rs +++ b/src/graph/asynchronous.rs @@ -402,11 +402,15 @@ impl HasGraphSchema for AsyncGraph { #[cfg(test)] mod tests { use super::*; - use crate::{test_utils::open_async_test_graph, IndexType}; + use crate::{ + test_utils::{create_async_test_client, open_empty_async_test_graph}, + IndexType, + }; #[tokio::test(flavor = "multi_thread")] async fn test_create_drop_index() { - let mut graph = open_async_test_graph("test_create_drop_index_async").await; + let mut graph = open_empty_async_test_graph("test_create_drop_index_async").await; + graph .inner .create_index( @@ -425,7 +429,7 @@ mod tests { .await .expect("Could not list indices"); - assert_eq!(indices.data.len(), 2); + assert_eq!(indices.data.len(), 1); assert_eq!( indices.data[0].field_types["Hello"], vec![IndexType::Fulltext] @@ -440,12 +444,8 @@ mod tests { #[tokio::test(flavor = "multi_thread")] async fn test_list_indices() { - let mut graph = open_async_test_graph("test_list_indices_async").await; - let indices = graph - .inner - .list_indices() - .await - .expect("Could not list indices"); + let mut graph = create_async_test_client().await.select_graph("imdb"); + let indices = graph.list_indices().await.expect("Could not list indices"); assert_eq!(indices.data.len(), 1); assert_eq!(indices.data[0].entity_type, EntityType::Node); @@ -459,7 +459,7 @@ mod tests { #[tokio::test(flavor = "multi_thread")] async fn test_create_drop_mandatory_constraint() { - let graph = 
open_async_test_graph("test_mandatory_constraint_async").await; + let graph = open_empty_async_test_graph("test_mandatory_constraint_async").await; graph .inner @@ -481,7 +481,7 @@ mod tests { #[tokio::test(flavor = "multi_thread")] async fn test_create_drop_unique_constraint() { - let mut graph = open_async_test_graph("test_unique_constraint_async").await; + let mut graph = open_empty_async_test_graph("test_unique_constraint_async").await; graph .inner @@ -507,7 +507,7 @@ mod tests { #[tokio::test(flavor = "multi_thread")] async fn test_list_constraints() { - let mut graph = open_async_test_graph("test_list_constraint_async").await; + let mut graph = open_empty_async_test_graph("test_list_constraint_async").await; graph .inner @@ -529,7 +529,7 @@ mod tests { #[tokio::test(flavor = "multi_thread")] async fn test_slowlog() { - let mut graph = open_async_test_graph("test_slowlog_async").await; + let mut graph = open_empty_async_test_graph("test_slowlog_async").await; graph .inner @@ -575,9 +575,9 @@ mod tests { #[tokio::test(flavor = "multi_thread")] async fn test_explain() { - let mut graph = open_async_test_graph("test_explain_async").await; + let mut graph = create_async_test_client().await.select_graph("imdb"); - let execution_plan = graph.inner.explain("MATCH (a:actor) WITH a MATCH (b:actor) WHERE a.age = b.age AND a <> b RETURN a, collect(b) LIMIT 100").execute().await.expect("Could not create execution plan"); + let execution_plan = graph.explain("MATCH (a:actor) WITH a MATCH (b:actor) WHERE a.age = b.age AND a <> b RETURN a, collect(b) LIMIT 100").execute().await.expect("Could not create execution plan"); assert_eq!(execution_plan.plan().len(), 7); assert!(execution_plan.operations().get("Aggregate").is_some()); assert_eq!(execution_plan.operations()["Aggregate"].len(), 1); @@ -590,7 +590,7 @@ mod tests { #[tokio::test(flavor = "multi_thread")] async fn test_profile() { - let mut graph = open_async_test_graph("test_profile_async").await; + let mut graph = 
open_empty_async_test_graph("test_profile_async").await; let execution_plan = graph .inner diff --git a/src/graph/blocking.rs b/src/graph/blocking.rs index 1f641c6..deb2e09 100644 --- a/src/graph/blocking.rs +++ b/src/graph/blocking.rs @@ -390,11 +390,15 @@ impl HasGraphSchema for SyncGraph { #[cfg(test)] mod tests { use super::*; - use crate::{test_utils::open_test_graph, IndexType}; + use crate::{ + test_utils::{create_test_client, open_empty_test_graph}, + IndexType, + }; #[test] fn test_create_drop_index() { - let mut graph = open_test_graph("test_create_drop_index"); + let mut graph = open_empty_test_graph("test_create_drop_index"); + let indices = graph .inner .create_index( @@ -408,7 +412,7 @@ mod tests { assert_eq!(indices.get_indices_created(), Some(1)); let indices = graph.inner.list_indices().expect("Could not list indices"); - assert_eq!(indices.data.len(), 2); + assert_eq!(indices.data.len(), 1); assert_eq!( indices.data[0].field_types["Hello"], vec![IndexType::Fulltext] @@ -423,8 +427,8 @@ mod tests { #[test] fn test_list_indices() { - let mut graph = open_test_graph("test_list_indices"); - let indices = graph.inner.list_indices().expect("Could not list indices"); + let mut graph = create_test_client().select_graph("imdb"); + let indices = graph.list_indices().expect("Could not list indices"); assert_eq!(indices.data.len(), 1); assert_eq!(indices.data[0].entity_type, EntityType::Node); @@ -438,7 +442,7 @@ mod tests { #[test] fn test_create_drop_mandatory_constraint() { - let graph = open_test_graph("test_mandatory_constraint"); + let graph = open_empty_test_graph("test_mandatory_constraint"); graph .inner @@ -458,7 +462,7 @@ mod tests { #[test] fn test_create_drop_unique_constraint() { - let mut graph = open_test_graph("test_unique_constraint"); + let mut graph = open_empty_test_graph("test_unique_constraint"); graph .inner @@ -482,7 +486,7 @@ mod tests { #[test] fn test_list_constraints() { - let mut graph = open_test_graph("test_list_constraint"); + 
let mut graph = open_empty_test_graph("test_list_constraints"); graph .inner @@ -502,7 +506,7 @@ mod tests { #[test] fn test_slowlog() { - let mut graph = open_test_graph("test_slowlog"); + let mut graph = open_empty_test_graph("test_slowlog"); graph .inner @@ -543,9 +547,9 @@ mod tests { #[test] fn test_explain() { - let mut graph = open_test_graph("test_explain"); + let mut graph = create_test_client().select_graph("imdb"); - let execution_plan = graph.inner.explain("MATCH (a:actor) WITH a MATCH (b:actor) WHERE a.age = b.age AND a <> b RETURN a, collect(b) LIMIT 100").execute().expect("Could not create execution plan"); + let execution_plan = graph.explain("MATCH (a:actor) WITH a MATCH (b:actor) WHERE a.age = b.age AND a <> b RETURN a, collect(b) LIMIT 100").execute().expect("Could not create execution plan"); assert_eq!(execution_plan.plan().len(), 7); assert!(execution_plan.operations().get("Aggregate").is_some()); assert_eq!(execution_plan.operations()["Aggregate"].len(), 1); @@ -558,7 +562,7 @@ mod tests { #[test] fn test_profile() { - let mut graph = open_test_graph("test_profile"); + let mut graph = open_empty_test_graph("test_profile"); let execution_plan = graph .inner diff --git a/src/graph/query_builder.rs b/src/graph/query_builder.rs index 040a84c..849e980 100644 --- a/src/graph/query_builder.rs +++ b/src/graph/query_builder.rs @@ -4,8 +4,10 @@ */ use crate::{ - graph::HasGraphSchema, parser::redis_value_as_vec, AsyncGraph, Constraint, ExecutionPlan, - FalkorDBError, FalkorIndex, FalkorResult, LazyResultSet, QueryResult, SyncGraph, + graph::HasGraphSchema, + parser::{redis_value_as_vec, SchemaParsable}, + AsyncGraph, Constraint, ExecutionPlan, FalkorDBError, FalkorIndex, FalkorResult, LazyResultSet, + QueryResult, SyncGraph, }; use std::{collections::HashMap, fmt::Display, marker::PhantomData, ops::Not}; @@ -349,8 +351,9 @@ impl<'a, Out, G: HasGraphSchema> ProcedureQueryBuilder<'a, Out, G> { } } - fn parse_query_result_of_type>( - res: redis::Value + 
fn parse_query_result_of_type( + &mut self, + res: redis::Value, ) -> FalkorResult>> { let [header, indices, stats]: [redis::Value; 3] = redis_value_as_vec(res).and_then(|res_vec| { @@ -363,8 +366,12 @@ impl<'a, Out, G: HasGraphSchema> ProcedureQueryBuilder<'a, Out, G> { QueryResult::from_response( Some(header), - redis_value_as_vec(indices) - .map(|indices| indices.into_iter().flat_map(T::try_from).collect())?, + redis_value_as_vec(indices).map(|indices| { + indices + .into_iter() + .flat_map(|res| T::parse(res, self.graph.get_graph_schema_mut())) + .collect() + })?, stats, ) } @@ -446,7 +453,7 @@ impl<'a> ProcedureQueryBuilder<'a, QueryResult>, SyncGraph> { )] pub fn execute(mut self) -> FalkorResult>> { self.common_execute_steps() - .and_then(Self::parse_query_result_of_type) + .and_then(|res| self.parse_query_result_of_type(res)) } } @@ -461,7 +468,7 @@ impl<'a> ProcedureQueryBuilder<'a, QueryResult>, AsyncGraph> { pub async fn execute(mut self) -> FalkorResult>> { self.common_execute_steps() .await - .and_then(Self::parse_query_result_of_type) + .and_then(|res| self.parse_query_result_of_type(res)) } } @@ -474,7 +481,7 @@ impl<'a> ProcedureQueryBuilder<'a, QueryResult>, SyncGraph> { )] pub fn execute(mut self) -> FalkorResult>> { self.common_execute_steps() - .and_then(Self::parse_query_result_of_type) + .and_then(|res| self.parse_query_result_of_type(res)) } } @@ -488,7 +495,7 @@ impl<'a> ProcedureQueryBuilder<'a, QueryResult>, AsyncGraph> { pub async fn execute(mut self) -> FalkorResult>> { self.common_execute_steps() .await - .and_then(Self::parse_query_result_of_type) + .and_then(|res| self.parse_query_result_of_type(res)) } } diff --git a/src/graph_schema/mod.rs b/src/graph_schema/mod.rs index 8d311fd..9f42548 100644 --- a/src/graph_schema/mod.rs +++ b/src/graph_schema/mod.rs @@ -191,6 +191,29 @@ impl GraphSchema { Ok(()) } + pub(crate) fn parse_single_id( + &mut self, + raw_id: i64, + schema_type: SchemaType, + ) -> FalkorResult { + Ok( + match self + 
.get_id_map_by_schema_type(schema_type) + .get(&raw_id) + .cloned() + { + None => { + self.refresh(schema_type)?; + self.get_id_map_by_schema_type(schema_type) + .get(&raw_id) + .cloned() + .ok_or(FalkorDBError::MissingSchemaId(schema_type))? + } + Some(exists) => exists, + }, + ) + } + #[cfg_attr( feature = "tracing", tracing::instrument(name = "Parse ID Vec To String Vec", skip_all, level = "debug") @@ -204,22 +227,10 @@ impl GraphSchema { raw_ids .into_iter() .try_fold(Vec::with_capacity(raw_ids_len), |mut acc, raw_id| { - let id = redis_value_as_int(raw_id)?; - let value = match self - .get_id_map_by_schema_type(schema_type) - .get(&id) - .cloned() - { - None => { - self.refresh(schema_type)?; - self.get_id_map_by_schema_type(schema_type) - .get(&id) - .cloned() - .ok_or(FalkorDBError::MissingSchemaId(schema_type))? - } - Some(exists) => exists, - }; - acc.push(value); + acc.push( + redis_value_as_int(raw_id) + .and_then(|raw_id| self.parse_single_id(raw_id, schema_type))?, + ); Ok(acc) }) } diff --git a/src/lib.rs b/src/lib.rs index 9b69eba..5988c4a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -98,28 +98,20 @@ pub(crate) mod test_utils { .expect("Could not create client") } - pub(crate) fn open_test_graph(graph_name: &str) -> TestSyncGraphHandle { + pub(crate) fn open_empty_test_graph(graph_name: &str) -> TestSyncGraphHandle { let client = create_test_client(); - client.select_graph(graph_name).delete().ok(); - TestSyncGraphHandle { - inner: client - .copy_graph("imdb", graph_name) - .expect("Could not copy graph for test"), + inner: client.select_graph(graph_name), } } - pub(crate) async fn open_async_test_graph(graph_name: &str) -> TestAsyncGraphHandle { + #[cfg(feature = "tokio")] + pub(crate) async fn open_empty_async_test_graph(graph_name: &str) -> TestAsyncGraphHandle { let client = create_async_test_client().await; - client.select_graph(graph_name).delete().await.ok(); - TestAsyncGraphHandle { - inner: client - .copy_graph("imdb", graph_name) - .await - 
.expect("Could not copy graph for test"), + inner: client.select_graph(graph_name), } } } diff --git a/src/parser/mod.rs b/src/parser/mod.rs index 25193a8..04b8958 100644 --- a/src/parser/mod.rs +++ b/src/parser/mod.rs @@ -285,18 +285,24 @@ pub(crate) fn parse_type( Ok(acc) }) })?), - // The following types are sent as an array and require specific parsing functions 7 => FalkorValue::Edge(Edge::parse(val, graph_schema)?), 8 => FalkorValue::Node(Node::parse(val, graph_schema)?), 9 => FalkorValue::Path(Path::parse(val, graph_schema)?), 10 => FalkorValue::Map(parse_regular_falkor_map(val, graph_schema)?), 11 => FalkorValue::Point(Point::parse(val)?), - _ => Err(FalkorDBError::ParsingUnknownType)?, + _ => FalkorValue::Unparseable, }; Ok(res) } +pub(crate) trait SchemaParsable: Sized { + fn parse( + value: redis::Value, + graph_schema: &mut GraphSchema, + ) -> FalkorResult; +} + #[cfg(test)] mod tests { use super::*; diff --git a/src/response/constraint.rs b/src/response/constraint.rs index 07ed62a..1f08709 100644 --- a/src/response/constraint.rs +++ b/src/response/constraint.rs @@ -6,9 +6,9 @@ use crate::{ parser::{ parse_falkor_enum, redis_value_as_typed_string, redis_value_as_typed_string_vec, - redis_value_as_vec, + redis_value_as_vec, SchemaParsable, }, - EntityType, FalkorDBError, FalkorResult, + EntityType, FalkorDBError, FalkorResult, GraphSchema, }; /// The type of restriction to apply for the property @@ -50,14 +50,15 @@ pub struct Constraint { pub status: ConstraintStatus, } -impl TryFrom for Constraint { - type Error = FalkorDBError; - +impl SchemaParsable for Constraint { #[cfg_attr( feature = "tracing", tracing::instrument(name = "Parse Constraint", skip_all, level = "info") )] - fn try_from(value: redis::Value) -> FalkorResult { + fn parse( + value: redis::Value, + _: &mut GraphSchema, + ) -> FalkorResult { let [constraint_type_raw, label_raw, properties_raw, entity_type_raw, status_raw]: [redis::Value; 5] = redis_value_as_vec(value) .and_then(|res| 
res.try_into() .map_err(|_| FalkorDBError::ParsingArrayToStructElementCount("Expected exactly 5 elements in constraint object")))?; diff --git a/src/response/index.rs b/src/response/index.rs index 2d6c492..adb4c43 100644 --- a/src/response/index.rs +++ b/src/response/index.rs @@ -5,10 +5,11 @@ use crate::{ parser::{ - parse_falkor_enum, redis_value_as_string, redis_value_as_typed_string, - redis_value_as_typed_string_vec, redis_value_as_vec, type_val_from_value, + parse_falkor_enum, parse_raw_redis_value, redis_value_as_string, + redis_value_as_typed_string, redis_value_as_typed_string_vec, redis_value_as_vec, + type_val_from_value, SchemaParsable, }, - EntityType, FalkorDBError, FalkorResult, + EntityType, FalkorDBError, FalkorValue, GraphSchema, }; use std::collections::HashMap; @@ -51,7 +52,7 @@ fn parse_types_map(value: redis::Value) -> Result let (val_type_marker, val) = type_val_from_value(val)?; if val_type_marker != 6 { - return Err(FalkorDBError::ParsingString); + return Err(FalkorDBError::ParsingArray); } let val_vec = redis_value_as_vec(val)?; @@ -68,27 +69,6 @@ fn parse_types_map(value: redis::Value) -> Result }) } -fn parse_info_map(value: redis::Value) -> FalkorResult> { - type_val_from_value(value).and_then(|(type_marker, val)| { - if type_marker != 10 { - return Err(FalkorDBError::ParsingMap); - } - - let map_iter = val.into_map_iter().map_err(|_| FalkorDBError::ParsingMap)?; - - let result = map_iter - .into_iter() - .map(|(key, val)| { - let key_str = redis_value_as_typed_string(key)?; - let val_str = redis_value_as_typed_string(val)?; - Ok((key_str, val_str)) - }) - .collect::, FalkorDBError>>()?; - - Ok(result) - }) -} - /// Contains all the info regarding an index on the database #[derive(Clone, Debug, PartialEq)] pub struct FalkorIndex { @@ -107,17 +87,18 @@ pub struct FalkorIndex { /// Words to avoid indexing as they are very common and will just be a waste of resources pub stopwords: Vec, /// Various other information for querying by the 
user - pub info: HashMap, + pub info: HashMap, } -impl TryFrom for FalkorIndex { - type Error = FalkorDBError; - +impl SchemaParsable for FalkorIndex { #[cfg_attr( feature = "tracing", tracing::instrument(name = "Parse Index", skip_all, level = "info") )] - fn try_from(value: redis::Value) -> Result { + fn parse( + value: redis::Value, + graph_schema: &mut GraphSchema, + ) -> Result { let [label, fields, field_types, language, stopwords, entity_type, status, info] = redis_value_as_vec(value).and_then(|as_vec| { as_vec.try_into().map_err(|_| { @@ -135,7 +116,7 @@ impl TryFrom for FalkorIndex { field_types: parse_types_map(field_types)?, language: redis_value_as_typed_string(language)?, stopwords: redis_value_as_typed_string_vec(stopwords)?, - info: parse_info_map(info)?, + info: parse_raw_redis_value(info, graph_schema).and_then(|val| val.into_map())?, }) } } diff --git a/src/response/lazy_result_set.rs b/src/response/lazy_result_set.rs index b1e5e73..3277e3f 100644 --- a/src/response/lazy_result_set.rs +++ b/src/response/lazy_result_set.rs @@ -3,7 +3,7 @@ * Licensed under the Server Side Public License v1 (SSPLv1). 
*/ -use crate::{parser::parse_raw_redis_value, FalkorValue, GraphSchema}; +use crate::{parser::parse_type, FalkorValue, GraphSchema}; use std::collections::VecDeque; /// A wrapper around the returned raw data, allowing parsing on demand of each result @@ -44,9 +44,103 @@ impl<'a> Iterator for LazyResultSet<'a> { )] fn next(&mut self) -> Option { self.data.pop_front().map(|current_result| { - parse_raw_redis_value(current_result, self.graph_schema) + parse_type(6, current_result, self.graph_schema) .and_then(FalkorValue::into_vec) .unwrap_or(vec![FalkorValue::Unparseable]) }) } } + +#[cfg(test)] +mod tests { + use crate::graph::HasGraphSchema; + use crate::test_utils::create_test_client; + use crate::{Edge, FalkorValue, LazyResultSet, Node}; + use std::collections::HashMap; + + #[test] + fn test_lazy_result_set() { + let client = create_test_client(); + let mut graph = client.select_graph("imdb"); + + let mut result_set = LazyResultSet::new( + vec![ + redis::Value::Bulk(vec![redis::Value::Bulk(vec![ + redis::Value::Int(8), + redis::Value::Bulk(vec![ + redis::Value::Int(203), + redis::Value::Bulk(vec![redis::Value::Int(0)]), + redis::Value::Bulk(vec![redis::Value::Bulk(vec![ + redis::Value::Int(1), + redis::Value::Int(2), + redis::Value::Data("FirstNode".to_string().into_bytes()), + ])]), + ]), + ])]), + redis::Value::Bulk(vec![redis::Value::Bulk(vec![ + redis::Value::Int(8), + redis::Value::Bulk(vec![ + redis::Value::Int(203), + redis::Value::Bulk(vec![redis::Value::Int(0)]), + redis::Value::Bulk(vec![redis::Value::Bulk(vec![ + redis::Value::Int(1), + redis::Value::Int(2), + redis::Value::Data("FirstNode".to_string().into_bytes()), + ])]), + ]), + ])]), + redis::Value::Bulk(vec![redis::Value::Bulk(vec![ + redis::Value::Int(7), + redis::Value::Bulk(vec![ + redis::Value::Int(100), + redis::Value::Int(0), + redis::Value::Int(203), + redis::Value::Int(204), + redis::Value::Bulk(vec![redis::Value::Bulk(vec![ + redis::Value::Int(1), + redis::Value::Int(2), + 
redis::Value::Data("Edge".to_string().into_bytes()), + ])]), + ]), + ])]), + ], + graph.get_graph_schema_mut(), + ); + + assert_eq!( + result_set.next(), + Some(vec![FalkorValue::Node(Node { + entity_id: 203, + labels: vec!["actor".to_string()], + properties: HashMap::from([( + "name".to_string(), + FalkorValue::String("FirstNode".to_string()) + )]), + })]) + ); + + assert_eq!( + result_set.collect::>(), + vec![ + vec![FalkorValue::Node(Node { + entity_id: 203, + labels: vec!["actor".to_string()], + properties: HashMap::from([( + "name".to_string(), + FalkorValue::String("FirstNode".to_string()) + )]), + })], + vec![FalkorValue::Edge(Edge { + entity_id: 100, + relationship_type: "act".to_string(), + src_node_id: 203, + dst_node_id: 204, + properties: HashMap::from([( + "name".to_string(), + FalkorValue::String("Edge".to_string()) + )]), + })] + ], + ); + } +} diff --git a/src/response/mod.rs b/src/response/mod.rs index f2b182d..048e678 100644 --- a/src/response/mod.rs +++ b/src/response/mod.rs @@ -165,11 +165,11 @@ impl QueryResult { #[cfg(test)] mod tests { - use crate::test_utils::open_test_graph; + use crate::test_utils::open_empty_test_graph; #[test] fn test_get_statistics() { - let mut graph = open_test_graph("imdb_stats_test"); + let mut graph = open_empty_test_graph("imdb_stats_test"); { let query_result = graph .inner diff --git a/src/value/graph_entities.rs b/src/value/graph_entities.rs index cdb353c..817e9ad 100644 --- a/src/value/graph_entities.rs +++ b/src/value/graph_entities.rs @@ -90,14 +90,10 @@ impl Edge { }) })?; - let relationship = graph_schema - .relationships() - .get(&redis_value_as_int(relationship_id_raw)?) 
- .ok_or(FalkorDBError::MissingSchemaId(SchemaType::Relationships))?; - Ok(Edge { entity_id: redis_value_as_int(entity_id)?, - relationship_type: relationship.to_string(), + relationship_type: redis_value_as_int(relationship_id_raw) + .and_then(|id| graph_schema.parse_single_id(id, SchemaType::Relationships))?, src_node_id: redis_value_as_int(src_node_id)?, dst_node_id: redis_value_as_int(dst_node_id)?, properties: graph_schema.parse_properties_map(properties)?, From d0fccdbe98677f42677677d4b31be2ff9dcf24cd Mon Sep 17 00:00:00 2001 From: Emily Matheys Date: Sun, 16 Jun 2024 11:52:31 +0300 Subject: [PATCH 04/13] Some fixes etc --- .github/workflows/main.yml | 2 +- .github/workflows/pr-checks.yml | 2 +- Cargo.lock | 1 - Cargo.toml | 3 ++- README.md | 14 +++++++++++--- src/client/asynchronous.rs | 7 ++++++- src/client/mod.rs | 2 +- src/connection/asynchronous.rs | 3 ++- src/connection/blocking.rs | 3 ++- src/error/mod.rs | 6 ++++++ src/graph/mod.rs | 4 ++-- 11 files changed, 34 insertions(+), 13 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index e3f1d43..1a1dc85 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -41,7 +41,7 @@ jobs: - uses: taiki-e/install-action@cargo-llvm-cov - uses: taiki-e/install-action@nextest - name: Generate Code Coverage - run: cargo llvm-cov nextest --all --test-threads 8 --codecov --output-path codecov.json + run: cargo llvm-cov nextest --all --codecov --output-path codecov.json - name: Upload coverage reports to Codecov uses: codecov/codecov-action@v4 with: diff --git a/.github/workflows/pr-checks.yml b/.github/workflows/pr-checks.yml index 6171a29..8d2ea94 100644 --- a/.github/workflows/pr-checks.yml +++ b/.github/workflows/pr-checks.yml @@ -52,7 +52,7 @@ jobs: - name: Populate test graph run: pip install falkordb && ./resources/populate_graph.py - name: Test - run: cargo nextest run --all test-threads 8 + run: cargo nextest run --all services: falkordb: image: 
falkordb/falkordb:edge diff --git a/Cargo.lock b/Cargo.lock index 444578f..724d261 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -138,7 +138,6 @@ dependencies = [ name = "falkordb" version = "0.1.1" dependencies = [ - "log", "parking_lot", "redis", "regex", diff --git a/Cargo.toml b/Cargo.toml index ce34e7f..d6a015e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,7 +7,6 @@ license = "SSPL-1.0" [dependencies] -log = { version = "0.4.21", default-features = false, features = ["std"] } parking_lot = { version = "0.12.3", default-features = false, features = ["deadlock_detection"] } redis = { version = "0.25.4", default-features = false, features = ["sentinel"] } regex = { version = "1.10.5", default-features = false, features = ["std", "perf", "unicode-bool", "unicode-perl"] } @@ -28,6 +27,8 @@ tokio-rustls = ["tokio", "redis/tokio-rustls-comp"] tracing = ["dep:tracing"] +[[example]] +name = "basic_usage" [[example]] name = "async_api" diff --git a/README.md b/README.md index e597e27..35699db 100644 --- a/README.md +++ b/README.md @@ -61,7 +61,7 @@ for n in nodes.data { ### `tokio` support This client supports nonblocking API using the [`tokio`](https://tokio.rs/) runtime. -The `tokio` features is enabled by default. +The `tokio` feature is enabled by default. Currently, this API requires running within a [`multi_threaded tokio scheduler`](https://docs.rs/tokio/latest/tokio/runtime/index.html#multi-thread-scheduler), and does not support the `current_thread` one, but this will probably be supported in the future. The API uses an almost identical API, but the various functions need to be awaited: @@ -95,7 +95,7 @@ it must be wrapped in an Arc> or something similar. This client is currently built upon the [`redis`](https://docs.rs/redis/latest/redis/) crate, and therefore supports TLS using its implementation, which uses either [`rustls`](https://docs.rs/rustls/latest/rustls/) or [`native_tls`](https://docs.rs/native-tls/latest/native_tls/). 
-This is not enabled by default, and the user ust opt-in by enabling the respective features: `"rustls"`/`"native-tls"`.\
+This is not enabled by default, and the user must opt-in by enabling the respective features: `"rustls"`/`"native-tls"` (when using tokio: `"tokio-rustls"`/`"tokio-native-tls"`).
 
 For Rustls:
 
@@ -103,12 +103,20 @@ For Rustls:
 falkordb = { version = "0.1", features = ["rustls"] }
 ```
 
-For NativeTLS:
+```toml
+falkordb = { version = "0.1", features = ["tokio-rustls"] }
+```
+
+For Native TLS:
 
 ```toml
 falkordb = { version = "0.1", features = ["native-tls"] }
 ```
 
+```toml
+falkordb = { version = "0.1", features = ["tokio-native-tls"] }
+```
+
 ### Tracing
 
 This crate fully supports instrumentation using the [`tracing`](https://docs.rs/tracing/latest/tracing/) crate, to use it, simply, enable the `tracing` feature:
diff --git a/src/client/asynchronous.rs b/src/client/asynchronous.rs
index aa050bd..ccd93f4 100644
--- a/src/client/asynchronous.rs
+++ b/src/client/asynchronous.rs
@@ -13,6 +13,7 @@ use crate::{
     AsyncGraph, ConfigValue, FalkorConnectionInfo, FalkorDBError, FalkorResult,
 };
 use std::{collections::HashMap, sync::Arc};
+use tokio::runtime::RuntimeFlavor;
 use tokio::{
     runtime::Handle,
     sync::{mpsc, Mutex, RwLock},
@@ -78,7 +79,11 @@ impl ProvidesSyncConnections for FalkorAsyncClientInner {
         )
     )]
     fn get_connection(&self) -> FalkorResult {
-        task::block_in_place(|| Handle::current().block_on(self._inner.lock())).get_connection()
+        let handle = Handle::try_current().map_err(|_| FalkorDBError::NoRuntime)?;
+        match handle.runtime_flavor() {
+            RuntimeFlavor::CurrentThread => Err(FalkorDBError::SingleThreadedRuntime),
+            _ => task::block_in_place(|| handle.block_on(self._inner.lock())).get_connection(),
+        }
     }
 }
 
diff --git a/src/client/mod.rs b/src/client/mod.rs
index c1465e8..690bd8c 100644
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -21,7 +21,7 @@ pub(crate) mod asynchronous;
 #[allow(clippy::large_enum_variant)]
 pub(crate) enum 
FalkorClientProvider { - #[allow(unused)] + #[cfg(test)] None, Redis { diff --git a/src/connection/asynchronous.rs b/src/connection/asynchronous.rs index 2458e91..06c30c9 100644 --- a/src/connection/asynchronous.rs +++ b/src/connection/asynchronous.rs @@ -11,8 +11,9 @@ use std::{collections::HashMap, sync::Arc}; use tokio::sync::mpsc; pub(crate) enum FalkorAsyncConnection { - #[allow(unused)] + #[cfg(test)] None, + Redis(redis::aio::MultiplexedConnection), } diff --git a/src/connection/blocking.rs b/src/connection/blocking.rs index 0396e95..42c3684 100644 --- a/src/connection/blocking.rs +++ b/src/connection/blocking.rs @@ -15,8 +15,9 @@ use std::{ }; pub(crate) enum FalkorSyncConnection { - #[allow(unused)] + #[cfg(test)] None, + Redis(redis::Connection), } diff --git a/src/error/mod.rs b/src/error/mod.rs index ac0940f..ef6368a 100644 --- a/src/error/mod.rs +++ b/src/error/mod.rs @@ -112,6 +112,12 @@ pub enum FalkorDBError { /// Invalid enum string variant was encountered when parsing #[error("Invalid enum string variant was encountered when parsing: {0}")] InvalidEnumType(String), + /// Running in a single-threaded tokio runtime! Running async operations in a blocking context will cause a panic, aborting operation + #[error("Running in a single-threaded tokio runtime! 
Running async operations in a blocking context will cause a panic, aborting operation")] + SingleThreadedRuntime, + /// No runtime detected, you are trying to run an async operation from a sync context + #[error("No runtime detected, you are trying to run an async operation from a sync context")] + NoRuntime, } impl From for FalkorDBError { diff --git a/src/graph/mod.rs b/src/graph/mod.rs index 99b0c66..d6b1db0 100644 --- a/src/graph/mod.rs +++ b/src/graph/mod.rs @@ -16,7 +16,7 @@ pub trait HasGraphSchema { fn get_graph_schema_mut(&mut self) -> &mut GraphSchema; } -pub fn generate_create_index_query( +pub(crate) fn generate_create_index_query( index_field_type: IndexType, entity_type: EntityType, label: &str, @@ -57,7 +57,7 @@ pub fn generate_create_index_query( ) } -pub fn generate_drop_index_query( +pub(crate) fn generate_drop_index_query( index_field_type: IndexType, entity_type: EntityType, label: &str, From f5d1e1de55357da87d6499efac81ac4904ba0456 Mon Sep 17 00:00:00 2001 From: Emily Matheys Date: Sun, 16 Jun 2024 11:53:09 +0300 Subject: [PATCH 05/13] Hide behind cfg test --- src/client/mod.rs | 3 +++ src/connection/asynchronous.rs | 1 + src/connection/blocking.rs | 1 + 3 files changed, 5 insertions(+) diff --git a/src/client/mod.rs b/src/client/mod.rs index 690bd8c..5f0d06d 100644 --- a/src/client/mod.rs +++ b/src/client/mod.rs @@ -47,6 +47,7 @@ impl FalkorClientProvider { .get_connection() .map_err(|err| FalkorDBError::RedisError(err.to_string()))?, ), + #[cfg(test)] FalkorClientProvider::None => Err(FalkorDBError::UnavailableProvider)?, }) } @@ -69,6 +70,7 @@ impl FalkorClientProvider { .await .map_err(|err| FalkorDBError::RedisError(err.to_string()))?, ), + #[cfg(test)] FalkorClientProvider::None => Err(FalkorDBError::UnavailableProvider)?, }) } @@ -79,6 +81,7 @@ impl FalkorClientProvider { ) { match self { FalkorClientProvider::Redis { sentinel, .. 
} => *sentinel = Some(sentinel_client), + #[cfg(test)] FalkorClientProvider::None => {} } } diff --git a/src/connection/asynchronous.rs b/src/connection/asynchronous.rs index 06c30c9..8ba70d0 100644 --- a/src/connection/asynchronous.rs +++ b/src/connection/asynchronous.rs @@ -44,6 +44,7 @@ impl FalkorAsyncConnection { .await .map_err(map_redis_err) } + #[cfg(test)] FalkorAsyncConnection::None => Ok(redis::Value::Nil), } } diff --git a/src/connection/blocking.rs b/src/connection/blocking.rs index 42c3684..87705cd 100644 --- a/src/connection/blocking.rs +++ b/src/connection/blocking.rs @@ -46,6 +46,7 @@ impl FalkorSyncConnection { } redis_conn.req_command(&cmd).map_err(map_redis_err) } + #[cfg(test)] FalkorSyncConnection::None => Ok(redis::Value::Nil), } } From 4d7993ed6dbe49f3c478fe49d55e0af5a9655ece Mon Sep 17 00:00:00 2001 From: Emily Matheys Date: Sun, 16 Jun 2024 11:53:44 +0300 Subject: [PATCH 06/13] No need in connection --- src/connection/asynchronous.rs | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/connection/asynchronous.rs b/src/connection/asynchronous.rs index 8ba70d0..46e99f9 100644 --- a/src/connection/asynchronous.rs +++ b/src/connection/asynchronous.rs @@ -11,9 +11,6 @@ use std::{collections::HashMap, sync::Arc}; use tokio::sync::mpsc; pub(crate) enum FalkorAsyncConnection { - #[cfg(test)] - None, - Redis(redis::aio::MultiplexedConnection), } @@ -44,8 +41,6 @@ impl FalkorAsyncConnection { .await .map_err(map_redis_err) } - #[cfg(test)] - FalkorAsyncConnection::None => Ok(redis::Value::Nil), } } From 0119e5d742595506fa999d3991f2581a076f5347 Mon Sep 17 00:00:00 2001 From: Emily Matheys Date: Sun, 16 Jun 2024 16:20:29 +0300 Subject: [PATCH 07/13] Final touches --- src/parser/mod.rs | 2 +- src/response/lazy_result_set.rs | 2 +- src/value/mod.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/parser/mod.rs b/src/parser/mod.rs index 04b8958..66d4320 100644 --- a/src/parser/mod.rs +++ b/src/parser/mod.rs @@ -290,7 +290,7 
@@ pub(crate) fn parse_type( 9 => FalkorValue::Path(Path::parse(val, graph_schema)?), 10 => FalkorValue::Map(parse_regular_falkor_map(val, graph_schema)?), 11 => FalkorValue::Point(Point::parse(val)?), - _ => FalkorValue::Unparseable, + _ => Err(FalkorDBError::ParsingUnknownType)?, }; Ok(res) diff --git a/src/response/lazy_result_set.rs b/src/response/lazy_result_set.rs index 3277e3f..d11b87d 100644 --- a/src/response/lazy_result_set.rs +++ b/src/response/lazy_result_set.rs @@ -46,7 +46,7 @@ impl<'a> Iterator for LazyResultSet<'a> { self.data.pop_front().map(|current_result| { parse_type(6, current_result, self.graph_schema) .and_then(FalkorValue::into_vec) - .unwrap_or(vec![FalkorValue::Unparseable]) + .unwrap_or_else(|err| vec![FalkorValue::Unparseable(err.to_string())]) }) } } diff --git a/src/value/mod.rs b/src/value/mod.rs index 84c9ea3..b32ff05 100644 --- a/src/value/mod.rs +++ b/src/value/mod.rs @@ -40,7 +40,7 @@ pub enum FalkorValue { /// A NULL type None, /// Failed parsing this value - Unparseable, + Unparseable(String), } macro_rules! 
impl_to_falkordb_value { From 544402e3617c9afc85e5612bc1dd03e4fb3ec2f0 Mon Sep 17 00:00:00 2001 From: Emily Matheys Date: Mon, 17 Jun 2024 14:16:08 +0300 Subject: [PATCH 08/13] Tokio no longer default, removed lock from tx --- .github/workflows/main.yml | 2 +- .github/workflows/pr-checks.yml | 2 +- Cargo.toml | 2 +- src/client/asynchronous.rs | 11 +++++------ src/client/blocking.rs | 10 +++++----- src/graph/query_builder.rs | 6 +++++- 6 files changed, 18 insertions(+), 15 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 1a1dc85..ef2dd38 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -41,7 +41,7 @@ jobs: - uses: taiki-e/install-action@cargo-llvm-cov - uses: taiki-e/install-action@nextest - name: Generate Code Coverage - run: cargo llvm-cov nextest --all --codecov --output-path codecov.json + run: cargo llvm-cov nextest --all --features tokio --codecov --output-path codecov.json - name: Upload coverage reports to Codecov uses: codecov/codecov-action@v4 with: diff --git a/.github/workflows/pr-checks.yml b/.github/workflows/pr-checks.yml index 8d2ea94..8b70ce2 100644 --- a/.github/workflows/pr-checks.yml +++ b/.github/workflows/pr-checks.yml @@ -52,7 +52,7 @@ jobs: - name: Populate test graph run: pip install falkordb && ./resources/populate_graph.py - name: Test - run: cargo nextest run --all + run: cargo nextest run --all --features tokio services: falkordb: image: falkordb/falkordb:edge diff --git a/Cargo.toml b/Cargo.toml index d6a015e..dbc6bac 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,7 +16,7 @@ tokio = { version = "1.38.0", default-features = false, features = ["macros", "s tracing = { version = "0.1.40", default-features = false, features = ["std", "attributes"], optional = true } [features] -default = ["tokio"] +default = [] native-tls = ["redis/tls-native-tls"] rustls = ["redis/tls-rustls"] diff --git a/src/client/asynchronous.rs b/src/client/asynchronous.rs index ccd93f4..2eec4a8 100644 
--- a/src/client/asynchronous.rs +++ b/src/client/asynchronous.rs @@ -13,10 +13,9 @@ use crate::{ AsyncGraph, ConfigValue, FalkorConnectionInfo, FalkorDBError, FalkorResult, }; use std::{collections::HashMap, sync::Arc}; -use tokio::runtime::RuntimeFlavor; use tokio::{ - runtime::Handle, - sync::{mpsc, Mutex, RwLock}, + runtime::{Handle, RuntimeFlavor}, + sync::{mpsc, Mutex}, task, }; @@ -27,7 +26,7 @@ pub struct FalkorAsyncClientInner { _inner: Mutex, connection_pool_size: u8, - connection_pool_tx: RwLock>, + connection_pool_tx: mpsc::Sender, connection_pool_rx: Mutex>, } @@ -51,7 +50,7 @@ impl FalkorAsyncClientInner { .recv() .await .ok_or(FalkorDBError::EmptyConnection)?, - self.connection_pool_tx.read().await.clone(), + self.connection_pool_tx.clone(), pool_owner, )) } @@ -125,7 +124,7 @@ impl FalkorAsyncClient { _inner: client.into(), connection_pool_size: num_connections, - connection_pool_tx: RwLock::new(connection_pool_tx), + connection_pool_tx, connection_pool_rx: Mutex::new(connection_pool_rx), }), _connection_info: connection_info, diff --git a/src/client/blocking.rs b/src/client/blocking.rs index f210301..2669e98 100644 --- a/src/client/blocking.rs +++ b/src/client/blocking.rs @@ -9,7 +9,7 @@ use crate::{ parser::{parse_config_hashmap, redis_value_as_untyped_string_vec}, ConfigValue, FalkorConnectionInfo, FalkorDBError, FalkorResult, SyncGraph, }; -use parking_lot::{Mutex, RwLock}; +use parking_lot::Mutex; use std::{ collections::HashMap, sync::{mpsc, Arc}, @@ -22,7 +22,7 @@ pub(crate) struct FalkorSyncClientInner { _inner: Mutex, connection_pool_size: u8, - connection_pool_tx: RwLock>, + connection_pool_tx: mpsc::SyncSender, connection_pool_rx: Mutex>, } @@ -44,7 +44,7 @@ impl FalkorSyncClientInner { .lock() .recv() .map_err(|_| FalkorDBError::EmptyConnection)?, - self.connection_pool_tx.read().clone(), + self.connection_pool_tx.clone(), pool_owner, )) } @@ -104,7 +104,7 @@ impl FalkorSyncClient { inner: Arc::new(FalkorSyncClientInner { _inner: 
client.into(), connection_pool_size: num_connections, - connection_pool_tx: RwLock::new(connection_pool_tx), + connection_pool_tx, connection_pool_rx: Mutex::new(connection_pool_rx), }), _connection_info: connection_info, @@ -244,7 +244,7 @@ pub(crate) fn create_empty_inner_sync_client() -> Arc { Arc::new(FalkorSyncClientInner { _inner: Mutex::new(FalkorClientProvider::None), connection_pool_size: 0, - connection_pool_tx: RwLock::new(tx), + connection_pool_tx: tx, connection_pool_rx: Mutex::new(rx), }) } diff --git a/src/graph/query_builder.rs b/src/graph/query_builder.rs index 849e980..8b12dad 100644 --- a/src/graph/query_builder.rs +++ b/src/graph/query_builder.rs @@ -6,11 +6,14 @@ use crate::{ graph::HasGraphSchema, parser::{redis_value_as_vec, SchemaParsable}, - AsyncGraph, Constraint, ExecutionPlan, FalkorDBError, FalkorIndex, FalkorResult, LazyResultSet, + Constraint, ExecutionPlan, FalkorDBError, FalkorIndex, FalkorResult, LazyResultSet, QueryResult, SyncGraph, }; use std::{collections::HashMap, fmt::Display, marker::PhantomData, ops::Not}; +#[cfg(feature = "tokio")] +use crate::AsyncGraph; + #[cfg_attr( feature = "tracing", tracing::instrument(name = "Construct Query", skip_all, level = "trace") @@ -485,6 +488,7 @@ impl<'a> ProcedureQueryBuilder<'a, QueryResult>, SyncGraph> { } } +#[cfg(feature = "tokio")] impl<'a> ProcedureQueryBuilder<'a, QueryResult>, AsyncGraph> { /// Executes the procedure call and return a [`QueryResult`] type containing a result set of [`Constraint`]s /// This functions consumes self From 7acd150b7279786d216709f820b70afa92159da4 Mon Sep 17 00:00:00 2001 From: Emily Matheys Date: Mon, 17 Jun 2024 14:19:58 +0300 Subject: [PATCH 09/13] merge use --- src/value/mod.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/value/mod.rs b/src/value/mod.rs index b32ff05..eb8b9e7 100644 --- a/src/value/mod.rs +++ b/src/value/mod.rs @@ -224,8 +224,7 @@ impl FalkorValue { #[cfg(test)] mod tests { use super::*; - use 
std::collections::HashMap; - use std::f64::consts::PI; + use std::{collections::HashMap, f64::consts::PI}; #[test] fn test_as_vec() { From 033226bb41763154284629b82641d620639c3c87 Mon Sep 17 00:00:00 2001 From: Emily Matheys Date: Mon, 17 Jun 2024 14:23:35 +0300 Subject: [PATCH 10/13] Spawn the return_to_pool command --- src/connection/asynchronous.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/connection/asynchronous.rs b/src/connection/asynchronous.rs index 46e99f9..7196511 100644 --- a/src/connection/asynchronous.rs +++ b/src/connection/asynchronous.rs @@ -123,7 +123,7 @@ impl BorrowedAsyncConnection { res => res, }; - self.return_to_pool().await; + tokio::spawn(async { self.return_to_pool().await }); res } From f957c20e83a0d2b4af87599d0cd52f08fc8316bd Mon Sep 17 00:00:00 2001 From: Emily Matheys Date: Mon, 17 Jun 2024 14:49:29 +0300 Subject: [PATCH 11/13] Use enum for typemarker --- src/graph_schema/mod.rs | 18 +++--- src/parser/mod.rs | 102 ++++++++++++++++++++++---------- src/response/index.rs | 5 +- src/response/lazy_result_set.rs | 3 +- 4 files changed, 87 insertions(+), 41 deletions(-) diff --git a/src/graph_schema/mod.rs b/src/graph_schema/mod.rs index 9f42548..b43e067 100644 --- a/src/graph_schema/mod.rs +++ b/src/graph_schema/mod.rs @@ -5,7 +5,9 @@ use crate::{ client::ProvidesSyncConnections, - parser::{parse_type, redis_value_as_int, redis_value_as_string, redis_value_as_vec}, + parser::{ + parse_type, redis_value_as_int, redis_value_as_string, redis_value_as_vec, ParserTypeMarker, + }, FalkorDBError, FalkorResult, FalkorValue, }; use std::{collections::HashMap, sync::Arc}; @@ -22,7 +24,7 @@ pub(crate) fn get_refresh_command(schema_type: SchemaType) -> &'static str { #[derive(Debug)] pub(crate) struct FKeyTypeVal { pub(crate) key: i64, - pub(crate) type_marker: i64, + pub(crate) type_marker: ParserTypeMarker, pub(crate) val: redis::Value, } @@ -44,11 +46,13 @@ impl TryFrom for FKeyTypeVal { })?; 
redis_value_as_int(key_raw).and_then(|key| { - redis_value_as_int(type_raw).map(|type_marker| FKeyTypeVal { - key, - type_marker, - val, - }) + redis_value_as_int(type_raw) + .and_then(ParserTypeMarker::try_from) + .map(|type_marker| FKeyTypeVal { + key, + type_marker, + val, + }) }) } } diff --git a/src/parser/mod.rs b/src/parser/mod.rs index 66d4320..a2f3891 100644 --- a/src/parser/mod.rs +++ b/src/parser/mod.rs @@ -8,6 +8,43 @@ use crate::{ }; use std::collections::HashMap; +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +#[allow(dead_code)] +pub(crate) enum ParserTypeMarker { + None = 1, + String = 2, + I64 = 3, + Bool = 4, + F64 = 5, + Array = 6, + Edge = 7, + Node = 8, + Path = 9, + Map = 10, + Point = 11, +} + +impl TryFrom for ParserTypeMarker { + type Error = FalkorDBError; + + fn try_from(value: i64) -> Result { + Ok(match value { + 1 => Self::None, + 2 => Self::String, + 3 => Self::I64, + 4 => Self::Bool, + 5 => Self::F64, + 6 => Self::Array, + 7 => Self::Edge, + 8 => Self::Node, + 9 => Self::Path, + 10 => Self::Map, + 11 => Self::Point, + _ => Err(FalkorDBError::ParsingUnknownType)?, + }) + } +} + pub(crate) fn redis_value_as_string(value: redis::Value) -> FalkorResult { match value { redis::Value::Data(data) => { @@ -109,7 +146,7 @@ pub(crate) fn parse_falkor_enum TryFrom<&'a str, Error = impl ToStrin ) -> FalkorResult { type_val_from_value(value) .and_then(|(type_marker, val)| { - if type_marker == 2 { + if type_marker == ParserTypeMarker::String { redis_value_as_string(val) } else { Err(FalkorDBError::ParsingArray) @@ -131,7 +168,7 @@ pub(crate) fn parse_falkor_enum TryFrom<&'a str, Error = impl ToStrin )] pub(crate) fn redis_value_as_typed_string(value: redis::Value) -> FalkorResult { type_val_from_value(value).and_then(|(type_marker, val)| { - if type_marker == 2 { + if type_marker == ParserTypeMarker::String { redis_value_as_string(val) } else { Err(FalkorDBError::ParsingString) @@ -146,7 +183,7 @@ pub(crate) fn redis_value_as_typed_string(value: 
redis::Value) -> FalkorResult FalkorResult> { type_val_from_value(value) .and_then(|(type_marker, val)| { - if type_marker == 6 { + if type_marker == ParserTypeMarker::Array { redis_value_as_vec(val) } else { Err(FalkorDBError::ParsingArray) @@ -226,7 +263,7 @@ pub(crate) fn parse_raw_redis_value( )] pub(crate) fn type_val_from_value( value: redis::Value -) -> Result<(i64, redis::Value), FalkorDBError> { +) -> Result<(ParserTypeMarker, redis::Value), FalkorDBError> { redis_value_as_vec(value).and_then(|val_vec| { val_vec .try_into() @@ -236,7 +273,9 @@ pub(crate) fn type_val_from_value( ) }) .and_then(|[type_marker_raw, val]: [redis::Value; 2]| { - redis_value_as_int(type_marker_raw).map(|type_marker| (type_marker, val)) + redis_value_as_int(type_marker_raw) + .and_then(ParserTypeMarker::try_from) + .map(|type_marker| (type_marker, val)) }) }) } @@ -266,31 +305,32 @@ fn parse_regular_falkor_map( tracing::instrument(name = "Parse Element With Type Marker", skip_all, level = "trace") )] pub(crate) fn parse_type( - type_marker: i64, + type_marker: ParserTypeMarker, val: redis::Value, graph_schema: &mut GraphSchema, ) -> Result { let res = match type_marker { - 1 => FalkorValue::None, - 2 => FalkorValue::String(redis_value_as_string(val)?), - 3 => FalkorValue::I64(redis_value_as_int(val)?), - 4 => FalkorValue::Bool(redis_value_as_bool(val)?), - 5 => FalkorValue::F64(redis_value_as_double(val)?), - 6 => FalkorValue::Array(redis_value_as_vec(val).and_then(|val_vec| { - let len = val_vec.len(); - val_vec - .into_iter() - .try_fold(Vec::with_capacity(len), |mut acc, item| { - acc.push(parse_raw_redis_value(item, graph_schema)?); - Ok(acc) - }) - })?), - 7 => FalkorValue::Edge(Edge::parse(val, graph_schema)?), - 8 => FalkorValue::Node(Node::parse(val, graph_schema)?), - 9 => FalkorValue::Path(Path::parse(val, graph_schema)?), - 10 => FalkorValue::Map(parse_regular_falkor_map(val, graph_schema)?), - 11 => FalkorValue::Point(Point::parse(val)?), - _ => 
Err(FalkorDBError::ParsingUnknownType)?, + ParserTypeMarker::None => FalkorValue::None, + ParserTypeMarker::String => FalkorValue::String(redis_value_as_string(val)?), + ParserTypeMarker::I64 => FalkorValue::I64(redis_value_as_int(val)?), + ParserTypeMarker::Bool => FalkorValue::Bool(redis_value_as_bool(val)?), + ParserTypeMarker::F64 => FalkorValue::F64(redis_value_as_double(val)?), + ParserTypeMarker::Array => { + FalkorValue::Array(redis_value_as_vec(val).and_then(|val_vec| { + let len = val_vec.len(); + val_vec + .into_iter() + .try_fold(Vec::with_capacity(len), |mut acc, item| { + acc.push(parse_raw_redis_value(item, graph_schema)?); + Ok(acc) + }) + })?) + } + ParserTypeMarker::Edge => FalkorValue::Edge(Edge::parse(val, graph_schema)?), + ParserTypeMarker::Node => FalkorValue::Node(Node::parse(val, graph_schema)?), + ParserTypeMarker::Path => FalkorValue::Path(Path::parse(val, graph_schema)?), + ParserTypeMarker::Map => FalkorValue::Map(parse_regular_falkor_map(val, graph_schema)?), + ParserTypeMarker::Point => FalkorValue::Point(Point::parse(val)?), }; Ok(res) @@ -374,7 +414,7 @@ mod tests { let mut graph = open_readonly_graph_with_modified_schema(); let res = parse_type( - 7, + ParserTypeMarker::Edge, redis::Value::Bulk(vec![ redis::Value::Int(100), // edge id redis::Value::Int(0), // edge type @@ -420,7 +460,7 @@ mod tests { let mut graph = open_readonly_graph_with_modified_schema(); let res = parse_type( - 8, + ParserTypeMarker::Node, redis::Value::Bulk(vec![ redis::Value::Int(51), // node id redis::Value::Bulk(vec![redis::Value::Int(0), redis::Value::Int(1)]), // node type @@ -470,7 +510,7 @@ mod tests { let mut graph = open_readonly_graph_with_modified_schema(); let res = parse_type( - 9, + ParserTypeMarker::Path, redis::Value::Bulk(vec![ redis::Value::Bulk(vec![ redis::Value::Bulk(vec![ @@ -536,7 +576,7 @@ mod tests { let mut graph = open_readonly_graph_with_modified_schema(); let res = parse_type( - 10, + ParserTypeMarker::Map, 
redis::Value::Bulk(vec![ redis::Value::Status("key0".to_string()), redis::Value::Bulk(vec![ @@ -574,7 +614,7 @@ mod tests { let mut graph = open_readonly_graph_with_modified_schema(); let res = parse_type( - 11, + ParserTypeMarker::Point, redis::Value::Bulk(vec![ redis::Value::Status("102.0".to_string()), redis::Value::Status("15.2".to_string()), diff --git a/src/response/index.rs b/src/response/index.rs index adb4c43..f2a83b9 100644 --- a/src/response/index.rs +++ b/src/response/index.rs @@ -3,6 +3,7 @@ * Licensed under the Server Side Public License v1 (SSPLv1). */ +use crate::parser::ParserTypeMarker; use crate::{ parser::{ parse_falkor_enum, parse_raw_redis_value, redis_value_as_string, @@ -39,7 +40,7 @@ pub enum IndexType { fn parse_types_map(value: redis::Value) -> Result>, FalkorDBError> { type_val_from_value(value).and_then(|(type_marker, val)| { - if type_marker != 10 { + if type_marker != ParserTypeMarker::Map { return Err(FalkorDBError::ParsingMap); } @@ -51,7 +52,7 @@ fn parse_types_map(value: redis::Value) -> Result let key_str = redis_value_as_string(key)?; let (val_type_marker, val) = type_val_from_value(val)?; - if val_type_marker != 6 { + if val_type_marker != ParserTypeMarker::Array { return Err(FalkorDBError::ParsingArray); } diff --git a/src/response/lazy_result_set.rs b/src/response/lazy_result_set.rs index d11b87d..7fb9187 100644 --- a/src/response/lazy_result_set.rs +++ b/src/response/lazy_result_set.rs @@ -3,6 +3,7 @@ * Licensed under the Server Side Public License v1 (SSPLv1). 
*/ +use crate::parser::ParserTypeMarker; use crate::{parser::parse_type, FalkorValue, GraphSchema}; use std::collections::VecDeque; @@ -44,7 +45,7 @@ impl<'a> Iterator for LazyResultSet<'a> { )] fn next(&mut self) -> Option { self.data.pop_front().map(|current_result| { - parse_type(6, current_result, self.graph_schema) + parse_type(ParserTypeMarker::Array, current_result, self.graph_schema) .and_then(FalkorValue::into_vec) .unwrap_or_else(|err| vec![FalkorValue::Unparseable(err.to_string())]) }) From ba73f6aff6bed85776b152434859a75e28335c64 Mon Sep 17 00:00:00 2001 From: Emily Matheys Date: Mon, 17 Jun 2024 14:54:56 +0300 Subject: [PATCH 12/13] Fenced code blank line --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 35699db..f0dc443 100644 --- a/README.md +++ b/README.md @@ -65,6 +65,7 @@ The `tokio` feature is enabled by default. Currently, this API requires running within a [`multi_threaded tokio scheduler`](https://docs.rs/tokio/latest/tokio/runtime/index.html#multi-thread-scheduler), and does not support the `current_thread` one, but this will probably be supported in the future. 
The API uses an almost identical API, but the various functions need to be awaited: + ```rust use falkordb::{FalkorClientBuilder, FalkorConnectionInfo}; From 55315505f8f0d13816d9946163319761235ce31b Mon Sep 17 00:00:00 2001 From: Emily Matheys Date: Mon, 17 Jun 2024 16:30:35 +0300 Subject: [PATCH 13/13] Update toml --- Cargo.toml | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index dbc6bac..49b2898 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,8 +3,17 @@ name = "falkordb" version = "0.1.1" edition = "2021" description = "A FalkorDB Rust client" +homepage = "https://www.falkordb.com/" +readme = "README.md" +repository = "https://github.com/FalkorDB/falkordb-rs" license = "SSPL-1.0" +categories = ["database"] +keywords = ["database", "graph-database", "database-driver", "falkordb"] +[package.metadata.docs.rs] +all-features = true + +[lib] [dependencies] parking_lot = { version = "0.12.3", default-features = false, features = ["deadlock_detection"] } @@ -32,4 +41,4 @@ name = "basic_usage" [[example]] name = "async_api" -required-features = ["tokio"] \ No newline at end of file +required-features = ["tokio"]