From ad3a9b160b6abcbf1553f0feeb4a2f9eab136a3d Mon Sep 17 00:00:00 2001 From: altanozlu Date: Tue, 25 May 2021 17:50:42 +0300 Subject: [PATCH 01/28] parse parameter status and fix crdb error in query! --- sqlx-core/src/postgres/connection/describe.rs | 12 ++++--- .../src/postgres/connection/establish.rs | 16 ++++++++++ sqlx-core/src/postgres/connection/mod.rs | 11 +++++++ sqlx-core/src/postgres/connection/stream.rs | 8 ----- sqlx-core/src/postgres/message/mod.rs | 2 ++ .../src/postgres/message/parameter_status.rs | 32 +++++++++++++++++++ 6 files changed, 68 insertions(+), 13 deletions(-) create mode 100644 sqlx-core/src/postgres/message/parameter_status.rs diff --git a/sqlx-core/src/postgres/connection/describe.rs b/sqlx-core/src/postgres/connection/describe.rs index 097058cef2..4563690551 100644 --- a/sqlx-core/src/postgres/connection/describe.rs +++ b/sqlx-core/src/postgres/connection/describe.rs @@ -400,12 +400,14 @@ SELECT oid FROM pg_catalog.pg_type WHERE typname ILIKE $1 .await?; // patch up our null inference with data from EXPLAIN - let nullable_patch = self - .nullables_from_explain(stmt_id, meta.parameters.len()) - .await?; + if !self.has_parameter_status("crdb_version") { + let nullable_patch = self + .nullables_from_explain(stmt_id, meta.parameters.len()) + .await?; - for (nullable, patch) in nullables.iter_mut().zip(nullable_patch) { - *nullable = patch.or(*nullable); + for (nullable, patch) in nullables.iter_mut().zip(nullable_patch) { + *nullable = patch.or(*nullable); + } } Ok(nullables) diff --git a/sqlx-core/src/postgres/connection/establish.rs b/sqlx-core/src/postgres/connection/establish.rs index 59e3727c24..c40cd1a78b 100644 --- a/sqlx-core/src/postgres/connection/establish.rs +++ b/sqlx-core/src/postgres/connection/establish.rs @@ -4,6 +4,7 @@ use crate::common::StatementCache; use crate::error::Error; use crate::io::Decode; use crate::postgres::connection::{sasl, stream::PgStream, tls}; +use crate::postgres::message::ParameterStatus; use 
crate::postgres::message::{ Authentication, BackendKeyData, MessageFormat, Password, ReadyForQuery, Startup, }; @@ -56,6 +57,7 @@ impl PgConnection { let mut process_id = 0; let mut secret_key = 0; let transaction_status; + let mut parameter_status_map = HashMap::new(); loop { let message = stream.recv().await?; @@ -116,12 +118,25 @@ impl PgConnection { MessageFormat::ReadyForQuery => { // start-up is completed. The frontend can now issue commands + println!("message.contents.len(): {}", message.contents.len()); transaction_status = ReadyForQuery::decode(message.contents)?.transaction_status; break; } + MessageFormat::ParameterStatus => { + // informs the frontend about the current (initial) + // setting of backend parameters + + let parameter_status: ParameterStatus = message.decode()?; + if parameter_status.key == "crdb_version" { + //Currently all CockroachDB versions requires this + println!("{}", parameter_status.val); + } + parameter_status_map.insert(parameter_status.key, parameter_status.val); + } + _ => { return Err(err_protocol!( "establish: unexpected message: {:?}", @@ -143,6 +158,7 @@ impl PgConnection { cache_type_oid: HashMap::new(), cache_type_info: HashMap::new(), log_settings: options.log_settings.clone(), + parameter_status_map, }) } } diff --git a/sqlx-core/src/postgres/connection/mod.rs b/sqlx-core/src/postgres/connection/mod.rs index e0238f5911..7f73aaab56 100644 --- a/sqlx-core/src/postgres/connection/mod.rs +++ b/sqlx-core/src/postgres/connection/mod.rs @@ -62,6 +62,9 @@ pub struct PgConnection { pub(crate) transaction_depth: usize, log_settings: LogSettings, + + // parameter status info from server + parameter_status_map: HashMap, } impl PgConnection { @@ -82,6 +85,14 @@ impl PgConnection { Ok(()) } + fn get_parameter_status(&self, key: &str) -> Option<&String> { + self.parameter_status_map.get(key) + } + + fn has_parameter_status(&self, key: &str) -> bool { + self.parameter_status_map.contains_key(key) + } + async fn 
recv_ready_for_query(&mut self) -> Result<(), Error> { let r: ReadyForQuery = self .stream diff --git a/sqlx-core/src/postgres/connection/stream.rs b/sqlx-core/src/postgres/connection/stream.rs index f61bda62b8..0d8017cef1 100644 --- a/sqlx-core/src/postgres/connection/stream.rs +++ b/sqlx-core/src/postgres/connection/stream.rs @@ -104,14 +104,6 @@ impl PgStream { } } - MessageFormat::ParameterStatus => { - // informs the frontend about the current (initial) - // setting of backend parameters - - // we currently have no use for that data so we promptly ignore this message - continue; - } - MessageFormat::NoticeResponse => { // do we need this to be more configurable? // if you are reading this comment and think so, open an issue diff --git a/sqlx-core/src/postgres/message/mod.rs b/sqlx-core/src/postgres/message/mod.rs index 6c8d1f3023..91aa578911 100644 --- a/sqlx-core/src/postgres/message/mod.rs +++ b/sqlx-core/src/postgres/message/mod.rs @@ -14,6 +14,7 @@ mod execute; mod flush; mod notification; mod parameter_description; +mod parameter_status; mod parse; mod password; mod query; @@ -37,6 +38,7 @@ pub use execute::Execute; pub use flush::Flush; pub use notification::Notification; pub use parameter_description::ParameterDescription; +pub use parameter_status::ParameterStatus; pub use parse::Parse; pub use password::Password; pub use query::Query; diff --git a/sqlx-core/src/postgres/message/parameter_status.rs b/sqlx-core/src/postgres/message/parameter_status.rs new file mode 100644 index 0000000000..461df72b22 --- /dev/null +++ b/sqlx-core/src/postgres/message/parameter_status.rs @@ -0,0 +1,32 @@ +use bytes::Bytes; + +use crate::error::Error; +use crate::io::{BufExt, Decode}; + +#[derive(Debug)] +pub struct ParameterStatus { + pub(crate) key: String, + pub(crate) val: String, +} +impl Decode<'_> for ParameterStatus { + #[inline] + fn decode_with(mut buf: Bytes, _: ()) -> Result { + let key = buf.get_str_nul()?.to_owned(); + let val = 
buf.get_str_nul()?.to_owned(); + + Ok(Self { key, val }) + } +} + +#[test] +fn test_decode_parameter_status_response() { + const PARAMETER_STATUS_RESPONSE: &[u8] = b"crdb_version\0CockroachDB CCL v21.1.0 (x86_64-unknown-linux-gnu, built 2021/05/17 13:49:40, go1.15.11)\0"; + + let message = ParameterStatus::decode(Bytes::from(PARAMETER_STATUS_RESPONSE)).unwrap(); + + assert_eq!(message.key, "crdb_version"); + assert_eq!( + message.val, + "CockroachDB CCL v21.1.0 (x86_64-unknown-linux-gnu, built 2021/05/17 13:49:40, go1.15.11)" + ); +} From e5b8681680bf3d5345f253b6ee14bfb982793789 Mon Sep 17 00:00:00 2001 From: altanozlu Date: Tue, 25 May 2021 17:52:44 +0300 Subject: [PATCH 02/28] unnecessary code --- sqlx-core/src/postgres/connection/establish.rs | 5 ----- 1 file changed, 5 deletions(-) diff --git a/sqlx-core/src/postgres/connection/establish.rs b/sqlx-core/src/postgres/connection/establish.rs index c40cd1a78b..05e3cdaaf9 100644 --- a/sqlx-core/src/postgres/connection/establish.rs +++ b/sqlx-core/src/postgres/connection/establish.rs @@ -118,7 +118,6 @@ impl PgConnection { MessageFormat::ReadyForQuery => { // start-up is completed. 
The frontend can now issue commands - println!("message.contents.len(): {}", message.contents.len()); transaction_status = ReadyForQuery::decode(message.contents)?.transaction_status; @@ -130,10 +129,6 @@ impl PgConnection { // setting of backend parameters let parameter_status: ParameterStatus = message.decode()?; - if parameter_status.key == "crdb_version" { - //Currently all CockroachDB versions requires this - println!("{}", parameter_status.val); - } parameter_status_map.insert(parameter_status.key, parameter_status.val); } From 4986ea2e5971ce1ab561bb84aa6be9c880a1d68e Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Mon, 31 May 2021 00:52:25 +0200 Subject: [PATCH 03/28] Improve root README.md and sqlx-cli/README.md (#1262) * readme: Fix inconsistent list style * readme: Improve text alignment * readme: Fix missing links * readme: Consistently use code formatting for runtime & TLS crates and dedup links * readme: Add SQLx is not an ORM section * readme: Improve documentation about offline mode --- README.md | 50 ++++++++++++++++++++++++++++++++++++---------- sqlx-cli/README.md | 17 +++++++++++----- 2 files changed, 51 insertions(+), 16 deletions(-) diff --git a/README.md b/README.md index df8a0e056d..7b14e4574b 100644 --- a/README.md +++ b/README.md @@ -50,7 +50,7 @@
- Built with ❤️ by The LaunchBadge team + Built with ❤️ by The LaunchBadge team

@@ -59,20 +59,23 @@ SQLx is an async, pure Rust SQL crate featuring compile-time check - **Truly Asynchronous**. Built from the ground-up using async/await for maximum concurrency. -- **Type-safe SQL** (if you want it) without DSLs. Use the `query!()` macro to check your SQL and bind parameters at - compile time. (You can still use dynamic SQL queries if you like.) +- **Compile-time checked queries** (if you want). See [SQLx is not an ORM](#sqlx-is-not-an-orm). - **Database Agnostic**. Support for [PostgreSQL], [MySQL], [SQLite], and [MSSQL]. - **Pure Rust**. The Postgres and MySQL/MariaDB drivers are written in pure Rust using **zero** unsafe†† code. -* **Runtime Agnostic**. Works on different runtimes ([async-std](https://crates.io/crates/async-std) / [tokio](https://crates.io/crates/tokio) / [actix](https://crates.io/crates/actix-rt)) and TLS backends ([native-tls](https://crates.io/crates/native-tls), [rustls](https://crates.io/crates/rustls)). +- **Runtime Agnostic**. Works on different runtimes ([`async-std`] / [`tokio`] / [`actix`]) and TLS backends ([`native-tls`], [`rustls`]). -† The SQLite driver uses the libsqlite3 C library as SQLite is an embedded database (the only way -we could be pure Rust for SQLite is by porting _all_ of SQLite to Rust). + -†† SQLx uses `#![forbid(unsafe_code)]` unless the `sqlite` feature is enabled. As the SQLite driver interacts -with C, those interactions are `unsafe`. +† The SQLite driver uses the libsqlite3 C library as SQLite is an embedded database (the only way +we could be pure Rust for SQLite is by porting _all_ of SQLite to Rust). + +†† SQLx uses `#![forbid(unsafe_code)]` unless the `sqlite` feature is enabled. As the SQLite driver interacts +with C, those interactions are `unsafe`. 
+ + [postgresql]: http://postgresql.org/ [sqlite]: https://sqlite.org/ @@ -108,6 +111,8 @@ SQLx is compatible with the [`async-std`], [`tokio`] and [`actix`] runtimes; and [`async-std`]: https://github.com/async-rs/async-std [`tokio`]: https://github.com/tokio-rs/tokio [`actix`]: https://github.com/actix/actix-net +[`native-tls`]: https://crates.io/crates/native-tls +[`rustls`]: https://crates.io/crates/rustls ```toml # Cargo.toml @@ -118,7 +123,7 @@ sqlx = { version = "0.5", features = [ "runtime-tokio-rustls" ] } sqlx = { version = "0.5", features = [ "runtime-async-std-native-tls" ] } ``` -The runtime and TLS backend not being separate feature sets to select is a workaround for a [Cargo issue](https://github.com/rust-lang/cargo/issues/3494). +The runtime and TLS backend not being separate feature sets to select is a workaround for a [Cargo issue](https://github.com/rust-lang/cargo/issues/3494). #### Cargo Feature Flags @@ -168,6 +173,24 @@ sqlx = { version = "0.5", features = [ "runtime-async-std-native-tls" ] } - `tls`: Add support for TLS connections. +## SQLx is not an ORM! + +SQLx supports **compile-time checked queries**. It does not, however, do this by providing a Rust +API or DSL (domain-specific language) for building queries. Instead, it provides macros that take +regular SQL as an input and ensure that it is valid for your database. The way this works is that +SQLx connects to your development DB at compile time to have the database itself verify (and return +some info on) your SQL queries. 
This has some potentially surprising implications: + +- Since SQLx never has to parse the SQL string itself, any syntax that the development DB accepts + can be used (including things added by database extensions) +- Due to the different amount of information databases let you retrieve about queries, the extent of + SQL verification you get from the query macros depends on the database + +**If you are looking for an (asynchronous) ORM,** you can check out [`ormx`], which is built on top +of SQLx. + +[`ormx`]: https://crates.io/crates/ormx + ## Usage ### Quickstart @@ -336,8 +359,8 @@ Differences from `query()`: ``` The biggest downside to `query!()` is that the output type cannot be named (due to Rust not -officially supporting anonymous records). To address that, there is a `query_as!()` macro that is identical -except that you can name the output type. +officially supporting anonymous records). To address that, there is a `query_as!()` macro that is +mostly identical except that you can name the output type. ```rust // no traits are needed @@ -359,6 +382,11 @@ WHERE organization = ? // countries[0].count ``` +To avoid the need of having a development database around to compile the project even when no +modifications (to the database-accessing parts of the code) are done, you can enable "offline mode" +to cache the results of the SQL query analysis using the `sqlx` command-line tool. See +[sqlx-cli/README.md](./sqlx-cli/README.md#enable-building-in-offline-mode-with-query). + ## Safety This crate uses `#![forbid(unsafe_code)]` to ensure everything is implemented in 100% Safe Rust. diff --git a/sqlx-cli/README.md b/sqlx-cli/README.md index 0c47941c4e..f098dad8ab 100644 --- a/sqlx-cli/README.md +++ b/sqlx-cli/README.md @@ -49,17 +49,19 @@ $ sqlx migrate run Compares the migration history of the running database against the `migrations/` folder and runs any scripts that are still pending. 
-#### Enable building in "offline" mode with `query!()` +#### Enable building in "offline mode" with `query!()` Note: must be run as `cargo sqlx`. ```bash cargo sqlx prepare ``` -Saves query data to `sqlx-data.json` in the current directory; check this file into version control -and an active database connection will no longer be needed to build your project. -Has no effect unless the `offline` feature of `sqlx` is enabled in your project. Omitting that feature is the most likely cause if you get a `sqlx-data.json` file that looks like this: +Saves query metadata to `sqlx-data.json` in the current directory; check this file into version +control and an active database connection will no longer be needed to build your project. + +Has no effect unless the `offline` feature of `sqlx` is enabled in your project. Omitting that +feature is the most likely cause if you get a `sqlx-data.json` file that looks like this: ```json { @@ -67,10 +69,12 @@ Has no effect unless the `offline` feature of `sqlx` is enabled in your project. } ``` ----- +--- + ```bash cargo sqlx prepare --check ``` + Exits with a nonzero exit status if the data in `sqlx-data.json` is out of date with the current database schema and queries in the project. Intended for use in Continuous Integration. @@ -79,3 +83,6 @@ database schema and queries in the project. Intended for use in Continuous Integ To make sure an accidentally-present `DATABASE_URL` environment variable or `.env` file does not result in `cargo build` (trying to) access the database, you can set the `SQLX_OFFLINE` environment variable to `true`. + +If you want to make this the default, just add it to your `.env` file. `cargo sqlx prepare` will +still do the right thing and connect to the database. 
From 358b80f62e84392c5efbfd4fa1e21a44c0705e42 Mon Sep 17 00:00:00 2001 From: Rohan Sharma Date: Wed, 2 Jun 2021 00:50:12 +0530 Subject: [PATCH 04/28] Rename _expr to expr (#1264) --- sqlx-macros/src/query/args.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/sqlx-macros/src/query/args.rs b/sqlx-macros/src/query/args.rs index b46b7f664e..71094a67d2 100644 --- a/sqlx-macros/src/query/args.rs +++ b/sqlx-macros/src/query/args.rs @@ -79,13 +79,13 @@ pub fn quote_args( use ::sqlx::ty_match::{WrapSameExt as _, MatchBorrowExt as _}; // evaluate the expression only once in case it contains moves - let _expr = ::sqlx::ty_match::dupe_value(#name); + let expr = ::sqlx::ty_match::dupe_value(#name); - // if `_expr` is `Option`, get `Option<$ty>`, otherwise `$ty` - let ty_check = ::sqlx::ty_match::WrapSame::<#param_ty, _>::new(&_expr).wrap_same(); + // if `expr` is `Option`, get `Option<$ty>`, otherwise `$ty` + let ty_check = ::sqlx::ty_match::WrapSame::<#param_ty, _>::new(&expr).wrap_same(); - // if `_expr` is `&str`, convert `String` to `&str` - let (mut _ty_check, match_borrow) = ::sqlx::ty_match::MatchBorrow::new(ty_check, &_expr); + // if `expr` is `&str`, convert `String` to `&str` + let (mut _ty_check, match_borrow) = ::sqlx::ty_match::MatchBorrow::new(ty_check, &expr); _ty_check = match_borrow.match_borrow(); From e33e4510fcab184f6bb655be3ba811e7f8a5b63f Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Wed, 16 Jun 2021 00:12:49 +0200 Subject: [PATCH 05/28] Fix error message about wildcard overrides (#1276) Co-authored-by: Austin Bonander --- sqlx-macros/src/query/mod.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sqlx-macros/src/query/mod.rs b/sqlx-macros/src/query/mod.rs index eed89967f0..91c706dc42 100644 --- a/sqlx-macros/src/query/mod.rs +++ b/sqlx-macros/src/query/mod.rs @@ -303,7 +303,8 @@ where for rust_col in &columns { if rust_col.type_.is_wildcard() { return Err( - "columns may not have wildcard overrides 
in `query!()` or `query_as!()" + "wildcard overrides are only allowed with an explicit record type, \ + e.g. `query_as!()` and its variants" .into(), ); } From bb330f8e68933ee5bd8a301e5f1e72b2b6ed98bf Mon Sep 17 00:00:00 2001 From: Austin Bonander Date: Wed, 14 Jul 2021 14:29:42 -0700 Subject: [PATCH 06/28] feat(docs): add an FAQ (#1319) --- FAQ.md | 173 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ README.md | 6 ++ 2 files changed, 179 insertions(+) create mode 100644 FAQ.md diff --git a/FAQ.md b/FAQ.md new file mode 100644 index 0000000000..9e4b5d1c64 --- /dev/null +++ b/FAQ.md @@ -0,0 +1,173 @@ +SQLx Frequently Asked Questions +=============================== + +---------------------------------------------------------------- +### How can I do a `SELECT ... WHERE foo IN (...)` query? + + +In 0.6 SQLx will support binding arrays as a comma-separated list for every database, +but unfortunately there's no general solution for that currently in SQLx itself. +You would need to manually generate the query, at which point it +cannot be used with the macros. + +However, **in Postgres** you can work around this limitation by binding the arrays directly and using `= ANY()`: + +```rust +let db: PgPool = /* ... */; +let foo_ids: Vec = vec![/* ... */]; + +let foos = sqlx::query!( + "SELECT * FROM foo WHERE id = ANY($1)", + // a bug of the parameter typechecking code requires all array parameters to be slices + &foo_ids[..] +) + .fetch_all(&db) + .await?; +``` + +Even when SQLx gains generic placeholder expansion for arrays, this will still be the optimal way to do it for Postgres, +as comma-expansion means each possible length of the array generates a different query +(and represents a combinatorial explosion if more than one array is used). + +See also: [Postgres Manual, Section 9.24: Row and Array Comparisons](https://www.postgresql.org/docs/current/functions-comparisons.html) + +----- +### How can I bind an array to a `VALUES()` clause? How can I do bulk inserts? 
+ +Like the above, SQLx currently does not support this in the general case right now but will in 0.6. + +However, **Postgres** also has a feature to save the day here! You can pass an array to `UNNEST()` and +it will treat it as a temporary table: + +```rust +let foo_texts: Vec = vec![/* ... */]; + +sqlx::query!( + // because `UNNEST()` is a generic function, Postgres needs the cast on the parameter here + // in order to know what type to expect there when preparing the query + "INSERT INTO foo(text_column) SELECT * FROM UNNEST($1::text[])", + &foo_texts[..] +) + .execute(&db) + .await?; +``` + +`UNNEST()` can also take more than one array, in which case it'll treat each array as a column in the temporary table: + +```rust +// this solution currently requires each column to be its own vector +// in 0.6 we're aiming to allow binding iterators directly as arrays +// so you can take a vector of structs and bind iterators mapping to each field +let foo_texts: Vec = vec![/* ... */]; +let foo_bools: Vec = vec![/* ... */]; +let foo_ints: Vec = vec![/* ... */]; + +sqlx::query!( + " + INSERT INTO foo(text_column, bool_column, int_column) + SELECT * FROM UNNEST($1::text[], $2::bool[], $3::int8[]]) + ", + &foo_texts[..], + &foo_bools[..], + &foo_ints[..] +) + .execute(&db) + .await?; +``` + +Again, even with comma-expanded lists in 0.6 this will likely still be the most performant way to run bulk inserts +with Postgres--at least until we get around to implementing an interface for `COPY FROM STDIN`, though +this solution with `UNNEST()` will still be more flexible as you can use it in queries that are more complex +than just inserting into a table. + +Note that if some vectors are shorter than others, `UNNEST` will fill the corresponding columns with `NULL`s +to match the longest vector. 
+ +For example, if `foo_texts` is length 5, `foo_bools` is length 4, `foo_ints` is length 3, the resulting table will +look like this: + +| Row # | `text_column` | `bool_column` | `int_column` | +| ----- | -------------- | -------------- | ------------- | +| 1 | `foo_texts[0]` | `foo_bools[0]` | `foo_ints[0]` | +| 2 | `foo_texts[1]` | `foo_bools[1]` | `foo_ints[1]` | +| 3 | `foo_texts[2]` | `foo_bools[2]` | `foo_ints[2]` | +| 4 | `foo_texts[3]` | `foo_bools[3]` | `NULL` | +| 5 | `foo_texts[4]` | `NULL` | `NULL` | + +See Also: +* [Postgres Manual, Section 7.2.1.4: Table Functions](https://www.postgresql.org/docs/current/queries-table-expressions.html#QUERIES-TABLEFUNCTIONS) +* [Postgres Manual, Section 9.19: Array Functions and Operators](https://www.postgresql.org/docs/current/functions-array.html) + +---- +### How do I compile with the macros without needing a database, e.g. in CI? + +The macros support an offline mode which saves data for existing queries to a JSON file, +so the macros can just read that file instead of talking to a database. + +See the following: + +* [the docs for `query!()`](https://docs.rs/sqlx/0.5.5/sqlx/macro.query.html#offline-mode-requires-the-offline-feature) +* [the README for `sqlx-cli`](sqlx-cli/README.md#enable-building-in-offline-mode-with-query) + +To keep `sqlx-data.json` up-to-date you need to run `cargo sqlx prepare` before every commit that +adds or changes a query; you can do this with a Git pre-commit hook: + +```shell +$ echo "cargo sqlx prepare > /dev/null 2>&1; git add sqlx-data.json > /dev/null" > .git/hooks/pre-commit +``` + +Note that this may make committing take some time as it'll cause your project to be recompiled, and +as an ergonomic choice it does _not_ block committing if `cargo sqlx prepare` fails. + +We're working on a way for the macros to save their data to the filesystem automatically which should be part of SQLx 0.6, +so your pre-commit hook would then just need to stage the changed files. 
+ +---- + +### How do the query macros work under the hood? + +The macros work by talking to the database at compile time. When a(n) SQL client asks to create a prepared statement +from a query string, the response from the server typically includes information about the following: + +* the number of bind parameters, and their expected types if the database is capable of inferring that +* the number, names and types of result columns, as well as the original table and columns names before aliasing + +In MySQL/MariaDB, we also get boolean flag signaling if a column is `NOT NULL`, however +in Postgres and SQLite, we need to do a bit more work to determine whether a column can be `NULL` or not. + +After preparing, the Postgres driver will first look up the result columns in their source table and check if they have +a `NOT NULL` constraint. Then, it will execute `EXPLAIN (VERBOSE, FORMAT JSON) ` to determine which columns +come from half-open joins (LEFT and RIGHT joins), which makes a normally `NOT NULL` column nullable. Since the +`EXPLAIN VERBOSE` format is not stable or completely documented, this inference isn't perfect. However, it does err on +the side of producing false-positives (marking a column nullable when it's `NOT NULL`) to avoid errors at runtime. + +If you do encounter false-positives please feel free to open an issue; make sure to include your query, any relevant +schema as well as the output of `EXPLAIN (VERBOSE, FORMAT JSON) ` which will make this easier to debug. + +The SQLite driver will pull the bytecode of the prepared statement and step through it to find any instructions +that produce a null value for any column in the output. + +--- +### Why can't SQLx just look at my database schema/migrations and parse the SQL itself? + +Take a moment and think of the effort that would be required to do that. 
+ +To implement this for a single database driver, SQLx would need to: + +* know how to parse SQL, and not just standard SQL but the specific dialect of that particular database +* know how to analyze and typecheck SQL queries in the context of the original schema +* if inferring schema from migrations it would need to simulate all the schema-changing effects of those migrations + +This is effectively reimplementing a good chunk of the database server's frontend, + +_and_ maintaining and ensuring correctness of that reimplementation, + +including bugs and idiosyncrasies, + +for the foreseeable future, + +for _every_ database we intend to support. + +Even Sisyphus would pity us. + +--------- diff --git a/README.md b/README.md index 7b14e4574b..069c01c601 100644 --- a/README.md +++ b/README.md @@ -55,6 +55,12 @@
+
+
Have a question? Be sure to check the FAQ first!
+
+ +
+ SQLx is an async, pure Rust SQL crate featuring compile-time checked queries without a DSL. - **Truly Asynchronous**. Built from the ground-up using async/await for maximum concurrency. From 6c8f61f07cd56c5c2fd30902322d3180ae2dff05 Mon Sep 17 00:00:00 2001 From: Austin Bonander Date: Fri, 16 Jul 2021 12:57:52 -0700 Subject: [PATCH 07/28] doc(faq): mention how to invert `= ANY($1)` --- FAQ.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/FAQ.md b/FAQ.md index 9e4b5d1c64..5cd1c96301 100644 --- a/FAQ.md +++ b/FAQ.md @@ -29,6 +29,14 @@ Even when SQLx gains generic placeholder expansion for arrays, this will still b as comma-expansion means each possible length of the array generates a different query (and represents a combinatorial explosion if more than one array is used). +Note that you can use any operator that returns a boolean, but beware that `!= ANY($1)` is **not equivalent** to `NOT IN (...)` as it effectively works like this: + +`lhs != ANY(rhs) -> lhs != rhs[0] OR lhs != rhs[1] OR ... lhs != rhs[length(rhs) - 1]` + +The equivalent of `NOT IN (...)` would be `!= ALL($1)`: + +`lhs != ALL(rhs) -> lhs != rhs[0] AND lhs != rhs[1] AND ... 
lhs != rhs[length(rhs) - 1]` + See also: [Postgres Manual, Section 9.24: Row and Array Comparisons](https://www.postgresql.org/docs/current/functions-comparisons.html) ----- From b54adfad7cbf88235141b1f139e6312c142f2027 Mon Sep 17 00:00:00 2001 From: Austin Bonander Date: Fri, 16 Jul 2021 13:01:42 -0700 Subject: [PATCH 08/28] doc(faq): empty array cases for `ANY` and `ALL` --- FAQ.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/FAQ.md b/FAQ.md index 5cd1c96301..7d78128196 100644 --- a/FAQ.md +++ b/FAQ.md @@ -31,12 +31,15 @@ as comma-expansion means each possible length of the array generates a different Note that you can use any operator that returns a boolean, but beware that `!= ANY($1)` is **not equivalent** to `NOT IN (...)` as it effectively works like this: -`lhs != ANY(rhs) -> lhs != rhs[0] OR lhs != rhs[1] OR ... lhs != rhs[length(rhs) - 1]` +`lhs != ANY(rhs) -> false OR lhs != rhs[0] OR lhs != rhs[1] OR ... lhs != rhs[length(rhs) - 1]` The equivalent of `NOT IN (...)` would be `!= ALL($1)`: `lhs != ALL(rhs) -> lhs != rhs[0] AND lhs != rhs[1] AND ... lhs != rhs[length(rhs) - 1]` +Note that `ANY` with any operator with an empty array will return `false`, thus the leading `false OR ...`, +while `ALL` with an empty array will return `true`, thus the leading `true AND ...`. 
+ See also: [Postgres Manual, Section 9.24: Row and Array Comparisons](https://www.postgresql.org/docs/current/functions-comparisons.html) ----- From 820498919195961b34eb654846637abb8a7540ea Mon Sep 17 00:00:00 2001 From: Austin Bonander Date: Fri, 16 Jul 2021 13:03:50 -0700 Subject: [PATCH 09/28] doc(faq): fix wording for empty cases --- FAQ.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/FAQ.md b/FAQ.md index 7d78128196..5f2c25190a 100644 --- a/FAQ.md +++ b/FAQ.md @@ -35,10 +35,10 @@ Note that you can use any operator that returns a boolean, but beware that `!= A The equivalent of `NOT IN (...)` would be `!= ALL($1)`: -`lhs != ALL(rhs) -> lhs != rhs[0] AND lhs != rhs[1] AND ... lhs != rhs[length(rhs) - 1]` +`lhs != ALL(rhs) -> true AND lhs != rhs[0] AND lhs != rhs[1] AND ... lhs != rhs[length(rhs) - 1]` -Note that `ANY` with any operator with an empty array will return `false`, thus the leading `false OR ...`, -while `ALL` with an empty array will return `true`, thus the leading `true AND ...`. +Note that `ANY` using any operator and passed an empty array will return `false`, thus the leading `false OR ...`. +Meanwhile, `ALL` with any operator and passed an empty array will return `true`, thus the leading `true AND ...`. 
See also: [Postgres Manual, Section 9.24: Row and Array Comparisons](https://www.postgresql.org/docs/current/functions-comparisons.html) From 0abbcc510f4b1b6eb015986941b3dec2e80f0abd Mon Sep 17 00:00:00 2001 From: Akhil Velagapudi <4@4khil.com> Date: Fri, 16 Jul 2021 15:25:32 -0700 Subject: [PATCH 10/28] Update crc 1.8.1 -> 2.0.0 (#1256) --- Cargo.lock | 18 +++++++++--------- sqlx-core/Cargo.toml | 2 +- sqlx-core/src/mysql/migrate.rs | 4 ++-- sqlx-core/src/postgres/migrate.rs | 4 ++-- 4 files changed, 14 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bab48c5036..f8a42fa104 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -346,12 +346,6 @@ dependencies = [ "serde", ] -[[package]] -name = "build_const" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4ae4235e6dac0694637c763029ecea1a2ec9e4e06ec2729bd21ba4d9c863eb7" - [[package]] name = "bumpalo" version = "3.6.1" @@ -523,13 +517,19 @@ dependencies = [ [[package]] name = "crc" -version = "1.8.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d663548de7f5cca343f1e0a48d14dcfb0e9eb4e079ec58883b7251539fa10aeb" +checksum = "10c2722795460108a7872e1cd933a85d6ec38abc4baecad51028f702da28889f" dependencies = [ - "build_const", + "crc-catalog", ] +[[package]] +name = "crc-catalog" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccaeedb56da03b09f598226e25e80088cb4cd25f316e6e4df7d695f0feeb1403" + [[package]] name = "criterion" version = "0.3.4" diff --git a/sqlx-core/Cargo.toml b/sqlx-core/Cargo.toml index bfa3f5f9ae..654f9b152f 100644 --- a/sqlx-core/Cargo.toml +++ b/sqlx-core/Cargo.toml @@ -109,7 +109,7 @@ bitflags = { version = "1.2.1", default-features = false } bytes = "1.0.0" byteorder = { version = "1.3.4", default-features = false, features = ["std"] } chrono = { version = "0.4.11", default-features = false, features = ["clock"], optional = true 
} -crc = { version = "1.8.1", optional = true } +crc = { version = "2.0.0", optional = true } crossbeam-queue = "0.3.1" crossbeam-channel = "0.5.0" crossbeam-utils = { version = "0.8.1", default-features = false } diff --git a/sqlx-core/src/mysql/migrate.rs b/sqlx-core/src/mysql/migrate.rs index 248fd6298e..c3898e9fc2 100644 --- a/sqlx-core/src/mysql/migrate.rs +++ b/sqlx-core/src/mysql/migrate.rs @@ -8,7 +8,6 @@ use crate::mysql::{MySql, MySqlConnectOptions, MySqlConnection}; use crate::query::query; use crate::query_as::query_as; use crate::query_scalar::query_scalar; -use crc::crc32; use futures_core::future::BoxFuture; use std::str::FromStr; use std::time::Duration; @@ -266,9 +265,10 @@ async fn current_database(conn: &mut MySqlConnection) -> Result String { + const CRC_IEEE: crc::Crc = crc::Crc::::new(&crc::CRC_32_ISO_HDLC); // 0x3d32ad9e chosen by fair dice roll format!( "{:x}", - 0x3d32ad9e * (crc32::checksum_ieee(database_name.as_bytes()) as i64) + 0x3d32ad9e * (CRC_IEEE.checksum(database_name.as_bytes()) as i64) ) } diff --git a/sqlx-core/src/postgres/migrate.rs b/sqlx-core/src/postgres/migrate.rs index 142918a69e..7a7127624a 100644 --- a/sqlx-core/src/postgres/migrate.rs +++ b/sqlx-core/src/postgres/migrate.rs @@ -8,7 +8,6 @@ use crate::postgres::{PgConnectOptions, PgConnection, Postgres}; use crate::query::query; use crate::query_as::query_as; use crate::query_scalar::query_scalar; -use crc::crc32; use futures_core::future::BoxFuture; use std::str::FromStr; use std::time::Duration; @@ -281,6 +280,7 @@ async fn current_database(conn: &mut PgConnection) -> Result i64 { + const CRC_IEEE: crc::Crc = crc::Crc::::new(&crc::CRC_32_ISO_HDLC); // 0x3d32ad9e chosen by fair dice roll - 0x3d32ad9e * (crc32::checksum_ieee(database_name.as_bytes()) as i64) + 0x3d32ad9e * (CRC_IEEE.checksum(database_name.as_bytes()) as i64) } From be189bd11e6bdd14c45c70bdad477e780a82b050 Mon Sep 17 00:00:00 2001 From: nomick Date: Tue, 20 Jul 2021 01:55:53 +0200 Subject: [PATCH 11/28] 
Support MACADDR in Postgres (#1329) --- Cargo.lock | 25 ++++++++ Cargo.toml | 2 + sqlx-core/Cargo.toml | 2 + sqlx-core/src/postgres/type_info.rs | 2 + sqlx-core/src/postgres/types/mac_address.rs | 63 +++++++++++++++++++++ sqlx-core/src/postgres/types/mod.rs | 11 ++++ sqlx-core/src/types/mod.rs | 7 +++ sqlx-macros/Cargo.toml | 1 + sqlx-macros/src/database/postgres.rs | 6 ++ tests/postgres/types.rs | 17 ++++++ 10 files changed, 136 insertions(+) create mode 100644 sqlx-core/src/postgres/types/mac_address.rs diff --git a/Cargo.lock b/Cargo.lock index f8a42fa104..4f165142eb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,5 +1,7 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. +version = 3 + [[package]] name = "actix-rt" version = "2.2.0" @@ -1270,6 +1272,16 @@ dependencies = [ "value-bag", ] +[[package]] +name = "mac_address" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d9bb26482176bddeea173ceaa2acec85146d20cdcc631eafaf9d605d3d4fc23" +dependencies = [ + "nix", + "winapi", +] + [[package]] name = "maplit" version = "1.0.2" @@ -1375,6 +1387,18 @@ dependencies = [ "tempfile", ] +[[package]] +name = "nix" +version = "0.19.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2ccba0cfe4fdf15982d1674c69b1fd80bad427d293849982668dfe454bd61f2" +dependencies = [ + "bitflags", + "cc", + "cfg-if 1.0.0", + "libc", +] + [[package]] name = "nom" version = "6.1.2" @@ -2343,6 +2367,7 @@ dependencies = [ "libc", "libsqlite3-sys", "log", + "mac_address", "md-5", "memchr", "num-bigint 0.3.2", diff --git a/Cargo.toml b/Cargo.toml index 2861b2024e..8db3a7c021 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -59,6 +59,7 @@ all-types = [ "time", "chrono", "ipnetwork", + "mac_address", "uuid", "bit-vec", "bstr", @@ -121,6 +122,7 @@ bigdecimal = ["sqlx-core/bigdecimal", "sqlx-macros/bigdecimal"] decimal = ["sqlx-core/decimal", "sqlx-macros/decimal"] chrono = 
["sqlx-core/chrono", "sqlx-macros/chrono"] ipnetwork = ["sqlx-core/ipnetwork", "sqlx-macros/ipnetwork"] +mac_address = ["sqlx-core/mac_address", "sqlx-macros/mac_address"] uuid = ["sqlx-core/uuid", "sqlx-macros/uuid"] json = ["sqlx-core/json", "sqlx-macros/json"] time = ["sqlx-core/time", "sqlx-macros/time"] diff --git a/sqlx-core/Cargo.toml b/sqlx-core/Cargo.toml index 654f9b152f..9d2ee65d8e 100644 --- a/sqlx-core/Cargo.toml +++ b/sqlx-core/Cargo.toml @@ -54,6 +54,7 @@ all-types = [ "bigdecimal", "decimal", "ipnetwork", + "mac_address", "json", "uuid", "bit-vec", @@ -125,6 +126,7 @@ hex = "0.4.2" hmac = { version = "0.10.1", default-features = false, optional = true } itoa = "0.4.5" ipnetwork = { version = "0.17.0", default-features = false, optional = true } +mac_address = { version = "1.1", default-features = false, optional = true } libc = "0.2.71" libsqlite3-sys = { version = "0.22.0", optional = true, default-features = false, features = [ "pkg-config", diff --git a/sqlx-core/src/postgres/type_info.rs b/sqlx-core/src/postgres/type_info.rs index 6f85364a85..b5e5f98ea7 100644 --- a/sqlx-core/src/postgres/type_info.rs +++ b/sqlx-core/src/postgres/type_info.rs @@ -198,6 +198,8 @@ impl PgTypeInfo { .contains(self) { Some("ipnetwork") + } else if [PgTypeInfo::MACADDR].contains(self) { + Some("mac_address") } else if [PgTypeInfo::NUMERIC, PgTypeInfo::NUMERIC_ARRAY].contains(self) { Some("bigdecimal") } else { diff --git a/sqlx-core/src/postgres/types/mac_address.rs b/sqlx-core/src/postgres/types/mac_address.rs new file mode 100644 index 0000000000..37bd543217 --- /dev/null +++ b/sqlx-core/src/postgres/types/mac_address.rs @@ -0,0 +1,63 @@ +use mac_address::MacAddress; + +use std::convert::TryInto; + +use crate::decode::Decode; +use crate::encode::{Encode, IsNull}; +use crate::error::BoxDynError; +use crate::postgres::{PgArgumentBuffer, PgTypeInfo, PgValueFormat, PgValueRef, Postgres}; +use crate::types::Type; + +impl Type for MacAddress { + fn type_info() -> 
PgTypeInfo { + PgTypeInfo::MACADDR + } + + fn compatible(ty: &PgTypeInfo) -> bool { + *ty == PgTypeInfo::MACADDR + } +} + +impl Type for [MacAddress] { + fn type_info() -> PgTypeInfo { + PgTypeInfo::MACADDR_ARRAY + } +} + +impl Type for Vec { + fn type_info() -> PgTypeInfo { + <[MacAddress] as Type>::type_info() + } + + fn compatible(ty: &PgTypeInfo) -> bool { + <[MacAddress] as Type>::compatible(ty) + } +} + +impl Encode<'_, Postgres> for MacAddress { + fn encode_by_ref(&self, buf: &mut PgArgumentBuffer) -> IsNull { + buf.extend_from_slice(&self.bytes()); // write just the address + IsNull::No + } + + fn size_hint(&self) -> usize { + 6 + } +} + +impl Decode<'_, Postgres> for MacAddress { + fn decode(value: PgValueRef<'_>) -> Result { + let bytes = match value.format() { + PgValueFormat::Binary => value.as_bytes()?, + PgValueFormat::Text => { + return Ok(value.as_str()?.parse()?); + } + }; + + if bytes.len() == 6 { + return Ok(MacAddress::new(bytes.try_into().unwrap())); + } + + Err("invalid data received when expecting an MACADDR".into()) + } +} diff --git a/sqlx-core/src/postgres/types/mod.rs b/sqlx-core/src/postgres/types/mod.rs index 3827d9dc6e..066a8c2309 100644 --- a/sqlx-core/src/postgres/types/mod.rs +++ b/sqlx-core/src/postgres/types/mod.rs @@ -73,6 +73,14 @@ //! |---------------------------------------|------------------------------------------------------| //! | `ipnetwork::IpNetwork` | INET, CIDR | //! +//! ### [`mac_address`](https://crates.io/crates/mac_address) +//! +//! Requires the `mac_address` Cargo feature flag. +//! +//! | Rust type | Postgres type(s) | +//! |---------------------------------------|------------------------------------------------------| +//! | `mac_address::MacAddress` | MACADDR | +//! //! ### [`bit-vec`](https://crates.io/crates/bit-vec) //! //! Requires the `bit-vec` Cargo feature flag. 
@@ -194,6 +202,9 @@ mod json; #[cfg(feature = "ipnetwork")] mod ipnetwork; +#[cfg(feature = "mac_address")] +mod mac_address; + #[cfg(feature = "bit-vec")] mod bit_vec; diff --git a/sqlx-core/src/types/mod.rs b/sqlx-core/src/types/mod.rs index 600daf0fdd..2bf4e3b5d2 100644 --- a/sqlx-core/src/types/mod.rs +++ b/sqlx-core/src/types/mod.rs @@ -75,6 +75,13 @@ pub mod ipnetwork { pub use ipnetwork::{IpNetwork, Ipv4Network, Ipv6Network}; } +#[cfg(feature = "mac_address")] +#[cfg_attr(docsrs, doc(cfg(feature = "mac_address")))] +pub mod mac_address { + #[doc(no_inline)] + pub use mac_address::MacAddress; +} + #[cfg(feature = "json")] pub use json::Json; diff --git a/sqlx-macros/Cargo.toml b/sqlx-macros/Cargo.toml index b76185ed53..014f3a5611 100644 --- a/sqlx-macros/Cargo.toml +++ b/sqlx-macros/Cargo.toml @@ -72,6 +72,7 @@ decimal = ["sqlx-core/decimal"] chrono = ["sqlx-core/chrono"] time = ["sqlx-core/time"] ipnetwork = ["sqlx-core/ipnetwork"] +mac_address = ["sqlx-core/mac_address"] uuid = ["sqlx-core/uuid"] bit-vec = ["sqlx-core/bit-vec"] json = ["sqlx-core/json", "serde_json"] diff --git a/sqlx-macros/src/database/postgres.rs b/sqlx-macros/src/database/postgres.rs index 05f0a88bd6..5330bb3cd9 100644 --- a/sqlx-macros/src/database/postgres.rs +++ b/sqlx-macros/src/database/postgres.rs @@ -60,6 +60,9 @@ impl_database_ext! { #[cfg(feature = "ipnetwork")] sqlx::types::ipnetwork::IpNetwork, + #[cfg(feature = "mac_address")] + sqlx::types::mac_address::MacAddress, + #[cfg(feature = "json")] serde_json::Value, @@ -113,6 +116,9 @@ impl_database_ext! 
{ #[cfg(feature = "ipnetwork")] Vec | &[sqlx::types::ipnetwork::IpNetwork], + #[cfg(feature = "mac_address")] + Vec | &[sqlx::types::mac_address::MacAddress], + #[cfg(feature = "json")] Vec | &[serde_json::Value], diff --git a/tests/postgres/types.rs b/tests/postgres/types.rs index a0aa64eb69..5ae5bd217e 100644 --- a/tests/postgres/types.rs +++ b/tests/postgres/types.rs @@ -167,6 +167,14 @@ test_type!(ipnetwork(Postgres, .unwrap(), )); +#[cfg(feature = "mac_address")] +test_type!(mac_address(Postgres, + "'00:01:02:03:04:05'::macaddr" + == "00:01:02:03:04:05" + .parse::() + .unwrap() +)); + #[cfg(feature = "bit-vec")] test_type!(bitvec( Postgres, @@ -201,6 +209,15 @@ test_type!(ipnetwork_vec>(Postgres, ] )); +#[cfg(feature = "mac_address")] +test_type!(mac_address_vec>(Postgres, + "'{01:02:03:04:05:06,FF:FF:FF:FF:FF:FF}'::inet[]" + == vec![ + "01:02:03:04:05:06".parse::().unwrap(), + "FF:FF:FF:FF:FF:FF".parse::().unwrap() + ] +)); + #[cfg(feature = "chrono")] mod chrono { use super::*; From 8bcac0394f021069fac8e2ed9bea31279655b506 Mon Sep 17 00:00:00 2001 From: Austin Bonander Date: Wed, 21 Jul 2021 16:27:00 -0700 Subject: [PATCH 12/28] fix(mysql): implement type traits for `chrono::DateTime` (#1335) closes #1222 --- sqlx-core/src/mysql/types/chrono.rs | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/sqlx-core/src/mysql/types/chrono.rs b/sqlx-core/src/mysql/types/chrono.rs index 5a261804bf..76e8b2985d 100644 --- a/sqlx-core/src/mysql/types/chrono.rs +++ b/sqlx-core/src/mysql/types/chrono.rs @@ -1,7 +1,7 @@ use std::convert::TryFrom; use bytes::Buf; -use chrono::{DateTime, Datelike, NaiveDate, NaiveDateTime, NaiveTime, Timelike, Utc}; +use chrono::{DateTime, Datelike, Local, NaiveDate, NaiveDateTime, NaiveTime, Timelike, Utc}; use crate::decode::Decode; use crate::encode::{Encode, IsNull}; @@ -21,12 +21,14 @@ impl Type for DateTime { } } +/// Note: assumes the connection's `time_zone` is set to `+00:00` (UTC). 
impl Encode<'_, MySql> for DateTime { fn encode_by_ref(&self, buf: &mut Vec) -> IsNull { Encode::::encode(&self.naive_utc(), buf) } } +/// Note: assumes the connection's `time_zone` is set to `+00:00` (UTC). impl<'r> Decode<'r, MySql> for DateTime { fn decode(value: MySqlValueRef<'r>) -> Result { let naive: NaiveDateTime = Decode::::decode(value)?; @@ -35,6 +37,30 @@ impl<'r> Decode<'r, MySql> for DateTime { } } +impl Type for DateTime { + fn type_info() -> MySqlTypeInfo { + MySqlTypeInfo::binary(ColumnType::Timestamp) + } + + fn compatible(ty: &MySqlTypeInfo) -> bool { + matches!(ty.r#type, ColumnType::Datetime | ColumnType::Timestamp) + } +} + +/// Note: assumes the connection's `time_zone` is set to `+00:00` (UTC). +impl Encode<'_, MySql> for DateTime { + fn encode_by_ref(&self, buf: &mut Vec) -> IsNull { + Encode::::encode(&self.naive_utc(), buf) + } +} + +/// Note: assumes the connection's `time_zone` is set to `+00:00` (UTC). +impl<'r> Decode<'r, MySql> for DateTime { + fn decode(value: MySqlValueRef<'r>) -> Result { + Ok( as Decode<'r, MySql>>::decode(value)?.with_timezone(&Local)) + } +} + impl Type for NaiveTime { fn type_info() -> MySqlTypeInfo { MySqlTypeInfo::binary(ColumnType::Time) From cb3ff287210edad62f32dc362bf33e335711701e Mon Sep 17 00:00:00 2001 From: marshoepial <54693067+marshoepial@users.noreply.github.com> Date: Wed, 21 Jul 2021 19:27:47 -0400 Subject: [PATCH 13/28] Keep track of column typing in SQLite EXPLAIN parsing (#1323) * NewRowid, Column opcodes, better pointer handling * Implement tracking of column typing on sqlite explain parser * fmt for sqlite column typing for explain parsing Co-authored-by: marshoepial --- sqlx-core/src/sqlite/connection/explain.rs | 116 +++++++++++++++++---- tests/sqlite/describe.rs | 15 +++ 2 files changed, 112 insertions(+), 19 deletions(-) diff --git a/sqlx-core/src/sqlite/connection/explain.rs b/sqlx-core/src/sqlite/connection/explain.rs index 3797a3d0bb..14df95e6ac 100644 --- 
a/sqlx-core/src/sqlite/connection/explain.rs +++ b/sqlx-core/src/sqlite/connection/explain.rs @@ -17,6 +17,13 @@ const SQLITE_AFF_REAL: u8 = 0x45; /* 'E' */ const OP_INIT: &str = "Init"; const OP_GOTO: &str = "Goto"; const OP_COLUMN: &str = "Column"; +const OP_MAKE_RECORD: &str = "MakeRecord"; +const OP_INSERT: &str = "Insert"; +const OP_IDX_INSERT: &str = "IdxInsert"; +const OP_OPEN_READ: &str = "OpenRead"; +const OP_OPEN_WRITE: &str = "OpenWrite"; +const OP_OPEN_EPHEMERAL: &str = "OpenEphemeral"; +const OP_OPEN_AUTOINDEX: &str = "OpenAutoindex"; const OP_AGG_STEP: &str = "AggStep"; const OP_FUNCTION: &str = "Function"; const OP_MOVE: &str = "Move"; @@ -34,6 +41,7 @@ const OP_BLOB: &str = "Blob"; const OP_VARIABLE: &str = "Variable"; const OP_COUNT: &str = "Count"; const OP_ROWID: &str = "Rowid"; +const OP_NEWROWID: &str = "NewRowid"; const OP_OR: &str = "Or"; const OP_AND: &str = "And"; const OP_BIT_AND: &str = "BitAnd"; @@ -48,6 +56,21 @@ const OP_REMAINDER: &str = "Remainder"; const OP_CONCAT: &str = "Concat"; const OP_RESULT_ROW: &str = "ResultRow"; +#[derive(Debug, Clone, Eq, PartialEq)] +enum RegDataType { + Single(DataType), + Record(Vec), +} + +impl RegDataType { + fn map_to_datatype(self) -> DataType { + match self { + RegDataType::Single(d) => d, + RegDataType::Record(_) => DataType::Null, //If we're trying to coerce to a regular Datatype, we can assume a Record is invalid for the context + } + } +} + #[allow(clippy::wildcard_in_or_patterns)] fn affinity_to_type(affinity: u8) -> DataType { match affinity { @@ -73,13 +96,19 @@ fn opcode_to_type(op: &str) -> DataType { } } +// Opcode Reference: https://sqlite.org/opcode.html pub(super) async fn explain( conn: &mut SqliteConnection, query: &str, ) -> Result<(Vec, Vec>), Error> { - let mut r = HashMap::::with_capacity(6); + // Registers + let mut r = HashMap::::with_capacity(6); + // Map between pointer and register let mut r_cursor = HashMap::>::with_capacity(6); + // Rows that pointers point to + let mut p 
= HashMap::>::with_capacity(6); + // Nullable columns let mut n = HashMap::::with_capacity(6); let program = @@ -119,15 +148,52 @@ pub(super) async fn explain( } OP_COLUMN => { - r_cursor.entry(p1).or_default().push(p3); + //Get the row stored at p1, or NULL; get the column stored at p2, or NULL + if let Some(record) = p.get(&p1) { + if let Some(col) = record.get(&p2) { + // insert into p3 the datatype of the col + r.insert(p3, RegDataType::Single(*col)); + // map between pointer p1 and register p3 + r_cursor.entry(p1).or_default().push(p3); + } else { + r.insert(p3, RegDataType::Single(DataType::Null)); + } + } else { + r.insert(p3, RegDataType::Single(DataType::Null)); + } + } + + OP_MAKE_RECORD => { + // p3 = Record([p1 .. p1 + p2]) + let mut record = Vec::with_capacity(p2 as usize); + for reg in p1..p1 + p2 { + record.push( + r.get(®) + .map(|d| d.clone().map_to_datatype()) + .unwrap_or(DataType::Null), + ); + } + r.insert(p3, RegDataType::Record(record)); + } + + OP_INSERT | OP_IDX_INSERT => { + if let Some(RegDataType::Record(record)) = r.get(&p2) { + if let Some(row) = p.get_mut(&p1) { + // Insert the record into wherever pointer p1 is + *row = (0..).zip(record.iter().copied()).collect(); + } + } + //Noop if the register p2 isn't a record, or if pointer p1 does not exist + } - // r[p3] = - r.insert(p3, DataType::Null); + OP_OPEN_READ | OP_OPEN_WRITE | OP_OPEN_EPHEMERAL | OP_OPEN_AUTOINDEX => { + //Create a new pointer which is referenced by p1 + p.insert(p1, HashMap::with_capacity(6)); } OP_VARIABLE => { // r[p2] = - r.insert(p2, DataType::Null); + r.insert(p2, RegDataType::Single(DataType::Null)); n.insert(p3, true); } @@ -136,7 +202,7 @@ pub(super) async fn explain( match from_utf8(p4).map_err(Error::protocol)? 
{ "last_insert_rowid(0)" => { // last_insert_rowid() -> INTEGER - r.insert(p3, DataType::Int64); + r.insert(p3, RegDataType::Single(DataType::Int64)); n.insert(p3, n.get(&p3).copied().unwrap_or(false)); } @@ -145,9 +211,9 @@ pub(super) async fn explain( } OP_NULL_ROW => { - // all values of cursor X are potentially nullable - for column in &r_cursor[&p1] { - n.insert(*column, true); + // all registers that map to cursor X are potentially nullable + for register in &r_cursor[&p1] { + n.insert(*register, true); } } @@ -156,9 +222,9 @@ pub(super) async fn explain( if p4.starts_with("count(") { // count(_) -> INTEGER - r.insert(p3, DataType::Int64); + r.insert(p3, RegDataType::Single(DataType::Int64)); n.insert(p3, n.get(&p3).copied().unwrap_or(false)); - } else if let Some(v) = r.get(&p2).copied() { + } else if let Some(v) = r.get(&p2).cloned() { // r[p3] = AGG ( r[p2] ) r.insert(p3, v); let val = n.get(&p2).copied().unwrap_or(true); @@ -169,13 +235,13 @@ pub(super) async fn explain( OP_CAST => { // affinity(r[p1]) if let Some(v) = r.get_mut(&p1) { - *v = affinity_to_type(p2 as u8); + *v = RegDataType::Single(affinity_to_type(p2 as u8)); } } OP_COPY | OP_MOVE | OP_SCOPY | OP_INT_COPY => { // r[p2] = r[p1] - if let Some(v) = r.get(&p1).copied() { + if let Some(v) = r.get(&p1).cloned() { r.insert(p2, v); if let Some(null) = n.get(&p1).copied() { @@ -184,15 +250,16 @@ pub(super) async fn explain( } } - OP_OR | OP_AND | OP_BLOB | OP_COUNT | OP_REAL | OP_STRING8 | OP_INTEGER | OP_ROWID => { + OP_OR | OP_AND | OP_BLOB | OP_COUNT | OP_REAL | OP_STRING8 | OP_INTEGER | OP_ROWID + | OP_NEWROWID => { // r[p2] = - r.insert(p2, opcode_to_type(&opcode)); + r.insert(p2, RegDataType::Single(opcode_to_type(&opcode))); n.insert(p2, n.get(&p2).copied().unwrap_or(false)); } OP_NOT => { // r[p2] = NOT r[p1] - if let Some(a) = r.get(&p1).copied() { + if let Some(a) = r.get(&p1).cloned() { r.insert(p2, a); let val = n.get(&p1).copied().unwrap_or(true); n.insert(p2, val); @@ -202,9 +269,16 
@@ pub(super) async fn explain( OP_BIT_AND | OP_BIT_OR | OP_SHIFT_LEFT | OP_SHIFT_RIGHT | OP_ADD | OP_SUBTRACT | OP_MULTIPLY | OP_DIVIDE | OP_REMAINDER | OP_CONCAT => { // r[p3] = r[p1] + r[p2] - match (r.get(&p1).copied(), r.get(&p2).copied()) { + match (r.get(&p1).cloned(), r.get(&p2).cloned()) { (Some(a), Some(b)) => { - r.insert(p3, if matches!(a, DataType::Null) { b } else { a }); + r.insert( + p3, + if matches!(a, RegDataType::Single(DataType::Null)) { + b + } else { + a + }, + ); } (Some(v), None) => { @@ -252,7 +326,11 @@ pub(super) async fn explain( if let Some(result) = result { for i in result { - output.push(SqliteTypeInfo(r.remove(&i).unwrap_or(DataType::Null))); + output.push(SqliteTypeInfo( + r.remove(&i) + .map(|d| d.map_to_datatype()) + .unwrap_or(DataType::Null), + )); nullable.push(n.remove(&i)); } } diff --git a/tests/sqlite/describe.rs b/tests/sqlite/describe.rs index 02d935a1bd..90d59284ea 100644 --- a/tests/sqlite/describe.rs +++ b/tests/sqlite/describe.rs @@ -171,6 +171,21 @@ async fn it_describes_insert_with_read_only() -> anyhow::Result<()> { Ok(()) } +#[sqlx_macros::test] +async fn it_describes_insert_with_returning() -> anyhow::Result<()> { + let mut conn = new::().await?; + + let d = conn + .describe("INSERT INTO tweet (id, text) VALUES (2, 'Hello') RETURNING *") + .await?; + + assert_eq!(d.columns().len(), 4); + assert_eq!(d.column(0).type_info().name(), "INTEGER"); + assert_eq!(d.column(1).type_info().name(), "TEXT"); + + Ok(()) +} + #[sqlx_macros::test] async fn it_describes_bad_statement() -> anyhow::Result<()> { let mut conn = new::().await?; From 531740550fc6443657740be9902b2ae088fed816 Mon Sep 17 00:00:00 2001 From: Austin Bonander Date: Wed, 21 Jul 2021 16:28:44 -0700 Subject: [PATCH 14/28] fix(pool): reimplement pool internals with `futures-intrusive` (#1320) --- Cargo.lock | 12 ++ sqlx-core/Cargo.toml | 1 + sqlx-core/src/pool/connection.rs | 42 ++-- sqlx-core/src/pool/inner.rs | 348 +++++++++++++------------------ 
sqlx-core/src/pool/mod.rs | 4 +- sqlx-core/src/pool/options.rs | 12 +- tests/postgres/postgres.rs | 15 +- 7 files changed, 201 insertions(+), 233 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4f165142eb..817f5ffdfe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -883,6 +883,17 @@ dependencies = [ "futures-util", ] +[[package]] +name = "futures-intrusive" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62007592ac46aa7c2b6416f7deb9a8a8f63a01e0f1d6e1787d5630170db2b63e" +dependencies = [ + "futures-core", + "lock_api", + "parking_lot", +] + [[package]] name = "futures-io" version = "0.3.15" @@ -2356,6 +2367,7 @@ dependencies = [ "encoding_rs", "futures-channel", "futures-core", + "futures-intrusive", "futures-util", "generic-array", "git2", diff --git a/sqlx-core/Cargo.toml b/sqlx-core/Cargo.toml index 9d2ee65d8e..468a77a528 100644 --- a/sqlx-core/Cargo.toml +++ b/sqlx-core/Cargo.toml @@ -120,6 +120,7 @@ encoding_rs = { version = "0.8.23", optional = true } either = "1.5.3" futures-channel = { version = "0.3.5", default-features = false, features = ["sink", "alloc", "std"] } futures-core = { version = "0.3.5", default-features = false } +futures-intrusive = "0.4.0" futures-util = { version = "0.3.5", features = ["sink"] } generic-array = { version = "0.14.4", default-features = false, optional = true } hex = "0.4.2" diff --git a/sqlx-core/src/pool/connection.rs b/sqlx-core/src/pool/connection.rs index 732c1a8c92..415fd0c1ec 100644 --- a/sqlx-core/src/pool/connection.rs +++ b/sqlx-core/src/pool/connection.rs @@ -1,13 +1,16 @@ -use super::inner::{DecrementSizeGuard, SharedPool}; -use crate::connection::Connection; -use crate::database::Database; -use crate::error::Error; -use sqlx_rt::spawn; use std::fmt::{self, Debug, Formatter}; use std::ops::{Deref, DerefMut}; use std::sync::Arc; use std::time::Instant; +use futures_intrusive::sync::SemaphoreReleaser; + +use crate::connection::Connection; +use 
crate::database::Database; +use crate::error::Error; + +use super::inner::{DecrementSizeGuard, SharedPool}; + /// A connection managed by a [`Pool`][crate::pool::Pool]. /// /// Will be returned to the pool on-drop. @@ -28,8 +31,8 @@ pub(super) struct Idle { /// RAII wrapper for connections being handled by functions that may drop them pub(super) struct Floating<'p, C> { - inner: C, - guard: DecrementSizeGuard<'p>, + pub(super) inner: C, + pub(super) guard: DecrementSizeGuard<'p>, } const DEREF_ERR: &str = "(bug) connection already released to pool"; @@ -71,7 +74,7 @@ impl Drop for PoolConnection { fn drop(&mut self) { if let Some(live) = self.live.take() { let pool = self.pool.clone(); - spawn(async move { + sqlx_rt::spawn(async move { let mut floating = live.float(&pool); // test the connection on-release to ensure it is still viable @@ -102,7 +105,8 @@ impl Live { pub fn float(self, pool: &SharedPool) -> Floating<'_, Self> { Floating { inner: self, - guard: DecrementSizeGuard::new(pool), + // create a new guard from a previously leaked permit + guard: DecrementSizeGuard::new_permit(pool), } } @@ -161,6 +165,11 @@ impl<'s, DB: Database> Floating<'s, Live> { } } + pub async fn close(self) -> Result<(), Error> { + // `guard` is dropped as intended + self.inner.raw.close().await + } + pub fn detach(self) -> DB::Connection { self.inner.raw } @@ -174,10 +183,14 @@ impl<'s, DB: Database> Floating<'s, Live> { } impl<'s, DB: Database> Floating<'s, Idle> { - pub fn from_idle(idle: Idle, pool: &'s SharedPool) -> Self { + pub fn from_idle( + idle: Idle, + pool: &'s SharedPool, + permit: SemaphoreReleaser<'s>, + ) -> Self { Self { inner: idle, - guard: DecrementSizeGuard::new(pool), + guard: DecrementSizeGuard::from_permit(pool, permit), } } @@ -192,9 +205,12 @@ impl<'s, DB: Database> Floating<'s, Idle> { } } - pub async fn close(self) -> Result<(), Error> { + pub async fn close(self) -> DecrementSizeGuard<'s> { // `guard` is dropped as intended - 
self.inner.live.raw.close().await + if let Err(e) = self.inner.live.raw.close().await { + log::debug!("error occurred while closing the pool connection: {}", e); + } + self.guard } } diff --git a/sqlx-core/src/pool/inner.rs b/sqlx-core/src/pool/inner.rs index f9e5df43b3..e297537e36 100644 --- a/sqlx-core/src/pool/inner.rs +++ b/sqlx-core/src/pool/inner.rs @@ -6,6 +6,7 @@ use crate::error::Error; use crate::pool::{deadline_as_timeout, PoolOptions}; use crossbeam_queue::{ArrayQueue, SegQueue}; use futures_core::task::{Poll, Waker}; +use futures_intrusive::sync::{Semaphore, SemaphoreReleaser}; use futures_util::future; use std::cmp; use std::mem; @@ -15,12 +16,16 @@ use std::sync::{Arc, Weak}; use std::task::Context; use std::time::{Duration, Instant}; -type Waiters = SegQueue>; +/// Ihe number of permits to release to wake all waiters, such as on `SharedPool::close()`. +/// +/// This should be large enough to realistically wake all tasks waiting on the pool without +/// potentially overflowing the permits count in the semaphore itself. 
+const WAKE_ALL_PERMITS: usize = usize::MAX / 2; pub(crate) struct SharedPool { pub(super) connect_options: ::Options, pub(super) idle_conns: ArrayQueue>, - waiters: Waiters, + pub(super) semaphore: Semaphore, pub(super) size: AtomicU32, is_closed: AtomicBool, pub(super) options: PoolOptions, @@ -31,10 +36,18 @@ impl SharedPool { options: PoolOptions, connect_options: ::Options, ) -> Arc { + let capacity = options.max_connections as usize; + + // ensure the permit count won't overflow if we release `WAKE_ALL_PERMITS` + // this assert should never fire on 64-bit targets as `max_connections` is a u32 + let _ = capacity + .checked_add(WAKE_ALL_PERMITS) + .expect("max_connections exceeds max capacity of the pool"); + let pool = Self { connect_options, - idle_conns: ArrayQueue::new(options.max_connections as usize), - waiters: SegQueue::new(), + idle_conns: ArrayQueue::new(capacity), + semaphore: Semaphore::new(options.fair, capacity), size: AtomicU32::new(0), is_closed: AtomicBool::new(false), options, @@ -61,148 +74,133 @@ impl SharedPool { } pub(super) async fn close(&self) { - self.is_closed.store(true, Ordering::Release); - while let Some(waker) = self.waiters.pop() { - if let Some(waker) = waker.upgrade() { - waker.wake(); - } + let already_closed = self.is_closed.swap(true, Ordering::AcqRel); + + if !already_closed { + // if we were the one to mark this closed, release enough permits to wake all waiters + // we can't just do `usize::MAX` because that would overflow + // and we can't do this more than once cause that would _also_ overflow + self.semaphore.release(WAKE_ALL_PERMITS); } - // ensure we wait until the pool is actually closed - while self.size() > 0 { - if let Some(idle) = self.idle_conns.pop() { - if let Err(e) = Floating::from_idle(idle, self).close().await { - log::warn!("error occurred while closing the pool connection: {}", e); - } - } + // wait for all permits to be released + let _permits = self + .semaphore + .acquire(WAKE_ALL_PERMITS + 
(self.options.max_connections as usize)) + .await; - // yield to avoid starving the executor - sqlx_rt::yield_now().await; + while let Some(idle) = self.idle_conns.pop() { + idle.live.float(self).close().await; } } #[inline] - pub(super) fn try_acquire(&self) -> Option>> { - // don't cut in line - if self.options.fair && !self.waiters.is_empty() { + pub(super) fn try_acquire(&self) -> Option>> { + if self.is_closed() { return None; } - Some(self.pop_idle()?.into_live()) + + let permit = self.semaphore.try_acquire(1)?; + self.pop_idle(permit).ok() } - fn pop_idle(&self) -> Option>> { - if self.is_closed.load(Ordering::Acquire) { - return None; + fn pop_idle<'a>( + &'a self, + permit: SemaphoreReleaser<'a>, + ) -> Result>, SemaphoreReleaser<'a>> { + if let Some(idle) = self.idle_conns.pop() { + Ok(Floating::from_idle(idle, self, permit)) + } else { + Err(permit) } - - Some(Floating::from_idle(self.idle_conns.pop()?, self)) } pub(super) fn release(&self, mut floating: Floating<'_, Live>) { if let Some(test) = &self.options.after_release { if !test(&mut floating.raw) { - // drop the connection and do not return to the pool + // drop the connection and do not return it to the pool return; } } - let is_ok = self - .idle_conns - .push(floating.into_idle().into_leakable()) - .is_ok(); + let Floating { inner: idle, guard } = floating.into_idle(); - if !is_ok { + if !self.idle_conns.push(idle).is_ok() { panic!("BUG: connection queue overflow in release()"); } - wake_one(&self.waiters); + // NOTE: we need to make sure we drop the permit *after* we push to the idle queue + // don't decrease the size + guard.release_permit(); } /// Try to atomically increment the pool size for a new connection. /// /// Returns `None` if we are at max_connections or if the pool is closed. 
- pub(super) fn try_increment_size(&self) -> Option> { - if self.is_closed() { - return None; + pub(super) fn try_increment_size<'a>( + &'a self, + permit: SemaphoreReleaser<'a>, + ) -> Result, SemaphoreReleaser<'a>> { + match self + .size + .fetch_update(Ordering::AcqRel, Ordering::Acquire, |size| { + size.checked_add(1) + .filter(|size| size <= &self.options.max_connections) + }) { + // we successfully incremented the size + Ok(_) => Ok(DecrementSizeGuard::from_permit(self, permit)), + // the pool is at max capacity + Err(_) => Err(permit), } - - let mut size = self.size(); - - while size < self.options.max_connections { - match self - .size - .compare_exchange(size, size + 1, Ordering::AcqRel, Ordering::Acquire) - { - Ok(_) => return Some(DecrementSizeGuard::new(self)), - Err(new_size) => size = new_size, - } - } - - None } #[allow(clippy::needless_lifetimes)] pub(super) async fn acquire<'s>(&'s self) -> Result>, Error> { - let start = Instant::now(); - let deadline = start + self.options.connect_timeout; - let mut waited = !self.options.fair; - - // the strong ref of the `Weak` that we push to the queue - // initialized during the `timeout()` call below - // as long as we own this, we keep our place in line - let mut waiter: Option> = None; - - // Unless the pool has been closed ... - while !self.is_closed() { - // Don't cut in line unless no one is waiting - if waited || self.waiters.is_empty() { - // Attempt to immediately acquire a connection. This will return Some - // if there is an idle connection in our channel. 
- if let Some(conn) = self.pop_idle() { - if let Some(live) = check_conn(conn, &self.options).await { - return Ok(live); - } - } + if self.is_closed() { + return Err(Error::PoolClosed); + } - // check if we can open a new connection - if let Some(guard) = self.try_increment_size() { - // pool has slots available; open a new connection - return self.connection(deadline, guard).await; - } - } + let deadline = Instant::now() + self.options.connect_timeout; - if let Some(ref waiter) = waiter { - // return the waiter to the queue, note that this does put it to the back - // of the queue when it should ideally stay at the front - self.waiters.push(Arc::downgrade(&waiter.inner)); - } + sqlx_rt::timeout( + self.options.connect_timeout, + async { + loop { + let permit = self.semaphore.acquire(1).await; - sqlx_rt::timeout( - // Returns an error if `deadline` passes - deadline_as_timeout::(deadline)?, - // `poll_fn` gets us easy access to a `Waker` that we can push to our queue - future::poll_fn(|cx| -> Poll<()> { - let waiter = waiter.get_or_insert_with(|| Waiter::push_new(cx, &self.waiters)); - - if waiter.is_woken() { - waiter.actually_woke = true; - Poll::Ready(()) - } else { - Poll::Pending + if self.is_closed() { + return Err(Error::PoolClosed); } - }), - ) - .await - .map_err(|_| Error::PoolTimedOut)?; - if let Some(ref mut waiter) = waiter { - waiter.reset(); + // First attempt to pop a connection from the idle queue. + let guard = match self.pop_idle(permit) { + + // Then, check that we can use it... + Ok(conn) => match check_conn(conn, &self.options).await { + + // All good! 
+ Ok(live) => return Ok(live), + + // if the connection isn't usable for one reason or another, + // we get the `DecrementSizeGuard` back to open a new one + Err(guard) => guard, + }, + Err(permit) => if let Ok(guard) = self.try_increment_size(permit) { + // we can open a new connection + guard + } else { + log::debug!("woke but was unable to acquire idle connection or open new one; retrying"); + continue; + } + }; + + // Attempt to connect... + return self.connection(deadline, guard).await; + } } - - waited = true; - } - - Err(Error::PoolClosed) + ) + .await + .map_err(|_| Error::PoolTimedOut)? } pub(super) async fn connection<'s>( @@ -277,14 +275,13 @@ fn is_beyond_idle(idle: &Idle, options: &PoolOptions) -> b async fn check_conn<'s: 'p, 'p, DB: Database>( mut conn: Floating<'s, Idle>, options: &'p PoolOptions, -) -> Option>> { +) -> Result>, DecrementSizeGuard<'s>> { // If the connection we pulled has expired, close the connection and // immediately create a new connection if is_beyond_lifetime(&conn, options) { // we're closing the connection either way // close the connection but don't really care about the result - let _ = conn.close().await; - return None; + return Err(conn.close().await); } else if options.test_before_acquire { // Check that the connection is still live if let Err(e) = conn.ping().await { @@ -293,18 +290,18 @@ async fn check_conn<'s: 'p, 'p, DB: Database>( // the error itself here isn't necessarily unexpected so WARN is too strong log::info!("ping on idle connection returned error: {}", e); // connection is broken so don't try to close nicely - return None; + return Err(conn.close().await); } } else if let Some(test) = &options.before_acquire { match test(&mut conn.live.raw).await { Ok(false) => { // connection was rejected by user-defined hook - return None; + return Err(conn.close().await); } Err(error) => { log::info!("in `before_acquire`: {}", error); - return None; + return Err(conn.close().await); } Ok(true) => {} @@ -312,7 +309,7 @@ 
async fn check_conn<'s: 'p, 'p, DB: Database>( } // No need to re-connect; connection is alive or we don't care - Some(conn.into_live()) + Ok(conn.into_live()) } /// if `max_lifetime` or `idle_timeout` is set, spawn a task that reaps senescent connections @@ -325,18 +322,16 @@ fn spawn_reaper(pool: &Arc>) { (None, None) => return, }; - let pool = Arc::clone(&pool); - - sqlx_rt::spawn(async move { - while !pool.is_closed() { - // only reap idle connections when no tasks are waiting - if pool.waiters.is_empty() { - do_reap(&pool).await; - } - - sqlx_rt::sleep(period).await; - } - }); + // let pool = Arc::clone(&pool); + // + // sqlx_rt::spawn(async move { + // while !pool.is_closed() { + // if !pool.idle_conns.is_empty() { + // do_reap(&pool).await; + // } + // sqlx_rt::sleep(period).await; + // } + // }); } async fn do_reap(pool: &SharedPool) { @@ -346,7 +341,7 @@ async fn do_reap(pool: &SharedPool) { // collect connections to reap let (reap, keep) = (0..max_reaped) // only connections waiting in the queue - .filter_map(|_| pool.pop_idle()) + .filter_map(|_| pool.try_acquire()) .partition::, _>(|conn| { is_beyond_idle(conn, &pool.options) || is_beyond_lifetime(conn, &pool.options) }); @@ -361,38 +356,44 @@ async fn do_reap(pool: &SharedPool) { } } -fn wake_one(waiters: &Waiters) { - while let Some(weak) = waiters.pop() { - if let Some(waiter) = weak.upgrade() { - if waiter.wake() { - return; - } - } - } -} - /// RAII guard returned by `Pool::try_increment_size()` and others. /// /// Will decrement the pool size if dropped, to avoid semantically "leaking" connections /// (where the pool thinks it has more connections than it does). pub(in crate::pool) struct DecrementSizeGuard<'a> { size: &'a AtomicU32, - waiters: &'a Waiters, + semaphore: &'a Semaphore, dropped: bool, } impl<'a> DecrementSizeGuard<'a> { - pub fn new(pool: &'a SharedPool) -> Self { + /// Create a new guard that will release a semaphore permit on-drop. 
+ pub fn new_permit(pool: &'a SharedPool) -> Self { Self { size: &pool.size, - waiters: &pool.waiters, + semaphore: &pool.semaphore, dropped: false, } } + pub fn from_permit( + pool: &'a SharedPool, + mut permit: SemaphoreReleaser<'a>, + ) -> Self { + // here we effectively take ownership of the permit + permit.disarm(); + Self::new_permit(pool) + } + /// Return `true` if the internal references point to the same fields in `SharedPool`. pub fn same_pool(&self, pool: &'a SharedPool) -> bool { - ptr::eq(self.size, &pool.size) && ptr::eq(self.waiters, &pool.waiters) + ptr::eq(self.size, &pool.size) + } + + /// Release the semaphore permit without decreasing the pool size. + fn release_permit(self) { + self.semaphore.release(1); + self.cancel(); } pub fn cancel(self) { @@ -405,73 +406,8 @@ impl Drop for DecrementSizeGuard<'_> { assert!(!self.dropped, "double-dropped!"); self.dropped = true; self.size.fetch_sub(1, Ordering::SeqCst); - wake_one(&self.waiters); - } -} - -struct WaiterInner { - woken: AtomicBool, - waker: Waker, -} - -impl WaiterInner { - /// Wake this waiter if it has not previously been woken. - /// - /// Return `true` if this waiter was newly woken, or `false` if it was already woken. 
- fn wake(&self) -> bool { - // if we were the thread to flip this boolean from false to true - if let Ok(_) = self - .woken - .compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire) - { - self.waker.wake_by_ref(); - return true; - } - false - } -} - -struct Waiter<'a> { - inner: Arc, - queue: &'a Waiters, - actually_woke: bool, -} - -impl<'a> Waiter<'a> { - fn push_new(cx: &mut Context<'_>, queue: &'a Waiters) -> Self { - let inner = Arc::new(WaiterInner { - woken: AtomicBool::new(false), - waker: cx.waker().clone(), - }); - - queue.push(Arc::downgrade(&inner)); - - Self { - inner, - queue, - actually_woke: false, - } - } - - fn is_woken(&self) -> bool { - self.inner.woken.load(Ordering::Acquire) - } - - fn reset(&mut self) { - self.inner - .woken - .compare_exchange(true, false, Ordering::AcqRel, Ordering::Acquire) - .ok(); - self.actually_woke = false; - } -} - -impl Drop for Waiter<'_> { - fn drop(&mut self) { - // if we didn't actually wake to get a connection, wake the next task instead - if self.is_woken() && !self.actually_woke { - wake_one(self.queue); - } + // and here we release the permit we got on construction + self.semaphore.release(1); } } diff --git a/sqlx-core/src/pool/mod.rs b/sqlx-core/src/pool/mod.rs index 10b9c17335..12313e1b34 100644 --- a/sqlx-core/src/pool/mod.rs +++ b/sqlx-core/src/pool/mod.rs @@ -256,7 +256,9 @@ impl Pool { /// /// Returns `None` immediately if there are no idle connections available in the pool. pub fn try_acquire(&self) -> Option> { - self.0.try_acquire().map(|conn| conn.attach(&self.0)) + self.0 + .try_acquire() + .map(|conn| conn.into_live().attach(&self.0)) } /// Retrieves a new connection and immediately begins a new transaction. 
diff --git a/sqlx-core/src/pool/options.rs b/sqlx-core/src/pool/options.rs index a1b07f3721..32313808ff 100644 --- a/sqlx-core/src/pool/options.rs +++ b/sqlx-core/src/pool/options.rs @@ -231,19 +231,13 @@ impl PoolOptions { async fn init_min_connections(pool: &SharedPool) -> Result<(), Error> { for _ in 0..cmp::max(pool.options.min_connections, 1) { let deadline = Instant::now() + pool.options.connect_timeout; + let permit = pool.semaphore.acquire(1).await; // this guard will prevent us from exceeding `max_size` - if let Some(guard) = pool.try_increment_size() { + if let Ok(guard) = pool.try_increment_size(permit) { // [connect] will raise an error when past deadline let conn = pool.connection(deadline, guard).await?; - let is_ok = pool - .idle_conns - .push(conn.into_idle().into_leakable()) - .is_ok(); - - if !is_ok { - panic!("BUG: connection queue overflow in init_min_connections"); - } + pool.release(conn); } } diff --git a/tests/postgres/postgres.rs b/tests/postgres/postgres.rs index 590f06b5c5..5688aded5f 100644 --- a/tests/postgres/postgres.rs +++ b/tests/postgres/postgres.rs @@ -519,14 +519,19 @@ async fn pool_smoke_test() -> anyhow::Result<()> { for i in 0..200 { let pool = pool.clone(); sqlx_rt::spawn(async move { - loop { + for j in 0.. 
{ if let Err(e) = sqlx::query("select 1 + 1").execute(&pool).await { // normal error at termination of the test - if !matches!(e, sqlx::Error::PoolClosed) { - eprintln!("pool task {} dying due to {}", i, e); - break; + if matches!(e, sqlx::Error::PoolClosed) { + eprintln!("pool task {} exiting normally after {} iterations", i, j); + } else { + eprintln!("pool task {} dying due to {} after {} iterations", i, e, j); } + break; } + + // shouldn't be necessary if the pool is fair + // sqlx_rt::yield_now().await; } }); } @@ -547,6 +552,8 @@ async fn pool_smoke_test() -> anyhow::Result<()> { }) .await; + // this one is necessary since this is a hot loop, + // otherwise this task will never be descheduled sqlx_rt::yield_now().await; } }); From a8544fd503f071b3d9882f4dc429d7deabbceecb Mon Sep 17 00:00:00 2001 From: Austin Bonander Date: Wed, 21 Jul 2021 16:29:20 -0700 Subject: [PATCH 15/28] fix(pg_money): handle negative values correctly in `PgMoney::from_decimal()` (#1334) closes #1321 --- sqlx-core/src/postgres/types/money.rs | 119 +++++++++++++++++++++----- 1 file changed, 96 insertions(+), 23 deletions(-) diff --git a/sqlx-core/src/postgres/types/money.rs b/sqlx-core/src/postgres/types/money.rs index 2ae47dcd63..9b9178bbe0 100644 --- a/sqlx-core/src/postgres/types/money.rs +++ b/sqlx-core/src/postgres/types/money.rs @@ -6,6 +6,7 @@ use crate::{ types::Type, }; use byteorder::{BigEndian, ByteOrder}; +use std::convert::TryFrom; use std::{ io, ops::{Add, AddAssign, Sub, SubAssign}, @@ -20,46 +21,100 @@ use std::{ /// /// Reading `MONEY` value in text format is not supported and will cause an error. /// +/// ### `locale_frac_digits` +/// This parameter corresponds to the number of digits after the decimal separator. +/// +/// This value must match what Postgres is expecting for the locale set in the database +/// or else the decimal value you see on the client side will not match the `money` value +/// on the server side. 
+/// +/// **For _most_ locales, this value is `2`.** +/// +/// If you're not sure what locale your database is set to or how many decimal digits it specifies, +/// you can execute `SHOW lc_monetary;` to get the locale name, and then look it up in this list +/// (you can ignore the `.utf8` prefix): +/// https://lh.2xlibre.net/values/frac_digits/ +/// +/// If that link is dead and you're on a POSIX-compliant system (Unix, FreeBSD) you can also execute: +/// +/// ```sh +/// $ LC_MONETARY= locale -k frac_digits +/// ``` +/// +/// And the value you want is `N` in `frac_digits=N`. If you have shell access to the database +/// server you should execute it there as available locales may differ between machines. +/// +/// Note that if `frac_digits` for the locale is outside the range `[0, 10]`, Postgres assumes +/// it's a sentinel value and defaults to 2: +/// https://github.com/postgres/postgres/blob/master/src/backend/utils/adt/cash.c#L114-L123 +/// /// [`MONEY`]: https://www.postgresql.org/docs/current/datatype-money.html #[derive(Debug, PartialEq, Eq, Clone, Copy)] -pub struct PgMoney(pub i64); +pub struct PgMoney( + /// The raw integer value sent over the wire; for locales with `frac_digits=2` (i.e. most + /// of them), this will be the value in whole cents. + /// + /// E.g. for `select '$123.45'::money` with a locale of `en_US` (`frac_digits=2`), + /// this will be `12345`. + /// + /// If the currency of your locale does not have fractional units, e.g. Yen, then this will + /// just be the units of the currency. + /// + /// See the type-level docs for an explanation of `locale_frac_units`. + pub i64, +); impl PgMoney { - /// Convert the money value into a [`BigDecimal`] using the correct - /// precision defined in the PostgreSQL settings. The default precision is - /// two. + /// Convert the money value into a [`BigDecimal`] using `locale_frac_digits`. + /// + /// See the type-level docs for an explanation of `locale_frac_digits`. 
/// /// [`BigDecimal`]: crate::types::BigDecimal #[cfg(feature = "bigdecimal")] - pub fn to_bigdecimal(self, scale: i64) -> bigdecimal::BigDecimal { + pub fn to_bigdecimal(self, locale_frac_digits: i64) -> bigdecimal::BigDecimal { let digits = num_bigint::BigInt::from(self.0); - bigdecimal::BigDecimal::new(digits, scale) + bigdecimal::BigDecimal::new(digits, locale_frac_digits) } - /// Convert the money value into a [`Decimal`] using the correct precision - /// defined in the PostgreSQL settings. The default precision is two. + /// Convert the money value into a [`Decimal`] using `locale_frac_digits`. + /// + /// See the type-level docs for an explanation of `locale_frac_digits`. /// /// [`Decimal`]: crate::types::Decimal #[cfg(feature = "decimal")] - pub fn to_decimal(self, scale: u32) -> rust_decimal::Decimal { - rust_decimal::Decimal::new(self.0, scale) + pub fn to_decimal(self, locale_frac_digits: u32) -> rust_decimal::Decimal { + rust_decimal::Decimal::new(self.0, locale_frac_digits) } - /// Convert a [`Decimal`] value into money using the correct precision - /// defined in the PostgreSQL settings. The default precision is two. + /// Convert a [`Decimal`] value into money using `locale_frac_digits`. /// - /// Conversion may involve a loss of precision. + /// See the type-level docs for an explanation of `locale_frac_digits`. + /// + /// Note that `Decimal` has 96 bits of precision, but `PgMoney` only has 63 plus the sign bit. + /// If the value is larger than 63 bits it will be truncated. 
/// /// [`Decimal`]: crate::types::Decimal #[cfg(feature = "decimal")] - pub fn from_decimal(decimal: rust_decimal::Decimal, scale: u32) -> Self { - let cents = (decimal * rust_decimal::Decimal::new(10i64.pow(scale), 0)).round(); + pub fn from_decimal(mut decimal: rust_decimal::Decimal, locale_frac_digits: u32) -> Self { + // this is all we need to convert to our expected locale's `frac_digits` + decimal.rescale(locale_frac_digits); + + /// a mask to bitwise-AND with an `i64` to zero the sign bit + const SIGN_MASK: i64 = i64::MAX; - let mut buf: [u8; 8] = [0; 8]; - buf.copy_from_slice(¢s.serialize()[4..12]); + let is_negative = decimal.is_sign_negative(); + let serialized = decimal.serialize(); - Self(i64::from_le_bytes(buf)) + // interpret bytes `4..12` as an i64, ignoring the sign bit + // this is where truncation occurs + let value = i64::from_le_bytes( + *<&[u8; 8]>::try_from(&serialized[4..12]) + .expect("BUG: slice of serialized should be 8 bytes"), + ) & SIGN_MASK; // zero out the sign bit + + // negate if necessary + Self(if is_negative { -value } else { value }) } /// Convert a [`BigDecimal`](crate::types::BigDecimal) value into money using the correct precision @@ -67,12 +122,14 @@ impl PgMoney { #[cfg(feature = "bigdecimal")] pub fn from_bigdecimal( decimal: bigdecimal::BigDecimal, - scale: u32, + locale_frac_digits: u32, ) -> Result { use bigdecimal::ToPrimitive; - let multiplier = - bigdecimal::BigDecimal::new(num_bigint::BigInt::from(10i128.pow(scale)), 0); + let multiplier = bigdecimal::BigDecimal::new( + num_bigint::BigInt::from(10i128.pow(locale_frac_digits)), + 0, + ); let cents = decimal * multiplier; @@ -277,9 +334,25 @@ mod tests { #[test] #[cfg(feature = "decimal")] fn conversion_from_decimal_works() { - let dec = rust_decimal::Decimal::new(12345, 2); + assert_eq!( + PgMoney(12345), + PgMoney::from_decimal(rust_decimal::Decimal::new(12345, 2), 2) + ); + + assert_eq!( + PgMoney(12345), + PgMoney::from_decimal(rust_decimal::Decimal::new(123450, 
3), 2) + ); - assert_eq!(PgMoney(12345), PgMoney::from_decimal(dec, 2)); + assert_eq!( + PgMoney(-12345), + PgMoney::from_decimal(rust_decimal::Decimal::new(-123450, 3), 2) + ); + + assert_eq!( + PgMoney(-12300), + PgMoney::from_decimal(rust_decimal::Decimal::new(-123, 0), 2) + ); } #[test] From e89cb0971ab9c9646bb9f6909e47a53e4c76e4c2 Mon Sep 17 00:00:00 2001 From: Austin Bonander Date: Wed, 21 Jul 2021 16:36:22 -0700 Subject: [PATCH 16/28] fix(macros): tell the compiler about external files/env vars to watch (#1332) * fix(macros): tell the compiler about external files/env vars to watch closes #663 closes #681 * feat(cli): add `migrate` subcommand for generating a build script suggest embedding migrations on `sqlx migrate add` in a new project --- sqlx-cli/src/lib.rs | 1 + sqlx-cli/src/migrate.rs | 57 ++++++++++++++++++++++++++++++ sqlx-cli/src/opt.rs | 9 +++++ sqlx-macros/src/lib.rs | 4 +++ sqlx-macros/src/migrate.rs | 36 ++++++++++++++++--- sqlx-macros/src/query/data.rs | 20 +++++++++-- sqlx-macros/src/query/input.rs | 30 ++++++++++++++-- sqlx-macros/src/query/mod.rs | 55 +++++++++++++++++++---------- sqlx-macros/src/query/output.rs | 11 ++++-- src/macros.rs | 62 +++++++++++++++++++++++++++++++++ tests/migrate/macro.rs | 2 ++ tests/postgres/macros.rs | 3 +- 12 files changed, 259 insertions(+), 31 deletions(-) diff --git a/sqlx-cli/src/lib.rs b/sqlx-cli/src/lib.rs index 5dd4aeefc6..8cee1cff57 100644 --- a/sqlx-cli/src/lib.rs +++ b/sqlx-cli/src/lib.rs @@ -36,6 +36,7 @@ pub async fn run(opt: Opt) -> anyhow::Result<()> { ignore_missing, } => migrate::revert(&migrate.source, &database_url, dry_run, ignore_missing).await?, MigrateCommand::Info => migrate::info(&migrate.source, &database_url).await?, + MigrateCommand::BuildScript { force } => migrate::build_script(&migrate.source, force)?, }, Command::Database(database) => match database.command { diff --git a/sqlx-cli/src/migrate.rs b/sqlx-cli/src/migrate.rs index 20d61f1985..523cf83fa4 100644 --- 
a/sqlx-cli/src/migrate.rs +++ b/sqlx-cli/src/migrate.rs @@ -42,6 +42,11 @@ pub async fn add( ) -> anyhow::Result<()> { fs::create_dir_all(migration_source).context("Unable to create migrations directory")?; + // if the migrations directory is empty + let has_existing_migrations = fs::read_dir(migration_source) + .map(|mut dir| dir.next().is_some()) + .unwrap_or(false); + let migrator = Migrator::new(Path::new(migration_source)).await?; // This checks if all existing migrations are of the same type as the reverisble flag passed for migration in migrator.iter() { @@ -74,6 +79,31 @@ pub async fn add( )?; } + if !has_existing_migrations { + let quoted_source = if migration_source != "migrations" { + format!("{:?}", migration_source) + } else { + "".to_string() + }; + + print!( + r#" +Congratulations on creating your first migration! + +Did you know you can embed your migrations in your application binary? +On startup, after creating your database connection or pool, add: + +sqlx::migrate!({}).run(<&your_pool OR &mut your_connection>).await?; + +Note that the compiler won't pick up new migrations if no Rust source files have changed. +You can create a Cargo build script to work around this with `sqlx migrate build-script`. 
+ +See: https://docs.rs/sqlx/0.5/sqlx/macro.migrate.html +"#, + quoted_source + ); + } + Ok(()) } @@ -245,3 +275,30 @@ pub async fn revert( Ok(()) } + +pub fn build_script(migration_source: &str, force: bool) -> anyhow::Result<()> { + anyhow::ensure!( + Path::new("Cargo.toml").exists(), + "must be run in a Cargo project root" + ); + + anyhow::ensure!( + (force || !Path::new("build.rs").exists()), + "build.rs already exists; use --force to overwrite" + ); + + let contents = format!( + r#"// generated by `sqlx migrate build-script` +fn main() {{ + // trigger recompilation when a new migration is added + println!("cargo:rerun-if-changed={}"); +}}"#, + migration_source + ); + + fs::write("build.rs", contents)?; + + println!("Created `build.rs`; be sure to check it into version control!"); + + Ok(()) +} diff --git a/sqlx-cli/src/opt.rs b/sqlx-cli/src/opt.rs index 8d912668bf..410fd9d707 100644 --- a/sqlx-cli/src/opt.rs +++ b/sqlx-cli/src/opt.rs @@ -130,4 +130,13 @@ pub enum MigrateCommand { /// List all available migrations. Info, + + /// Generate a `build.rs` to trigger recompilation when a new migration is added. + /// + /// Must be run in a Cargo project root. + BuildScript { + /// Overwrite the build script if it already exists. 
+ #[clap(long)] + force: bool, + }, } diff --git a/sqlx-macros/src/lib.rs b/sqlx-macros/src/lib.rs index 8956aa745b..c1f173e655 100644 --- a/sqlx-macros/src/lib.rs +++ b/sqlx-macros/src/lib.rs @@ -2,6 +2,10 @@ not(any(feature = "postgres", feature = "mysql", feature = "offline")), allow(dead_code, unused_macros, unused_imports) )] +#![cfg_attr( + any(sqlx_macros_unstable, procmacro2_semver_exempt), + feature(track_path, proc_macro_tracked_env) +)] extern crate proc_macro; use proc_macro::TokenStream; diff --git a/sqlx-macros/src/migrate.rs b/sqlx-macros/src/migrate.rs index f10bc22318..018ba1b41e 100644 --- a/sqlx-macros/src/migrate.rs +++ b/sqlx-macros/src/migrate.rs @@ -24,7 +24,7 @@ struct QuotedMigration { version: i64, description: String, migration_type: QuotedMigrationType, - sql: String, + path: String, checksum: Vec, } @@ -34,7 +34,7 @@ impl ToTokens for QuotedMigration { version, description, migration_type, - sql, + path, checksum, } = &self; @@ -43,7 +43,8 @@ impl ToTokens for QuotedMigration { version: #version, description: ::std::borrow::Cow::Borrowed(#description), migration_type: #migration_type, - sql: ::std::borrow::Cow::Borrowed(#sql), + // this tells the compiler to watch this path for changes + sql: ::std::borrow::Cow::Borrowed(include_str!(#path)), checksum: ::std::borrow::Cow::Borrowed(&[ #(#checksum),* ]), @@ -59,7 +60,7 @@ pub(crate) fn expand_migrator_from_dir(dir: LitStr) -> crate::Result crate::Result crate::Result, query: &str) -> crate::Result { - serde_json::Deserializer::from_reader(BufReader::new( + let this = serde_json::Deserializer::from_reader(BufReader::new( File::open(path.as_ref()).map_err(|e| { format!("failed to open path {}: {}", path.as_ref().display(), e) })?, @@ -69,8 +69,22 @@ pub mod offline { .deserialize_map(DataFileVisitor { query, hash: hash_string(query), - }) - .map_err(Into::into) + })?; + + #[cfg(procmacr2_semver_exempt)] + { + let path = path.as_ref().canonicalize()?; + let path = path.to_str().ok_or_else(|| 
{ + format!( + "sqlx-data.json path cannot be represented as a string: {:?}", + path + ) + })?; + + proc_macro::tracked_path::path(path); + } + + Ok(this) } } diff --git a/sqlx-macros/src/query/input.rs b/sqlx-macros/src/query/input.rs index 86627d60b1..ecfc3e643d 100644 --- a/sqlx-macros/src/query/input.rs +++ b/sqlx-macros/src/query/input.rs @@ -8,7 +8,7 @@ use syn::{ExprArray, Type}; /// Macro input shared by `query!()` and `query_file!()` pub struct QueryMacroInput { - pub(super) src: String, + pub(super) sql: String, #[cfg_attr(not(feature = "offline"), allow(dead_code))] pub(super) src_span: Span, @@ -18,6 +18,8 @@ pub struct QueryMacroInput { pub(super) arg_exprs: Vec, pub(super) checked: bool, + + pub(super) file_path: Option, } enum QuerySrc { @@ -94,12 +96,15 @@ impl Parse for QueryMacroInput { let arg_exprs = args.unwrap_or_default(); + let file_path = src.file_path(src_span)?; + Ok(QueryMacroInput { - src: src.resolve(src_span)?, + sql: src.resolve(src_span)?, src_span, record_type, arg_exprs, checked, + file_path, }) } } @@ -112,6 +117,27 @@ impl QuerySrc { QuerySrc::File(file) => read_file_src(&file, source_span), } } + + fn file_path(&self, source_span: Span) -> syn::Result> { + if let QuerySrc::File(ref file) = *self { + let path = std::path::Path::new(file) + .canonicalize() + .map_err(|e| syn::Error::new(source_span, e))?; + + Ok(Some( + path.to_str() + .ok_or_else(|| { + syn::Error::new( + source_span, + "query file path cannot be represented as a string", + ) + })? 
+ .to_string(), + )) + } else { + Ok(None) + } + } } fn read_file_src(source: &str, source_span: Span) -> syn::Result { diff --git a/sqlx-macros/src/query/mod.rs b/sqlx-macros/src/query/mod.rs index 91c706dc42..357c4f3524 100644 --- a/sqlx-macros/src/query/mod.rs +++ b/sqlx-macros/src/query/mod.rs @@ -23,6 +23,7 @@ mod input; mod output; struct Metadata { + #[allow(unused)] manifest_dir: PathBuf, offline: bool, database_url: Option, @@ -35,42 +36,47 @@ struct Metadata { // If we are in a workspace, lookup `workspace_root` since `CARGO_MANIFEST_DIR` won't // reflect the workspace dir: https://github.com/rust-lang/cargo/issues/3946 static METADATA: Lazy = Lazy::new(|| { - use std::env; - - let manifest_dir: PathBuf = env::var("CARGO_MANIFEST_DIR") + let manifest_dir: PathBuf = env("CARGO_MANIFEST_DIR") .expect("`CARGO_MANIFEST_DIR` must be set") .into(); #[cfg(feature = "offline")] - let target_dir = - env::var_os("CARGO_TARGET_DIR").map_or_else(|| "target".into(), |dir| dir.into()); + let target_dir = env("CARGO_TARGET_DIR").map_or_else(|_| "target".into(), |dir| dir.into()); // If a .env file exists at CARGO_MANIFEST_DIR, load environment variables from this, // otherwise fallback to default dotenv behaviour. let env_path = manifest_dir.join(".env"); - if env_path.exists() { + + #[cfg_attr(not(procmacro2_semver_exempt), allow(unused_variables))] + let env_path = if env_path.exists() { let res = dotenv::from_path(&env_path); if let Err(e) = res { panic!("failed to load environment from {:?}, {}", env_path, e); } + + Some(env_path) } else { - let _ = dotenv::dotenv(); + dotenv::dotenv().ok() + }; + + // tell the compiler to watch the `.env` for changes, if applicable + #[cfg(procmacro2_semver_exempt)] + if let Some(env_path) = env_path.as_ref().and_then(|path| path.to_str()) { + proc_macro::tracked_path::path(env_path); } - // TODO: Switch to `var_os` after feature(osstring_ascii) is stable. 
- // Stabilization PR: https://github.com/rust-lang/rust/pull/80193 - let offline = env::var("SQLX_OFFLINE") + let offline = env("SQLX_OFFLINE") .map(|s| s.eq_ignore_ascii_case("true") || s == "1") .unwrap_or(false); - let database_url = env::var("DATABASE_URL").ok(); + let database_url = env("DATABASE_URL").ok(); #[cfg(feature = "offline")] let workspace_root = { use serde::Deserialize; use std::process::Command; - let cargo = env::var_os("CARGO").expect("`CARGO` must be set"); + let cargo = env("CARGO").expect("`CARGO` must be set"); let output = Command::new(&cargo) .args(&["metadata", "--format-version=1"]) @@ -151,7 +157,7 @@ fn expand_from_db(input: QueryMacroInput, db_url: &str) -> crate::Result { let data = block_on(async { let mut conn = sqlx_core::postgres::PgConnection::connect(db_url.as_str()).await?; - QueryData::from_db(&mut conn, &input.src).await + QueryData::from_db(&mut conn, &input.sql).await })?; expand_with_data(input, data, false) @@ -164,7 +170,7 @@ fn expand_from_db(input: QueryMacroInput, db_url: &str) -> crate::Result { let data = block_on(async { let mut conn = sqlx_core::mssql::MssqlConnection::connect(db_url.as_str()).await?; - QueryData::from_db(&mut conn, &input.src).await + QueryData::from_db(&mut conn, &input.sql).await })?; expand_with_data(input, data, false) @@ -177,7 +183,7 @@ fn expand_from_db(input: QueryMacroInput, db_url: &str) -> crate::Result { let data = block_on(async { let mut conn = sqlx_core::mysql::MySqlConnection::connect(db_url.as_str()).await?; - QueryData::from_db(&mut conn, &input.src).await + QueryData::from_db(&mut conn, &input.sql).await })?; expand_with_data(input, data, false) @@ -190,7 +196,7 @@ fn expand_from_db(input: QueryMacroInput, db_url: &str) -> crate::Result { let data = block_on(async { let mut conn = sqlx_core::sqlite::SqliteConnection::connect(db_url.as_str()).await?; - QueryData::from_db(&mut conn, &input.src).await + QueryData::from_db(&mut conn, &input.sql).await })?; 
expand_with_data(input, data, false) @@ -207,7 +213,7 @@ fn expand_from_db(input: QueryMacroInput, db_url: &str) -> crate::Result crate::Result { use data::offline::DynQueryData; - let query_data = DynQueryData::from_data_file(file, &input.src)?; + let query_data = DynQueryData::from_data_file(file, &input.sql)?; assert!(!query_data.db_name.is_empty()); match &*query_data.db_name { @@ -288,7 +294,7 @@ where .all(|it| it.type_info().is_void()) { let db_path = DB::db_path(); - let sql = &input.src; + let sql = &input.sql; quote! { ::sqlx::query_with::<#db_path, _>(#sql, #query_args) @@ -368,3 +374,16 @@ where Ok(ret_tokens) } + +/// Get the value of an environment variable, telling the compiler about it if applicable. +fn env(name: &str) -> Result { + #[cfg(procmacro2_semver_exempt)] + { + proc_macro::tracked_env::var(name) + } + + #[cfg(not(procmacro2_semver_exempt))] + { + std::env::var(name) + } +} diff --git a/sqlx-macros/src/query/output.rs b/sqlx-macros/src/query/output.rs index e7b482e044..2938f8846d 100644 --- a/sqlx-macros/src/query/output.rs +++ b/sqlx-macros/src/query/output.rs @@ -157,7 +157,14 @@ pub fn quote_query_as( let db_path = DB::db_path(); let row_path = DB::row_path(); - let sql = &input.src; + + // if this query came from a file, use `include_str!()` to tell the compiler where it came from + let sql = if let Some(ref path) = &input.file_path { + quote::quote_spanned! { input.src_span => include_str!(#path) } + } else { + let sql = &input.sql; + quote! { #sql } + }; quote! { ::sqlx::query_with::<#db_path, _>(#sql, #bind_args).try_map(|row: #row_path| { @@ -200,7 +207,7 @@ pub fn quote_query_scalar( }; let db = DB::db_path(); - let query = &input.src; + let query = &input.sql; Ok(quote! { ::sqlx::query_scalar_with::<#db, #ty, _>(#query, #bind_args) diff --git a/src/macros.rs b/src/macros.rs index fdabc1e1ef..78264f3573 100644 --- a/src/macros.rs +++ b/src/macros.rs @@ -715,6 +715,68 @@ macro_rules! 
query_file_scalar_unchecked ( /// The directory must be relative to the project root (the directory containing `Cargo.toml`), /// unlike `include_str!()` which uses compiler internals to get the path of the file where it /// was invoked. +/// +/// ## Triggering Recompilation on Migration Changes +/// In some cases when making changes to embedded migrations, such as adding a new migration without +/// changing any Rust source files, you might find that `cargo build` doesn't actually do anything, +/// or when you do `cargo run` your application isn't applying new migrations on startup. +/// +/// This is because our ability to tell the compiler to watch external files for changes +/// from a proc-macro is very limited. The compiler by default only re-runs proc macros when +/// one ore more source files have changed, because normally it shouldn't have to otherwise. SQLx is +/// just weird in that external factors can change the output of proc macros, much to the chagrin of +/// the compiler team and IDE plugin authors. +/// +/// As of 0.5.6, we emit `include_str!()` with an absolute path for each migration, but that +/// only works to get the compiler to watch _existing_ migration files for changes. +/// +/// Our only options for telling it to watch the whole `migrations/` directory are either via the +/// user creating a Cargo build script in their project, or using an unstable API on nightly +/// governed by a `cfg`-flag. +/// +/// ##### Stable Rust: Cargo Build Script +/// The only solution on stable Rust right now is to create a Cargo build script in your project +/// and have it print `cargo:rerun-if-changed=migrations`: +/// +/// `build.rs` +/// ``` +/// fn main() { +/// println!("cargo:rerun-if-changed=migrations"); +/// } +/// ``` +/// +/// You can run `sqlx migrate build-script` to generate this file automatically. 
+/// +/// See: [The Cargo Book: 3.8 Build Scripts; Outputs of the Build Script](https://doc.rust-lang.org/stable/cargo/reference/build-scripts.html#outputs-of-the-build-script) +/// +/// #### Nightly Rust: `cfg` Flag +/// The `migrate!()` macro also listens to `--cfg sqlx_macros_unstable`, which will enable +/// the `track_path` feature to directly tell the compiler to watch the `migrations/` directory: +/// +/// ```sh,ignore +/// $ env RUSTFLAGS='--cfg sqlx_macros_unstable' cargo build +/// ``` +/// +/// Note that this unfortunately will trigger a fully recompile of your dependency tree, at least +/// for the first time you use it. It also, of course, requires using a nightly compiler. +/// +/// You can also set it in `build.rustflags` in `.cargo/config.toml`: +/// ```toml,ignore +/// [build] +/// rustflags = ["--cfg sqlx_macros_unstable"] +/// ``` +/// +/// And then continue building and running your project normally. +/// +/// If you're building on nightly anyways, it would be extremely helpful to help us test +/// this feature and find any bugs in it. +/// +/// Subscribe to [the `track_path` tracking issue](https://github.com/rust-lang/rust/issues/73921) +/// for discussion and the future stabilization of this feature. +/// +/// For brevity and because it involves the same commitment to unstable features in `proc_macro`, +/// if you're using `--cfg procmacro2_semver_exempt` it will also enable this feature +/// (see [`proc-macro2` docs / Unstable Features](https://docs.rs/proc-macro2/1.0.27/proc_macro2/#unstable-features)). #[cfg(feature = "migrate")] #[macro_export] macro_rules! 
migrate { diff --git a/tests/migrate/macro.rs b/tests/migrate/macro.rs index 9a3c16150e..7215046bef 100644 --- a/tests/migrate/macro.rs +++ b/tests/migrate/macro.rs @@ -7,6 +7,8 @@ static EMBEDDED: Migrator = sqlx::migrate!("tests/migrate/migrations"); async fn same_output() -> anyhow::Result<()> { let runtime = Migrator::new(Path::new("tests/migrate/migrations")).await?; + assert_eq!(runtime.migrations.len(), EMBEDDED.migrations.len()); + for (e, r) in EMBEDDED.iter().zip(runtime.iter()) { assert_eq!(e.version, r.version); assert_eq!(e.description, r.description); diff --git a/tests/postgres/macros.rs b/tests/postgres/macros.rs index bc770e050f..51d1f89bb8 100644 --- a/tests/postgres/macros.rs +++ b/tests/postgres/macros.rs @@ -105,7 +105,8 @@ async fn test_query_file() -> anyhow::Result<()> { .fetch_one(&mut conn) .await?; - println!("{:?}", account); + assert_eq!(account.id, 1); + assert_eq!(account.name, Option::::None); Ok(()) } From b3ae6e50de90a61d3e6b12043b4c1d9c7c2c5dbb Mon Sep 17 00:00:00 2001 From: Austin Bonander Date: Wed, 21 Jul 2021 17:24:39 -0700 Subject: [PATCH 17/28] fix(macros): prefix generated variable names in `query_as!()` (#1336) closes #1322 --- sqlx-macros/src/query/mod.rs | 1 + sqlx-macros/src/query/output.rs | 17 +++++++++++------ 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/sqlx-macros/src/query/mod.rs b/sqlx-macros/src/query/mod.rs index 357c4f3524..a2f17874b8 100644 --- a/sqlx-macros/src/query/mod.rs +++ b/sqlx-macros/src/query/mod.rs @@ -320,6 +320,7 @@ where |&output::RustColumn { ref ident, ref type_, + .. 
}| quote!(#ident: #type_,), ); diff --git a/sqlx-macros/src/query/output.rs b/sqlx-macros/src/query/output.rs index 2938f8846d..f7d56646dd 100644 --- a/sqlx-macros/src/query/output.rs +++ b/sqlx-macros/src/query/output.rs @@ -14,6 +14,7 @@ use syn::Token; pub struct RustColumn { pub(super) ident: Ident, + pub(super) var_name: Ident, pub(super) type_: ColumnType, } @@ -114,6 +115,9 @@ fn column_to_rust(describe: &Describe, i: usize) -> crate:: }; Ok(RustColumn { + // prefix the variable name we use in `quote_query_as!()` so it doesn't conflict + // https://github.com/launchbadge/sqlx/issues/1322 + var_name: quote::format_ident!("sqlx_query_as_{}", decl.ident), ident: decl.ident, type_, }) @@ -129,7 +133,7 @@ pub fn quote_query_as( |( i, &RustColumn { - ref ident, + ref var_name, ref type_, .. }, @@ -140,20 +144,21 @@ pub fn quote_query_as( // binding to a `let` avoids confusing errors about // "try expression alternatives have incompatible types" // it doesn't seem to hurt inference in the other branches - let #ident = row.try_get_unchecked::<#type_, _>(#i)?; + let #var_name = row.try_get_unchecked::<#type_, _>(#i)?; }, // type was overridden to be a wildcard so we fallback to the runtime check - (true, ColumnType::Wildcard) => quote! ( let #ident = row.try_get(#i)?; ), + (true, ColumnType::Wildcard) => quote! ( let #var_name = row.try_get(#i)?; ), (true, ColumnType::OptWildcard) => { - quote! ( let #ident = row.try_get::<::std::option::Option<_>, _>(#i)?; ) + quote! 
( let #var_name = row.try_get::<::std::option::Option<_>, _>(#i)?; ) } // macro is the `_unchecked!()` variant so this will die in decoding if it's wrong - (false, _) => quote!( let #ident = row.try_get_unchecked(#i)?; ), + (false, _) => quote!( let #var_name = row.try_get_unchecked(#i)?; ), } }, ); let ident = columns.iter().map(|col| &col.ident); + let var_name = columns.iter().map(|col| &col.var_name); let db_path = DB::db_path(); let row_path = DB::row_path(); @@ -172,7 +177,7 @@ pub fn quote_query_as( #(#instantiations)* - Ok(#out_ty { #(#ident: #ident),* }) + Ok(#out_ty { #(#ident: #var_name),* }) }) } } From dc92c28e68d99e993e420080927fc7fd5d699fbd Mon Sep 17 00:00:00 2001 From: guylapid <53928652+guylapid@users.noreply.github.com> Date: Sat, 24 Jul 2021 01:57:41 +0300 Subject: [PATCH 18/28] Use tokio `spawn_blocking` instead of `block_in_place` (#1333) This fixes a panic when sharing an SQLite connection pool between tokio runtime and actix runtime --- sqlx-rt/src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sqlx-rt/src/lib.rs b/sqlx-rt/src/lib.rs index 9c9a68a534..4b5cd77823 100644 --- a/sqlx-rt/src/lib.rs +++ b/sqlx-rt/src/lib.rs @@ -105,7 +105,8 @@ pub use tokio_rustls::{client::TlsStream, TlsConnector}; #[macro_export] macro_rules! blocking { ($($expr:tt)*) => { - $crate::tokio::task::block_in_place(move || { $($expr)* }) + $crate::tokio::task::spawn_blocking(move || { $($expr)* }) + .await.expect("Blocking task failed to complete.") }; } From 34db44bffdd17566a0c95b608f4d871aa91ddc51 Mon Sep 17 00:00:00 2001 From: Luca Palmieri Date: Sat, 24 Jul 2021 00:31:49 +0100 Subject: [PATCH 19/28] Mark the original DatabaseError as source. 
(#1197) --- sqlx-core/src/error.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/sqlx-core/src/error.rs b/sqlx-core/src/error.rs index 5b2f4780a0..6a152520db 100644 --- a/sqlx-core/src/error.rs +++ b/sqlx-core/src/error.rs @@ -36,7 +36,7 @@ pub enum Error { /// Error returned from the database. #[error("error returned from database: {0}")] - Database(Box), + Database(#[source] Box), /// Error communicating with the database backend. #[error("error communicating with the server: {0}")] @@ -105,6 +105,8 @@ pub enum Error { Migrate(#[source] Box), } +impl StdError for Box {} + impl Error { pub fn into_database_error(self) -> Option> { match self { From f0d0dce8e25c40dffd1431776b6a38510a70bde0 Mon Sep 17 00:00:00 2001 From: Daniel Faust Date: Sat, 24 Jul 2021 01:39:25 +0200 Subject: [PATCH 20/28] Use postgres as maintenance db unless maintaining postgres itself (#1339) Fixes #1283. --- sqlx-core/src/postgres/migrate.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sqlx-core/src/postgres/migrate.rs b/sqlx-core/src/postgres/migrate.rs index 7a7127624a..13bd2e3694 100644 --- a/sqlx-core/src/postgres/migrate.rs +++ b/sqlx-core/src/postgres/migrate.rs @@ -24,9 +24,9 @@ fn parse_for_maintenance(uri: &str) -> Result<(PgConnectOptions, String), Error> .to_owned(); // switch us to the maintenance database - // use `postgres` _unless_ the current user is postgres, in which case, use `template1` + // use `postgres` _unless_ the database is postgres, in which case, use `template1` // this matches the behavior of the `createdb` util - options.database = if options.username == "postgres" { + options.database = if database == "postgres" { Some("template1".into()) } else { Some("postgres".into()) From 9f7205e80fc51038ae3f594b38f3a0e5ee5a2237 Mon Sep 17 00:00:00 2001 From: Atkins Date: Thu, 29 Jul 2021 05:00:34 +0800 Subject: [PATCH 21/28] Fix GitHub Actions and integration test (#1346) * fix test suite * rustfmt * need Row * test: fix 
integration test scripts and update the upstream supported databases Signed-off-by: Atkins Chang * ci(actions): update supported databases Signed-off-by: Atkins Chang * ci(actions): use `pg_isready` instead of `sleep` to avoid error cause by database not ready Signed-off-by: Atkins Chang * feat(core): add `trait PgConnectionInfo` for connection parameter status from server Signed-off-by: Atkins Chang * test(postgres): fix integration test for postgres Signed-off-by: Atkins Chang * test(mysql): fix integration tests Signed-off-by: Atkins Chang * ci(actions): test database against the oldest and newest supported versions Signed-off-by: Atkins Chang * docs(core): document `trait PgConnectionInfo` Signed-off-by: Atkins Chang Co-authored-by: Montana Low --- .github/workflows/sqlx.yml | 72 ++++---- sqlx-core/src/postgres/connection/mod.rs | 17 ++ sqlx-core/src/postgres/connection/stream.rs | 88 ++++++++- sqlx-core/src/postgres/message/mod.rs | 2 + .../src/postgres/message/parameter_status.rs | 49 +++++ sqlx-core/src/postgres/mod.rs | 2 +- tests/README.md | 18 ++ tests/docker-compose.yml | 75 +++++--- tests/docker.py | 1 + tests/mssql/mssql-2017.dockerfile | 19 ++ tests/mysql/macros.rs | 29 +-- tests/postgres/postgres.rs | 77 +++++--- tests/postgres/setup.sql | 8 - tests/postgres/types.rs | 2 +- tests/sqlite/sqlite.db | Bin 36864 -> 36864 bytes tests/x.py | 173 ++++++++---------- 16 files changed, 428 insertions(+), 204 deletions(-) create mode 100644 sqlx-core/src/postgres/message/parameter_status.rs create mode 100644 tests/README.md create mode 100644 tests/mssql/mssql-2017.dockerfile diff --git a/.github/workflows/sqlx.yml b/.github/workflows/sqlx.yml index d583f33d64..229c248549 100644 --- a/.github/workflows/sqlx.yml +++ b/.github/workflows/sqlx.yml @@ -32,7 +32,8 @@ jobs: runs-on: ubuntu-20.04 strategy: matrix: - runtime: [async-std-native-tls, tokio-native-tls, actix-native-tls, async-std-rustls, tokio-rustls, actix-rustls] + runtime: [async-std, tokio, actix] + 
tls: [native-tls, rustls] steps: - uses: actions/checkout@v2 @@ -48,7 +49,7 @@ jobs: ~/.cargo/registry ~/.cargo/git target - key: ${{ runner.os }}-check-${{ matrix.runtime }}-${{ hashFiles('**/Cargo.lock') }} + key: ${{ runner.os }}-check-${{ matrix.runtime }}-${{ matrix.tls }}-${{ hashFiles('**/Cargo.lock') }} - uses: actions-rs/cargo@v1 with: @@ -56,21 +57,22 @@ jobs: args: > --manifest-path sqlx-core/Cargo.toml --no-default-features - --features offline,all-databases,all-types,migrate,runtime-${{ matrix.runtime }} + --features offline,all-databases,all-types,migrate,runtime-${{ matrix.runtime }}-${{ matrix.tls }} - uses: actions-rs/cargo@v1 with: command: check args: > --no-default-features - --features offline,all-databases,all-types,migrate,runtime-${{ matrix.runtime }},macros + --features offline,all-databases,all-types,migrate,runtime-${{ matrix.runtime }}-${{ matrix.tls }},macros test: name: Unit Test runs-on: ubuntu-20.04 strategy: matrix: - runtime: [async-std-native-tls, tokio-native-tls, actix-native-tls, async-std-rustls, tokio-rustls, actix-rustls] + runtime: [async-std, tokio, actix] + tls: [native-tls, rustls] steps: - uses: actions/checkout@v2 @@ -93,7 +95,7 @@ jobs: command: test args: > --manifest-path sqlx-core/Cargo.toml - --features offline,all-databases,all-types,runtime-${{ matrix.runtime }} + --features offline,all-databases,all-types,runtime-${{ matrix.runtime }}-${{ matrix.tls }} cli: name: CLI Binaries @@ -148,7 +150,8 @@ jobs: runs-on: ubuntu-20.04 strategy: matrix: - runtime: [async-std-native-tls, tokio-native-tls, actix-native-tls, async-std-rustls, tokio-rustls, actix-rustls] + runtime: [async-std, tokio, actix] + tls: [native-tls, rustls] needs: check steps: - uses: actions/checkout@v2 @@ -165,14 +168,14 @@ jobs: ~/.cargo/registry ~/.cargo/git target - key: ${{ runner.os }}-sqlite-${{ matrix.runtime }}-${{ hashFiles('**/Cargo.lock') }} + key: ${{ runner.os }}-sqlite-${{ matrix.runtime }}-${{ matrix.tls }}-${{ 
hashFiles('**/Cargo.lock') }} - uses: actions-rs/cargo@v1 with: command: test args: > --no-default-features - --features any,macros,migrate,sqlite,all-types,runtime-${{ matrix.runtime }} + --features any,macros,migrate,sqlite,all-types,runtime-${{ matrix.runtime }}-${{ matrix.tls }} -- --test-threads=1 env: @@ -183,8 +186,9 @@ jobs: runs-on: ubuntu-20.04 strategy: matrix: - postgres: [12, 10, 9_6, 9_5] - runtime: [async-std-native-tls, tokio-native-tls, actix-native-tls, async-std-rustls, tokio-rustls, actix-rustls] + postgres: [13, 9_6] + runtime: [async-std, tokio, actix] + tls: [native-tls, rustls] needs: check steps: - uses: actions/checkout@v2 @@ -201,23 +205,24 @@ jobs: ~/.cargo/registry ~/.cargo/git target - key: ${{ runner.os }}-postgres-${{ matrix.runtime }}-${{ hashFiles('**/Cargo.lock') }} + key: ${{ runner.os }}-postgres-${{ matrix.runtime }}-${{ matrix.tls }}-${{ hashFiles('**/Cargo.lock') }} - uses: actions-rs/cargo@v1 with: command: build args: > - --features postgres,all-types,runtime-${{ matrix.runtime }} + --features postgres,all-types,runtime-${{ matrix.runtime }}-${{ matrix.tls }} - - run: docker-compose -f tests/docker-compose.yml run -d -p 5432:5432 postgres_${{ matrix.postgres }} - - run: sleep 10 + - run: | + docker-compose -f tests/docker-compose.yml run -d -p 5432:5432 --name postgres_${{ matrix.postgres }} postgres_${{ matrix.postgres }} + docker exec postgres_${{ matrix.postgres }} bash -c "until pg_isready; do sleep 1; done" - uses: actions-rs/cargo@v1 with: command: test args: > --no-default-features - --features any,postgres,macros,all-types,runtime-${{ matrix.runtime }} + --features any,postgres,macros,all-types,runtime-${{ matrix.runtime }}-${{ matrix.tls }} env: DATABASE_URL: postgres://postgres:password@localhost:5432/sqlx @@ -226,7 +231,7 @@ jobs: command: test args: > --no-default-features - --features any,postgres,macros,migrate,all-types,runtime-${{ matrix.runtime }} + --features 
any,postgres,macros,migrate,all-types,runtime-${{ matrix.runtime }}-${{ matrix.tls }} env: DATABASE_URL: postgres://postgres:password@localhost:5432/sqlx?sslmode=verify-ca&sslrootcert=.%2Ftests%2Fcerts%2Fca.crt @@ -235,8 +240,9 @@ jobs: runs-on: ubuntu-20.04 strategy: matrix: - mysql: [8, 5_7, 5_6] - runtime: [async-std-native-tls, tokio-native-tls, actix-native-tls, async-std-rustls, tokio-rustls, actix-rustls] + mysql: [8, 5_6] + runtime: [async-std, tokio, actix] + tls: [native-tls, rustls] needs: check steps: - uses: actions/checkout@v2 @@ -253,13 +259,13 @@ jobs: ~/.cargo/registry ~/.cargo/git target - key: ${{ runner.os }}-mysql-${{ matrix.runtime }}-${{ hashFiles('**/Cargo.lock') }} + key: ${{ runner.os }}-mysql-${{ matrix.runtime }}-${{ matrix.tls }}-${{ hashFiles('**/Cargo.lock') }} - uses: actions-rs/cargo@v1 with: command: build args: > - --features mysql,all-types,runtime-${{ matrix.runtime }} + --features mysql,all-types,runtime-${{ matrix.runtime }}-${{ matrix.tls }} - run: docker-compose -f tests/docker-compose.yml run -d -p 3306:3306 mysql_${{ matrix.mysql }} - run: sleep 60 @@ -269,7 +275,7 @@ jobs: command: test args: > --no-default-features - --features any,mysql,macros,migrate,all-types,runtime-${{ matrix.runtime }} + --features any,mysql,macros,migrate,all-types,runtime-${{ matrix.runtime }}-${{ matrix.tls }} env: DATABASE_URL: mysql://root:password@localhost:3306/sqlx @@ -278,8 +284,9 @@ jobs: runs-on: ubuntu-20.04 strategy: matrix: - mariadb: [10_5, 10_4, 10_3, 10_2, 10_1] - runtime: [async-std-native-tls, tokio-native-tls, actix-native-tls, async-std-rustls, tokio-rustls, actix-rustls] + mariadb: [10_6, 10_2] + runtime: [async-std, tokio, actix] + tls: [native-tls, rustls] needs: check steps: - uses: actions/checkout@v2 @@ -297,13 +304,13 @@ jobs: ~/.cargo/registry ~/.cargo/git target - key: ${{ runner.os }}-mysql-${{ matrix.runtime }}-${{ hashFiles('**/Cargo.lock') }} + key: ${{ runner.os }}-mysql-${{ matrix.runtime }}-${{ matrix.tls }}-${{ 
hashFiles('**/Cargo.lock') }} - uses: actions-rs/cargo@v1 with: command: build args: > - --features mysql,all-types,runtime-${{ matrix.runtime }} + --features mysql,all-types,runtime-${{ matrix.runtime }}-${{ matrix.tls }} - run: docker-compose -f tests/docker-compose.yml run -d -p 3306:3306 mariadb_${{ matrix.mariadb }} - run: sleep 30 @@ -313,7 +320,7 @@ jobs: command: test args: > --no-default-features - --features any,mysql,macros,migrate,all-types,runtime-${{ matrix.runtime }} + --features any,mysql,macros,migrate,all-types,runtime-${{ matrix.runtime }}-${{ matrix.tls }} env: DATABASE_URL: mysql://root:password@localhost:3306/sqlx @@ -322,8 +329,9 @@ jobs: runs-on: ubuntu-20.04 strategy: matrix: - mssql: [2019] - runtime: [async-std-native-tls, tokio-native-tls, actix-native-tls, async-std-rustls, tokio-rustls, actix-rustls] + mssql: [2019, 2017] + runtime: [async-std, tokio, actix] + tls: [native-tls, rustls] needs: check steps: - uses: actions/checkout@v2 @@ -340,13 +348,13 @@ jobs: ~/.cargo/registry ~/.cargo/git target - key: ${{ runner.os }}-mssql-${{ matrix.runtime }}-${{ hashFiles('**/Cargo.lock') }} + key: ${{ runner.os }}-mssql-${{ matrix.runtime }}-${{ matrix.tls }}-${{ hashFiles('**/Cargo.lock') }} - uses: actions-rs/cargo@v1 with: command: build args: > - --features mssql,all-types,runtime-${{ matrix.runtime }} + --features mssql,all-types,runtime-${{ matrix.runtime }}-${{ matrix.tls }} - run: docker-compose -f tests/docker-compose.yml run -d -p 1433:1433 mssql_${{ matrix.mssql }} - run: sleep 80 # MSSQL takes a "bit" to startup @@ -356,6 +364,6 @@ jobs: command: test args: > --no-default-features - --features any,mssql,macros,migrate,all-types,runtime-${{ matrix.runtime }} + --features any,mssql,macros,migrate,all-types,runtime-${{ matrix.runtime }}-${{ matrix.tls }} env: DATABASE_URL: mssql://sa:Password123!@localhost/sqlx diff --git a/sqlx-core/src/postgres/connection/mod.rs b/sqlx-core/src/postgres/connection/mod.rs index e0238f5911..ea49a532cf 
100644 --- a/sqlx-core/src/postgres/connection/mod.rs +++ b/sqlx-core/src/postgres/connection/mod.rs @@ -177,3 +177,20 @@ impl Connection for PgConnection { !self.stream.wbuf.is_empty() } } + +pub trait PgConnectionInfo { + /// the version number of the server in `libpq` format + fn server_version_num(&self) -> Option; +} + +impl PgConnectionInfo for PgConnection { + fn server_version_num(&self) -> Option { + self.stream.server_version_num + } +} + +impl PgConnectionInfo for crate::pool::PoolConnection { + fn server_version_num(&self) -> Option { + self.stream.server_version_num + } +} diff --git a/sqlx-core/src/postgres/connection/stream.rs b/sqlx-core/src/postgres/connection/stream.rs index f61bda62b8..950d8d0969 100644 --- a/sqlx-core/src/postgres/connection/stream.rs +++ b/sqlx-core/src/postgres/connection/stream.rs @@ -1,4 +1,6 @@ +use std::collections::BTreeMap; use std::ops::{Deref, DerefMut}; +use std::str::FromStr; use bytes::{Buf, Bytes}; use futures_channel::mpsc::UnboundedSender; @@ -8,7 +10,7 @@ use log::Level; use crate::error::Error; use crate::io::{BufStream, Decode, Encode}; use crate::net::{MaybeTlsStream, Socket}; -use crate::postgres::message::{Message, MessageFormat, Notice, Notification}; +use crate::postgres::message::{Message, MessageFormat, Notice, Notification, ParameterStatus}; use crate::postgres::{PgConnectOptions, PgDatabaseError, PgSeverity}; // the stream is a separate type from the connection to uphold the invariant where an instantiated @@ -27,6 +29,10 @@ pub struct PgStream { // this is set when creating a PgListener and only written to if that listener is // re-used for query execution in-between receiving messages pub(crate) notifications: Option>, + + pub(crate) parameter_statuses: BTreeMap, + + pub(crate) server_version_num: Option, } impl PgStream { @@ -41,6 +47,8 @@ impl PgStream { Ok(Self { inner, notifications: None, + parameter_statuses: BTreeMap::default(), + server_version_num: None, }) } @@ -108,7 +116,18 @@ impl 
// reference:
// https://github.com/postgres/postgres/blob/6feebcb6b44631c3dc435e971bd80c2dd218a5ab/src/interfaces/libpq/fe-exec.c#L1030-L1065
/// Parses a Postgres `server_version` parameter-status string (e.g. `"9.6.1"`,
/// `"10.1"`, `"13devel"`) into the numeric version format used by `libpq`
/// (e.g. `90601`, `100001`, `130000`).
///
/// Pre-10 versions use `major.minor.rev`; from Postgres 10 onwards the format
/// is `major.patch`. Non-numeric suffixes such as `devel` terminate parsing,
/// keeping only the components read so far.
///
/// Returns `None` when no leading numeric component can be parsed at all
/// (e.g. `"unknown"`).
fn parse_server_version(s: &str) -> Option<u32> {
    let mut parts = Vec::<u32>::with_capacity(3);

    let mut from = 0;
    let mut chs = s.char_indices().peekable();
    while let Some((i, ch)) = chs.next() {
        match ch {
            '.' => {
                // end of a numeric component: commit it and start the next one
                if let Ok(num) = u32::from_str(&s[from..i]) {
                    parts.push(num);
                    from = i + 1;
                } else {
                    break;
                }
            }
            _ if ch.is_digit(10) => {
                // a digit as the very last character: flush the final component
                if chs.peek().is_none() {
                    if let Ok(num) = u32::from_str(&s[from..]) {
                        parts.push(num);
                    }
                    break;
                }
            }
            _ => {
                // non-digit suffix (e.g. "devel"): flush what we have and stop
                if let Ok(num) = u32::from_str(&s[from..i]) {
                    parts.push(num);
                }
                break;
            }
        };
    }

    let version_num = match parts.as_slice() {
        [major, minor, rev] => (100 * major + minor) * 100 + rev,
        // Postgres >= 10: two-part version, second component is the patch level
        [major, minor] if *major >= 10 => 100 * 100 * major + minor,
        [major, minor] => (100 * major + minor) * 100,
        [major] => 100 * 100 * major,
        _ => return None,
    };

    Some(version_num)
}

#[cfg(test)]
mod tests {
    use super::parse_server_version;

    #[test]
    fn test_parse_server_version_num() {
        // old style
        assert_eq!(parse_server_version("9.6.1"), Some(90601));
        // new style
        assert_eq!(parse_server_version("10.1"), Some(100001));
        // old style without minor version
        assert_eq!(parse_server_version("9.6devel"), Some(90600));
        // new style without minor version, e.g. "10devel"
        assert_eq!(parse_server_version("10devel"), Some(100000));
        assert_eq!(parse_server_version("13devel87"), Some(130000));
        // unknown
        assert_eq!(parse_server_version("unknown"), None);
    }
}
bench_decode_parameter_status(b: &mut test::Bencher) { + const DATA: &[u8] = b"client_encoding\x00UTF8\x00"; + + b.iter(|| { + ParameterStatus::decode(test::black_box(Bytes::from_static(DATA))).unwrap(); + }); +} diff --git a/sqlx-core/src/postgres/mod.rs b/sqlx-core/src/postgres/mod.rs index bd72bc6e86..ed6af06a33 100644 --- a/sqlx-core/src/postgres/mod.rs +++ b/sqlx-core/src/postgres/mod.rs @@ -22,7 +22,7 @@ mod migrate; pub use arguments::{PgArgumentBuffer, PgArguments}; pub use column::PgColumn; -pub use connection::PgConnection; +pub use connection::{PgConnection, PgConnectionInfo}; pub use database::Postgres; pub use error::{PgDatabaseError, PgErrorPosition}; pub use listener::{PgListener, PgNotification}; diff --git a/tests/README.md b/tests/README.md new file mode 100644 index 0000000000..bc2dc2327c --- /dev/null +++ b/tests/README.md @@ -0,0 +1,18 @@ + + +### Running Tests +SQLx uses docker to run many compatible database systems for integration testing. You'll need to [install docker](https://docs.docker.com/engine/) to run the full suite. You can validate your docker installation with: + + $ docker run hello-world + +Start the databases with `docker-compose` before running tests: + + $ docker-compose up + +Run all tests against all supported databases using: + + $ ./x.py + +If you see test failures, or want to run a more specific set of tests against a specific database, you can specify both the features to be tests and the DATABASE_URL. e.g. 
+ + $ DATABASE_URL=mysql://root:password@127.0.0.1:49183/sqlx cargo test --no-default-features --features macros,offline,any,all-types,mysql,runtime-async-std-native-tls diff --git a/tests/docker-compose.yml b/tests/docker-compose.yml index 4d0e39848e..30203b17e7 100644 --- a/tests/docker-compose.yml +++ b/tests/docker-compose.yml @@ -2,7 +2,7 @@ version: "3" services: # - # MySQL 5.6.x, 5.7.x, 8.x + # MySQL 8.x, 5.7.x, 5.6.x # https://www.mysql.com/support/supportedplatforms/database.html # @@ -40,12 +40,12 @@ services: MYSQL_DATABASE: sqlx # - # MariaDB 10.5, 10.4, 10.3, 10.2, 10.1 + # MariaDB 10.6, 10.5, 10.4, 10.3, 10.2 # https://mariadb.org/about/#maintenance-policy # - mariadb_10_5: - image: mariadb:10.5 + mariadb_10_6: + image: mariadb:10.6 volumes: - "./mysql/setup.sql:/docker-entrypoint-initdb.d/setup.sql" ports: @@ -54,8 +54,8 @@ services: MYSQL_ROOT_PASSWORD: password MYSQL_DATABASE: sqlx - mariadb_10_4: - image: mariadb:10.4 + mariadb_10_5: + image: mariadb:10.5 volumes: - "./mysql/setup.sql:/docker-entrypoint-initdb.d/setup.sql" ports: @@ -64,8 +64,8 @@ services: MYSQL_ROOT_PASSWORD: password MYSQL_DATABASE: sqlx - mariadb_10_3: - image: mariadb:10.3 + mariadb_10_4: + image: mariadb:10.4 volumes: - "./mysql/setup.sql:/docker-entrypoint-initdb.d/setup.sql" ports: @@ -74,8 +74,8 @@ services: MYSQL_ROOT_PASSWORD: password MYSQL_DATABASE: sqlx - mariadb_10_2: - image: mariadb:10.2 + mariadb_10_3: + image: mariadb:10.3 volumes: - "./mysql/setup.sql:/docker-entrypoint-initdb.d/setup.sql" ports: @@ -84,8 +84,8 @@ services: MYSQL_ROOT_PASSWORD: password MYSQL_DATABASE: sqlx - mariadb_10_1: - image: mariadb:10.1 + mariadb_10_2: + image: mariadb:10.2 volumes: - "./mysql/setup.sql:/docker-entrypoint-initdb.d/setup.sql" ports: @@ -95,16 +95,35 @@ services: MYSQL_DATABASE: sqlx # - # PostgreSQL 12.x, 10.x, 9.6.x, 9.5.x + # PostgreSQL 13.x, 12.x, 11.x 10.x, 9.6.x # https://www.postgresql.org/support/versioning/ # + postgres_14: + build: + context: . 
+ dockerfile: postgres/Dockerfile + args: + VERSION: 14beta2 + ports: + - 5432 + environment: + POSTGRES_DB: sqlx + POSTGRES_USER: postgres + POSTGRES_PASSWORD: password + POSTGRES_HOST_AUTH_METHOD: scram-sha-256 + POSTGRES_INITDB_ARGS: --auth-host=scram-sha-256 + volumes: + - "./postgres/setup.sql:/docker-entrypoint-initdb.d/setup.sql" + command: > + -c ssl=on -c ssl_cert_file=/var/lib/postgresql/server.crt -c ssl_key_file=/var/lib/postgresql/server.key + postgres_13: build: context: . dockerfile: postgres/Dockerfile args: - VERSION: 13-beta1 + VERSION: 13 ports: - 5432 environment: @@ -123,7 +142,7 @@ services: context: . dockerfile: postgres/Dockerfile args: - VERSION: 12.3 + VERSION: 12 ports: - 5432 environment: @@ -137,55 +156,57 @@ services: command: > -c ssl=on -c ssl_cert_file=/var/lib/postgresql/server.crt -c ssl_key_file=/var/lib/postgresql/server.key - postgres_10: + postgres_11: build: context: . dockerfile: postgres/Dockerfile args: - VERSION: 10.13 + VERSION: 11 ports: - 5432 environment: POSTGRES_DB: sqlx POSTGRES_USER: postgres POSTGRES_PASSWORD: password - POSTGRES_HOST_AUTH_METHOD: trust + POSTGRES_HOST_AUTH_METHOD: scram-sha-256 + POSTGRES_INITDB_ARGS: --auth-host=scram-sha-256 volumes: - "./postgres/setup.sql:/docker-entrypoint-initdb.d/setup.sql" command: > -c ssl=on -c ssl_cert_file=/var/lib/postgresql/server.crt -c ssl_key_file=/var/lib/postgresql/server.key - postgres_9_6: + postgres_10: build: context: . dockerfile: postgres/Dockerfile args: - VERSION: 9.6 + VERSION: 10 ports: - 5432 environment: POSTGRES_DB: sqlx POSTGRES_USER: postgres POSTGRES_PASSWORD: password - POSTGRES_HOST_AUTH_METHOD: md5 + POSTGRES_HOST_AUTH_METHOD: scram-sha-256 + POSTGRES_INITDB_ARGS: --auth-host=scram-sha-256 volumes: - "./postgres/setup.sql:/docker-entrypoint-initdb.d/setup.sql" command: > -c ssl=on -c ssl_cert_file=/var/lib/postgresql/server.crt -c ssl_key_file=/var/lib/postgresql/server.key - postgres_9_5: + postgres_9_6: build: context: . 
dockerfile: postgres/Dockerfile args: - VERSION: 9.5 + VERSION: 9.6 ports: - 5432 environment: POSTGRES_DB: sqlx POSTGRES_USER: postgres POSTGRES_PASSWORD: password - POSTGRES_HOST_AUTH_METHOD: password + POSTGRES_HOST_AUTH_METHOD: md5 volumes: - "./postgres/setup.sql:/docker-entrypoint-initdb.d/setup.sql" command: > @@ -205,17 +226,17 @@ services: ports: - 1433 environment: - ACCEPT_EULA: Y + ACCEPT_EULA: "Y" SA_PASSWORD: Password123! mssql_2017: build: context: . - dockerfile: mssql/Dockerfile + dockerfile: mssql/mssql-2017.dockerfile args: VERSION: 2017-latest environment: - ACCEPT_EULA: Y + ACCEPT_EULA: "Y" SA_PASSWORD: Password123! # diff --git a/tests/docker.py b/tests/docker.py index b4cdadd650..e664c38c6e 100644 --- a/tests/docker.py +++ b/tests/docker.py @@ -1,4 +1,5 @@ import subprocess +import sys import time from os import path diff --git a/tests/mssql/mssql-2017.dockerfile b/tests/mssql/mssql-2017.dockerfile new file mode 100644 index 0000000000..a2e0b58dae --- /dev/null +++ b/tests/mssql/mssql-2017.dockerfile @@ -0,0 +1,19 @@ +# vim: set ft=dockerfile: +ARG VERSION +FROM mcr.microsoft.com/mssql/server:${VERSION} + +# Create a config directory +RUN mkdir -p /usr/config +WORKDIR /usr/config + +# Bundle config source +COPY mssql/entrypoint.sh /usr/config/entrypoint.sh +COPY mssql/configure-db.sh /usr/config/configure-db.sh +COPY mssql/setup.sql /usr/config/setup.sql + +# Grant permissions for to our scripts to be executable +USER root +RUN chmod +x /usr/config/entrypoint.sh +RUN chmod +x /usr/config/configure-db.sh + +ENTRYPOINT ["/usr/config/entrypoint.sh"] diff --git a/tests/mysql/macros.rs b/tests/mysql/macros.rs index 9b9b436c98..80eb1b2e91 100644 --- a/tests/mysql/macros.rs +++ b/tests/mysql/macros.rs @@ -188,12 +188,13 @@ async fn test_column_override_nullable() -> anyhow::Result<()> { async fn with_test_row<'a>( conn: &'a mut MySqlConnection, -) -> anyhow::Result> { +) -> anyhow::Result<(Transaction<'a, MySql>, MyInt)> { let mut transaction = 
conn.begin().await?; - sqlx::query!("INSERT INTO tweet(id, text, owner_id) VALUES (1, '#sqlx is pretty cool!', 1)") + let id = sqlx::query!("INSERT INTO tweet(text, owner_id) VALUES ('#sqlx is pretty cool!', 1)") .execute(&mut transaction) - .await?; - Ok(transaction) + .await? + .last_insert_id(); + Ok((transaction, MyInt(id as i64))) } #[derive(PartialEq, Eq, Debug, sqlx::Type)] @@ -211,13 +212,13 @@ struct OptionalRecord { #[sqlx_macros::test] async fn test_column_override_wildcard() -> anyhow::Result<()> { let mut conn = new::().await?; - let mut conn = with_test_row(&mut conn).await?; + let (mut conn, id) = with_test_row(&mut conn).await?; let record = sqlx::query_as!(Record, "select id as `id: _` from tweet") .fetch_one(&mut conn) .await?; - assert_eq!(record.id, MyInt(1)); + assert_eq!(record.id, id); // this syntax is also useful for expressions let record = sqlx::query_as!(Record, "select * from (select 1 as `id: _`) records") @@ -238,7 +239,7 @@ async fn test_column_override_wildcard() -> anyhow::Result<()> { #[sqlx_macros::test] async fn test_column_override_wildcard_not_null() -> anyhow::Result<()> { let mut conn = new::().await?; - let mut conn = with_test_row(&mut conn).await?; + let (mut conn, _) = with_test_row(&mut conn).await?; let record = sqlx::query_as!(Record, "select owner_id as `id!: _` from tweet") .fetch_one(&mut conn) @@ -252,13 +253,13 @@ async fn test_column_override_wildcard_not_null() -> anyhow::Result<()> { #[sqlx_macros::test] async fn test_column_override_wildcard_nullable() -> anyhow::Result<()> { let mut conn = new::().await?; - let mut conn = with_test_row(&mut conn).await?; + let (mut conn, id) = with_test_row(&mut conn).await?; let record = sqlx::query_as!(OptionalRecord, "select id as `id?: _` from tweet") .fetch_one(&mut conn) .await?; - assert_eq!(record.id, Some(MyInt(1))); + assert_eq!(record.id, Some(id)); Ok(()) } @@ -266,13 +267,13 @@ async fn test_column_override_wildcard_nullable() -> anyhow::Result<()> { 
#[sqlx_macros::test] async fn test_column_override_exact() -> anyhow::Result<()> { let mut conn = new::().await?; - let mut conn = with_test_row(&mut conn).await?; + let (mut conn, id) = with_test_row(&mut conn).await?; let record = sqlx::query!("select id as `id: MyInt` from tweet") .fetch_one(&mut conn) .await?; - assert_eq!(record.id, MyInt(1)); + assert_eq!(record.id, id); // we can also support this syntax for expressions let record = sqlx::query!("select * from (select 1 as `id: MyInt`) records") @@ -293,7 +294,7 @@ async fn test_column_override_exact() -> anyhow::Result<()> { #[sqlx_macros::test] async fn test_column_override_exact_not_null() -> anyhow::Result<()> { let mut conn = new::().await?; - let mut conn = with_test_row(&mut conn).await?; + let (mut conn, _) = with_test_row(&mut conn).await?; let record = sqlx::query!("select owner_id as `id!: MyInt` from tweet") .fetch_one(&mut conn) @@ -307,13 +308,13 @@ async fn test_column_override_exact_not_null() -> anyhow::Result<()> { #[sqlx_macros::test] async fn test_column_override_exact_nullable() -> anyhow::Result<()> { let mut conn = new::().await?; - let mut conn = with_test_row(&mut conn).await?; + let (mut conn, id) = with_test_row(&mut conn).await?; let record = sqlx::query!("select id as `id?: MyInt` from tweet") .fetch_one(&mut conn) .await?; - assert_eq!(record.id, Some(MyInt(1))); + assert_eq!(record.id, Some(id)); Ok(()) } diff --git a/tests/postgres/postgres.rs b/tests/postgres/postgres.rs index 5688aded5f..4f71cde7c2 100644 --- a/tests/postgres/postgres.rs +++ b/tests/postgres/postgres.rs @@ -2,7 +2,7 @@ use futures::TryStreamExt; use sqlx::postgres::{ PgConnectOptions, PgConnection, PgDatabaseError, PgErrorPosition, PgSeverity, }; -use sqlx::postgres::{PgPoolOptions, PgRow, Postgres}; +use sqlx::postgres::{PgConnectionInfo, PgPoolOptions, PgRow, Postgres}; use sqlx::{Column, Connection, Executor, Row, Statement, TypeInfo}; use sqlx_test::{new, setup_if_needed}; use std::env; @@ -968,6 +968,30 
@@ async fn test_listener_cleanup() -> anyhow::Result<()> { #[sqlx_macros::test] async fn it_supports_domain_types_in_composite_domain_types() -> anyhow::Result<()> { + // Only supported in Postgres 11+ + let mut conn = new::().await?; + if matches!(conn.server_version_num(), Some(version) if version < 110000) { + return Ok(()); + } + + conn.execute( + r#" +DROP TABLE IF EXISTS heating_bills; +DROP DOMAIN IF EXISTS winter_year_month; +DROP TYPE IF EXISTS year_month; +DROP DOMAIN IF EXISTS month_id; + +CREATE DOMAIN month_id AS INT2 CHECK (1 <= value AND value <= 12); +CREATE TYPE year_month AS (year INT4, month month_id); +CREATE DOMAIN winter_year_month AS year_month CHECK ((value).month <= 3); +CREATE TABLE heating_bills ( + month winter_year_month NOT NULL PRIMARY KEY, + cost INT4 NOT NULL +); + "#, + ) + .await?; + #[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] struct MonthId(i16); @@ -1039,41 +1063,44 @@ async fn it_supports_domain_types_in_composite_domain_types() -> anyhow::Result< sqlx::encode::IsNull::No } } - let mut conn = new::().await?; - { - let result = sqlx::query("DELETE FROM heating_bills;") + let result = sqlx::query("DELETE FROM heating_bills;") + .execute(&mut conn) + .await; + + let result = result.unwrap(); + assert_eq!(result.rows_affected(), 0); + + let result = + sqlx::query("INSERT INTO heating_bills(month, cost) VALUES($1::winter_year_month, 100);") + .bind(WinterYearMonth { + year: 2021, + month: MonthId(1), + }) .execute(&mut conn) .await; - let result = result.unwrap(); - assert_eq!(result.rows_affected(), 1); - } + let result = result.unwrap(); + assert_eq!(result.rows_affected(), 1); - { - let result = sqlx::query( - "INSERT INTO heating_bills(month, cost) VALUES($1::winter_year_month, 100);", - ) - .bind(WinterYearMonth { - year: 2021, - month: MonthId(1), - }) + let result = sqlx::query("DELETE FROM heating_bills;") .execute(&mut conn) .await; - let result = result.unwrap(); - 
assert_eq!(result.rows_affected(), 1); - } + let result = result.unwrap(); + assert_eq!(result.rows_affected(), 1); - { - let result = sqlx::query("DELETE FROM heating_bills;") - .execute(&mut conn) - .await; + Ok(()) +} - let result = result.unwrap(); - assert_eq!(result.rows_affected(), 1); - } +#[sqlx_macros::test] +async fn test_pg_server_num() -> anyhow::Result<()> { + use sqlx::postgres::PgConnectionInfo; + + let conn = new::().await?; + + assert!(conn.server_version_num().is_some()); Ok(()) } diff --git a/tests/postgres/setup.sql b/tests/postgres/setup.sql index d013d43400..9818d139ba 100644 --- a/tests/postgres/setup.sql +++ b/tests/postgres/setup.sql @@ -29,11 +29,3 @@ CREATE TABLE products ( name TEXT, price NUMERIC CHECK (price > 0) ); - -CREATE DOMAIN month_id AS INT2 CHECK (1 <= value AND value <= 12); -CREATE TYPE year_month AS (year INT4, month month_id); -CREATE DOMAIN winter_year_month AS year_month CHECK ((value).month <= 3); -CREATE TABLE heating_bills ( - month winter_year_month NOT NULL PRIMARY KEY, - cost INT4 NOT NULL -); diff --git a/tests/postgres/types.rs b/tests/postgres/types.rs index 5ae5bd217e..932f6b81f3 100644 --- a/tests/postgres/types.rs +++ b/tests/postgres/types.rs @@ -211,7 +211,7 @@ test_type!(ipnetwork_vec>(Postgres, #[cfg(feature = "mac_address")] test_type!(mac_address_vec>(Postgres, - "'{01:02:03:04:05:06,FF:FF:FF:FF:FF:FF}'::inet[]" + "'{01:02:03:04:05:06,FF:FF:FF:FF:FF:FF}'::macaddr[]" == vec![ "01:02:03:04:05:06".parse::().unwrap(), "FF:FF:FF:FF:FF:FF".parse::().unwrap() diff --git a/tests/sqlite/sqlite.db b/tests/sqlite/sqlite.db index 49913441dfd6583610d79217efdf7676d7a20153..d693511079064edcd5e0a6f55efedafe17ab855b 100644 GIT binary patch delta 1062 zcmZY7KS-NF7{~FLH;{UFKG%2Ypi2e?6OBnUe~g;=`adQ^2ImeA6`d5!(&>%5iO|8e z;G%+y4uYD&K^FyG1pf>|=^~1Yw796?VzEDy;gtuDFUN-mj^nPl>MpLjw>)7ZCnguY z;e4j%gL+ao>P+pc4fRvasVVhYWmQ7eD^vcHf8;NDBzNSBT##Som>iLo49Z&R6_4Ut zoQgfME`EqvF}Xc+QEi(!!{slGPfYv%uTfv&%XGoaavJ88;WWf)kkf#J?8=e-4y#Zf 
zr(RAyoGeb=oYHS(zq@#fk9BeC6<0%eLg|DB(YH8LvIrS9BDj^-D=Rz9PXI;ulDpc}ML*Xd8%)Jl7`n_lRd z9_XI#=$5YNH(k;>ozO8I&_3&WTg>)hvND^sB5=b10AyK5wW5irlt=LA8FcLyqkRZ~GG$8@R Lk2IFj`9td!C$tWy delta 45 zcmZozz|^pSX#b!MEAY|BwR!G0qL! diff --git a/tests/x.py b/tests/x.py index 2133beefe4..ed1c7f512a 100755 --- a/tests/x.py +++ b/tests/x.py @@ -88,116 +88,101 @@ def run(command, comment=None, env=None, service=None, tag=None, args=None, data # check # -run("cargo c", comment="check with a default set of features", tag="check") - -run( - "cargo c --no-default-features --features runtime-async-std-native-tls,all-databases,all-types,offline,macros", - comment="check with async-std", - tag="check_async_std" -) - -run( - "cargo c --no-default-features --features runtime-tokio-native-tls,all-databases,all-types,offline,macros", - comment="check with tokio", - tag="check_tokio" -) - -run( - "cargo c --no-default-features --features runtime-actix-native-tls,all-databases,all-types,offline,macros", - comment="check with actix", - tag="check_actix" -) +for runtime in ["async-std", "tokio", "actix"]: + for tls in ["native-tls", "rustls"]: + run( + f"cargo c --no-default-features --features all-databases,all-types,offline,macros,runtime-{runtime}-{tls}", + comment="check with async-std", + tag=f"check_{runtime}_{tls}" + ) # # unit test # -run( - "cargo test --manifest-path sqlx-core/Cargo.toml --features all-databases,all-types", - comment="unit test core", - tag="unit" -) - -run( - "cargo test --no-default-features --manifest-path sqlx-core/Cargo.toml --features all-databases,all-types,runtime-tokio-native-tls", - comment="unit test core", - tag="unit_tokio" -) +for runtime in ["async-std", "tokio", "actix"]: + for tls in ["native-tls", "rustls"]: + run( + f"cargo test --no-default-features --manifest-path sqlx-core/Cargo.toml --features all-databases,all-types,runtime-{runtime}-{tls}", + comment="unit test core", + tag=f"unit_{runtime}_{tls}" + ) # # integration tests # for runtime in 
["async-std", "tokio", "actix"]: + for tls in ["native-tls", "rustls"]: - # - # sqlite - # - - run( - f"cargo test --no-default-features --features macros,offline,any,all-types,sqlite,runtime-{runtime}-native-tls", - comment=f"test sqlite", - service="sqlite", - tag=f"sqlite" if runtime == "async-std" else f"sqlite_{runtime}", - ) - - # - # postgres - # - - for version in ["12", "10", "9_6", "9_5"]: - run( - f"cargo test --no-default-features --features macros,offline,any,all-types,postgres,runtime-{runtime}-native-tls", - comment=f"test postgres {version}", - service=f"postgres_{version}", - tag=f"postgres_{version}" if runtime == "async-std" else f"postgres_{version}_{runtime}", - ) - - # +ssl - for version in ["12", "10", "9_6", "9_5"]: - run( - f"cargo test --no-default-features --features macros,offline,any,all-types,postgres,runtime-{runtime}-native-tls", - comment=f"test postgres {version} ssl", - database_url_args="sslmode=verify-ca&sslrootcert=.%2Ftests%2Fcerts%2Fca.crt", - service=f"postgres_{version}", - tag=f"postgres_{version}_ssl" if runtime == "async-std" else f"postgres_{version}_ssl_{runtime}", - ) - - # - # mysql - # - - for version in ["8", "5_7", "5_6"]: - run( - f"cargo test --no-default-features --features macros,offline,any,all-types,mysql,runtime-{runtime}-native-tls", - comment=f"test mysql {version}", - service=f"mysql_{version}", - tag=f"mysql_{version}" if runtime == "async-std" else f"mysql_{version}_{runtime}", - ) - - # - # mariadb - # + # + # sqlite + # - for version in ["10_5", "10_4", "10_3", "10_2", "10_1"]: run( - f"cargo test --no-default-features --features macros,offline,any,all-types,mysql,runtime-{runtime}-native-tls", - comment=f"test mariadb {version}", - service=f"mariadb_{version}", - tag=f"mariadb_{version}" if runtime == "async-std" else f"mariadb_{version}_{runtime}", + f"cargo test --no-default-features --features macros,offline,any,all-types,sqlite,runtime-{runtime}-native-tls", + comment=f"test sqlite", + 
service="sqlite", + tag=f"sqlite" if runtime == "async-std" else f"sqlite_{runtime}", ) - # - # mssql - # - - for version in ["2019"]: - run( - f"cargo test --no-default-features --features macros,offline,any,all-types,mssql,runtime-{runtime}-native-tls", - comment=f"test mssql {version}", - service=f"mssql_{version}", - tag=f"mssql_{version}" if runtime == "async-std" else f"mssql_{version}_{runtime}", - ) + # + # postgres + # + + for version in ["13", "12", "11", "10", "9_6"]: + run( + f"cargo test --no-default-features --features macros,offline,any,all-types,postgres,runtime-{runtime}-native-tls", + comment=f"test postgres {version}", + service=f"postgres_{version}", + tag=f"postgres_{version}" if runtime == "async-std" else f"postgres_{version}_{runtime}", + ) + + ## +ssl + for version in ["13", "12", "11", "10", "9_6"]: + run( + f"cargo test --no-default-features --features macros,offline,any,all-types,postgres,runtime-{runtime}-native-tls", + comment=f"test postgres {version} ssl", + database_url_args="sslmode=verify-ca&sslrootcert=.%2Ftests%2Fcerts%2Fca.crt", + service=f"postgres_{version}", + tag=f"postgres_{version}_ssl" if runtime == "async-std" else f"postgres_{version}_ssl_{runtime}", + ) + + # + # mysql + # + + for version in ["8", "5_7", "5_6"]: + run( + f"cargo test --no-default-features --features macros,offline,any,all-types,mysql,runtime-{runtime}-native-tls", + comment=f"test mysql {version}", + service=f"mysql_{version}", + tag=f"mysql_{version}" if runtime == "async-std" else f"mysql_{version}_{runtime}", + ) + + # + # mariadb + # + + for version in ["10_6", "10_5", "10_4", "10_3", "10_2"]: + run( + f"cargo test --no-default-features --features macros,offline,any,all-types,mysql,runtime-{runtime}-native-tls", + comment=f"test mariadb {version}", + service=f"mariadb_{version}", + tag=f"mariadb_{version}" if runtime == "async-std" else f"mariadb_{version}_{runtime}", + ) + + # + # mssql + # + + for version in ["2019", "2017"]: + run( + f"cargo 
test --no-default-features --features macros,offline,any,all-types,mssql,runtime-{runtime}-native-tls", + comment=f"test mssql {version}", + service=f"mssql_{version}", + tag=f"mssql_{version}" if runtime == "async-std" else f"mssql_{version}_{runtime}", + ) # TODO: Use [grcov] if available # ~/.cargo/bin/grcov tests/.cache/target/debug -s sqlx-core/ -t html --llvm --branch -o ./target/debug/coverage From 55c603e9e709ae62f3ef80ab7656efc5b41a6ffb Mon Sep 17 00:00:00 2001 From: Atkins Date: Sat, 7 Aug 2021 16:48:10 +0800 Subject: [PATCH 22/28] build(deps): bump git2 from 0.13.19 to 0.13.20 (#1362) Signed-off-by: Atkins Chang --- Cargo.lock | 8 ++++---- sqlx-core/Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 817f5ffdfe..fc90d79c79 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -984,9 +984,9 @@ dependencies = [ [[package]] name = "git2" -version = "0.13.19" +version = "0.13.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17929de7239dea9f68aa14f94b2ab4974e7b24c1314275ffcc12a7758172fa18" +checksum = "d9831e983241f8c5591ed53f17d874833e2fa82cac2625f3888c50cbfe136cba" dependencies = [ "bitflags", "libc", @@ -1225,9 +1225,9 @@ checksum = "18794a8ad5b29321f790b55d93dfba91e125cb1a9edbd4f8e3150acc771c1a5e" [[package]] name = "libgit2-sys" -version = "0.12.20+1.1.0" +version = "0.12.21+1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e2f09917e00b9ad194ae72072bb5ada2cca16d8171a43e91ddba2afbb02664b" +checksum = "86271bacd72b2b9e854c3dcfb82efd538f15f870e4c11af66900effb462f6825" dependencies = [ "cc", "libc", diff --git a/sqlx-core/Cargo.toml b/sqlx-core/Cargo.toml index 468a77a528..e4942b2cf9 100644 --- a/sqlx-core/Cargo.toml +++ b/sqlx-core/Cargo.toml @@ -161,5 +161,5 @@ webpki-roots = { version = "0.21.0", optional = true } whoami = "1.0.1" stringprep = "0.1.2" bstr = { version = "0.2.14", default-features = false, features = ["std"], optional 
= true } -git2 = { version = "0.13.12", default-features = false, optional = true } +git2 = { version = "0.13.20", default-features = false, optional = true } hashlink = "0.7.0" From 71388a7ef204446eab7be1d9bc4ff33956f92fa9 Mon Sep 17 00:00:00 2001 From: Austin Bonander Date: Mon, 16 Aug 2021 14:39:45 -0700 Subject: [PATCH 23/28] sqlite: fix a couple segfaults (#1351) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * sqlite: use Arc instead of Copy-able StatementHandle This guarantees that StatementHandle is never used after calling `sqlite3_finalize`. Now `sqlite3_finalize` is only called when StatementHandle is dropped. (cherry picked from commit 5eebc05dc371512bae14cf94498087bdadeddec0) * sqlite: use Weak poiter to StatementHandle in the worker Otherwise some tests fail to close connection. (cherry picked from commit 5461eeeee30772e54e8874f60805b04bdc989278) * Fix segfault due to race condition in sqlite (#1300) (cherry picked from commit bb62cf767e3e44896bf4607da8e18237241ed170) * fix(sqlite): run `sqlite3_reset()` in `StatementWorker` this avoids possible race conditions without using a mutex * fix(sqlite): have `StatementWorker` keep a strong ref to `ConnectionHandle` this should prevent the database handle from being finalized before all statement handles have been finalized * fix(sqlite/test): make `concurrent_resets_dont_segfault` runtime-agnostic Co-authored-by: link2xt Co-authored-by: Adam Cigánek --- sqlx-core/src/sqlite/connection/describe.rs | 2 +- sqlx-core/src/sqlite/connection/establish.rs | 6 +- sqlx-core/src/sqlite/connection/executor.rs | 46 +++++----- sqlx-core/src/sqlite/connection/handle.rs | 35 ++++++-- sqlx-core/src/sqlite/connection/mod.rs | 2 +- sqlx-core/src/sqlite/row.rs | 6 +- sqlx-core/src/sqlite/statement/handle.rs | 50 ++++++++--- sqlx-core/src/sqlite/statement/virtual.rs | 43 ++++------ sqlx-core/src/sqlite/statement/worker.rs | 83 +++++++++++++++++-- tests/sqlite/sqlite.db | Bin 36864 -> 
36864 bytes tests/sqlite/sqlite.rs | 31 +++++++ 11 files changed, 224 insertions(+), 80 deletions(-) diff --git a/sqlx-core/src/sqlite/connection/describe.rs b/sqlx-core/src/sqlite/connection/describe.rs index 8bc9f9ceed..cb86e7e024 100644 --- a/sqlx-core/src/sqlite/connection/describe.rs +++ b/sqlx-core/src/sqlite/connection/describe.rs @@ -64,7 +64,7 @@ pub(super) fn describe<'c: 'e, 'q: 'e, 'e>( // fallback to [column_decltype] if !stepped && stmt.read_only() { stepped = true; - let _ = conn.worker.step(*stmt).await; + let _ = conn.worker.step(stmt).await; } let mut ty = stmt.column_type_info(col); diff --git a/sqlx-core/src/sqlite/connection/establish.rs b/sqlx-core/src/sqlite/connection/establish.rs index 20206a4388..77f10db369 100644 --- a/sqlx-core/src/sqlite/connection/establish.rs +++ b/sqlx-core/src/sqlite/connection/establish.rs @@ -87,7 +87,7 @@ pub(crate) async fn establish(options: &SqliteConnectOptions) -> Result Result Result( +async fn prepare<'a>( + worker: &mut StatementWorker, statements: &'a mut StatementCache, statement: &'a mut Option, query: &str, @@ -39,7 +40,7 @@ fn prepare<'a>( if exists { // as this statement has been executed before, we reset before continuing // this also causes any rows that are from the statement to be inflated - statement.reset(); + statement.reset(worker).await?; } Ok(statement) @@ -61,19 +62,25 @@ fn bind( /// A structure holding sqlite statement handle and resetting the /// statement when it is dropped. 
-struct StatementResetter { - handle: StatementHandle, +struct StatementResetter<'a> { + handle: Arc, + worker: &'a mut StatementWorker, } -impl StatementResetter { - fn new(handle: StatementHandle) -> Self { - Self { handle } +impl<'a> StatementResetter<'a> { + fn new(worker: &'a mut StatementWorker, handle: &Arc) -> Self { + Self { + worker, + handle: Arc::clone(handle), + } } } -impl Drop for StatementResetter { +impl Drop for StatementResetter<'_> { fn drop(&mut self) { - self.handle.reset(); + // this method is designed to eagerly send the reset command + // so we don't need to await or spawn it + let _ = self.worker.reset(&self.handle); } } @@ -103,7 +110,7 @@ impl<'c> Executor<'c> for &'c mut SqliteConnection { } = self; // prepare statement object (or checkout from cache) - let stmt = prepare(statements, statement, sql, persistent)?; + let stmt = prepare(worker, statements, statement, sql, persistent).await?; // keep track of how many arguments we have bound let mut num_arguments = 0; @@ -113,7 +120,7 @@ impl<'c> Executor<'c> for &'c mut SqliteConnection { // is dropped. `StatementResetter` will reliably reset the // statement even if the stream returned from `fetch_many` // is dropped early. 
- let _resetter = StatementResetter::new(*stmt); + let resetter = StatementResetter::new(worker, stmt); // bind values to the statement num_arguments += bind(stmt, &arguments, num_arguments)?; @@ -125,7 +132,7 @@ impl<'c> Executor<'c> for &'c mut SqliteConnection { // invoke [sqlite3_step] on the dedicated worker thread // this will move us forward one row or finish the statement - let s = worker.step(*stmt).await?; + let s = resetter.worker.step(stmt).await?; match s { Either::Left(changes) => { @@ -145,7 +152,7 @@ impl<'c> Executor<'c> for &'c mut SqliteConnection { Either::Right(()) => { let (row, weak_values_ref) = SqliteRow::current( - *stmt, + &stmt, columns, column_names ); @@ -188,7 +195,7 @@ impl<'c> Executor<'c> for &'c mut SqliteConnection { } = self; // prepare statement object (or checkout from cache) - let virtual_stmt = prepare(statements, statement, sql, persistent)?; + let virtual_stmt = prepare(worker, statements, statement, sql, persistent).await?; // keep track of how many arguments we have bound let mut num_arguments = 0; @@ -205,18 +212,18 @@ impl<'c> Executor<'c> for &'c mut SqliteConnection { // invoke [sqlite3_step] on the dedicated worker thread // this will move us forward one row or finish the statement - match worker.step(*stmt).await? { + match worker.step(stmt).await? { Either::Left(_) => (), Either::Right(()) => { let (row, weak_values_ref) = - SqliteRow::current(*stmt, columns, column_names); + SqliteRow::current(stmt, columns, column_names); *last_row_values = Some(weak_values_ref); logger.increment_rows(); - virtual_stmt.reset(); + virtual_stmt.reset(worker).await?; return Ok(Some(row)); } } @@ -238,11 +245,12 @@ impl<'c> Executor<'c> for &'c mut SqliteConnection { handle: ref mut conn, ref mut statements, ref mut statement, + ref mut worker, .. 
} = self; // prepare statement object (or checkout from cache) - let statement = prepare(statements, statement, sql, true)?; + let statement = prepare(worker, statements, statement, sql, true).await?; let mut parameters = 0; let mut columns = None; diff --git a/sqlx-core/src/sqlite/connection/handle.rs b/sqlx-core/src/sqlite/connection/handle.rs index 6aa8f37667..bf59d41f5b 100644 --- a/sqlx-core/src/sqlite/connection/handle.rs +++ b/sqlx-core/src/sqlite/connection/handle.rs @@ -3,11 +3,23 @@ use std::ptr::NonNull; use libsqlite3_sys::{sqlite3, sqlite3_close, SQLITE_OK}; use crate::sqlite::SqliteError; +use std::sync::Arc; /// Managed handle to the raw SQLite3 database handle. -/// The database handle will be closed when this is dropped. +/// The database handle will be closed when this is dropped and no `ConnectionHandleRef`s exist. #[derive(Debug)] -pub(crate) struct ConnectionHandle(pub(super) NonNull); +pub(crate) struct ConnectionHandle(Arc); + +/// A wrapper around `ConnectionHandle` which only exists for a `StatementWorker` to own +/// which prevents the `sqlite3` handle from being finalized while it is running `sqlite3_step()` +/// or `sqlite3_reset()`. +/// +/// Note that this does *not* actually give access to the database handle! +pub(crate) struct ConnectionHandleRef(Arc); + +// Wrapper for `*mut sqlite3` which finalizes the handle on-drop. +#[derive(Debug)] +struct HandleInner(NonNull); // A SQLite3 handle is safe to send between threads, provided not more than // one is accessing it at the same time. This is upheld as long as [SQLITE_CONFIG_MULTITHREAD] is @@ -20,19 +32,32 @@ pub(crate) struct ConnectionHandle(pub(super) NonNull); unsafe impl Send for ConnectionHandle {} +// SAFETY: `Arc` normally only implements `Send` where `T: Sync` because it allows +// concurrent access. 
+// +// However, in this case we're only using `Arc` to prevent the database handle from being +// finalized while the worker still holds a statement handle; `ConnectionHandleRef` thus +// should *not* actually provide access to the database handle. +unsafe impl Send for ConnectionHandleRef {} + impl ConnectionHandle { #[inline] pub(super) unsafe fn new(ptr: *mut sqlite3) -> Self { - Self(NonNull::new_unchecked(ptr)) + Self(Arc::new(HandleInner(NonNull::new_unchecked(ptr)))) } #[inline] pub(crate) fn as_ptr(&self) -> *mut sqlite3 { - self.0.as_ptr() + self.0 .0.as_ptr() + } + + #[inline] + pub(crate) fn to_ref(&self) -> ConnectionHandleRef { + ConnectionHandleRef(Arc::clone(&self.0)) } } -impl Drop for ConnectionHandle { +impl Drop for HandleInner { fn drop(&mut self) { unsafe { // https://sqlite.org/c3ref/close.html diff --git a/sqlx-core/src/sqlite/connection/mod.rs b/sqlx-core/src/sqlite/connection/mod.rs index 92926beef4..c18add85b2 100644 --- a/sqlx-core/src/sqlite/connection/mod.rs +++ b/sqlx-core/src/sqlite/connection/mod.rs @@ -17,7 +17,7 @@ mod executor; mod explain; mod handle; -pub(crate) use handle::ConnectionHandle; +pub(crate) use handle::{ConnectionHandle, ConnectionHandleRef}; /// A connection to a [Sqlite] database. pub struct SqliteConnection { diff --git a/sqlx-core/src/sqlite/row.rs b/sqlx-core/src/sqlite/row.rs index 9f14ca58f0..f1e15f599d 100644 --- a/sqlx-core/src/sqlite/row.rs +++ b/sqlx-core/src/sqlite/row.rs @@ -23,7 +23,7 @@ pub struct SqliteRow { // IF the user drops the Row before iterating the stream (so // nearly all of our internal stream iterators), the executor moves on; otherwise, // it actually inflates this row with a list of owned sqlite3 values. 
- pub(crate) statement: StatementHandle, + pub(crate) statement: Arc, pub(crate) values: Arc>, pub(crate) num_values: usize, @@ -48,7 +48,7 @@ impl SqliteRow { // returns a weak reference to an atomic list where the executor should inflate if its going // to increment the statement with [step] pub(crate) fn current( - statement: StatementHandle, + statement: &Arc, columns: &Arc>, column_names: &Arc>, ) -> (Self, Weak>) { @@ -57,7 +57,7 @@ impl SqliteRow { let size = statement.column_count(); let row = Self { - statement, + statement: Arc::clone(statement), values, num_values: size, columns: Arc::clone(columns), diff --git a/sqlx-core/src/sqlite/statement/handle.rs b/sqlx-core/src/sqlite/statement/handle.rs index d1af117a7d..e796d48c5b 100644 --- a/sqlx-core/src/sqlite/statement/handle.rs +++ b/sqlx-core/src/sqlite/statement/handle.rs @@ -1,5 +1,6 @@ use std::ffi::c_void; use std::ffi::CStr; + use std::os::raw::{c_char, c_int}; use std::ptr; use std::ptr::NonNull; @@ -9,21 +10,22 @@ use std::str::{from_utf8, from_utf8_unchecked}; use libsqlite3_sys::{ sqlite3, sqlite3_bind_blob64, sqlite3_bind_double, sqlite3_bind_int, sqlite3_bind_int64, sqlite3_bind_null, sqlite3_bind_parameter_count, sqlite3_bind_parameter_name, - sqlite3_bind_text64, sqlite3_changes, sqlite3_column_blob, sqlite3_column_bytes, - sqlite3_column_count, sqlite3_column_database_name, sqlite3_column_decltype, - sqlite3_column_double, sqlite3_column_int, sqlite3_column_int64, sqlite3_column_name, - sqlite3_column_origin_name, sqlite3_column_table_name, sqlite3_column_type, - sqlite3_column_value, sqlite3_db_handle, sqlite3_reset, sqlite3_sql, sqlite3_stmt, - sqlite3_stmt_readonly, sqlite3_table_column_metadata, sqlite3_value, SQLITE_OK, - SQLITE_TRANSIENT, SQLITE_UTF8, + sqlite3_bind_text64, sqlite3_changes, sqlite3_clear_bindings, sqlite3_column_blob, + sqlite3_column_bytes, sqlite3_column_count, sqlite3_column_database_name, + sqlite3_column_decltype, sqlite3_column_double, sqlite3_column_int, 
sqlite3_column_int64, + sqlite3_column_name, sqlite3_column_origin_name, sqlite3_column_table_name, + sqlite3_column_type, sqlite3_column_value, sqlite3_db_handle, sqlite3_finalize, sqlite3_reset, + sqlite3_sql, sqlite3_step, sqlite3_stmt, sqlite3_stmt_readonly, sqlite3_table_column_metadata, + sqlite3_value, SQLITE_DONE, SQLITE_MISUSE, SQLITE_OK, SQLITE_ROW, SQLITE_TRANSIENT, + SQLITE_UTF8, }; use crate::error::{BoxDynError, Error}; use crate::sqlite::type_info::DataType; use crate::sqlite::{SqliteError, SqliteTypeInfo}; -#[derive(Debug, Copy, Clone)] -pub(crate) struct StatementHandle(pub(super) NonNull); +#[derive(Debug)] +pub(crate) struct StatementHandle(NonNull); // access to SQLite3 statement handles are safe to send and share between threads // as long as the `sqlite3_step` call is serialized. @@ -32,6 +34,14 @@ unsafe impl Send for StatementHandle {} unsafe impl Sync for StatementHandle {} impl StatementHandle { + pub(super) fn new(ptr: NonNull) -> Self { + Self(ptr) + } + + pub(crate) fn as_ptr(&self) -> *mut sqlite3_stmt { + self.0.as_ptr() + } + #[inline] pub(super) unsafe fn db_handle(&self) -> *mut sqlite3 { // O(c) access to the connection handle for this statement handle @@ -280,7 +290,25 @@ impl StatementHandle { Ok(from_utf8(self.column_blob(index))?) } - pub(crate) fn reset(&self) { - unsafe { sqlite3_reset(self.0.as_ptr()) }; + pub(crate) fn clear_bindings(&self) { + unsafe { sqlite3_clear_bindings(self.0.as_ptr()) }; + } +} +impl Drop for StatementHandle { + fn drop(&mut self) { + // SAFETY: we have exclusive access to the `StatementHandle` here + unsafe { + // https://sqlite.org/c3ref/finalize.html + let status = sqlite3_finalize(self.0.as_ptr()); + if status == SQLITE_MISUSE { + // Panic in case of detected misuse of SQLite API. + // + // sqlite3_finalize returns it at least in the + // case of detected double free, i.e. calling + // sqlite3_finalize on already finalized + // statement. 
+ panic!("Detected sqlite3_finalize misuse."); + } + } } } diff --git a/sqlx-core/src/sqlite/statement/virtual.rs b/sqlx-core/src/sqlite/statement/virtual.rs index 0063e06508..3da6d33d64 100644 --- a/sqlx-core/src/sqlite/statement/virtual.rs +++ b/sqlx-core/src/sqlite/statement/virtual.rs @@ -3,13 +3,12 @@ use crate::error::Error; use crate::ext::ustr::UStr; use crate::sqlite::connection::ConnectionHandle; -use crate::sqlite::statement::StatementHandle; +use crate::sqlite::statement::{StatementHandle, StatementWorker}; use crate::sqlite::{SqliteColumn, SqliteError, SqliteRow, SqliteValue}; use crate::HashMap; use bytes::{Buf, Bytes}; use libsqlite3_sys::{ - sqlite3, sqlite3_clear_bindings, sqlite3_finalize, sqlite3_prepare_v3, sqlite3_reset, - sqlite3_stmt, SQLITE_MISUSE, SQLITE_OK, SQLITE_PREPARE_PERSISTENT, + sqlite3, sqlite3_prepare_v3, sqlite3_stmt, SQLITE_OK, SQLITE_PREPARE_PERSISTENT, }; use smallvec::SmallVec; use std::i32; @@ -31,7 +30,7 @@ pub(crate) struct VirtualStatement { // underlying sqlite handles for each inner statement // a SQL query string in SQLite is broken up into N statements // we use a [`SmallVec`] to optimize for the most likely case of a single statement - pub(crate) handles: SmallVec<[StatementHandle; 1]>, + pub(crate) handles: SmallVec<[Arc; 1]>, // each set of columns pub(crate) columns: SmallVec<[Arc>; 1]>, @@ -92,7 +91,7 @@ fn prepare( query.advance(n); if let Some(handle) = NonNull::new(statement_handle) { - return Ok(Some(StatementHandle(handle))); + return Ok(Some(StatementHandle::new(handle))); } } @@ -126,7 +125,7 @@ impl VirtualStatement { conn: &mut ConnectionHandle, ) -> Result< Option<( - &StatementHandle, + &Arc, &mut Arc>, &Arc>, &mut Option>>, @@ -159,7 +158,7 @@ impl VirtualStatement { column_names.insert(name, i); } - self.handles.push(statement); + self.handles.push(Arc::new(statement)); self.columns.push(Arc::new(columns)); self.column_names.push(Arc::new(column_names)); self.last_row_values.push(None); @@ -177,20 
+176,20 @@ impl VirtualStatement { ))) } - pub(crate) fn reset(&mut self) { + pub(crate) async fn reset(&mut self, worker: &mut StatementWorker) -> Result<(), Error> { self.index = 0; for (i, handle) in self.handles.iter().enumerate() { SqliteRow::inflate_if_needed(&handle, &self.columns[i], self.last_row_values[i].take()); - unsafe { - // Reset A Prepared Statement Object - // https://www.sqlite.org/c3ref/reset.html - // https://www.sqlite.org/c3ref/clear_bindings.html - sqlite3_reset(handle.0.as_ptr()); - sqlite3_clear_bindings(handle.0.as_ptr()); - } + // Reset A Prepared Statement Object + // https://www.sqlite.org/c3ref/reset.html + // https://www.sqlite.org/c3ref/clear_bindings.html + worker.reset(handle).await?; + handle.clear_bindings(); } + + Ok(()) } } @@ -198,20 +197,6 @@ impl Drop for VirtualStatement { fn drop(&mut self) { for (i, handle) in self.handles.drain(..).enumerate() { SqliteRow::inflate_if_needed(&handle, &self.columns[i], self.last_row_values[i].take()); - - unsafe { - // https://sqlite.org/c3ref/finalize.html - let status = sqlite3_finalize(handle.0.as_ptr()); - if status == SQLITE_MISUSE { - // Panic in case of detected misuse of SQLite API. - // - // sqlite3_finalize returns it at least in the - // case of detected double free, i.e. calling - // sqlite3_finalize on already finalized - // statement. 
- panic!("Detected sqlite3_finalize misuse."); - } - } } } } diff --git a/sqlx-core/src/sqlite/statement/worker.rs b/sqlx-core/src/sqlite/statement/worker.rs index 8b1d229978..b5a7cf88ed 100644 --- a/sqlx-core/src/sqlite/statement/worker.rs +++ b/sqlx-core/src/sqlite/statement/worker.rs @@ -3,9 +3,14 @@ use crate::sqlite::statement::StatementHandle; use crossbeam_channel::{unbounded, Sender}; use either::Either; use futures_channel::oneshot; -use libsqlite3_sys::{sqlite3_step, SQLITE_DONE, SQLITE_ROW}; +use std::sync::{Arc, Weak}; use std::thread; +use crate::sqlite::connection::ConnectionHandleRef; + +use libsqlite3_sys::{sqlite3_reset, sqlite3_step, SQLITE_DONE, SQLITE_ROW}; +use std::future::Future; + // Each SQLite connection has a dedicated thread. // TODO: Tweak this so that we can use a thread pool per pool of SQLite3 connections to reduce @@ -18,31 +23,60 @@ pub(crate) struct StatementWorker { enum StatementWorkerCommand { Step { - statement: StatementHandle, + statement: Weak, tx: oneshot::Sender, Error>>, }, + Reset { + statement: Weak, + tx: oneshot::Sender<()>, + }, } impl StatementWorker { - pub(crate) fn new() -> Self { + pub(crate) fn new(conn: ConnectionHandleRef) -> Self { let (tx, rx) = unbounded(); thread::spawn(move || { for cmd in rx { match cmd { StatementWorkerCommand::Step { statement, tx } => { - let status = unsafe { sqlite3_step(statement.0.as_ptr()) }; + let statement = if let Some(statement) = statement.upgrade() { + statement + } else { + // statement is already finalized, the sender shouldn't be expecting a response + continue; + }; - let resp = match status { + // SAFETY: only the `StatementWorker` calls this function + let status = unsafe { sqlite3_step(statement.as_ptr()) }; + let result = match status { SQLITE_ROW => Ok(Either::Right(())), SQLITE_DONE => Ok(Either::Left(statement.changes())), _ => Err(statement.last_error().into()), }; - let _ = tx.send(resp); + let _ = tx.send(result); + } + StatementWorkerCommand::Reset { 
statement, tx } => { + if let Some(statement) = statement.upgrade() { + // SAFETY: this must be the only place we call `sqlite3_reset` + unsafe { sqlite3_reset(statement.as_ptr()) }; + + // `sqlite3_reset()` always returns either `SQLITE_OK` + // or the last error code for the statement, + // which should have already been handled; + // so it's assumed the return value is safe to ignore. + // + // https://www.sqlite.org/c3ref/reset.html + + let _ = tx.send(()); + } } } } + + // SAFETY: we need to make sure a strong ref to `conn` always outlives anything in `rx` + drop(conn); }); Self { tx } @@ -50,14 +84,47 @@ impl StatementWorker { pub(crate) async fn step( &mut self, - statement: StatementHandle, + statement: &Arc, ) -> Result, Error> { let (tx, rx) = oneshot::channel(); self.tx - .send(StatementWorkerCommand::Step { statement, tx }) + .send(StatementWorkerCommand::Step { + statement: Arc::downgrade(statement), + tx, + }) .map_err(|_| Error::WorkerCrashed)?; rx.await.map_err(|_| Error::WorkerCrashed)? } + + /// Send a command to the worker to execute `sqlite3_reset()` next. + /// + /// This method is written to execute the sending of the command eagerly so + /// you do not need to await the returned future unless you want to. + /// + /// The only error is `WorkerCrashed` as `sqlite3_reset()` returns the last error + /// in the statement execution which should have already been handled from `step()`. 
+ pub(crate) fn reset( + &mut self, + statement: &Arc, + ) -> impl Future> { + // execute the sending eagerly so we don't need to spawn the future + let (tx, rx) = oneshot::channel(); + + let send_res = self + .tx + .send(StatementWorkerCommand::Reset { + statement: Arc::downgrade(statement), + tx, + }) + .map_err(|_| Error::WorkerCrashed); + + async move { + send_res?; + + // wait for the response + rx.await.map_err(|_| Error::WorkerCrashed) + } + } } diff --git a/tests/sqlite/sqlite.db b/tests/sqlite/sqlite.db index d693511079064edcd5e0a6f55efedafe17ab855b..a3d8d5cc2964ec37bc568d4e00340395edd8a5d2 100644 GIT binary patch delta 76 zcmZozz|^pSX# anyhow::Result<()> { Ok(()) } + +// https://github.com/launchbadge/sqlx/issues/1300 +#[sqlx_macros::test] +async fn concurrent_resets_dont_segfault() { + use sqlx::{sqlite::SqliteConnectOptions, ConnectOptions}; + use std::{str::FromStr, time::Duration}; + + let mut conn = SqliteConnectOptions::from_str(":memory:") + .unwrap() + .connect() + .await + .unwrap(); + + sqlx::query("CREATE TABLE stuff (name INTEGER, value INTEGER)") + .execute(&mut conn) + .await + .unwrap(); + + sqlx_rt::spawn(async move { + for i in 0..1000 { + sqlx::query("INSERT INTO stuff (name, value) VALUES (?, ?)") + .bind(i) + .bind(0) + .execute(&mut conn) + .await + .unwrap(); + } + }); + + sqlx_rt::sleep(Duration::from_millis(1)).await; +} From e77219f7c70c520f452de217d12bda6dbb35f5fa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafael=20Eppl=C3=A9e?= Date: Tue, 17 Aug 2021 01:33:42 +0200 Subject: [PATCH 24/28] Add docs for fetch_all, example for postgres transactions (#1255) * reference fetch_all() in query macros * add example for using transactions in postgres --- Cargo.lock | 9 +++++ Cargo.toml | 1 + examples/postgres/transaction/Cargo.toml | 10 +++++ examples/postgres/transaction/README.md | 18 +++++++++ .../migrations/20200718111257_todos.sql | 6 +++ examples/postgres/transaction/src/main.rs | 37 +++++++++++++++++++ src/macros.rs | 2 + 7 files 
changed, 83 insertions(+) create mode 100644 examples/postgres/transaction/Cargo.toml create mode 100644 examples/postgres/transaction/README.md create mode 100644 examples/postgres/transaction/migrations/20200718111257_todos.sql create mode 100644 examples/postgres/transaction/src/main.rs diff --git a/Cargo.lock b/Cargo.lock index fc90d79c79..03c97fa1e7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2458,6 +2458,15 @@ dependencies = [ "structopt", ] +[[package]] +name = "sqlx-example-postgres-transaction" +version = "0.1.0" +dependencies = [ + "async-std", + "futures", + "sqlx", +] + [[package]] name = "sqlx-example-sqlite-todos" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index 8db3a7c021..f5928be8b4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,6 +12,7 @@ members = [ "examples/postgres/listen", "examples/postgres/todos", "examples/postgres/mockable-todos", + "examples/postgres/transaction", "examples/sqlite/todos", ] diff --git a/examples/postgres/transaction/Cargo.toml b/examples/postgres/transaction/Cargo.toml new file mode 100644 index 0000000000..a51d4d37f8 --- /dev/null +++ b/examples/postgres/transaction/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "sqlx-example-postgres-transaction" +version = "0.1.0" +edition = "2018" +workspace = "../../../" + +[dependencies] +async-std = { version = "1.8.0", features = [ "attributes", "unstable" ] } +sqlx = { path = "../../../", features = [ "postgres", "tls", "runtime-async-std-native-tls" ] } +futures = "0.3.1" diff --git a/examples/postgres/transaction/README.md b/examples/postgres/transaction/README.md new file mode 100644 index 0000000000..2cfc1907c3 --- /dev/null +++ b/examples/postgres/transaction/README.md @@ -0,0 +1,18 @@ +# Postgres Transaction Example + +A simple example demonstrating how to obtain and roll back a transaction with postgres. + +## Usage + +Declare the database URL. This example does not include any reading or writing of data. 
+ +``` +export DATABASE_URL="postgres://postgres@localhost/postgres" +``` + +Run. + +``` +cargo run +``` + diff --git a/examples/postgres/transaction/migrations/20200718111257_todos.sql b/examples/postgres/transaction/migrations/20200718111257_todos.sql new file mode 100644 index 0000000000..6599f8c10a --- /dev/null +++ b/examples/postgres/transaction/migrations/20200718111257_todos.sql @@ -0,0 +1,6 @@ +CREATE TABLE IF NOT EXISTS todos +( + id BIGSERIAL PRIMARY KEY, + description TEXT NOT NULL, + done BOOLEAN NOT NULL DEFAULT FALSE +); diff --git a/examples/postgres/transaction/src/main.rs b/examples/postgres/transaction/src/main.rs new file mode 100644 index 0000000000..50539fe2ac --- /dev/null +++ b/examples/postgres/transaction/src/main.rs @@ -0,0 +1,37 @@ +use sqlx::query; + +#[async_std::main] +async fn main() -> Result<(), Box> { + let conn_str = + std::env::var("DATABASE_URL").expect("Env var DATABASE_URL is required for this example."); + let pool = sqlx::PgPool::connect(&conn_str).await?; + + let mut transaction = pool.begin().await?; + + let test_id = 1; + query!( + r#"INSERT INTO todos (id, description) + VALUES ( $1, $2 ) + "#, + test_id, + "test todo" + ) + .execute(&mut transaction) + .await?; + + // check that inserted todo can be fetched + let _ = query!(r#"SELECT FROM todos WHERE id = $1"#, test_id) + .fetch_one(&mut transaction) + .await?; + + transaction.rollback(); + + // check that inserted todo is now gone + let inserted_todo = query!(r#"SELECT FROM todos WHERE id = $1"#, test_id) + .fetch_one(&pool) + .await; + + assert!(inserted_todo.is_err()); + + Ok(()) +} diff --git a/src/macros.rs b/src/macros.rs index 78264f3573..c1dceaba1b 100644 --- a/src/macros.rs +++ b/src/macros.rs @@ -36,6 +36,7 @@ /// | Zero or One | `.fetch_optional(...).await`| `sqlx::Result>` | Extra rows are ignored. | /// | Exactly One | `.fetch_one(...).await` | `sqlx::Result<{adhoc struct}>` | Errors if no rows were returned. Extra rows are ignored. 
Aggregate queries, use this. | /// | At Least One | `.fetch(...)` | `impl Stream>` | Call `.try_next().await` to get each row result. | +/// | Multiple | `.fetch_all(...)` | `sqlx::Result>` | | /// /// \* All methods accept one of `&mut {connection type}`, `&mut Transaction` or `&Pool`. /// † Only callable if the query returns no columns; otherwise it's assumed the query *may* return at least one row. @@ -459,6 +460,7 @@ macro_rules! query_file_unchecked ( /// | Zero or One | `.fetch_optional(...).await`| `sqlx::Result>` | Extra rows are ignored. | /// | Exactly One | `.fetch_one(...).await` | `sqlx::Result` | Errors if no rows were returned. Extra rows are ignored. Aggregate queries, use this. | /// | At Least One | `.fetch(...)` | `impl Stream>` | Call `.try_next().await` to get each row result. | +/// | Multiple | `.fetch_all(...)` | `sqlx::Result>` | | /// /// \* All methods accept one of `&mut {connection type}`, `&mut Transaction` or `&Pool`. /// (`.execute()` is omitted as this macro requires at least one column to be returned.) 
From e7c3610e85c3574b1a58e4ca753cdd28e73c5948 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fredrik=20=C3=98strem?= <1686349+frxstrem@users.noreply.github.com> Date: Tue, 17 Aug 2021 01:36:18 +0200 Subject: [PATCH 25/28] fix: Ignore __CARGO_FIX_PLZ when running "cargo metadata" in macro (#1352) --- sqlx-macros/src/query/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/sqlx-macros/src/query/mod.rs b/sqlx-macros/src/query/mod.rs index a2f17874b8..4e2ba900bb 100644 --- a/sqlx-macros/src/query/mod.rs +++ b/sqlx-macros/src/query/mod.rs @@ -81,6 +81,7 @@ static METADATA: Lazy = Lazy::new(|| { let output = Command::new(&cargo) .args(&["metadata", "--format-version=1"]) .current_dir(&manifest_dir) + .env_remove("__CARGO_FIX_PLZ") .output() .expect("Could not fetch metadata"); From dd27aa05870e71c642116809d14773a7b06c354c Mon Sep 17 00:00:00 2001 From: Atkins Date: Tue, 17 Aug 2021 07:50:11 +0800 Subject: [PATCH 26/28] Fix bug for PostgreSQL if the statement has type holes (#1363) * fix: wait until ready after executing other helper queries while pg quering is executing Signed-off-by: Atkins Chang * fix: use tls parameter for testing target Signed-off-by: Atkins Chang --- sqlx-core/src/postgres/connection/executor.rs | 4 ++ tests/docker-compose.yml | 2 + tests/postgres/postgres.rs | 37 +++++++++++++++++++ tests/x.py | 12 +++--- 4 files changed, 49 insertions(+), 6 deletions(-) diff --git a/sqlx-core/src/postgres/connection/executor.rs b/sqlx-core/src/postgres/connection/executor.rs index ee2ab76802..a1e27a6a8b 100644 --- a/sqlx-core/src/postgres/connection/executor.rs +++ b/sqlx-core/src/postgres/connection/executor.rs @@ -218,6 +218,10 @@ impl PgConnection { // patch holes created during encoding arguments.apply_patches(self, &metadata.parameters).await?; + // apply patches use fetch_optional thaht may produce `PortalSuspended` message, + // consume messages til `ReadyForQuery` before bind and execute + self.wait_until_ready().await?; + // bind to attach the arguments 
to the statement and create a portal self.stream.write(Bind { portal: None, diff --git a/tests/docker-compose.yml b/tests/docker-compose.yml index 30203b17e7..6aafbffcc8 100644 --- a/tests/docker-compose.yml +++ b/tests/docker-compose.yml @@ -235,6 +235,8 @@ services: dockerfile: mssql/mssql-2017.dockerfile args: VERSION: 2017-latest + ports: + - 1433 environment: ACCEPT_EULA: "Y" SA_PASSWORD: Password123! diff --git a/tests/postgres/postgres.rs b/tests/postgres/postgres.rs index 4f71cde7c2..a88c0085e7 100644 --- a/tests/postgres/postgres.rs +++ b/tests/postgres/postgres.rs @@ -1104,3 +1104,40 @@ async fn test_pg_server_num() -> anyhow::Result<()> { Ok(()) } + +#[sqlx_macros::test] + +async fn test_issue_1254() -> anyhow::Result<()> { + #[derive(sqlx::Type)] + #[sqlx(type_name = "pair")] + struct Pair { + one: i32, + two: i32, + } + + // array for custom type is not supported, use wrapper + #[derive(sqlx::Type)] + #[sqlx(type_name = "_pair")] + struct Pairs(Vec); + + let mut conn = new::().await?; + conn.execute( + " +DROP TABLE IF EXISTS issue_1254; +DROP TYPE IF EXISTS pair; + +CREATE TYPE pair AS (one INT4, two INT4); +CREATE TABLE issue_1254 (id INT4 PRIMARY KEY, pairs PAIR[]); +", + ) + .await?; + + let result = sqlx::query("INSERT INTO issue_1254 VALUES($1, $2)") + .bind(0) + .bind(Pairs(vec![Pair { one: 94, two: 87 }])) + .execute(&mut conn) + .await?; + assert_eq!(result.rows_affected(), 1); + + Ok(()) +} diff --git a/tests/x.py b/tests/x.py index ed1c7f512a..3fd77e8892 100755 --- a/tests/x.py +++ b/tests/x.py @@ -120,7 +120,7 @@ def run(command, comment=None, env=None, service=None, tag=None, args=None, data # run( - f"cargo test --no-default-features --features macros,offline,any,all-types,sqlite,runtime-{runtime}-native-tls", + f"cargo test --no-default-features --features macros,offline,any,all-types,sqlite,runtime-{runtime}-{tls}", comment=f"test sqlite", service="sqlite", tag=f"sqlite" if runtime == "async-std" else f"sqlite_{runtime}", @@ -132,7 
+132,7 @@ def run(command, comment=None, env=None, service=None, tag=None, args=None, data for version in ["13", "12", "11", "10", "9_6"]: run( - f"cargo test --no-default-features --features macros,offline,any,all-types,postgres,runtime-{runtime}-native-tls", + f"cargo test --no-default-features --features macros,offline,any,all-types,postgres,runtime-{runtime}-{tls}", comment=f"test postgres {version}", service=f"postgres_{version}", tag=f"postgres_{version}" if runtime == "async-std" else f"postgres_{version}_{runtime}", @@ -141,7 +141,7 @@ def run(command, comment=None, env=None, service=None, tag=None, args=None, data ## +ssl for version in ["13", "12", "11", "10", "9_6"]: run( - f"cargo test --no-default-features --features macros,offline,any,all-types,postgres,runtime-{runtime}-native-tls", + f"cargo test --no-default-features --features macros,offline,any,all-types,postgres,runtime-{runtime}-{tls}", comment=f"test postgres {version} ssl", database_url_args="sslmode=verify-ca&sslrootcert=.%2Ftests%2Fcerts%2Fca.crt", service=f"postgres_{version}", @@ -154,7 +154,7 @@ def run(command, comment=None, env=None, service=None, tag=None, args=None, data for version in ["8", "5_7", "5_6"]: run( - f"cargo test --no-default-features --features macros,offline,any,all-types,mysql,runtime-{runtime}-native-tls", + f"cargo test --no-default-features --features macros,offline,any,all-types,mysql,runtime-{runtime}-{tls}", comment=f"test mysql {version}", service=f"mysql_{version}", tag=f"mysql_{version}" if runtime == "async-std" else f"mysql_{version}_{runtime}", @@ -166,7 +166,7 @@ def run(command, comment=None, env=None, service=None, tag=None, args=None, data for version in ["10_6", "10_5", "10_4", "10_3", "10_2"]: run( - f"cargo test --no-default-features --features macros,offline,any,all-types,mysql,runtime-{runtime}-native-tls", + f"cargo test --no-default-features --features macros,offline,any,all-types,mysql,runtime-{runtime}-{tls}", comment=f"test mariadb 
{version}", service=f"mariadb_{version}", tag=f"mariadb_{version}" if runtime == "async-std" else f"mariadb_{version}_{runtime}", @@ -178,7 +178,7 @@ def run(command, comment=None, env=None, service=None, tag=None, args=None, data for version in ["2019", "2017"]: run( - f"cargo test --no-default-features --features macros,offline,any,all-types,mssql,runtime-{runtime}-native-tls", + f"cargo test --no-default-features --features macros,offline,any,all-types,mssql,runtime-{runtime}-{tls}", comment=f"test mssql {version}", service=f"mssql_{version}", tag=f"mssql_{version}" if runtime == "async-std" else f"mssql_{version}_{runtime}", From 38435ca64766ea43a1d5badf72bc057b841d2c0a Mon Sep 17 00:00:00 2001 From: Austin Bonander Date: Mon, 16 Aug 2021 16:51:31 -0700 Subject: [PATCH 27/28] fix(cli): pin `clap_derive` version (#1381) When `clap_derive 3.0.0-beta.4` released, new invocations of `cargo install sqlx-cli` would try to compile that against `clap 3.0.0-beta.2` which caused some breakages. Until `clap 3.0.0` proper is released, we need to pin both versions to insure against potential breakages from automatic upgrades. 
closes #1378 --- Cargo.lock | 9 +++++---- sqlx-cli/Cargo.toml | 4 ++++ 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 03c97fa1e7..d7426ac7f8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1808,9 +1808,9 @@ checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" [[package]] name = "proc-macro2" -version = "1.0.27" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0d8caf72986c1a598726adc988bb5984792ef84f5ee5aa50209145ee8077038" +checksum = "5c7ed8b8c7b886ea3ed7dde405212185f423ab44682667c8c6dd14aa1d9f6612" dependencies = [ "unicode-xid", ] @@ -2329,6 +2329,7 @@ dependencies = [ "async-trait", "chrono", "clap 3.0.0-beta.2", + "clap_derive", "console", "dialoguer", "dotenv", @@ -2646,9 +2647,9 @@ checksum = "1e81da0851ada1f3e9d4312c704aa4f8806f0f9d69faaf8df2f3464b4a9437c2" [[package]] name = "syn" -version = "1.0.72" +version = "1.0.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1e8cdbefb79a9a5a65e0db8b47b723ee907b7c7f8496c76a1770b5c310bab82" +checksum = "1873d832550d4588c3dbc20f01361ab00bfe741048f71e3fecf145a7cc18b29c" dependencies = [ "proc-macro2", "quote", diff --git a/sqlx-cli/Cargo.toml b/sqlx-cli/Cargo.toml index 0c0d92a0bb..82233be9e1 100644 --- a/sqlx-cli/Cargo.toml +++ b/sqlx-cli/Cargo.toml @@ -34,7 +34,11 @@ sqlx = { version = "0.5.5", path = "..", default-features = false, features = [ "offline", ] } futures = "0.3" +# FIXME: we need to fix both of these versions until Clap 3.0 proper is released, then we can drop `clap_derive` +# https://github.com/launchbadge/sqlx/issues/1378 +# https://github.com/clap-rs/clap/issues/2705 clap = "=3.0.0-beta.2" +clap_derive = "=3.0.0-beta.2" chrono = "0.4" anyhow = "1.0" url = { version = "2.1.1", default-features = false } From b9af2c00c32879058a1ffa603670c3a6a3538da2 Mon Sep 17 00:00:00 2001 From: altanozlu Date: Tue, 25 May 2021 17:50:42 +0300 Subject: [PATCH 
28/28] parse parameter status and fix crdb error in query! --- sqlx-core/src/postgres/connection/describe.rs | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/sqlx-core/src/postgres/connection/describe.rs b/sqlx-core/src/postgres/connection/describe.rs index 097058cef2..6d7a3f7dc0 100644 --- a/sqlx-core/src/postgres/connection/describe.rs +++ b/sqlx-core/src/postgres/connection/describe.rs @@ -399,13 +399,16 @@ SELECT oid FROM pg_catalog.pg_type WHERE typname ILIKE $1 .fetch_all(&mut *self) .await?; - // patch up our null inference with data from EXPLAIN - let nullable_patch = self - .nullables_from_explain(stmt_id, meta.parameters.len()) - .await?; + // if it's cockroachdb skip this step #1248 + if !self.stream.parameter_statuses.contains_key("crdb_version") { + // patch up our null inference with data from EXPLAIN + let nullable_patch = self + .nullables_from_explain(stmt_id, meta.parameters.len()) + .await?; - for (nullable, patch) in nullables.iter_mut().zip(nullable_patch) { - *nullable = patch.or(*nullable); + for (nullable, patch) in nullables.iter_mut().zip(nullable_patch) { + *nullable = patch.or(*nullable); + } } Ok(nullables)