From 5f3ceee5293a0eda0282b6057bc1c4fa7b4dbd56 Mon Sep 17 00:00:00 2001 From: Aaron Kelbsch Date: Fri, 2 Aug 2024 10:42:49 +0200 Subject: [PATCH 1/3] feat: allow custom db_url_env to be passed through the expand_query macro --- sqlx-macros-core/src/query/input.rs | 7 +++ sqlx-macros-core/src/query/mod.rs | 93 ++++++++++++++++++----------- 2 files changed, 65 insertions(+), 35 deletions(-) diff --git a/sqlx-macros-core/src/query/input.rs b/sqlx-macros-core/src/query/input.rs index 63e35ec77d..4fc8b944b7 100644 --- a/sqlx-macros-core/src/query/input.rs +++ b/sqlx-macros-core/src/query/input.rs @@ -19,6 +19,8 @@ pub struct QueryMacroInput { pub(super) checked: bool, pub(super) file_path: Option, + + pub(super) db_url_env: Option } enum QuerySrc { @@ -38,6 +40,7 @@ impl Parse for QueryMacroInput { let mut args: Option> = None; let mut record_type = RecordType::Generated; let mut checked = true; + let mut db_url_env = None; let mut expect_comma = false; @@ -82,6 +85,9 @@ impl Parse for QueryMacroInput { } else if key == "checked" { let lit_bool = input.parse::()?; checked = lit_bool.value; + } else if key == "db_url_env" { + let lit_str = input.parse::()?; + db_url_env = Some(lit_str.value()); } else { let message = format!("unexpected input key: {key}"); return Err(syn::Error::new_spanned(key, message)); @@ -104,6 +110,7 @@ impl Parse for QueryMacroInput { arg_exprs, checked, file_path, + db_url_env }) } } diff --git a/sqlx-macros-core/src/query/mod.rs b/sqlx-macros-core/src/query/mod.rs index 1536eebaa1..ce72a0c1bc 100644 --- a/sqlx-macros-core/src/query/mod.rs +++ b/sqlx-macros-core/src/query/mod.rs @@ -1,3 +1,4 @@ +use std::collections::HashMap; use std::path::PathBuf; use std::sync::{Arc, Mutex}; use std::{fs, io}; @@ -72,8 +73,9 @@ struct Metadata { #[allow(unused)] manifest_dir: PathBuf, offline: bool, - database_url: Option, + default_database_url: Option, workspace_root: Arc>>, + env_cache: HashMap } impl Metadata { @@ -139,13 +141,16 @@ static METADATA: Lazy = 
Lazy::new(|| { .map(|s| s.eq_ignore_ascii_case("true") || s == "1") .unwrap_or(false); - let database_url = env("DATABASE_URL").ok(); + let env_cache = HashMap::from_iter(dotenvy::vars()); + + let default_database_url = env("DATABASE_URL").ok(); Metadata { manifest_dir, offline, - database_url, + default_database_url, workspace_root: Arc::new(Mutex::new(None)), + env_cache } }); @@ -153,40 +158,58 @@ pub fn expand_input<'a>( input: QueryMacroInput, drivers: impl IntoIterator, ) -> crate::Result { - let data_source = match &*METADATA { - Metadata { - offline: false, - database_url: Some(db_url), - .. - } => QueryDataSource::live(db_url)?, - - Metadata { offline, .. } => { - // Try load the cached query metadata file. - let filename = format!("query-{}.json", hash_string(&input.sql)); - - // Check SQLX_OFFLINE_DIR, then local .sqlx, then workspace .sqlx. - let dirs = [ - || env("SQLX_OFFLINE_DIR").ok().map(PathBuf::from), - || Some(METADATA.manifest_dir.join(".sqlx")), - || Some(METADATA.workspace_root().join(".sqlx")), - ]; - let Some(data_file_path) = dirs - .iter() - .filter_map(|path| path()) - .map(|path| path.join(&filename)) - .find(|path| path.exists()) - else { - return Err( - if *offline { - "`SQLX_OFFLINE=true` but there is no cached data for this query, run `cargo sqlx prepare` to update the query cache or unset `SQLX_OFFLINE`" - } else { - "set `DATABASE_URL` to use query macros online, or run `cargo sqlx prepare` to update the query cache" - }.into() - ); - }; - QueryDataSource::Cached(DynQueryData::from_data_file(&data_file_path, &input.sql)?) + + // If we don't require the query to be offline, check if we have a valid online datasource url + let online_data_source: Option = if METADATA.offline == false { + if let Some(ref custom_env) = input.db_url_env { + // Get the custom db url environment + METADATA.env_cache.get(custom_env) + .map(|custom_db_url| QueryDataSource::live(custom_db_url)) + .transpose()? 
+ } else if let Some(default_database_url) = &METADATA.default_database_url { + // Get the default db url env + Some(QueryDataSource::live(default_database_url)?) + } else { + None } + } else { + None + }; + + + let data_source = if let Some(data_source) = online_data_source { + data_source + } else { + // If we don't have a live source, try load the cached query metadata file. + let filename = format!("query-{}.json", hash_string(&input.sql)); + + // Check SQLX_OFFLINE_DIR, then local .sqlx, then workspace .sqlx. + let dirs = [ + || env("SQLX_OFFLINE_DIR").ok().map(PathBuf::from), + || Some(METADATA.manifest_dir.join(".sqlx")), + || Some(METADATA.workspace_root().join(".sqlx")), + ]; + let Some(data_file_path) = dirs + .iter() + .filter_map(|path| path()) + .map(|path| path.join(&filename)) + .find(|path| path.exists()) + else { + return Err( + if METADATA.offline { + "`SQLX_OFFLINE=true` but there is no cached data for this query, run `cargo sqlx prepare` to update the query cache or unset `SQLX_OFFLINE`".to_string() + } else { + if let Some(custom_env) = input.db_url_env { + format!("set custom env `{:?}` to use query macros online, or run `cargo sqlx prepare` to update the query cache", custom_env) + } else { + "set `DATABASE_URL` to use query macros online, or run `cargo sqlx prepare` to update the query cache".to_string() + } + }.into() + ); + }; + + QueryDataSource::Cached(DynQueryData::from_data_file(&data_file_path, &input.sql)?) 
}; for driver in drivers { From f42774b112d84c4b58a84f0f2762d37c4461820c Mon Sep 17 00:00:00 2001 From: Aaron Kelbsch Date: Fri, 2 Aug 2024 13:34:59 +0200 Subject: [PATCH 2/3] feat: made sqlx prepare use custom env as subfolder for cache --- sqlx-macros-core/src/query/mod.rs | 45 ++++++++++++++++++++++++------- 1 file changed, 35 insertions(+), 10 deletions(-) diff --git a/sqlx-macros-core/src/query/mod.rs b/sqlx-macros-core/src/query/mod.rs index ce72a0c1bc..0325aa2b77 100644 --- a/sqlx-macros-core/src/query/mod.rs +++ b/sqlx-macros-core/src/query/mod.rs @@ -75,7 +75,7 @@ struct Metadata { offline: bool, default_database_url: Option, workspace_root: Arc>>, - env_cache: HashMap + env_cache: HashMap, } impl Metadata { @@ -150,7 +150,7 @@ static METADATA: Lazy = Lazy::new(|| { offline, default_database_url, workspace_root: Arc::new(Mutex::new(None)), - env_cache + env_cache, } }); @@ -158,13 +158,13 @@ pub fn expand_input<'a>( input: QueryMacroInput, drivers: impl IntoIterator, ) -> crate::Result { - - // If we don't require the query to be offline, check if we have a valid online datasource url let online_data_source: Option = if METADATA.offline == false { if let Some(ref custom_env) = input.db_url_env { // Get the custom db url environment - METADATA.env_cache.get(custom_env) + METADATA + .env_cache + .get(custom_env) .map(|custom_db_url| QueryDataSource::live(custom_db_url)) .transpose()? 
} else if let Some(default_database_url) = &METADATA.default_database_url { @@ -176,8 +176,7 @@ pub fn expand_input<'a>( } else { None }; - - + let data_source = if let Some(data_source) = online_data_source { data_source } else { @@ -193,7 +192,13 @@ pub fn expand_input<'a>( let Some(data_file_path) = dirs .iter() .filter_map(|path| path()) - .map(|path| path.join(&filename)) + .map(|path| { + if let Some(ref custom_env) = input.db_url_env { + path.join(custom_env).join(&filename) + } else { + path.join(&filename) + } + }) .find(|path| path.exists()) else { return Err( @@ -387,8 +392,28 @@ where .into()); } - // .sqlx exists and is a directory, store data. - data.save_in(path)?; + if let Some(custom_db_env) = input.db_url_env { + let full_path: PathBuf = path.join(custom_db_env); + + match fs::create_dir(&full_path) { + Ok(_) => {} + Err(err) => { + match err.kind() { + std::io::ErrorKind::AlreadyExists => {} + _ => return Err(format!( + "Failed to create offline cache path {full_path:?}: {err}" + ) + .into()), + } + } + }; + + // created subfolder if not exists, store data. + data.save_in(full_path)?; + } else { + // .sqlx exists and is a directory, store data. 
+ data.save_in(path)?; + } } } } From b86c67968b3a87749620dfad92017ce910ccadae Mon Sep 17 00:00:00 2001 From: Aaron Kelbsch Date: Fri, 2 Aug 2024 16:11:44 +0200 Subject: [PATCH 3/3] feat: added procmacro to generate query macros with custom env --- sqlx-macros-core/src/database_macro/expand.rs | 752 ++++++++++++++++++ sqlx-macros-core/src/database_macro/input.rs | 39 + sqlx-macros-core/src/database_macro/mod.rs | 5 + sqlx-macros-core/src/lib.rs | 2 + sqlx-macros/src/lib.rs | 7 + 5 files changed, 805 insertions(+) create mode 100644 sqlx-macros-core/src/database_macro/expand.rs create mode 100644 sqlx-macros-core/src/database_macro/input.rs create mode 100644 sqlx-macros-core/src/database_macro/mod.rs diff --git a/sqlx-macros-core/src/database_macro/expand.rs b/sqlx-macros-core/src/database_macro/expand.rs new file mode 100644 index 0000000000..0bcb37e7f1 --- /dev/null +++ b/sqlx-macros-core/src/database_macro/expand.rs @@ -0,0 +1,752 @@ +use proc_macro2::TokenStream; +use quote::quote; + +use super::DatabaseMacroInput; + +pub fn expand_database_macros(input: DatabaseMacroInput) -> TokenStream { + let env = input.env; + + let expanded = quote! { + /// Statically checked SQL query with `println!()` style syntax. 
+ /// + /// This expands to an instance of [`query::Map`][crate::query::Map] that outputs an ad-hoc anonymous + /// struct type, if the query has at least one output column that is not `Void`, or `()` (unit) otherwise: + /// + /// ```rust,ignore + /// # use sqlx::Connect; + /// # #[cfg(all(feature = "mysql", feature = "_rt-async-std"))] + /// # #[async_std::main] + /// # async fn main() -> sqlx::Result<()>{ + /// # let db_url = dotenvy::var("DATABASE_URL").expect("DATABASE_URL must be set"); + /// # + /// # if !(db_url.starts_with("mysql") || db_url.starts_with("mariadb")) { return Ok(()) } + /// # let mut conn = sqlx::MySqlConnection::connect(db_url).await?; + /// // let mut conn = ; + /// let account = sqlx::query!("select (1) as id, 'Herp Derpinson' as name") + /// .fetch_one(&mut conn) + /// .await?; + /// + /// // anonymous struct has `#[derive(Debug)]` for convenience + /// println!("{account:?}"); + /// println!("{}: {}", account.id, account.name); + /// + /// # Ok(()) + /// # } + /// # + /// # #[cfg(any(not(feature = "mysql"), not(feature = "_rt-async-std")))] + /// # fn main() {} + /// ``` + /// + /// The output columns will be mapped to their corresponding Rust types. + /// See the documentation for your database for details: + /// + /// * Postgres: [crate::postgres::types] + /// * MySQL: [crate::mysql::types] + /// * Note: due to wire protocol limitations, the query macros do not know when + /// a column should be decoded as `bool`. It will be inferred to be `i8` instead. + /// See the link above for details. + /// * SQLite: [crate::sqlite::types] + /// + /// **The method you want to call on the result depends on how many rows you're expecting.** + /// + /// | Number of Rows | Method to Call* | Returns | Notes | + /// |----------------| ----------------------------|-----------------------------------------------------|-------| + /// | None† | `.execute(...).await` | `sqlx::Result` | For `INSERT`/`UPDATE`/`DELETE` without `RETURNING`. 
| + /// | Zero or One | `.fetch_optional(...).await`| `sqlx::Result>` | Extra rows are ignored. | + /// | Exactly One | `.fetch_one(...).await` | `sqlx::Result<{adhoc struct}>` | Errors if no rows were returned. Extra rows are ignored. Aggregate queries, use this. | + /// | At Least One | `.fetch(...)` | `impl Stream>` | Call `.try_next().await` to get each row result. | + /// | Multiple | `.fetch_all(...)` | `sqlx::Result>` | | + /// + /// \* All methods accept one of `&mut {connection type}`, `&mut Transaction` or `&Pool`. + /// † Only callable if the query returns no columns; otherwise it's assumed the query *may* return at least one row. + /// ## Requirements + /// * The `DATABASE_URL` environment variable must be set at build-time to point to a database + /// server with the schema that the query string will be checked against. All variants of `query!()` + /// use [dotenv]1 so this can be in a `.env` file instead. + /// + /// * Or, `.sqlx` must exist at the workspace root. See [Offline Mode](#offline-mode-requires-the-offline-feature) + /// below. + /// + /// * The query must be a string literal, or concatenation of string literals using `+` (useful + /// for queries generated by macro), or else it cannot be introspected (and thus cannot be dynamic + /// or the result of another macro). + /// + /// * The `QueryAs` instance will be bound to the same database type as `query!()` was compiled + /// against (e.g. you cannot build against a Postgres database and then run the query against + /// a MySQL database). + /// + /// * The schema of the database URL (e.g. `postgres://` or `mysql://`) will be used to + /// determine the database type. + /// + /// 1 The `dotenv` crate itself appears abandoned as of [December 2021](https://github.com/dotenv-rs/dotenv/issues/74) + /// so we now use the [dotenvy] crate instead. The file format is the same. 
+ /// + /// [dotenv]: https://crates.io/crates/dotenv + /// [dotenvy]: https://crates.io/crates/dotenvy + /// ## Query Arguments + /// Like `println!()` and the other formatting macros, you can add bind parameters to your SQL + /// and this macro will typecheck passed arguments and error on missing ones: + /// + /// ```rust,ignore + /// # use sqlx::Connect; + /// # #[cfg(all(feature = "mysql", feature = "_rt-async-std"))] + /// # #[async_std::main] + /// # async fn main() -> sqlx::Result<()>{ + /// # let db_url = dotenvy::var("DATABASE_URL").expect("DATABASE_URL must be set"); + /// # + /// # if !(db_url.starts_with("mysql") || db_url.starts_with("mariadb")) { return Ok(()) } + /// # let mut conn = sqlx::mysql::MySqlConnection::connect(db_url).await?; + /// // let mut conn = ; + /// let account = sqlx::query!( + /// // just pretend "accounts" is a real table + /// "select * from (select (1) as id, 'Herp Derpinson' as name) accounts where id = ?", + /// 1i32 + /// ) + /// .fetch_one(&mut conn) + /// .await?; + /// + /// println!("{account:?}"); + /// println!("{}: {}", account.id, account.name); + /// # Ok(()) + /// # } + /// # + /// # #[cfg(any(not(feature = "mysql"), not(feature = "_rt-async-std")))] + /// # fn main() {} + /// ``` + /// + /// Bind parameters in the SQL string are specific to the database backend: + /// + /// * Postgres: `$N` where `N` is the 1-based positional argument index + /// * MySQL/SQLite: `?` which matches arguments in order that it appears in the query + /// + /// ## Nullability: Bind Parameters + /// For a given expected type `T`, both `T` and `Option` are allowed (as well as either + /// behind references). `Option::None` will be bound as `NULL`, so if binding a type behind `Option` + /// be sure your query can support it. 
+ /// + /// Note, however, if binding in a `where` clause, that equality comparisons with `NULL` may not + /// work as expected; instead you must use `IS NOT NULL` or `IS NULL` to check if a column is not + /// null or is null, respectively. + /// + /// In Postgres and MySQL you may also use `IS [NOT] DISTINCT FROM` to compare with a possibly + /// `NULL` value. In MySQL `IS NOT DISTINCT FROM` can be shortened to `<=>`. + /// In SQLite you can use `IS` or `IS NOT`. Note that operator precedence may be different. + /// + /// ## Nullability: Output Columns + /// In most cases, the database engine can tell us whether or not a column may be `NULL`, and + /// the `query!()` macro adjusts the field types of the returned struct accordingly. + /// + /// For Postgres, this only works for columns which come directly from actual tables, + /// as the implementation will need to query the table metadata to find if a given column + /// has a `NOT NULL` constraint. Columns that do not have a `NOT NULL` constraint or are the result + /// of an expression are assumed to be nullable and so `Option` is used instead of `T`. + /// + /// For MySQL, the implementation looks at [the `NOT_NULL` flag](https://dev.mysql.com/doc/dev/mysql-server/8.0.12/group__group__cs__column__definition__flags.html#ga50377f5ca5b3e92f3931a81fe7b44043) + /// of [the `ColumnDefinition` structure in `COM_QUERY_OK`](https://dev.mysql.com/doc/internals/en/com-query-response.html#column-definition): + /// if it is set, `T` is used; if it is not set, `Option` is used. + /// + /// MySQL appears to be capable of determining the nullability of a result column even if it + /// is the result of an expression, depending on if the expression may in any case result in + /// `NULL` which then depends on the semantics of what functions are used. Consult the MySQL + /// manual for the functions you are using to find the cases in which they return `NULL`. 
+ /// + /// For SQLite we perform a similar check to Postgres, looking for `NOT NULL` constraints + /// on columns that come from tables. However, for SQLite we also can step through the output + /// of `EXPLAIN` to identify columns that may or may not be `NULL`. + /// + /// To override the nullability of an output column, [see below](#type-overrides-output-columns). + /// + /// ## Type Overrides: Bind Parameters (Postgres only) + /// For typechecking of bind parameters, casts using `as` are treated as overrides for the inferred + /// types of bind parameters and no typechecking is emitted: + /// + /// ```rust,ignore + /// #[derive(sqlx::Type)] + /// #[sqlx(transparent)] + /// struct MyInt4(i32); + /// + /// let my_int = MyInt4(1); + /// + /// sqlx::query!("select $1::int4 as id", my_int as MyInt4) + /// ``` + /// + /// Using `expr as _` simply signals to the macro to not type-check that bind expression, + /// and then that syntax is stripped from the expression so as to not trigger type errors. + /// + /// ## Type Overrides: Output Columns + /// Type overrides are also available for output columns, utilizing the SQL standard's support + /// for arbitrary text in column names: + /// + /// ##### Force Not-Null + /// Selecting a column `foo as "foo!"` (Postgres / SQLite) or `` foo as `foo!` `` (MySQL) overrides + /// inferred nullability and forces the column to be treated as `NOT NULL`; this is useful e.g. 
for + /// selecting expressions in Postgres where we cannot infer nullability: + /// + /// ```rust,ignore + /// # async fn main() { + /// # let mut conn = panic!(); + /// // Postgres: using a raw query string lets us use unescaped double-quotes + /// // Note that this query wouldn't work in SQLite as we still don't know the exact type of `id` + /// let record = sqlx::query!(r#"select 1 as "id!""#) // MySQL: use "select 1 as `id!`" instead + /// .fetch_one(&mut conn) + /// .await?; + /// + /// // For Postgres this would have been inferred to be Option instead + /// assert_eq!(record.id, 1i32); + /// # } + /// + /// ``` + /// + /// ##### Force Nullable + /// Selecting a column `foo as "foo?"` (Postgres / SQLite) or `` foo as `foo?` `` (MySQL) overrides + /// inferred nullability and forces the column to be treated as nullable; this is provided mainly + /// for symmetry with `!`. + /// + /// ```rust,ignore + /// # async fn main() { + /// # let mut conn = panic!(); + /// // Postgres/SQLite: + /// let record = sqlx::query!(r#"select 1 as "id?""#) // MySQL: use "select 1 as `id?`" instead + /// .fetch_one(&mut conn) + /// .await?; + /// + /// // For Postgres this would have been inferred to be Option anyway + /// // but this is just a basic example + /// assert_eq!(record.id, Some(1i32)); + /// # } + /// ``` + /// + /// MySQL should be accurate with regards to nullability as it directly tells us when a column is + /// expected to never be `NULL`. Any mistakes should be considered a bug in MySQL. + /// + /// However, inference in SQLite and Postgres is more fragile as it depends primarily on observing + /// `NOT NULL` constraints on columns. If a `NOT NULL` column is brought in by a `LEFT JOIN` then + /// that column may be `NULL` if its row does not satisfy the join condition. Similarly, a + /// `FULL JOIN` or `RIGHT JOIN` may generate rows from the primary table that are all `NULL`. 
+ /// + /// Unfortunately, the result of mistakes in inference is a `UnexpectedNull` error at runtime. + /// + /// In Postgres, we patch up this inference by analyzing `EXPLAIN VERBOSE` output (which is not + /// well documented, is highly dependent on the query plan that Postgres generates, and may differ + /// between releases) to find columns that are the result of left/right/full outer joins. This + /// analysis errs on the side of producing false positives (marking columns nullable that are not + /// in practice) but there are likely edge cases that it does not cover yet. + /// + /// Using `?` as an override we can fix this for columns we know to be nullable in practice: + /// + /// ```rust,ignore + /// # async fn main() { + /// # let mut conn = panic!(); + /// // Ironically this is the exact column we primarily look at to determine nullability in Postgres + /// let record = sqlx::query!( + /// r#"select attnotnull as "attnotnull?" from (values (1)) ids left join pg_attribute on false"# + /// ) + /// .fetch_one(&mut conn) + /// .await?; + /// + /// // Although we do our best, under Postgres this might have been inferred to be `bool` + /// // In that case, we would have gotten an error + /// assert_eq!(record.attnotnull, None); + /// # } + /// ``` + /// + /// If you find that you need to use this override, please open an issue with a query we can use + /// to reproduce the problem. For Postgres users, especially helpful would be the output of + /// `EXPLAIN (VERBOSE, FORMAT JSON) ` with bind parameters substituted in the query + /// (as the exact value of bind parameters can change the query plan) + /// and the definitions of any relevant tables (or sufficiently anonymized equivalents). 
+ /// + /// ##### Force a Different/Custom Type + /// Selecting a column `foo as "foo: T"` (Postgres / SQLite) or `` foo as `foo: T` `` (MySQL) + /// overrides the inferred type which is useful when selecting user-defined custom types + /// (dynamic type checking is still done so if the types are incompatible this will be an error + /// at runtime instead of compile-time). Note that this syntax alone doesn't override inferred nullability, + /// but it is compatible with the forced not-null and forced nullable annotations: + /// + /// ```rust,ignore + /// # async fn main() { + /// # let mut conn = panic!(); + /// #[derive(sqlx::Type)] + /// #[sqlx(transparent)] + /// struct MyInt4(i32); + /// + /// let my_int = MyInt4(1); + /// + /// // Postgres/SQLite + /// sqlx::query!(r#"select 1 as "id!: MyInt4""#) // MySQL: use "select 1 as `id: MyInt4`" instead + /// .fetch_one(&mut conn) + /// .await?; + /// + /// // For Postgres this would have been inferred to be `Option`, MySQL/SQLite `i32` + /// // Note that while using `id: MyInt4` (without the `!`) would work the same for MySQL/SQLite, + /// // Postgres would expect `Some(MyInt4(1))` and the code wouldn't compile + /// assert_eq!(record.id, MyInt4(1)); + /// # } + /// ``` + /// + /// ##### Overrides cheatsheet + /// + /// | Syntax | Nullability | Type | + /// | --------- | --------------- | ---------- | + /// | `foo!` | Forced not-null | Inferred | + /// | `foo?` | Forced nullable | Inferred | + /// | `foo: T` | Inferred | Overridden | + /// | `foo!: T` | Forced not-null | Overridden | + /// | `foo?: T` | Forced nullable | Overridden | + /// + /// ## Offline Mode + /// The macros can be configured to not require a live database connection for compilation, + /// but it requires a couple extra steps: + /// + /// * Run `cargo install sqlx-cli`. + /// * In your project with `DATABASE_URL` set (or in a `.env` file) and the database server running, + /// run `cargo sqlx prepare`. 
+ /// * Check the generated `.sqlx` directory into version control. + /// * Don't have `DATABASE_URL` set during compilation. + /// + /// Your project can now be built without a database connection (you must omit `DATABASE_URL` or + /// else it will still try to connect). To update the generated file simply run `cargo sqlx prepare` + /// again. + /// + /// To ensure that your `.sqlx` directory is kept up-to-date, both with the queries in your + /// project and your database schema itself, run + /// `cargo install sqlx-cli && cargo sqlx prepare --check` in your Continuous Integration script. + /// + /// See [the README for `sqlx-cli`](https://crates.io/crates/sqlx-cli) for more information. + /// + /// ## See Also + /// * [`query_as!`][`crate::query_as!`] if you want to use a struct you can name, + /// * [`query_file!`][`crate::query_file!`] if you want to define the SQL query out-of-line, + /// * [`query_file_as!`][`crate::query_file_as!`] if you want both of the above. + + #[cfg_attr(docsrs, doc(cfg(feature = "macros")))] + macro_rules! query ( + // in Rust 1.45 we can now invoke proc macros in expression position + ($query:expr) => ({ + sqlx::sqlx_macros::expand_query!(source = $query, db_url_env = #env) + }); + // RFC: this semantically should be `$($args:expr),*` (with `$(,)?` to allow trailing comma) + // but that doesn't work in 1.45 because `expr` fragments get wrapped in a way that changes + // their hygiene, which is fixed in 1.46 so this is technically just a temp. workaround. + // My question is: do we care? + // I was hoping using the `expr` fragment might aid code completion but it doesn't in my + // experience, at least not with IntelliJ-Rust at the time of writing (version 0.3.126.3220-201) + // so really the only benefit is making the macros _slightly_ self-documenting, but it's + // not like it makes them magically understandable at-a-glance. 
+ ($query:expr, $($args:tt)*) => ({ + sqlx::sqlx_macros::expand_query!(source = $query, args = [$($args)*], db_url_env = #env) + }) + ); + pub(crate) use query; + + /// A variant of [`query!`][`crate::query!`] which does not check the input or output types. This still does parse + /// the query to ensure it's syntactically and semantically valid for the current database. + + #[cfg_attr(docsrs, doc(cfg(feature = "macros")))] + macro_rules! query_unchecked ( + ($query:expr) => ({ + sqlx::sqlx_macros::expand_query!(source = $query, checked = false, db_url_env = #env) + }); + ($query:expr, $($args:tt)*) => ({ + sqlx::sqlx_macros::expand_query!(source = $query, args = [$($args)*], checked = false, db_url_env = #env) + }) + ); + pub(crate) use query_unchecked; + + /// A variant of [`query!`][`crate::query!`] where the SQL query is stored in a separate file. + /// + /// Useful for large queries and potentially cleaner than multiline strings. + /// + /// The syntax and requirements (see [`query!`][`crate::query!`]) are the same except the SQL + /// string is replaced by a file path. + /// + /// The file must be relative to the project root (the directory containing `Cargo.toml`), + /// unlike `include_str!()` which uses compiler internals to get the path of the file where it + /// was invoked. + /// + /// ----- + /// + /// `examples/queries/account-by-id.sql`: + /// ```text + /// select * from (select (1) as id, 'Herp Derpinson' as name) accounts + /// where id = ? 
+ /// ``` + /// + /// `src/my_query.rs`: + /// ```rust,ignore + /// # use sqlx::Connect; + /// # #[cfg(all(feature = "mysql", feature = "_rt-async-std"))] + /// # #[async_std::main] + /// # async fn main() -> sqlx::Result<()>{ + /// # let db_url = dotenvy::var("DATABASE_URL").expect("DATABASE_URL must be set"); + /// # + /// # if !(db_url.starts_with("mysql") || db_url.starts_with("mariadb")) { return Ok(()) } + /// # let mut conn = sqlx::MySqlConnection::connect(db_url).await?; + /// let account = sqlx::query_file!("tests/test-query-account-by-id.sql", 1i32) + /// .fetch_one(&mut conn) + /// .await?; + /// + /// println!("{account:?}"); + /// println!("{}: {}", account.id, account.name); + /// + /// # Ok(()) + /// # } + /// # + /// # #[cfg(any(not(feature = "mysql"), not(feature = "_rt-async-std")))] + /// # fn main() {} + /// ``` + + #[cfg_attr(docsrs, doc(cfg(feature = "macros")))] + macro_rules! query_file ( + ($path:literal) => ({ + sqlx::sqlx_macros::expand_query!(source_file = $path, db_url_env = #env) + }); + ($path:literal, $($args:tt)*) => ({ + sqlx::sqlx_macros::expand_query!(source_file = $path, args = [$($args)*], db_url_env = #env) + }) + ); + pub(crate) use query_file; + + /// A variant of [`query_file!`][`crate::query_file!`] which does not check the input or output + /// types. This still does parse the query to ensure it's syntactically and semantically valid + /// for the current database. + + #[cfg_attr(docsrs, doc(cfg(feature = "macros")))] + macro_rules! query_file_unchecked ( + ($path:literal) => ({ + sqlx::sqlx_macros::expand_query!(source_file = $path, checked = false, db_url_env = #env) + }); + ($path:literal, $($args:tt)*) => ({ + sqlx::sqlx_macros::expand_query!(source_file = $path, args = [$($args)*], checked = false, db_url_env = #env) + }) + ); + pub(crate) use query_file_unchecked; + + + /// A variant of [`query!`][`crate::query!`] which takes a path to an explicitly defined struct + /// as the output type. 
+ /// + /// This lets you return the struct from a function or add your own trait implementations. + /// + /// **This macro does not use [`FromRow`][crate::FromRow]**; in fact, no trait implementations are + /// required at all, though this may change in future versions. + /// + /// The macro maps rows using a struct literal where the names of columns in the query are expected + /// to be the same as the fields of the struct (but the order does not need to be the same). + /// The types of the columns are based on the query and not the corresponding fields of the struct, + /// so this is type-safe as well. + /// + /// This enforces a few things: + /// * The query must output at least one column. + /// * The column names of the query must match the field names of the struct. + /// * The field types must be the Rust equivalent of their SQL counterparts; see the corresponding + /// module for your database for mappings: + /// * Postgres: [crate::postgres::types] + /// * MySQL: [crate::mysql::types] + /// * Note: due to wire protocol limitations, the query macros do not know when + /// a column should be decoded as `bool`. It will be inferred to be `i8` instead. + /// See the link above for details. + /// * SQLite: [crate::sqlite::types] + /// * If a column may be `NULL`, the corresponding field's type must be wrapped in `Option<_>`. + /// * Neither the query nor the struct may have unused fields. 
+ /// + /// The only modification to the `query!()` syntax is that the struct name is given before the SQL + /// string: + /// ```rust,ignore + /// # use sqlx::Connect; + /// # #[cfg(all(feature = "mysql", feature = "_rt-async-std"))] + /// # #[async_std::main] + /// # async fn main() -> sqlx::Result<()>{ + /// # let db_url = dotenvy::var("DATABASE_URL").expect("DATABASE_URL must be set"); + /// # + /// # if !(db_url.starts_with("mysql") || db_url.starts_with("mariadb")) { return Ok(()) } + /// # let mut conn = sqlx::MySqlConnection::connect(db_url).await?; + /// #[derive(Debug)] + /// struct Account { + /// id: i32, + /// name: String + /// } + /// + /// // let mut conn = ; + /// let account = sqlx::query_as!( + /// Account, + /// "select * from (select (1) as id, 'Herp Derpinson' as name) accounts where id = ?", + /// 1i32 + /// ) + /// .fetch_one(&mut conn) + /// .await?; + /// + /// println!("{account:?}"); + /// println!("{}: {}", account.id, account.name); + /// + /// # Ok(()) + /// # } + /// # + /// # #[cfg(any(not(feature = "mysql"), not(feature = "_rt-async-std")))] + /// # fn main() {} + /// ``` + /// + /// **The method you want to call depends on how many rows you're expecting.** + /// + /// | Number of Rows | Method to Call* | Returns (`T` being the given struct) | Notes | + /// |----------------| ----------------------------|----------------------------------------|-------| + /// | Zero or One | `.fetch_optional(...).await`| `sqlx::Result>` | Extra rows are ignored. | + /// | Exactly One | `.fetch_one(...).await` | `sqlx::Result` | Errors if no rows were returned. Extra rows are ignored. Aggregate queries, use this. | + /// | At Least One | `.fetch(...)` | `impl Stream>` | Call `.try_next().await` to get each row result. | + /// | Multiple | `.fetch_all(...)` | `sqlx::Result>` | | + /// + /// \* All methods accept one of `&mut {connection type}`, `&mut Transaction` or `&Pool`. 
+ /// (`.execute()` is omitted as this macro requires at least one column to be returned.)
+ ///
+ /// ### Column Type Override: Infer from Struct Field
+ /// In addition to the column type overrides supported by [`query!`][`crate::query!`],
+ /// [`query_as!()`][`crate::query_as!`] supports an
+ /// additional override option:
+ ///
+ /// If you select a column `foo as "foo: _"` (Postgres/SQLite) or `` foo as `foo: _` `` (MySQL)
+ /// it causes that column to be inferred based on the type of the corresponding field in the given
+ /// record struct. Runtime type-checking is still done so an error will be emitted if the types
+ /// are not compatible.
+ ///
+ /// This allows you to override the inferred type of a column to instead use a custom-defined type:
+ ///
+ /// ```rust,ignore
+ /// #[derive(sqlx::Type)]
+ /// #[sqlx(transparent)]
+ /// struct MyInt4(i32);
+ ///
+ /// struct Record {
+ /// id: MyInt4,
+ /// }
+ ///
+ /// let my_int = MyInt4(1);
+ ///
+ /// // Postgres/SQLite
+ /// sqlx::query_as!(Record, r#"select 1 as "id: _""#) // MySQL: use "select 1 as `id: _`" instead
+ /// .fetch_one(&mut conn)
+ /// .await?;
+ ///
+ /// assert_eq!(record.id, MyInt4(1));
+ /// ```
+ ///
+ /// ### Troubleshooting: "error: mismatched types"
+ /// If you get a "mismatched types" error from an invocation of this macro and the error
+ /// isn't pointing specifically at a parameter, the likely cause is a column/field type mismatch.
+ ///
+ /// For example, code like this (using a Postgres database):
+ ///
+ /// ```rust,ignore
+ /// struct Account {
+ /// id: i32,
+ /// name: Option<String>,
+ /// }
+ ///
+ /// let account = sqlx::query_as!(
+ /// Account,
+ /// r#"SELECT id, name from (VALUES (1, 'Herp Derpinson')) accounts(id, name)"#,
+ /// )
+ /// .fetch_one(&mut conn)
+ /// .await?;
+ /// ```
+ ///
+ /// Might produce an error like this:
+ /// ```text,ignore
+ /// error[E0308]: mismatched types
+ /// --> tests/postgres/macros.rs:126:19
+ /// |
+ /// 126 | let account = sqlx::query_as!(
+ /// | ___________________^
+ /// 127 | | Account,
+ /// 128 | | r#"SELECT id, name from (VALUES (1, 'Herp Derpinson')) accounts(id, name)"#,
+ /// 129 | | )
+ /// | |_____^ expected `i32`, found enum `std::option::Option<i32>`
+ /// |
+ /// = note: expected type `i32`
+ /// found enum `std::option::Option<i32>`
+ /// ```
+ ///
+ /// This means that you need to check that any field of the "expected" type (here, `i32`) matches
+ /// the Rust type mapping for its corresponding SQL column (see the `types` module of your database,
+ /// listed above, for mappings). The "found" type is the SQL->Rust mapping that the macro chose.
+ ///
+ /// In the above example, the returned column is inferred to be nullable because it's being
+ /// returned from a `VALUES` statement in Postgres, so the macro inferred the field to be nullable
+ /// and so used `Option<i32>` instead of `i32`. **In this specific case** we could use
+ /// `select id as "id!"` to override the inferred nullability because we know in practice
+ /// that column will never be `NULL` and it will fix the error.
+ ///
+ /// Nullability inference and type overrides are discussed in detail in the docs for
+ /// [`query!`][`crate::query!`].
+ ///
+ /// It unfortunately doesn't appear to be possible right now to make the error specifically mention
+ /// the field; this probably requires the `const-panic` feature (still unstable as of Rust 1.45).
+ + #[cfg_attr(docsrs, doc(cfg(feature = "macros")))] + macro_rules! query_as ( + ($out_struct:path, $query:expr) => ( { + sqlx::sqlx_macros::expand_query!(record = $out_struct, source = $query, db_url_env = #env) + }); + ($out_struct:path, $query:expr, $($args:tt)*) => ( { + sqlx::sqlx_macros::expand_query!(record = $out_struct, source = $query, args = [$($args)*], db_url_env = #env) + }) + ); + pub(crate) use query_as; + + /// Combines the syntaxes of [`query_as!`][`crate::query_as!`] and [`query_file!`][`crate::query_file!`]. + /// + /// Enforces requirements of both macros; see them for details. + /// + /// ```rust,ignore + /// # use sqlx::Connect; + /// # #[cfg(all(feature = "mysql", feature = "_rt-async-std"))] + /// # #[async_std::main] + /// # async fn main() -> sqlx::Result<()>{ + /// # let db_url = dotenvy::var("DATABASE_URL").expect("DATABASE_URL must be set"); + /// # + /// # if !(db_url.starts_with("mysql") || db_url.starts_with("mariadb")) { return Ok(()) } + /// # let mut conn = sqlx::MySqlConnection::connect(db_url).await?; + /// #[derive(Debug)] + /// struct Account { + /// id: i32, + /// name: String + /// } + /// + /// // let mut conn = ; + /// let account = sqlx::query_file_as!(Account, "tests/test-query-account-by-id.sql", 1i32) + /// .fetch_one(&mut conn) + /// .await?; + /// + /// println!("{account:?}"); + /// println!("{}: {}", account.id, account.name); + /// + /// # Ok(()) + /// # } + /// # + /// # #[cfg(any(not(feature = "mysql"), not(feature = "_rt-async-std")))] + /// # fn main() {} + /// ``` + + #[cfg_attr(docsrs, doc(cfg(feature = "macros")))] + macro_rules! 
query_file_as ( + ($out_struct:path, $path:literal) => ( { + sqlx::sqlx_macros::expand_query!(record = $out_struct, source_file = $path, db_url_env = #env) + }); + ($out_struct:path, $path:literal, $($args:tt)*) => ( { + sqlx::sqlx_macros::expand_query!(record = $out_struct, source_file = $path, args = [$($args)*], db_url_env = #env) + }) + ); + pub(crate) use query_file_as; + + /// A variant of [`query_as!`][`crate::query_as!`] which does not check the input or output types. This still does parse + /// the query to ensure it's syntactically and semantically valid for the current database. + + #[cfg_attr(docsrs, doc(cfg(feature = "macros")))] + macro_rules! query_as_unchecked ( + ($out_struct:path, $query:expr) => ( { + sqlx::sqlx_macros::expand_query!(record = $out_struct, source = $query, checked = false, db_url_env = #env) + }); + + ($out_struct:path, $query:expr, $($args:tt)*) => ( { + sqlx::sqlx_macros::expand_query!(record = $out_struct, source = $query, args = [$($args)*], checked = false, db_url_env = #env) + }) + ); + pub(crate) use query_as_unchecked; + + /// A variant of [`query_file_as!`][`crate::query_file_as!`] which does not check the input or output types. This + /// still does parse the query to ensure it's syntactically and semantically valid + /// for the current database. + + #[cfg_attr(docsrs, doc(cfg(feature = "macros")))] + macro_rules! query_file_as_unchecked ( + ($out_struct:path, $path:literal) => ( { + sqlx::sqlx_macros::expand_query!(record = $out_struct, source_file = $path, checked = false, db_url_env = #env) + }); + + ($out_struct:path, $path:literal, $($args:tt)*) => ( { + sqlx::sqlx_macros::expand_query!(record = $out_struct, source_file = $path, args = [$($args)*], checked = false, db_url_env = #env) + }) + ); + pub(crate) use query_file_as_unchecked; + + /// A variant of [`query!`][`crate::query!`] which expects a single column from the query and evaluates to an + /// instance of [QueryScalar][crate::query::QueryScalar]. 
+ /// + /// The name of the column is not required to be a valid Rust identifier, however you can still + /// use the column type override syntax in which case the column name _does_ have to be a valid + /// Rust identifier for the override to parse properly. If the override parse fails the error + /// is silently ignored (we just don't have a reliable way to tell the difference). **If you're + /// getting a different type than expected, please check to see if your override syntax is correct + /// before opening an issue.** + /// + /// Wildcard overrides like in [`query_as!`][`crate::query_as!`] are also allowed, in which case the output type + /// is left up to inference. + /// + /// See [`query!`][`crate::query!`] for more information. + + #[cfg_attr(docsrs, doc(cfg(feature = "macros")))] + macro_rules! query_scalar ( + ($query:expr) => ( + sqlx::sqlx_macros::expand_query!(scalar = _, source = $query, db_url_env = #env) + ); + ($query:expr, $($args:tt)*) => ( + sqlx::sqlx_macros::expand_query!(scalar = _, source = $query, args = [$($args)*], db_url_env = #env) + ) + ); + pub(crate) use query_scalar; + + /// A variant of [`query_scalar!`][`crate::query_scalar!`] which takes a file path like + /// [`query_file!`][`crate::query_file!`]. + + #[cfg_attr(docsrs, doc(cfg(feature = "macros")))] + macro_rules! query_file_scalar ( + ($path:literal) => ( + sqlx::sqlx_macros::expand_query!(scalar = _, source_file = $path, db_url_env = #env) + ); + ($path:literal, $($args:tt)*) => ( + sqlx::sqlx_macros::expand_query!(scalar = _, source_file = $path, args = [$($args)*], db_url_env = #env) + ) + ); + pub(crate) use query_file_scalar; + + /// A variant of [`query_scalar!`][`crate::query_scalar!`] which does not typecheck bind parameters + /// and leaves the output type to inference. 
+ /// The query itself is still checked that it is syntactically and semantically + /// valid for the database, that it only produces one column and that the number of bind parameters + /// is correct. + /// + /// For this macro variant the name of the column is irrelevant. + + #[cfg_attr(docsrs, doc(cfg(feature = "macros")))] + macro_rules! query_scalar_unchecked ( + ($query:expr) => ( + sqlx::sqlx_macros::expand_query!(scalar = _, source = $query, checked = false, db_url_env = #env) + ); + ($query:expr, $($args:tt)*) => ( + sqlx::sqlx_macros::expand_query!(scalar = _, source = $query, args = [$($args)*], checked = false, db_url_env = #env) + ) + ); + pub(crate) use query_scalar_unchecked; + + /// A variant of [`query_file_scalar!`][`crate::query_file_scalar!`] which does not typecheck bind + /// parameters and leaves the output type to inference. + /// The query itself is still checked that it is syntactically and + /// semantically valid for the database, that it only produces one column and that the number of + /// bind parameters is correct. + /// + /// For this macro variant the name of the column is irrelevant. + + #[cfg_attr(docsrs, doc(cfg(feature = "macros")))] + macro_rules! 
query_file_scalar_unchecked (
+ ($path:literal) => (
+ sqlx::sqlx_macros::expand_query!(scalar = _, source_file = $path, checked = false, db_url_env = #env)
+ );
+ ($path:literal, $($args:tt)*) => (
+ sqlx::sqlx_macros::expand_query!(scalar = _, source_file = $path, args = [$($args)*], checked = false, db_url_env = #env)
+ )
+ );
+ pub(crate) use query_file_scalar_unchecked;
+
+ };
+
+ expanded
+}
diff --git a/sqlx-macros-core/src/database_macro/input.rs b/sqlx-macros-core/src/database_macro/input.rs
new file mode 100644
index 0000000000..1311541016
--- /dev/null
+++ b/sqlx-macros-core/src/database_macro/input.rs
@@ -0,0 +1,39 @@
+use syn::{
+ parse::{Parse, ParseStream},
+ Ident, LitStr,
+};
+
+/// Macro input for the `database_macros!()` proc macro: `env = "NAME_OF_DB_URL_ENV_VAR"`.
+pub struct DatabaseMacroInput {
+ pub(super) env: String,
+}
+
+impl Parse for DatabaseMacroInput {
+ fn parse(input: ParseStream) -> syn::Result<Self> {
+ let mut env = None;
+
+ let mut expect_comma = false;
+ while !input.is_empty() {
+ if expect_comma {
+ let _ = input.parse::<syn::Token![,]>()?;
+ }
+
+ let key: Ident = input.parse()?;
+
+ let _ = input.parse::<syn::Token![=]>()?;
+
+ if key == "env" {
+ env = Some(input.parse::<LitStr>()?.value());
+ } else {
+ let message = format!("unexpected input key: {key}");
+ return Err(syn::Error::new_spanned(key, message));
+ }
+
+ expect_comma = true;
+ }
+
+ let env = env.ok_or_else(|| input.error("expected `env` key"))?;
+
+ Ok(DatabaseMacroInput { env })
+ }
+}
diff --git a/sqlx-macros-core/src/database_macro/mod.rs b/sqlx-macros-core/src/database_macro/mod.rs
new file mode 100644
index 0000000000..377805e53e
--- /dev/null
+++ b/sqlx-macros-core/src/database_macro/mod.rs
@@ -0,0 +1,5 @@
+mod expand;
+mod input;
+
+pub use expand::expand_database_macros;
+pub use input::DatabaseMacroInput;
diff --git a/sqlx-macros-core/src/lib.rs b/sqlx-macros-core/src/lib.rs
index d2a40aa831..4c341f0ab6 100644
--- a/sqlx-macros-core/src/lib.rs
+++ b/sqlx-macros-core/src/lib.rs
@@ -29,6 +29,8 @@ pub type Result = std::result::Result;

mod
common; mod database; +#[cfg(feature = "macros")] +pub mod database_macro; #[cfg(feature = "derive")] pub mod derives; #[cfg(feature = "macros")] diff --git a/sqlx-macros/src/lib.rs b/sqlx-macros/src/lib.rs index 987794acbc..0cd5483607 100644 --- a/sqlx-macros/src/lib.rs +++ b/sqlx-macros/src/lib.rs @@ -22,6 +22,13 @@ pub fn expand_query(input: TokenStream) -> TokenStream { } } +#[cfg(feature = "macros")] +#[proc_macro] +pub fn database_macros(input: TokenStream) -> TokenStream { + let input = syn::parse_macro_input!(input as database_macro::DatabaseMacroInput); + database_macro::expand_database_macros(input).into() +} + #[cfg(feature = "derive")] #[proc_macro_derive(Encode, attributes(sqlx))] pub fn derive_encode(tokenstream: TokenStream) -> TokenStream {