diff --git a/.cargo/audit.toml b/.cargo/audit.toml index 6a896fffd0..fd1ced6d46 100644 --- a/.cargo/audit.toml +++ b/.cargo/audit.toml @@ -1,4 +1,5 @@ [advisories] ignore = [ "RUSTSEC-2024-0365", # Bound by diesel 1.4 (4GB limit n/a to tokenserver) + "RUSTSEC-2022-0090", # Bound by diesel 1.4.8, diesel_migrations 1.4.0, diesel_logger 0.1.1 ] diff --git a/.circleci/config.yml b/.circleci/config.yml index 1fecfe8257..49abaa0dee 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -53,18 +53,25 @@ commands: - run: name: Rust Clippy MySQL command: | - cargo clippy --workspace --all-targets --no-default-features --features=syncstorage-db/mysql --features=py_verifier -- -D warnings + cargo clippy --workspace --all-targets --no-default-features --features=mysql,py_verifier -- -D warnings + rust-clippy-sqlite: + steps: + - run: + name: Rust Clippy SQLite + command: | + cargo clippy --workspace --all-targets --no-default-features --features=sqlite,py_verifier -- -D warnings rust-clippy-spanner: steps: - run: name: Rust Clippy Spanner command: | - cargo clippy --workspace --all-targets --no-default-features --features=syncstorage-db/spanner --features=py_verifier -- -D warnings + cargo clippy --workspace --all-targets --no-default-features --features=spanner,py_verifier -- -D warnings cargo-build: steps: - run: name: cargo build - command: cargo build + # This only builds the mysql version as it was the old default + command: cargo build --workspace --all-targets --no-default-features --features=mysql,py_verifier setup-mysql: steps: - run: @@ -95,10 +102,10 @@ commands: steps: - run: name: cargo test - command: cargo test --workspace --verbose + command: cargo test --workspace --verbose --no-default-features --features mysql,py_verifier - run: name: quota test - command: cargo test --workspace --verbose + command: cargo test --workspace --verbose --no-default-features --features mysql,py_verifier environment: SYNC_SYNCSTORAGE__ENFORCE_QUOTA: 1 @@ -116,6 +123,20 @@ commands: environment: SYNCSTORAGE_RS_IMAGE: app:build + run-e2e-sqlite-tests: + steps: + - run: + name: e2e tests (syncstorage sqlite) + command: > + /usr/local/bin/docker-compose + -f docker-compose.sqlite.yaml + -f docker-compose.e2e.sqlite.yaml + up + --exit-code-from sqlite-e2e-tests + --abort-on-container-exit + environment: + SYNCSTORAGE_RS_IMAGE: app:build + run-tokenserver-scripts-tests: steps: - run: @@ -251,6 +272,38 @@ jobs: paths: - /home/circleci/cache + build-sqlite-image: + docker: + - image: cimg/rust:1.78.0 # RUST_VER + auth: + username: $DOCKER_USER + password: $DOCKER_PASS + resource_class: large + steps: + - setup_remote_docker: + docker_layer_caching: true + - checkout + - display-versions + - write-version + - run: + name: Build SQLite Docker image + command: docker build -t app:build --build-arg DATABASE_BACKEND=sqlite . + no_output_timeout: 30m + # save the built docker container into CircleCI's cache. This is + # required since Workflows do not have the same remote docker instance. + - run: + name: docker save app:build + command: | + mkdir -p /home/circleci/cache + docker save -o /home/circleci/cache/docker.tar "app:build" + - run: + name: Save docker-compose config + command: cp docker-compose*sqlite.yaml /home/circleci/cache + - save_cache: + key: sqlite-{{ .Branch }}-{{ .Environment.CIRCLE_SHA1 }}-{{ epoch }} + paths: + - /home/circleci/cache + build-spanner-image: docker: - image: cimg/rust:1.78.0 # RUST_VER @@ -321,6 +374,25 @@ jobs: command: cp /home/circleci/cache/docker-compose*.yaml . 
- run-e2e-spanner-tests + sqlite-e2e-tests: + docker: + - image: docker/compose:1.24.0 + auth: + username: $DOCKER_USER + password: $DOCKER_PASS + steps: + - setup_remote_docker + - display-versions + - restore_cache: + key: sqlite-{{ .Branch }}-{{ .Environment.CIRCLE_SHA1 }} + - run: + name: Restore Docker image cache + command: docker load -i /home/circleci/cache/docker.tar + - run: + name: Restore docker-compose config + command: cp /home/circleci/cache/docker-compose*.yaml . + - run-e2e-sqlite-tests + deploy: docker: - image: docker:18.02.0-ce @@ -422,6 +494,12 @@ workflows: filters: tags: only: /.*/ + - build-sqlite-image: + requires: + - build-and-test + filters: + tags: + only: /.*/ - mysql-e2e-tests: requires: - build-mysql-image @@ -434,10 +512,17 @@ workflows: filters: tags: only: /.*/ + - sqlite-e2e-tests: + requires: + - build-sqlite-image + filters: + tags: + only: /.*/ - deploy: requires: - mysql-e2e-tests - spanner-e2e-tests + - sqlite-e2e-tests filters: tags: only: /.*/ @@ -448,6 +533,7 @@ workflows: requires: - mysql-e2e-tests - spanner-e2e-tests + - sqlite-e2e-tests filters: tags: only: /.*/ diff --git a/Cargo.lock b/Cargo.lock index 9d63860f54..763a33cf64 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,6 +1,6 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. -version = 3 +version = 4 [[package]] name = "actix-codec" @@ -8,7 +8,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f7b0a21988c1bf877cf4759ef5ddaac04c1c9fe808c9142ecb78ba97d97a28a" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "bytes", "futures-core", "futures-sink", @@ -36,9 +36,9 @@ dependencies = [ [[package]] name = "actix-http" -version = "3.8.0" +version = "3.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ae682f693a9cd7b058f2b0b5d9a6d7728a8555779bedbbc35dd88528611d020" +checksum = "d48f96fc3003717aeb9856ca3d02a8c7de502667ad76eeacd830b48d2e91fac4" dependencies = [ "actix-codec", "actix-rt", @@ -46,7 +46,7 @@ dependencies = [ "actix-utils", "ahash", "base64 0.22.1", - "bitflags 2.5.0", + "bitflags 2.6.0", "brotli", "bytes", "bytestring", @@ -54,7 +54,7 @@ dependencies = [ "encoding_rs", "flate2", "futures-core", - "h2", + "h2 0.3.26", "http 0.2.12", "httparse", "httpdate", @@ -80,7 +80,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e01ed3140b2f8d422c68afa1ed2e85d996ea619c988ac834d255db32138655cb" dependencies = [ "quote", - "syn 2.0.67", + "syn 2.0.90", ] [[package]] @@ -94,7 +94,7 @@ dependencies = [ "http 0.2.12", "regex", "regex-lite", - "serde 1.0.203", + "serde 1.0.215", "tracing", ] @@ -111,9 +111,9 @@ dependencies = [ [[package]] name = "actix-server" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b02303ce8d4e8be5b855af6cf3c3a08f3eff26880faad82bab679c22d3650cb5" +checksum = "7ca2549781d8dd6d75c40cf6b6051260a2cc2f3c62343d761a969a0640646894" dependencies = [ "actix-rt", "actix-service", @@ -149,9 +149,9 @@ dependencies = [ [[package]] name = "actix-web" -version = "4.8.0" +version = "4.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1988c02af8d2b718c05bc4aeb6a66395b7cdf32858c2c71131e5637a8c05a9ff" +checksum = "9180d76e5cc7ccbc4d60a506f2c727730b154010262df5b910eb17dbe4b8cb38" dependencies = [ "actix-codec", "actix-http", @@ -171,6 +171,7 @@ dependencies = [ "encoding_rs", "futures-core", "futures-util", + "impl-more", "itoa", 
"language-tags", "log", @@ -179,13 +180,13 @@ dependencies = [ "pin-project-lite", "regex", "regex-lite", - "serde 1.0.203", + "serde 1.0.215", "serde_json", "serde_urlencoded", "smallvec", "socket2", "time", - "url 2.5.2", + "url 2.5.4", ] [[package]] @@ -197,23 +198,23 @@ dependencies = [ "actix-router", "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.90", ] [[package]] name = "addr2line" -version = "0.22.0" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678" +checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" dependencies = [ "gimli", ] [[package]] -name = "adler" -version = "1.0.2" +name = "adler2" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" +checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" [[package]] name = "ahash" @@ -278,9 +279,9 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.14" +version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418c75fa768af9c03be99d17643f93f79bbba589895012a80e3452a19ddda15b" +checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" dependencies = [ "anstyle", "anstyle-parse", @@ -293,43 +294,43 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.7" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "038dfcf04a5feb68e9c60b21c9625a54c2c0616e79b72b0fd87075a056ae1d1b" +checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" [[package]] name = "anstyle-parse" -version = "0.2.4" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c03a11a9034d92058ceb6ee011ce58af4a9bf61491aa7e1e59ecd24bd40d22d4" +checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.1.0" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad186efb764318d35165f1758e7dcef3b10628e26d41a44bc5550652e6804391" +checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.3" +version = "3.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61a38449feb7068f52bb06c12759005cf459ee52bb4adc1d5a7c4322d716fb19" +checksum = "2109dbce0e72be3ec00bed26e6a7479ca384ad226efdd66db8fa2e3a38c83125" dependencies = [ "anstyle", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "anyhow" -version = "1.0.86" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" +checksum = "c1fd03a028ef38ba2276dce7e33fcd6369c158a1bca17946c4b1b701891c1ff7" [[package]] name = "arc-swap" @@ -349,21 +350,27 @@ version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "47e4f2b81832e72834d7518d8487a0396a28cc408186a2e8854c0f98011faf12" dependencies = [ - "serde 1.0.203", + "serde 1.0.215", "serde_json", ] [[package]] name = "async-trait" -version = "0.1.80" +version = "0.1.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" +checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.90", ] +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + [[package]] name = "atty" version = "0.2.14" @@ -377,23 +384,23 @@ dependencies = [ [[package]] name = "autocfg" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "backtrace" -version = "0.3.73" +version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a" +checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" dependencies = [ "addr2line", - "cc", "cfg-if", "libc", "miniz_oxide", "object", "rustc-demangle", + "windows-targets 0.52.6", ] [[package]] @@ -439,9 +446,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" +checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" [[package]] name = "block-buffer" @@ -496,37 +503,37 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.6.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" +checksum = "325918d6fe32f23b19878fe4b34794ae41fc19ddbe53b10571a4874d44ffd39b" [[package]] name = "bytestring" -version = "1.3.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74d80203ea6b29df88012294f62733de21cfeab47f17b41af3a38bc30a03ee72" +checksum = "e465647ae23b2823b0753f50decb2d5a86d2bb2cac04788fafd1f80e45378e5f" dependencies = [ "bytes", ] [[package]] name = "cadence" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f338b979d9ebfff4bb9801ae8f3af0dc3615f7f1ca963f2e4782bcf9acb3753" +checksum = "62fd689c825a93386a2ac05a46f88342c6df9ec3e79416f665650614e92e7475" dependencies = [ "crossbeam-channel", ] [[package]] name = "cc" -version = "1.0.99" +version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96c51067fd44124faa7f870b4b1c969379ad32b2ba805aa959430ceaa384f695" +checksum = "27f657647bcff5394bf56c7317665bbf790a137a50eaaa5c6bfbb9e27a518f2d" dependencies = [ "jobserver", "libc", - "once_cell", + "shlex", ] [[package]] @@ -544,18 +551,24 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + [[package]] name = "chrono" -version = "0.4.38" +version = "0.4.39" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" +checksum = "7e36cc9d416881d2e24f9a963be5fb1cd90966419ac844274161d10488b3e825" dependencies = [ "android-tzdata", "iana-time-zone", "js-sys", "num-traits 0.2.19", "wasm-bindgen", - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -586,18 +599,18 @@ dependencies = [ [[package]] name = "cmake" -version = "0.1.50" +version = "0.1.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31c789563b815f77f4250caee12365734369f942439b7defd71e18a48197130" +checksum = "c682c223677e0e5b6b7f63a64b9351844c3f1b1678a68b7ee617e30fb082620e" dependencies = [ "cc", ] [[package]] name = "colorchoice" -version = "1.0.1" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b6a852b24ab71dffc585bcb46eaf7959d175cb865a7152e35b348d1b2960422" +checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" [[package]] name = "colored" @@ -618,7 +631,7 @@ dependencies = [ "lazy_static", "nom 5.1.3", "rust-ini", - "serde 1.0.203", + "serde 1.0.215", "serde-hjson", "serde_json", "toml", @@ -644,15 +657,15 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.6" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "cpufeatures" -version = "0.2.12" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" +checksum = "16b80225097f2e5ae4e7179dd2266824648f3e2f49d9134d584b76389d31c4c3" dependencies = [ "libc", ] @@ -693,9 +706,9 @@ dependencies = [ [[package]] name = "curl" -version = "0.4.46" +version = "0.4.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e2161dd6eba090ff1594084e95fd67aeccf04382ffea77999ea94ed42ec67b6" +checksum = "d9fb4d13a1be2b58f14d60adba57c9834b78c62fd86c3e76a148f732686e9265" dependencies = [ "curl-sys", "libc", @@ -708,9 +721,9 @@ dependencies = [ [[package]] name = "curl-sys" -version = "0.4.72+curl-8.6.0" +version = "0.4.78+curl-8.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29cbdc8314c447d11e8fd156dcdd031d9e02a7a976163e396b548c03153bc9ea" +checksum = "8eec768341c5c7789611ae51cf6c459099f22e64a5d5d0ce4892434e33821eaf" dependencies = [ "cc", "libc", @@ -723,9 +736,9 @@ dependencies = [ [[package]] name = "darling" -version = "0.20.9" +version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83b2eb4d90d12bdda5ed17de686c2acb4c57914f8f921b8da7e112b5a36f3fe1" +checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" dependencies = [ "darling_core", "darling_macro", @@ -733,27 +746,27 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.20.9" +version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "622687fe0bac72a04e5599029151f5796111b90f1baaa9b544d807a5e31cd120" +checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.67", + "syn 2.0.90", ] [[package]] name = "darling_macro" -version = "0.20.9" +version = "0.20.10" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "733cabb43482b1a1b53eee8583c2b9e8684d592215ea83efd305dd31bc2f0178" +checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.67", + "syn 2.0.90", ] [[package]] @@ -782,7 +795,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef552e6f588e446098f6ba40d89ac146c8c7b64aade83c051ee00bb5d2bc18d" dependencies = [ - "serde 1.0.203", + "serde 1.0.215", "uuid", ] @@ -805,7 +818,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version", - "syn 2.0.67", + "syn 2.0.90", ] [[package]] @@ -816,6 +829,7 @@ checksum = "b28135ecf6b7d446b43e27e225622a038cc4e2930a1022f51cdb97ada19b8e4d" dependencies = [ "byteorder", "diesel_derives", + "libsqlite3-sys", "mysqlclient-sys", "r2d2", "url 1.7.2", @@ -884,6 +898,17 @@ dependencies = [ "winapi", ] +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + [[package]] name = "docopt" version = "1.1.1" @@ -892,7 +917,7 @@ checksum = "7f3f119846c823f9eafcf953a8f6ffb6ed69bf6240883261a7f13b634579a51f" dependencies = [ "lazy_static", "regex", - "serde 1.0.203", + "serde 1.0.215", "strsim 0.10.0", ] @@ -904,24 +929,24 @@ checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125" [[package]] name = "either" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dca9240753cf90908d7e4aac30f630662b02aebaa1b58a3cadabdb23385b58b" +checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" [[package]] name = "encoding_rs" -version = "0.8.34" +version = "0.8.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59" +checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" dependencies = [ "cfg-if", ] [[package]] name = "env_filter" -version = "0.1.0" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a009aa4810eb158359dda09d0c87378e4bbb89b5a801f016885a4707ba24f7ea" +checksum = "4f2c92ceda6ceec50f43169f9ee8424fe2db276791afde7b2cd8bc084cb376ab" dependencies = [ "log", "regex", @@ -942,9 +967,9 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.11.3" +version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38b35839ba51819680ba087cd351788c9a3c476841207e0b8cee0b04722343b9" +checksum = "e13fa619b91fb2381732789fc5de83b45675e882f66623b7d8cb4f643017018d" dependencies = [ "anstream", "anstyle", @@ -965,17 +990,17 @@ version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c138974f9d5e7fe373eb04df7cae98833802ae4b11c24ac7039a21d5af4b26c" dependencies = [ - "serde 1.0.203", + "serde 1.0.215", ] [[package]] name = "errno" -version = "0.3.9" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" +checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -992,9 +1017,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.30" 
+version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f54427cfd1c7829e2a139fcefea601bf088ebca651d2bf53ebc600eac295dae" +checksum = "c936bfdafb507ebbf50b8074c54fa31c5be9a1e7e5f467dd659697041407d07c" dependencies = [ "crc32fast", "miniz_oxide", @@ -1023,9 +1048,9 @@ checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" [[package]] name = "futures" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" dependencies = [ "futures-channel", "futures-core", @@ -1038,9 +1063,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", "futures-sink", @@ -1048,15 +1073,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-executor" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" dependencies = [ "futures-core", "futures-task", @@ -1065,38 +1090,38 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-macro" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.90", ] [[package]] name = "futures-sink" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" [[package]] name = "futures-task" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-util" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ "futures 0.1.31", "futures-channel", @@ -1136,16 +1161,16 @@ dependencies = [ [[package]] name = "gimli" -version = "0.29.0" 
+version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" [[package]] name = "glean" version = "0.17.15" dependencies = [ "chrono", - "serde 1.0.203", + "serde 1.0.215", "serde_derive", "serde_json", "uuid", @@ -1163,7 +1188,7 @@ version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "864a48916c62ddbd1dc289be6d041d8ca61160c9c6169298e5cf3da11baf8370" dependencies = [ - "futures 0.3.30", + "futures 0.3.31", "grpcio", "protobuf", ] @@ -1218,25 +1243,44 @@ dependencies = [ "tracing", ] +[[package]] +name = "h2" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccae279728d634d083c00f6099cb58f01cc99c145b84b8be2f6c74618d79922e" +dependencies = [ + "atomic-waker", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "http 1.2.0", + "indexmap", + "slab", + "tokio", + "tokio-util", + "tracing", +] + [[package]] name = "hashbrown" -version = "0.14.5" +version = "0.15.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" +checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" [[package]] name = "hawk" -version = "5.0.0" +version = "5.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ba86b7cbed4f24e509c720688eaf4963eac20d9341689bf69bcf5ee5e0f1cd2" +checksum = "ab302457b3e28e621daab18932d67a67328f29240bfaa5f604b7627ece1eacda" dependencies = [ "anyhow", - "base64 0.21.7", + "base64 0.22.1", "log", "once_cell", "ring", - "thiserror", - "url 2.5.2", + "thiserror 1.0.69", + "url 2.5.4", ] [[package]] @@ -1260,6 +1304,12 @@ version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" +[[package]] +name = "hermit-abi" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" + [[package]] name = "hex" version = "0.4.3" @@ -1317,9 +1367,9 @@ dependencies = [ [[package]] name = "http" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" +checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea" dependencies = [ "bytes", "fnv", @@ -1328,23 +1378,12 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" -dependencies = [ - "bytes", - "http 0.2.12", - "pin-project-lite", -] - -[[package]] -name = "http-body" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", - "http 1.1.0", + "http 1.2.0", ] [[package]] @@ -1355,16 +1394,16 @@ checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" dependencies = [ "bytes", "futures-util", - "http 1.1.0", - "http-body 1.0.0", + "http 1.2.0", + "http-body", "pin-project-lite", ] [[package]] name = 
"httparse" -version = "1.9.4" +version = "1.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" +checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" [[package]] name = "httpdate" @@ -1380,41 +1419,20 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.29" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f361cde2f109281a220d4307746cdfd5ee3f410da58a70377762396775634b33" +checksum = "97818827ef4f364230e16705d4706e2897df2bb60617d6ca15d598025a3c481f" dependencies = [ "bytes", "futures-channel", - "futures-core", "futures-util", - "h2", - "http 0.2.12", - "http-body 0.4.6", + "h2 0.4.7", + "http 1.2.0", + "http-body", "httparse", "httpdate", "itoa", "pin-project-lite", - "tokio", - "tower-service", - "tracing", - "want", -] - -[[package]] -name = "hyper" -version = "1.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe575dd17d0862a9a33781c8c4696a55c320909004a67a00fb286ba8b1bc496d" -dependencies = [ - "bytes", - "futures-channel", - "futures-util", - "http 1.1.0", - "http-body 1.0.0", - "httparse", - "itoa", - "pin-project-lite", "smallvec", "tokio", "want", @@ -1422,13 +1440,13 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.27.2" +version = "0.27.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee4be2c948921a1a5320b629c4193916ed787a7f7f293fd3f7f5a6c9de74155" +checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333" dependencies = [ "futures-util", - "http 1.1.0", - "hyper 1.3.1", + "http 1.2.0", + "hyper", "hyper-util", "rustls", "rustls-pki-types", @@ -1440,29 +1458,28 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.5" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b875924a60b96e5d7b9ae7b066540b1dd1cbd90d1828f54c92e02a283351c56" +checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" dependencies = [ "bytes", "futures-channel", "futures-util", - "http 1.1.0", - "http-body 1.0.0", - "hyper 1.3.1", + "http 1.2.0", + "http-body", + "hyper", "pin-project-lite", "socket2", "tokio", - "tower", "tower-service", "tracing", ] [[package]] name = "iana-time-zone" -version = "0.1.60" +version = "0.1.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" +checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -1481,6 +1498,124 @@ dependencies = [ "cc", ] +[[package]] +name = "icu_collections" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locid" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_locid_transform" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_locid_transform_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_locid_transform_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" + +[[package]] +name = "icu_normalizer" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "utf16_iter", + "utf8_iter", + "write16", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" + +[[package]] +name = "icu_properties" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_locid_transform", + "icu_properties_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" + +[[package]] +name = "icu_provider" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_provider_macros", + "stable_deref_trait", + "tinystr", + "writeable", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_provider_macros" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + [[package]] name = "ident_case" version = "1.0.1" @@ -1508,11 +1643,38 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "idna" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" +dependencies = [ + "icu_normalizer", + "icu_properties", +] + +[[package]] +name = "impl-more" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aae21c3177a27788957044151cc2800043d127acaa460a47ebb9b84dfa2c6aa0" + [[package]] name = "indexmap" -version = "2.2.6" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" +checksum = "62f822373a4fe84d4bb149bf54e584a7f4abec90e072ed49cda0edea5b95471f" dependencies = [ "equivalent", "hashbrown", @@ -1526,48 +1688,49 @@ checksum = "b248f5224d1d606005e02c97f5aa4e88eeb230488bcc03bc9ca4d7991399f2b5" [[package]] name = "ipnet" -version = "2.9.0" +version 
= "2.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" +checksum = "ddc24109865250148c2e0f3d25d4f0f479571723792d3802153c60922a4fb708" [[package]] name = "is-terminal" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f23ff5ef2b80d608d61efee834934d862cd92461afc0560dedf493e4c033738b" +checksum = "261f68e344040fbd0edea105bef17c66edf46f984ddb1115b775ce31be948f4b" dependencies = [ - "hermit-abi 0.3.9", + "hermit-abi 0.4.0", "libc", "windows-sys 0.52.0", ] [[package]] name = "is_terminal_polyfill" -version = "1.70.0" +version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8478577c03552c21db0e2724ffb8986a5ce7af88107e6be5d2ee6e158c12800" +checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" [[package]] name = "itoa" -version = "1.0.11" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" +checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674" [[package]] name = "jobserver" -version = "0.1.31" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2b099aaa34a9751c5bf0878add70444e1ed2dd73f347be99003d4577277de6e" +checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" dependencies = [ "libc", ] [[package]] name = "js-sys" -version = "0.3.69" +version = "0.3.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" +checksum = "6717b6b5b077764fb5966237269cb3c64edddde4b14ce42647430a78ced9e7b7" dependencies = [ + "once_cell", "wasm-bindgen", ] @@ -1580,7 +1743,7 @@ dependencies = [ "base64 0.21.7", "js-sys", "ring", - "serde 1.0.203", + "serde 1.0.215", "serde_json", ] @@ -1592,9 +1755,9 @@ checksum = "d4345964bb142484797b161f473a503a434de77149dd8c7427788c6e13379388" [[package]] name = "lazy_static" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "lazycell" @@ -1617,18 +1780,18 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.155" +version = "0.2.168" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" +checksum = "5aaeb2981e0606ca11d79718f8bb01164f1d6ed75080182d3abf017e6d244b6d" [[package]] name = "libloading" -version = "0.8.3" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" +checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" dependencies = [ "cfg-if", - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -1637,15 +1800,25 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "libc", ] +[[package]] +name = "libsqlite3-sys" +version = "0.22.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"290b64917f8b0cb885d9de0f9959fe1f775d7fa12f1da2db9001c1c8ab60f89d" +dependencies = [ + "pkg-config", + "vcpkg", +] + [[package]] name = "libz-sys" -version = "1.1.18" +version = "1.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c15da26e5af7e25c90b37a2d75cdbf940cf4a55316de9d84c679c9b8bfabf82e" +checksum = "d2d16453e800a8cf6dd2fc3eb4bc99b786a9b90c663b8559a5b1a041bf89e472" dependencies = [ "cc", "libc", @@ -1665,6 +1838,12 @@ version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" +[[package]] +name = "litemap" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104" + [[package]] name = "local-channel" version = "0.1.5" @@ -1694,9 +1873,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.21" +version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" +checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" [[package]] name = "matches" @@ -1754,35 +1933,40 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.7.4" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" +checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1" dependencies = [ - "adler", + "adler2", ] [[package]] name = "mio" -version = "0.8.11" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" +checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" dependencies = [ "libc", "log", "wasi", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "mockito" -version = "1.4.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2f6e023aa5bdf392aa06c78e4a4e6d498baab5138d0c993503350ebbc37bf1e" +checksum = "652cd6d169a36eaf9d1e6bce1a221130439a966d7f27858af66a33a66e9c4ee2" dependencies = [ "assert-json-diff", + "bytes", "colored", - "futures-core", - "hyper 0.14.29", + "futures-util", + "http 1.2.0", + "http-body", + "http-body-util", + "hyper", + "hyper-util", "log", "rand", "regex", @@ -1794,9 +1978,9 @@ dependencies = [ [[package]] name = "mysqlclient-sys" -version = "0.2.3" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "879ce08e38739c54d87b7f8332a476004fe2a095f40a142a36f889779d9942b7" +checksum = "f61b381528ba293005c42a409dd73d034508e273bf90481f17ec2e964a6e969b" dependencies = [ "pkg-config", "vcpkg", @@ -1859,18 +2043,18 @@ dependencies = [ [[package]] name = "object" -version = "0.36.0" +version = "0.36.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "576dfe1fc8f9df304abb159d767a29d0476f7750fbf8aa7ad07816004a207434" +checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.19.0" +version = "1.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" +checksum = 
"1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" [[package]] name = "openssl-probe" @@ -1880,9 +2064,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.102" +version = "0.9.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c597637d56fbc83893a35eb0dd04b2b8e7a50c91e64e9493e398b5df4fb45fa2" +checksum = "45abf306cbf99debc8195b66b7346498d7b10c210de50418b5ccd7ceba08c741" dependencies = [ "cc", "libc", @@ -1892,12 +2076,12 @@ dependencies = [ [[package]] name = "os_info" -version = "3.8.2" +version = "3.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae99c7fa6dd38c7cafe1ec085e804f8f555a2f8659b0dbe03f1f9963a9b51092" +checksum = "e5ca711d8b83edbb00b44d504503cd247c9c0bd8b0fa2694f2a1a3d8165379ce" dependencies = [ "log", - "serde 1.0.203", + "serde 1.0.215", "windows-sys 0.52.0", ] @@ -1921,7 +2105,7 @@ dependencies = [ "libc", "redox_syscall", "smallvec", - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -1948,31 +2132,11 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" -[[package]] -name = "pin-project" -version = "1.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" -dependencies = [ - "pin-project-internal", -] - -[[package]] -name = "pin-project-internal" -version = "1.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.67", -] - [[package]] name = "pin-project-lite" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" +checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" [[package]] name = "pin-utils" @@ -1982,15 +2146,15 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" +checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" [[package]] name = "portable-atomic" -version = "1.6.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7170ef9988bc169ba16dd36a7fa041e5c4cbeb6a35b76d4c03daded371eae7c0" +checksum = "280dc24453071f1b63954171985a0b0d30058d287960968b9b2aca264c8d4ee6" [[package]] name = "powerfmt" @@ -2000,9 +2164,12 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "ppv-lite86" -version = "0.2.17" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +dependencies = [ + "zerocopy", +] [[package]] name = "proc-macro-error" @@ -2030,9 +2197,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.86" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0" dependencies = [ "unicode-ident", ] @@ -2045,9 +2212,9 @@ checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" [[package]] name = "pyo3" -version = "0.22.5" +version = "0.22.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d922163ba1f79c04bc49073ba7b32fd5a8d3b76a87c955921234b8e77333c51" +checksum = "f402062616ab18202ae8319da13fa4279883a2b8a9d9f83f20dbade813ce1884" dependencies = [ "cfg-if", "indoc", @@ -2063,9 +2230,9 @@ dependencies = [ [[package]] name = "pyo3-build-config" -version = "0.22.5" +version = "0.22.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc38c5feeb496c8321091edf3d63e9a6829eab4b863b4a6a65f26f3e9cc6b179" +checksum = "b14b5775b5ff446dd1056212d778012cbe8a0fbffd368029fd9e25b514479c38" dependencies = [ "once_cell", "target-lexicon", @@ -2073,9 +2240,9 @@ dependencies = [ [[package]] name = "pyo3-ffi" -version = "0.22.5" +version = "0.22.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94845622d88ae274d2729fcefc850e63d7a3ddff5e3ce11bd88486db9f1d357d" +checksum = "9ab5bcf04a2cdcbb50c7d6105de943f543f9ed92af55818fd17b660390fc8636" dependencies = [ "libc", "pyo3-build-config", @@ -2083,81 +2250,86 @@ dependencies = [ [[package]] name = "pyo3-macros" -version = "0.22.5" +version = "0.22.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e655aad15e09b94ffdb3ce3d217acf652e26bbc37697ef012f5e5e348c716e5e" +checksum = "0fd24d897903a9e6d80b968368a34e1525aeb719d568dba8b3d4bfa5dc67d453" dependencies = [ "proc-macro2", "pyo3-macros-backend", "quote", - "syn 2.0.67", + "syn 2.0.90", ] [[package]] name = "pyo3-macros-backend" -version = "0.22.5" +version = "0.22.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae1e3f09eecd94618f60a455a23def79f79eba4dc561a97324bf9ac8c6df30ce" +checksum = "36c011a03ba1e50152b4b394b479826cad97e7a21eb52df179cd91ac411cbfbe" dependencies = [ "heck", "proc-macro2", "pyo3-build-config", "quote", - "syn 2.0.67", + "syn 2.0.90", ] [[package]] name = "quinn" -version = "0.11.2" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4ceeeeabace7857413798eb1ffa1e9c905a9946a57d81fb69b4b71c4d8eb3ad" +checksum = "62e96808277ec6f97351a2380e6c25114bc9e67037775464979f3037c92d05ef" dependencies = [ "bytes", "pin-project-lite", "quinn-proto", "quinn-udp", - "rustc-hash 1.1.0", + "rustc-hash 2.1.0", "rustls", - "thiserror", + "socket2", + "thiserror 2.0.6", "tokio", "tracing", ] [[package]] name = "quinn-proto" -version = "0.11.8" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fadfaed2cd7f389d0161bb73eeb07b7b78f8691047a6f3e73caaeae55310a4a6" +checksum = "a2fe5ef3495d7d2e377ff17b1a8ce2ee2ec2a18cde8b6ad6619d65d0701c135d" dependencies = [ "bytes", + "getrandom", "rand", "ring", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", "rustls", + "rustls-pki-types", "slab", - "thiserror", + "thiserror 2.0.6", "tinyvec", "tracing", + "web-time", ] [[package]] name = "quinn-udp" -version = "0.5.2" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9096629c45860fc7fb143e125eb826b5e721e10be3263160c7d60ca832cf8c46" +checksum = "52cd4b1eff68bf27940dd39811292c49e007f4d0b4c357358dc9b0197be6b527" dependencies = [ + "cfg_aliases", "libc", 
"once_cell", "socket2", "tracing", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "quote" -version = "1.0.36" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" +checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" dependencies = [ "proc-macro2", ] @@ -2205,29 +2377,29 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.2" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c82cf8cff14456045f55ec4241383baeff27af886adb72ffb2162f99911de0fd" +checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", ] [[package]] name = "redox_users" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891" +checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ "getrandom", "libredox", - "thiserror", + "thiserror 1.0.69", ] [[package]] name = "regex" -version = "1.10.5" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b91213439dad192326a0d7c6ee3955910425f441d7038e0d6933b0aec5c4517f" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", @@ -2237,9 +2409,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.7" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" dependencies = [ "aho-corasick", "memchr", @@ -2254,24 +2426,24 @@ checksum = "53a49587ad06b26609c52e423de037e7f57f20d53535d66e08c695f347df952a" [[package]] name = "regex-syntax" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "reqwest" -version = "0.12.5" +version = "0.12.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7d6d2a27d57148378eb5e111173f4276ad26340ecc5c49a4a2152167a2d6a37" +checksum = "a77c62af46e79de0a562e1a9849205ffcb7fc1238876e9bd743357570e04046f" dependencies = [ "base64 0.22.1", "bytes", "futures-core", "futures-util", - "http 1.1.0", - "http-body 1.0.0", + "http 1.2.0", + "http-body", "http-body-util", - "hyper 1.3.1", + "hyper", "hyper-rustls", "hyper-util", "ipnet", @@ -2285,19 +2457,19 @@ dependencies = [ "rustls", "rustls-pemfile", "rustls-pki-types", - "serde 1.0.203", + "serde 1.0.215", "serde_json", "serde_urlencoded", "sync_wrapper", "tokio", "tokio-rustls", "tower-service", - "url 2.5.2", + "url 2.5.4", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", "webpki-roots", - "winreg", + "windows-registry", ] [[package]] @@ -2335,37 +2507,37 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustc-hash" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" +checksum = 
"c7fb8039b3032c191086b10f11f319a6e99e1e82889c5cc6046f515c9db1d497" [[package]] name = "rustc_version" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ "semver", ] [[package]] name = "rustix" -version = "0.38.34" +version = "0.38.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" +checksum = "f93dc38ecbab2eb790ff964bb77fa94faf256fd3e73285fd7ba0903b76bedb85" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "errno", "libc", "linux-raw-sys", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "rustls" -version = "0.23.10" +version = "0.23.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05cff451f60db80f490f3c182b77c35260baace73209e9cdbbe526bfe3a4d402" +checksum = "934b404430bb06b3fae2cba809eb45a1ab1aecd64491213d7c3301b88393f8d1" dependencies = [ "once_cell", "ring", @@ -2377,25 +2549,27 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "2.1.2" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29993a25686778eb88d4189742cd713c9bce943bc54251a33509dc63cbacf73d" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" dependencies = [ - "base64 0.22.1", "rustls-pki-types", ] [[package]] name = "rustls-pki-types" -version = "1.7.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" +checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" +dependencies = [ + "web-time", +] [[package]] name = "rustls-webpki" -version = "0.102.4" +version = "0.102.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff448f7e92e913c4b7d4c6d8e4540a1724b319b4152b8aef6d4cf8339712b33e" +checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" dependencies = [ "ring", "rustls-pki-types", @@ -2404,9 +2578,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" +checksum = "0e819f2bc632f285be6d7cd36e25940d45b2391dd6d9b939e79de557f7014248" [[package]] name = "ryu" @@ -2425,11 +2599,11 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.23" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" +checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -2503,7 +2677,7 @@ dependencies = [ "once_cell", "rand", "sentry-types", - "serde 1.0.203", + "serde 1.0.215", "serde_json", ] @@ -2539,11 +2713,11 @@ dependencies = [ "debugid", "hex", "rand", - "serde 1.0.203", + "serde 1.0.215", "serde_json", - "thiserror", + "thiserror 1.0.69", "time", - "url 2.5.2", + "url 2.5.4", "uuid", ] @@ -2555,9 +2729,9 @@ checksum = "9dad3f759919b92c3068c696c15c3d17238234498bbdcc80f2c469606f948ac8" [[package]] name = "serde" -version = "1.0.203" +version = "1.0.215" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094" +checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f" dependencies = [ "serde_derive", ] @@ -2576,24 +2750,25 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.203" +version = "1.0.215" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba" +checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.90", ] [[package]] name = "serde_json" -version = "1.0.117" +version = "1.0.133" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "455182ea6142b14f93f4bc5320a2b31c1f266b66a4a5c858b013302a5d8cbfc3" +checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377" dependencies = [ "itoa", + "memchr", "ryu", - "serde 1.0.203", + "serde 1.0.215", ] [[package]] @@ -2605,7 +2780,7 @@ dependencies = [ "form_urlencoded", "itoa", "ryu", - "serde 1.0.203", + "serde 1.0.215", ] [[package]] @@ -2647,9 +2822,9 @@ dependencies = [ [[package]] name = "similar" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa42c91313f1d05da9b26f267f931cf178d4aba455b4c4622dd7355eb80c6640" +checksum = "1de1d4f81173b03af4c0cbed3c898f6bff5b870e4a7f5d6f4057d62a7a4b686e" [[package]] name = "slab" @@ -2703,7 +2878,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f400f1c5db96f1f52065e8931ca0c524cceb029f7537c9e6d5424488ca137ca0" dependencies = [ "chrono", - "serde 1.0.203", + "serde 1.0.215", "serde_json", "slog", ] @@ -2751,9 +2926,9 @@ checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "socket2" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" +checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8" dependencies = [ "libc", "windows-sys 0.52.0", @@ -2765,6 +2940,12 @@ version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + [[package]] name = "static_assertions" version = "1.1.0" @@ -2791,9 +2972,9 @@ checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "subtle" -version = "2.6.0" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d0208408ba0c3df17ed26eb06992cb1a1268d41b2c0e12e65203fbe3972cee5" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "syn" @@ -2808,9 +2989,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.67" +version = "2.0.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff8655ed1d86f3af4ee3fd3263786bc14245ad17c4c7e85ba7187fb3ae028c90" +checksum = "919d3b74a5dd0ccd15aeb8f93e7006bd9e14c295087c9896a110f490752bcf31" dependencies = [ "proc-macro2", "quote", @@ -2819,9 +3000,12 @@ dependencies = [ [[package]] name = "sync_wrapper" -version = "1.0.1" 
+version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" +dependencies = [ + "futures-core", +] [[package]] name = "syncserver" @@ -2838,19 +3022,19 @@ dependencies = [ "chrono", "docopt", "dyn-clone", - "futures 0.3.30", + "futures 0.3.31", "glean", "hawk", "hex", "hmac", "hostname", - "http 1.1.0", + "http 1.2.0", "lazy_static", "mime", "rand", "regex", "sentry", - "serde 1.0.203", + "serde 1.0.215", "serde_derive", "serde_json", "sha2", @@ -2866,7 +3050,7 @@ dependencies = [ "syncserver-settings", "syncstorage-db", "syncstorage-settings", - "thiserror", + "thiserror 1.0.69", "time", "tokenserver-auth", "tokenserver-common", @@ -2886,13 +3070,13 @@ dependencies = [ "actix-web", "backtrace", "cadence", - "futures 0.3.30", + "futures 0.3.31", "futures-util", "hkdf", "scopeguard", "sentry", "sentry-backtrace", - "serde 1.0.203", + "serde 1.0.215", "serde_json", "sha2", "slog", @@ -2907,10 +3091,10 @@ dependencies = [ "deadpool", "diesel", "diesel_migrations", - "futures 0.3.30", - "http 1.1.0", + "futures 0.3.31", + "http 1.2.0", "syncserver-common", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -2919,12 +3103,12 @@ version = "0.17.15" dependencies = [ "config", "num_cpus", - "serde 1.0.203", + "serde 1.0.215", "slog-scope", "syncserver-common", "syncstorage-settings", "tokenserver-settings", - "url 2.5.2", + "url 2.5.4", ] [[package]] @@ -2933,8 +3117,8 @@ version = "0.17.15" dependencies = [ "async-trait", "cadence", - "env_logger 0.11.3", - "futures 0.3.30", + "env_logger 0.11.5", + "futures 0.3.31", "hostname", "lazy_static", "log", @@ -2947,6 +3131,7 @@ dependencies = [ "syncstorage-mysql", "syncstorage-settings", "syncstorage-spanner", + "syncstorage-sqlite", "tokio", ] @@ -2959,14 +3144,14 @@ dependencies = [ "chrono", "diesel", "diesel_migrations", - "futures 0.3.30", - "http 1.1.0", + "futures 0.3.31", + "http 1.2.0", "lazy_static", - "serde 1.0.203", + "serde 1.0.215", "serde_json", "syncserver-common", "syncserver-db-common", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -2979,17 +3164,18 @@ dependencies = [ "diesel", "diesel_logger", "diesel_migrations", - "env_logger 0.11.3", - "futures 0.3.30", - "http 1.1.0", + "env_logger 0.11.5", + "futures 0.3.31", + "http 1.2.0", "slog-scope", "syncserver-common", "syncserver-db-common", "syncserver-settings", "syncstorage-db-common", "syncstorage-settings", - "thiserror", - "url 2.5.2", + "syncstorage-sql-db-common", + "thiserror 1.0.69", + "url 2.5.4", ] [[package]] @@ -2997,7 +3183,7 @@ name = "syncstorage-settings" version = "0.17.15" dependencies = [ "rand", - "serde 1.0.203", + "serde 1.0.215", "syncserver-common", "time", ] @@ -3011,12 +3197,12 @@ dependencies = [ "backtrace", "cadence", "deadpool", - "env_logger 0.11.3", + "env_logger 0.11.5", "form_urlencoded", - "futures 0.3.30", + "futures 0.3.31", "google-cloud-rust-raw", "grpcio", - "http 1.1.0", + "http 1.2.0", "log", "protobuf", "slog-scope", @@ -3024,12 +3210,70 @@ dependencies = [ "syncserver-db-common", "syncstorage-db-common", "syncstorage-settings", - "thiserror", + "thiserror 1.0.69", "tokio", - "url 2.5.2", + "url 2.5.4", "uuid", ] +[[package]] +name = "syncstorage-sql-db-common" +version = "0.17.15" +dependencies = [ + "async-trait", + "backtrace", + "base64 0.22.1", + "diesel", + "diesel_logger", + "diesel_migrations", + "env_logger 0.11.5", + "futures 0.3.31", + 
"http 1.2.0", + "slog-scope", + "syncserver-common", + "syncserver-db-common", + "syncserver-settings", + "syncstorage-db-common", + "syncstorage-settings", + "thiserror 1.0.69", + "url 2.5.4", +] + +[[package]] +name = "syncstorage-sqlite" +version = "0.17.15" +dependencies = [ + "async-trait", + "backtrace", + "base64 0.22.1", + "diesel", + "diesel_logger", + "diesel_migrations", + "env_logger 0.11.5", + "futures 0.3.31", + "http 1.2.0", + "slog-scope", + "syncserver-common", + "syncserver-db-common", + "syncserver-settings", + "syncstorage-db-common", + "syncstorage-settings", + "syncstorage-sql-db-common", + "thiserror 1.0.69", + "url 2.5.4", +] + +[[package]] +name = "synstructure" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + [[package]] name = "take_mut" version = "0.2.2" @@ -3038,9 +3282,9 @@ checksum = "f764005d11ee5f36500a149ace24e00e3da98b0158b3e2d53a7495660d3f4d60" [[package]] name = "target-lexicon" -version = "0.12.14" +version = "0.12.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1fc403891a21bcfb7c37834ba66a547a8f402146eba7265b5a6d88059c9ff2f" +checksum = "61c41af27dd6d1e27b1b16b489db798443478cef1f06a660c96db617ba5de3b1" [[package]] name = "term" @@ -3073,22 +3317,42 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.61" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fec2a1820ebd077e2b90c4df007bebf344cd394098a13c563957d0afc83ea47" +dependencies = [ + "thiserror-impl 2.0.6", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ - "thiserror-impl", + "proc-macro2", + "quote", + "syn 2.0.90", ] [[package]] name = "thiserror-impl" -version = "1.0.61" +version = "2.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" +checksum = "d65750cab40f4ff1929fb1ba509e9914eb756131cef4210da8d5d700d26f6312" dependencies = [ "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.90", ] [[package]] @@ -3103,15 +3367,15 @@ dependencies = [ [[package]] name = "time" -version = "0.3.36" +version = "0.3.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" +checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21" dependencies = [ "deranged", "itoa", "num-conv", "powerfmt", - "serde 1.0.203", + "serde 1.0.215", "time-core", "time-macros", ] @@ -3124,19 +3388,29 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" +checksum = 
"2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de" dependencies = [ "num-conv", "time-core", ] +[[package]] +name = "tinystr" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +dependencies = [ + "displaydoc", + "zerovec", +] + [[package]] name = "tinyvec" -version = "1.6.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" dependencies = [ "tinyvec_macros", ] @@ -3154,7 +3428,7 @@ dependencies = [ "async-trait", "base64 0.22.1", "dyn-clone", - "futures 0.3.30", + "futures 0.3.31", "hex", "hkdf", "hmac", @@ -3163,12 +3437,12 @@ dependencies = [ "pyo3", "reqwest", "ring", - "serde 1.0.203", + "serde 1.0.215", "serde_json", "sha2", "slog-scope", "syncserver-common", - "thiserror", + "thiserror 1.0.69", "tokenserver-common", "tokenserver-settings", "tokio", @@ -3180,12 +3454,12 @@ version = "0.17.15" dependencies = [ "actix-web", "backtrace", - "http 1.1.0", + "http 1.2.0", "jsonwebtoken", - "serde 1.0.203", + "serde 1.0.215", "serde_json", "syncserver-common", - "thiserror", + "thiserror 1.0.69", "tokio", ] @@ -3198,18 +3472,98 @@ dependencies = [ "diesel", "diesel_logger", "diesel_migrations", - "env_logger 0.11.3", - "futures 0.3.30", - "http 1.1.0", - "serde 1.0.203", + "env_logger 0.11.5", + "futures 0.3.31", + "http 1.2.0", + "serde 1.0.215", + "serde_derive", + "serde_json", + "slog-scope", + "syncserver-common", + "syncserver-db-common", + "syncserver-settings", + "thiserror 1.0.69", + "tokenserver-common", + "tokenserver-db-common", + "tokenserver-db-mysql", + "tokenserver-db-sqlite", + "tokenserver-settings", + "tokio", +] + +[[package]] +name = "tokenserver-db-common" +version = "0.17.15" +dependencies = [ + "async-trait", + "backtrace", + "diesel", + "diesel_logger", + "diesel_migrations", + "env_logger 0.11.5", + "futures 0.3.31", + "http 1.2.0", + "serde 1.0.215", + "serde_derive", + "serde_json", + "slog-scope", + "syncserver-common", + "syncserver-db-common", + "syncserver-settings", + "thiserror 1.0.69", + "tokenserver-common", + "tokenserver-settings", + "tokio", +] + +[[package]] +name = "tokenserver-db-mysql" +version = "0.17.15" +dependencies = [ + "async-trait", + "backtrace", + "diesel", + "diesel_logger", + "diesel_migrations", + "env_logger 0.11.5", + "futures 0.3.31", + "http 1.2.0", + "serde 1.0.215", "serde_derive", "serde_json", "slog-scope", "syncserver-common", "syncserver-db-common", "syncserver-settings", - "thiserror", + "thiserror 1.0.69", "tokenserver-common", + "tokenserver-db-common", + "tokenserver-settings", + "tokio", +] + +[[package]] +name = "tokenserver-db-sqlite" +version = "0.17.15" +dependencies = [ + "async-trait", + "backtrace", + "diesel", + "diesel_logger", + "diesel_migrations", + "env_logger 0.11.5", + "futures 0.3.31", + "http 1.2.0", + "serde 1.0.215", + "serde_derive", + "serde_json", + "slog-scope", + "syncserver-common", + "syncserver-db-common", + "syncserver-settings", + "thiserror 1.0.69", + "tokenserver-common", + "tokenserver-db-common", "tokenserver-settings", "tokio", ] @@ -3219,15 +3573,15 @@ name = "tokenserver-settings" version = "0.17.15" dependencies = [ "jsonwebtoken", - "serde 1.0.203", + "serde 1.0.215", "tokenserver-common", ] [[package]] name = "tokio" -version = "1.38.0" +version = "1.42.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba4f4a02a7a80d6f274636f0aa95c7e383b912d41fe721a31f29e29698585a4a" +checksum = "5cec9b21b0450273377fc97bd4c33a8acffc8c996c987a7c5b319a0083707551" dependencies = [ "backtrace", "bytes", @@ -3238,36 +3592,35 @@ dependencies = [ "signal-hook-registry", "socket2", "tokio-macros", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "tokio-macros" -version = "2.3.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" +checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.90", ] [[package]] name = "tokio-rustls" -version = "0.26.0" +version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" +checksum = "5f6d0975eaace0cf0fcadee4e4aaa5da15b5c079146f2cffb67c113be122bf37" dependencies = [ "rustls", - "rustls-pki-types", "tokio", ] [[package]] name = "tokio-util" -version = "0.7.11" +version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" +checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078" dependencies = [ "bytes", "futures-core", @@ -3282,64 +3635,31 @@ version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" dependencies = [ - "serde 1.0.203", -] - -[[package]] -name = "tower" -version = "0.4.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" -dependencies = [ - "futures-core", - "futures-util", - "pin-project", - "pin-project-lite", - "tokio", - "tower-layer", - "tower-service", + "serde 1.0.215", ] -[[package]] -name = "tower-layer" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" - [[package]] name = "tower-service" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" -version = "0.1.40" +version = "0.1.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" dependencies = [ "log", "pin-project-lite", - "tracing-attributes", "tracing-core", ] -[[package]] -name = "tracing-attributes" -version = "0.1.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.67", -] - [[package]] name = "tracing-core" -version = "0.1.32" +version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" +checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" dependencies = [ "once_cell", "valuable", @@ -3347,9 +3667,9 @@ 
dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.18" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" +checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" dependencies = [ "tracing-core", ] @@ -3377,30 +3697,30 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" +checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" [[package]] name = "unicode-ident" -version = "1.0.12" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" +checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83" [[package]] name = "unicode-normalization" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" +checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" dependencies = [ "tinyvec", ] [[package]] name = "unicode-width" -version = "0.1.13" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0336d538f7abc86d282a4189614dfaa90810dfc2c6f6427eaf88e16311dd225d" +checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" [[package]] name = "unindent" @@ -3427,14 +3747,14 @@ dependencies = [ [[package]] name = "url" -version = "2.5.2" +version = "2.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" +checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" dependencies = [ "form_urlencoded", - "idna 0.5.0", + "idna 1.0.3", "percent-encoding 2.3.1", - "serde 1.0.203", + "serde 1.0.215", ] [[package]] @@ -3443,6 +3763,18 @@ version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" +[[package]] +name = "utf16_iter" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + [[package]] name = "utf8parse" version = "0.2.2" @@ -3456,7 +3788,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8c5f0a0af699448548ad1a2fbf920fb4bee257eae39953ba95cb84891a0446a" dependencies = [ "getrandom", - "serde 1.0.203", + "serde 1.0.215", ] [[package]] @@ -3468,24 +3800,24 @@ dependencies = [ "idna 0.5.0", "once_cell", "regex", - "serde 1.0.203", + "serde 1.0.215", "serde_derive", "serde_json", - "url 2.5.2", + "url 2.5.4", ] [[package]] name = "validator_derive" -version = "0.18.1" +version = "0.18.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55591299b7007f551ed1eb79a684af7672c19c3193fb9e0a31936987bb2438ec" +checksum = "df0bcf92720c40105ac4b2dda2a4ea3aa717d4d6a862cc217da653a4bd5c6b10" dependencies = [ "darling", "once_cell", "proc-macro-error", 
"proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.90", ] [[package]] @@ -3508,9 +3840,9 @@ checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" [[package]] name = "version_check" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" [[package]] name = "walkdir" @@ -3539,46 +3871,47 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.92" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" +checksum = "a474f6281d1d70c17ae7aa6a613c87fce69a127e2624002df63dcb39d6cf6396" dependencies = [ "cfg-if", + "once_cell", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.92" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" +checksum = "5f89bb38646b4f81674e8f5c3fb81b562be1fd936d84320f3264486418519c79" dependencies = [ "bumpalo", "log", - "once_cell", "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.90", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.42" +version = "0.4.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" +checksum = "38176d9b44ea84e9184eff0bc34cc167ed044f816accfe5922e54d84cf48eca2" dependencies = [ "cfg-if", "js-sys", + "once_cell", "wasm-bindgen", "web-sys", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.92" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" +checksum = "2cc6181fd9a7492eef6fef1f33961e3695e4579b9872a6f7c83aee556666d4fe" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3586,28 +3919,38 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.92" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" +checksum = "30d7a95b763d3c45903ed6c81f156801839e5ee968bb07e534c44df0fcd330c2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.90", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.92" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" +checksum = "943aab3fdaaa029a6e0271b35ea10b72b943135afe9bffca82384098ad0e06a6" [[package]] name = "web-sys" -version = "0.3.69" +version = "0.3.76" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04dd7223427d52553d3702c004d3b2fe07c148165faa56313cb00211e31c12bc" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "web-time" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" dependencies = [ "js-sys", "wasm-bindgen", @@ -3615,9 +3958,9 @@ dependencies = [ [[package]] 
name = "webpki-roots" -version = "0.26.3" +version = "0.26.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd7c23921eeb1713a4e851530e9b9756e4fb0e89978582942612524cf09f01cd" +checksum = "5d642ff16b7e79272ae451b7322067cdc17cadf68c23264be9d94a32319efe7e" dependencies = [ "rustls-pki-types", ] @@ -3652,11 +3995,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d4cc384e1e73b93bafa6fb4f1df8c41695c8a91cf9c4c64358067d15a7b6c6b" +checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -3672,7 +4015,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" dependencies = [ "windows-core", - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -3681,7 +4024,37 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.52.5", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-registry" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" +dependencies = [ + "windows-result", + "windows-strings", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-result" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-strings" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" +dependencies = [ + "windows-result", + "windows-targets 0.52.6", ] [[package]] @@ -3699,7 +4072,16 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.5", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", ] [[package]] @@ -3719,18 +4101,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm 0.52.5", - "windows_aarch64_msvc 0.52.5", - "windows_i686_gnu 0.52.5", + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", "windows_i686_gnullvm", - "windows_i686_msvc 0.52.5", - "windows_x86_64_gnu 0.52.5", - "windows_x86_64_gnullvm 0.52.5", - "windows_x86_64_msvc 0.52.5", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", ] [[package]] @@ -3741,9 +4123,9 @@ 
checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_msvc" @@ -3753,9 +4135,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_i686_gnu" @@ -3765,15 +4147,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] name = "windows_i686_gnullvm" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_msvc" @@ -3783,9 +4165,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_x86_64_gnu" @@ -3795,9 +4177,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnullvm" @@ -3807,9 +4189,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_msvc" @@ -3819,19 +4201,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" - -[[package]] -name = "winreg" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a277a57398d4bfa075df44f501a17cfdf8542d224f0d36095a2adc7aee4ef0a5" -dependencies = [ - "cfg-if", - "windows-sys 0.48.0", -] +checksum = 
"589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "woothee" @@ -3843,6 +4215,18 @@ dependencies = [ "regex", ] +[[package]] +name = "write16" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" + +[[package]] +name = "writeable" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" + [[package]] name = "yaml-rust" version = "0.4.5" @@ -3852,24 +4236,70 @@ dependencies = [ "linked-hash-map", ] +[[package]] +name = "yoke" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" +dependencies = [ + "serde 1.0.215", + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", + "synstructure", +] + [[package]] name = "zerocopy" -version = "0.7.34" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae87e3fcd617500e5d106f0380cf7b77f3c6092aae37191433159dda23cfb087" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ + "byteorder", "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.34" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.90", +] + +[[package]] +name = "zerofrom" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cff3ee08c995dee1859d998dea82f7374f2826091dd9cd47def953cae446cd2e" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", + "synstructure", ] [[package]] @@ -3878,29 +4308,51 @@ version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +[[package]] +name = "zerovec" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + [[package]] name = "zstd" -version = "0.13.1" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d789b1514203a1120ad2429eae43a7bd32b90976a7bb8a05f7ec02fa88cc23a" +checksum = "fcf2b778a664581e31e389454a7072dab1647606d44f7feea22cd5abb9c9f3f9" dependencies = [ "zstd-safe", ] [[package]] name = 
"zstd-safe" -version = "7.1.0" +version = "7.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd99b45c6bc03a018c8b8a86025678c87e55526064e38f9df301989dce7ec0a" +checksum = "54a3ab4db68cea366acc5c897c7b4d4d1b8994a9cd6e6f841f8964566a419059" dependencies = [ "zstd-sys", ] [[package]] name = "zstd-sys" -version = "2.0.11+zstd.1.5.6" +version = "2.0.13+zstd.1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75652c55c0b6f3e6f12eb786fe1bc960396bf05a1eb3bf1f3691c3610ac2e6d4" +checksum = "38ff0f21cfee8f97d94cef41359e0c89aa6113028ab0291aa8ca0038995a95aa" dependencies = [ "cc", "pkg-config", diff --git a/Cargo.toml b/Cargo.toml index 7a545b6fa6..cb3acb8ed4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,11 +9,15 @@ members = [ "syncstorage-mysql", "syncstorage-settings", "syncstorage-spanner", + "syncstorage-sqlite", "tokenserver-auth", "tokenserver-common", "tokenserver-db", + "tokenserver-db-common", + "tokenserver-db-mysql", + "tokenserver-db-sqlite", "tokenserver-settings", - "syncserver", + "syncserver", "syncstorage-sql-db-common", ] default-members = ["syncserver"] @@ -23,6 +27,7 @@ authors = [ "Ben Bangert ", "Phil Jenvey ", "Mozilla Services Engineering ", + "Eragon ", ] edition = "2021" license = "MPL-2.0" diff --git a/Dockerfile b/Dockerfile index 770b43eb8b..2f073a61ab 100644 --- a/Dockerfile +++ b/Dockerfile @@ -19,7 +19,7 @@ RUN \ apt-get -q install -y --no-install-recommends libmysqlclient-dev cmake COPY --from=planner /app/recipe.json recipe.json -RUN cargo chef cook --release --no-default-features --features=syncstorage-db/$DATABASE_BACKEND --features=py_verifier --recipe-path recipe.json +RUN cargo chef cook --release --no-default-features --features=$DATABASE_BACKEND,py_verifier --recipe-path recipe.json FROM chef as builder ARG DATABASE_BACKEND=spanner @@ -46,7 +46,7 @@ ENV PATH=$PATH:/root/.cargo/bin RUN \ cargo --version && \ rustc --version && \ - cargo install --path ./syncserver --no-default-features --features=syncstorage-db/$DATABASE_BACKEND --features=py_verifier --locked --root /app && \ + cargo install --path ./syncserver --no-default-features --features=$DATABASE_BACKEND,py_verifier --locked --root /app && \ if [ "$DATABASE_BACKEND" = "spanner" ] ; then cargo install --path ./syncstorage-spanner --locked --root /app --bin purge_ttl ; fi FROM docker.io/library/debian:bullseye-slim diff --git a/Makefile b/Makefile index 73b95c7171..d3e964f94b 100644 --- a/Makefile +++ b/Makefile @@ -13,13 +13,17 @@ PATH_TO_GRPC_CERT = ../server-syncstorage/local/lib/python2.7/site-packages/grpc SRC_ROOT = $(shell pwd) PYTHON_SITE_PACKGES = $(shell $(SRC_ROOT)/venv/bin/python -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())") +clippy_sqlite: + # Matches what's run in circleci + cargo clippy --workspace --all-targets --no-default-features --features=sqlite,py_verifier -- -D warnings + clippy_mysql: # Matches what's run in circleci - cargo clippy --workspace --all-targets --no-default-features --features=syncstorage-db/mysql --features=py_verifier -- -D warnings + cargo clippy --workspace --all-targets --no-default-features --features=mysql,py_verifier -- -D warnings clippy_spanner: # Matches what's run in circleci - cargo clippy --workspace --all-targets --no-default-features --features=syncstorage-db/spanner --features=py_verifier -- -D warnings + cargo clippy --workspace --all-targets --no-default-features --features=spanner,py_verifier -- -D warnings clean: cargo clean @@ -53,9 +57,18 @@ 
run_mysql: python # See https://github.com/PyO3/pyo3/issues/1741 for discussion re: why we need to set the # below env var PYTHONPATH=$(PYTHON_SITE_PACKGES) \ - RUST_LOG=debug \ + RUST_LOG=debug \ + RUST_BACKTRACE=full \ + cargo run --no-default-features --features=mysql,py_verifier -- --config config/local.toml + +run_sqlite: python + PATH="./venv/bin:$(PATH)" \ + # See https://github.com/PyO3/pyo3/issues/1741 for discussion re: why we need to set the + # below env var + PYTHONPATH=$(PYTHON_SITE_PACKGES) \ + RUST_LOG=debug \ RUST_BACKTRACE=full \ - cargo run --no-default-features --features=syncstorage-db/mysql --features=py_verifier -- --config config/local.toml + cargo run --no-default-features --features=sqlite,py_verifier -- --config config/local.toml run_spanner: python GOOGLE_APPLICATION_CREDENTIALS=$(PATH_TO_SYNC_SPANNER_KEYS) \ @@ -63,13 +76,19 @@ run_spanner: python # See https://github.com/PyO3/pyo3/issues/1741 for discussion re: why we need to set the # below env var PYTHONPATH=$(PYTHON_SITE_PACKGES) \ - PATH="./venv/bin:$(PATH)" \ + PATH="./venv/bin:$(PATH)" \ RUST_LOG=debug \ RUST_BACKTRACE=full \ - cargo run --no-default-features --features=syncstorage-db/spanner --features=py_verifier -- --config config/local.toml + cargo run --no-default-features --features=spanner,py_verifier -- --config config/local.toml -test: +test_mysql: SYNC_SYNCSTORAGE__DATABASE_URL=mysql://sample_user:sample_password@localhost/syncstorage_rs \ SYNC_TOKENSERVER__DATABASE_URL=mysql://sample_user:sample_password@localhost/tokenserver_rs \ RUST_TEST_THREADS=1 \ - cargo test --workspace + cargo test --workspace --no-default-features --features=mysql,py_verifier + +test_sqlite: + SYNC_SYNCSTORAGE__DATABASE_URL=sqlite:///tmp/syncstorage.db\ + SYNC_TOKENSERVER__DATABASE_URL=sqlite:///tmp/tokenserver.db \ + RUST_TEST_THREADS=1 \ + cargo test --workspace --no-default-features --features=sqlite,py_verifier diff --git a/README.md b/README.md index c38e4a2859..cf39aad143 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,5 @@ -[![License: MPL 2.0][mpl-svg]][mpl] [![Build Status][circleci-badge]][circleci] [![Connect to Matrix via the Riot webapp][matrix-badge]][matrix] +[![License: MPL 2.0][mpl-svg]][mpl] [![Build Status][circleci-badge]][circleci] +[![Connect to Matrix via the Riot webapp][matrix-badge]][matrix] # Syncstorage-rs @@ -12,6 +13,7 @@ Mozilla Sync Storage built with [Rust](https://rust-lang.org). - [Local Setup](#local-setup) - [MySQL](#mysql) - [Spanner](#spanner) + - [Sqlite](#sqlite) - [Running via Docker](#running-via-docker) - [Connecting to Firefox](#connecting-to-firefox) - [Logging](#logging) @@ -37,8 +39,13 @@ Mozilla Sync Storage built with [Rust](https://rust-lang.org). - pkg-config - [Rust stable](https://rustup.rs) - python 3.9+ -- MySQL 5.7 (or compatible) - * libmysqlclient (`brew install mysql` on macOS, `apt install libmysqlclient-dev` on Ubuntu, `apt install libmariadb-dev-compat` on Debian) +- At least one database backend (depending on which one you'll be running) + * MySQL 5.7 (or compatible) + * libmysqlclient ( + `brew install mysql` on macOS, + `apt install libmysqlclient-dev` on Ubuntu, + `apt install libmariadb-dev-compat` on Debian) + * SQLite v3.24 or greater Depending on your OS, you may also need to install `libgrpcdev`, and `protobuf-compiler-grpc`. *Note*: if the code complies cleanly, @@ -47,18 +54,23 @@ are missing `libcurl4-openssl-dev`. ## Local Setup -1. Follow the instructions below to use either MySQL or Spanner as your DB. -2. 
Now `cp config/local.example.toml config/local.toml`. Open `config/local.toml` and make sure you have the desired settings configured. For a complete list of available configuration options, check out [docs/config.md](docs/config.md). +1. Follow the instructions below to use either MySQL, Spanner, or SQLite as your DB. +2. Now `cp config/local.example.toml config/local.toml`. +Open `config/local.toml` and make sure you have the desired settings configured. +For a complete list of available configuration options, check out [docs/config.md](docs/config.md). 3. To start a local server in debug mode, run either: - `make run_mysql` if using MySQL or, - - `make run_spanner` if using spanner. + - `make run_spanner` if using spanner or, + - `make run_sqlite` if using SQLite. - The above starts the server in debug mode, using your new `local.toml` file for config options. Or, simply `cargo run` with your own config options provided as env vars. + The above starts the server in debug mode, using your new `local.toml` file for config options. + Or, simply `cargo run` with your own config options provided as env vars. 4. Visit `http://localhost:8000/__heartbeat__` to make sure the server is running. ### MySQL -Durable sync needs only a valid mysql DSN in order to set up connections to a MySQL database. The database can be local and is usually specified with a DSN like: +Durable sync needs only a valid mysql DSN in order to set up connections to a MySQL database. +The database can be local and is usually specified with a DSN like: `mysql://_user_:_password_@_host_/_database_` @@ -80,34 +92,59 @@ GRANT ALL PRIVILEGES on tokenserver_rs.* to sample_user@localhost; ### Spanner #### Authenticating via OAuth -The correct way to authenticate with Spanner is by generating an OAuth token and pointing your local application server to the token. In order for this to work, your Google Cloud account must have the correct permissions; contact the Ops team to ensure the correct permissions are added to your account. - -First, install the Google Cloud command-line interface by following the instructions for your operating system [here](https://cloud.google.com/sdk/docs/install). Next, run the following to log in with your Google account (this should be the Google account associated with your Mozilla LDAP credentials): +The correct way to authenticate with Spanner is by generating an OAuth token +and pointing your local application server to the token. +In order for this to work, your Google Cloud account must have the correct permissions; +contact the Ops team to ensure the correct permissions are added to your account. + +First, install the Google Cloud command-line interface by following the instructions +for your operating system [here](https://cloud.google.com/sdk/docs/install). +Next, run the following to log in with your Google account +(this should be the Google account associated with your Mozilla LDAP credentials): ```sh gcloud auth application-default login ``` -The above command will prompt you to visit a webpage in your browser to complete the login process. Once completed, ensure that a file called `application_default_credentials.json` has been created in the appropriate directory (on Linux, this directory is `$HOME/.config/gcloud/`). The Google Cloud SDK knows to check this location for your credentials, so no further configuration is needed. +The above command will prompt you to visit a webpage in your browser to complete the login process. 
+Once completed, ensure that a file called `application_default_credentials.json` has been created in the appropriate directory +(on Linux, this directory is `$HOME/.config/gcloud/`). +The Google Cloud SDK knows to check this location for your credentials, +so no further configuration is needed. ##### Key Revocation -Accidents happen, and you may need to revoke the access of a set of credentials if they have been publicly leaked. To do this, run: +Accidents happen, and you may need to revoke the access of a set of credentials if they have been publicly leaked. +To do this, run: ```sh gcloud auth application-default revoke ``` -This will revoke the access of the credentials currently stored in the `application_default_credentials.json` file. **If the file in that location does not contain the leaked credentials, you will need to copy the file containing the leaked credentials to that location and re-run the above command.** You can ensure that the leaked credentials are no longer active by attempting to connect to Spanner using the credentials. If access has been revoked, your application server should print an error saying that the token has expired or has been revoked. +This will revoke the access of the credentials currently stored in the `application_default_credentials.json` file. +**If the file in that location does not contain the leaked credentials, +you will need to copy the file containing the leaked credentials to that location and re-run the above command.** +You can ensure that the leaked credentials are no longer active by attempting to connect to Spanner using the credentials. +If access has been revoked, your application server should print an error saying that the token has expired or has been revoked. #### Authenticating via Service Account -An alternative to authentication via application default credentials is authentication via a service account. **Note that this method of authentication is not recommended. Service accounts are intended to be used by other applications or virtual machines and not people. See [this article](https://cloud.google.com/iam/docs/service-accounts#what_are_service_accounts) for more information.** +An alternative to authentication via application default credentials is authentication via a service account. +**Note that this method of authentication is not recommended. +Service accounts are intended to be used by other applications or virtual machines and not people. +See [this article](https://cloud.google.com/iam/docs/service-accounts#what_are_service_accounts) for more information.** -Your system administrator will be able to tell you which service account keys have access to the Spanner instance to which you are trying to connect. Once you are given the email identifier of an active key, log into the [Google Cloud Console Service Accounts](https://console.cloud.google.com/iam-admin/serviceaccounts) page. Be sure to select the correct project. +Your system administrator will be able to tell you which service account keys +have access to the Spanner instance to which you are trying to connect. +Once you are given the email identifier of an active key, +log into the [Google Cloud Console Service Accounts](https://console.cloud.google.com/iam-admin/serviceaccounts) page. +Be sure to select the correct project. - Locate the email identifier of the access key and pick the vertical dot menu at the far right of the row. - Select "_Create Key_" from the pop-up menu. - Select "JSON" from the Dialog Box. 
-A proper key file will be downloaded to your local directory. It's important to safeguard that key file. For this example, we're going to name the file +A proper key file will be downloaded to your local directory. +It's important to safeguard that key file. +For this example, we're going to name the file `service-account.json`. -The proper key file is in JSON format. An example file is provided below, with private information replaced by "`...`" +The proper key file is in JSON format. +An example file is provided below, with private information replaced by "`...`" ```json { @@ -135,11 +172,17 @@ To point to a GCP-hosted Spanner instance from your local machine, follow these 4. `make run_spanner`. 5. Visit `http://localhost:8000/__heartbeat__` to make sure the server is running. -Note, that unlike MySQL, there is no automatic migrations facility. Currently, the Spanner schema must be hand edited and modified. +Note, that unlike MySQL, there is no automatic migrations facility. +Currently, the Spanner schema must be hand edited and modified. #### Emulator -Google supports an in-memory Spanner emulator, which can run on your local machine for development purposes. You can install the emulator via the gcloud CLI or Docker by following the instructions [here](https://cloud.google.com/spanner/docs/emulator#installing_and_running_the_emulator). Once the emulator is running, you'll need to create a new instance and a new database. To create an instance using the REST API (exposed via port 9020 on the emulator), we can use `curl`: +Google supports an in-memory Spanner emulator, which can run on your local machine for development purposes. +You can install the emulator via the gcloud CLI or Docker by following the instructions +[here](https://cloud.google.com/spanner/docs/emulator#installing_and_running_the_emulator). +Once the emulator is running, you'll need to create a new instance and a new database. +To create an instance using the REST API (exposed via port 9020 on the emulator), +we can use `curl`: ```sh curl --request POST \ @@ -149,7 +192,11 @@ curl --request POST \ --data "{\"instance\":{\"config\":\"emulator-test-config\",\"nodeCount\":1,\"displayName\":\"Test Instance\"},\"instanceId\":\"$INSTANCE_ID\"}" ``` -Note that you may set `PROJECT_ID` and `INSTANCE_ID` to your liking. To create a new database on this instance, we'll use a similar HTTP request, but we'll need to include information about the database schema. Since we don't have migrations for Spanner, we keep an up-to-date schema in `src/db/spanner/schema.ddl`. The `jq` utility allows us to parse this file for use in the JSON body of an HTTP POST request: +Note that you may set `PROJECT_ID` and `INSTANCE_ID` to your liking. +To create a new database on this instance, we'll use a similar HTTP request, +but we'll need to include information about the database schema. +Since we don't have migrations for Spanner, we keep an up-to-date schema in `src/db/spanner/schema.ddl`. +The `jq` utility allows us to parse this file for use in the JSON body of an HTTP POST request: ```sh DDL_STATEMENTS=$( @@ -171,7 +218,10 @@ curl -sS --request POST \ --data "{\"createStatement\":\"CREATE DATABASE \`$DATABASE_ID\`\",\"extraStatements\":$DDL_STATEMENTS}" ``` -Note that, again, you may set `DATABASE_ID` to your liking. Make sure that the `database_url` config variable reflects your choice of project name, instance name, and database name (i.e. it should be of the format `spanner://projects//instances//databases/`). 
+Note that, again, you may set `DATABASE_ID` to your liking. +Make sure that the `database_url` config variable reflects your choice of project name, +instance name, and database name +(i.e. it should be of the format `spanner://projects//instances//databases/`). To run an application server that points to the local Spanner emulator: @@ -179,13 +229,41 @@ To run an application server that points to the local Spanner emulator: SYNC_SYNCSTORAGE__SPANNER_EMULATOR_HOST=localhost:9010 make run_spanner ``` +### SQLite + +Setting up the server with SQLite only requires a path to the database file, +which will be created automatically: + +One for the syncserver data +`sqlite:///syncdb.sqlite` +And one for the tokenserver data +`sqlite:///tokendb.sqlite` + +Note that after database initialisation you will still need to run two SQL +insert on the tokenserver database to announce the presence of your syncserver +to the clients. +```sql +-- Create a new service record +INSERT INTO `services` (`id`, `service`, `pattern`) +VALUES ('1', 'sync-1.5', '{node}/1.5/{uid}'); + +-- Create a new service node record. Set the node field to the path of your +-- syncserver. +INSERT INTO `nodes` (`id`, `service`, `node`, `available`, `current_load`, `capacity`, `downed`, `backoff`) +VALUES ('1', '1', 'http://localhost:8000', '1', '0', '1', '0', '0'); +``` + ### Running via Docker -This requires access to [Google Cloud Rust (raw)](https://crates.io/crates/google-cloud-rust-raw/) crate. Please note that due to interdependencies, you will need to ensure that `grpcio` and `protobuf` match the version used by `google-cloud-rust-raw`. +This requires access to [Google Cloud Rust (raw)](https://crates.io/crates/google-cloud-rust-raw/) crate. +Please note that due to interdependencies, +you will need to ensure that `grpcio` and `protobuf` match the version used by `google-cloud-rust-raw`. 1. Make sure you have [Docker installed](https://docs.docker.com/install/) locally. 2. Copy the contents of mozilla-rust-sdk into top level root dir here. -3. Comment out the `image` value under `syncserver` in either docker-compose.mysql.yml or docker-compose.spanner.yml (depending on which database backend you want to run), and add this instead: +3. Comment out the `image` value under `syncserver` in either docker-compose.mysql.yml +or docker-compose.spanner.yml +(depending on which database backend you want to run), and add this instead: ```yml build: @@ -193,15 +271,19 @@ This requires access to [Google Cloud Rust (raw)](https://crates.io/crates/googl ``` 4. If you are using MySQL, adjust the MySQL db credentials in docker-compose.mysql.yml to match your local setup. -5. `make docker_start_mysql` or `make docker_start_spanner` - You can verify it's working by visiting [localhost:8000/\_\_heartbeat\_\_](http://localhost:8000/__heartbeat__) +5. `make docker_start_mysql` or `make docker_start_spanner` - +You can verify it's working by visiting [localhost:8000/\_\_heartbeat\_\_](http://localhost:8000/__heartbeat__) ### Connecting to Firefox This will walk you through the steps to connect this project to your local copy of Firefox. -1. Follow the steps outlined above for running this project using [MySQL](https://github.com/mozilla-services/syncstorage-rs#mysql). +1. Follow the steps outlined above for running this project using [MySQL](#mysql) or [SQLite](#sqlite). -2. 
Setup a local copy of [syncserver](https://github.com/mozilla-services/syncserver), with a few special changes to [syncserver.ini](https://github.com/mozilla-services/syncserver/blob/master/syncserver.ini); make sure that you're using the following values (in addition to all of the other defaults): +2. Setup a local copy of [syncserver](https://github.com/mozilla-services/syncserver), +with a few special changes to +[syncserver.ini](https://github.com/mozilla-services/syncserver/blob/master/syncserver.ini); +make sure that you're using the following values (in addition to all of the other defaults): ```ini [server:main] @@ -221,13 +303,18 @@ This will walk you through the steps to connect this project to your local copy sync-1.5 = "http://localhost:8000/1.5/1" ``` -3. In Firefox, go to `about:config`. Change `identity.sync.tokenserver.uri` to `http://localhost:5000/1.0/sync/1.5`. +3. In Firefox, go to `about:config`. +Change `identity.sync.tokenserver.uri` to `http://localhost:5000/1.0/sync/1.5`. 4. Restart Firefox. Now, try syncing. You should see new BSOs in your local MySQL instance. ## Logging ### Sentry: -1. If you want to connect to the existing [Sentry project](https://sentry.prod.mozaws.net/operations/syncstorage-local/) for local development, login to Sentry, and go to the page with [api keys](https://sentry.prod.mozaws.net/settings/operations/syncstorage-local/keys/). Copy the `DSN` value. +1. If you want to connect to the existing +[Sentry project](https://sentry.prod.mozaws.net/operations/syncstorage-local/) for local development, +login to Sentry, and go to the page with +[api keys](https://sentry.prod.mozaws.net/settings/operations/syncstorage-local/keys/). +Copy the `DSN` value. 2. Comment out the `human_logs` line in your `config/local.toml` file. 3. You can force an error to appear in Sentry by adding a `panic!` into main.rs, just before the final `Ok(())`. 4. Now, `SENTRY_DSN={INSERT_DSN_FROM_STEP_1_HERE} make run`. @@ -262,11 +349,17 @@ in the mysql client: ### End-to-End tests -Functional tests live in [server-syncstorage](https://github.com/mozilla-services/server-syncstorage/) and can be run against a local server, e.g.: +Functional tests live in +[server-syncstorage](https://github.com/mozilla-services/server-syncstorage/) +and can be run against a local server, e.g.: -1. If you haven't already followed the instructions [here](https://mozilla-services.readthedocs.io/en/latest/howtos/run-sync-1.5.html) to get all the dependencies for the [server-syncstorage](https://github.com/mozilla-services/server-syncstorage/) repo, you should start there. +1. If you haven't already followed the instructions +[here](https://mozilla-services.readthedocs.io/en/latest/howtos/run-sync-1.5.html) +to get all the dependencies for the +[server-syncstorage](https://github.com/mozilla-services/server-syncstorage/) repo, +you should start there. -2. Install (Python) server-syncstorage: +2. Install (Python) server-syncstorage: $ git clone https://github.com/mozilla-services/server-syncstorage/ $ cd server-syncstorage @@ -276,48 +369,68 @@ Functional tests live in [server-syncstorage](https://github.com/mozilla-service 4. To run all tests: - $ ./local/bin/python syncstorage/tests/functional/test_storage.py http://localhost:8000# + $ ./local/bin/python syncstorage/tests/functional/test_storage.py http://localhost:8000# 5. 
Individual tests can be specified via the `SYNC_TEST_PREFIX` env var: - $ SYNC_TEST_PREFIX=test_get_collection \ - ./local/bin/python syncstorage/tests/functional/test_storage.py http://localhost:8000# + $ SYNC_TEST_PREFIX=test_get_collection \ + ./local/bin/python syncstorage/tests/functional/test_storage.py http://localhost:8000# ## Creating Releases 1. Switch to master branch of syncstorage-rs -1. `git pull` to ensure that the local copy is up-to-date. -1. `git pull origin master` to make sure that you've incorporated any changes to the master branch. -1. `git diff origin/master` to ensure that there are no local staged or uncommited changes. -1. Bump the version number in [Cargo.toml](https://github.com/mozilla-services/syncstorage-rs/blob/master/Cargo.toml) (this new version number will be designated as `` in this checklist) -1. create a git branch for the new version `git checkout -b release/` -1. `cargo build --release` - Build with the release profile [release mode](https://doc.rust-lang.org/book/ch14-01-release-profiles.html). -1. `clog -C CHANGELOG.md` - Generate release notes. We're using [clog](https://github.com/clog-tool/clog-cli) for release notes. Add a `-p`, `-m` or `-M` flag to denote major/minor/patch version, ie `clog -C CHANGELOG.md -p`. -1. Review the `CHANGELOG.md` file and ensure all relevant changes since the last tag are included. -1. Create a new [release in Sentry](https://docs.sentry.io/product/releases/#create-release): `VERSION={release-version-here} bash scripts/sentry-release.sh`. If you're doing this for the first time, checkout the [tips below](https://github.com/mozilla-services/syncstorage-rs#troubleshooting) for troubleshooting sentry cli access. -1. `git commit -am "chore: tag "` to commit the new version and changes -1. `git tag -s -m "chore: tag " ` to create a signed tag of the current HEAD commit for release. -1. `git push origin release/` to push the commits to a new origin release branch -1. `git push --tags origin release/` to push the tags to the release branch. -1. Submit a Pull Request (PR) on github to merge the release branch to master. -1. Go to the [GitHub release](https://github.com/mozilla-services/syncstorage-rs/releases), you should see the new tag with no release information. -1. Click the `Draft a new release` button. -1. Enter the \ number for `Tag version`. -1. Copy and paste the most recent change set from `CHANGELOG.md` into the release description, omitting the top 2 lines (the name and version) -1. Once your PR merges, click [Publish Release] on the [GitHub release](https://github.com/mozilla-services/syncstorage-rs/releases) page. - -Sync server is automatically deployed to STAGE, however QA may need to be notified if testing is required. Once QA signs off, then a bug should be filed to promote the server to PRODUCTION. +2. `git pull` to ensure that the local copy is up-to-date. +3. `git pull origin master` to make sure that you've incorporated any changes to the master branch. +4. `git diff origin/master` to ensure that there are no local staged or uncommited changes. +5. Bump the version number in [Cargo.toml](https://github.com/mozilla-services/syncstorage-rs/blob/master/Cargo.toml) +(this new version number will be designated as `` in this checklist) +6. create a git branch for the new version `git checkout -b release/` +7. `cargo build --release` - Build with the release profile +[release mode](https://doc.rust-lang.org/book/ch14-01-release-profiles.html). +8. `clog -C CHANGELOG.md` - Generate release notes. 
+We're using [clog](https://github.com/clog-tool/clog-cli) for release notes.
+Add a `-p`, `-m` or `-M` flag to denote a patch/minor/major version, e.g. `clog -C CHANGELOG.md -p`.
+9. Review the `CHANGELOG.md` file and ensure all relevant changes since the last tag are included.
+10. Create a new [release in Sentry](https://docs.sentry.io/product/releases/#create-release):
+`VERSION={release-version-here} bash scripts/sentry-release.sh`.
+If you're doing this for the first time, check out the
+[tips below](https://github.com/mozilla-services/syncstorage-rs#troubleshooting) for troubleshooting Sentry CLI access.
+11. `git commit -am "chore: tag "` to commit the new version and changes
+12. `git tag -s -m "chore: tag " ` to create a signed tag of the current HEAD commit for release.
+13. `git push origin release/` to push the commits to a new origin release branch
+14. `git push --tags origin release/` to push the tags to the release branch.
+15. Submit a Pull Request (PR) on GitHub to merge the release branch to master.
+16. Go to the [GitHub release](https://github.com/mozilla-services/syncstorage-rs/releases);
+you should see the new tag with no release information.
+17. Click the `Draft a new release` button.
+18. Enter the \ number for `Tag version`.
+19. Copy and paste the most recent change set from `CHANGELOG.md` into the release description,
+omitting the top 2 lines (the name and version).
+20. Once your PR merges, click [Publish Release] on the
+[GitHub release](https://github.com/mozilla-services/syncstorage-rs/releases) page.
+
+Sync server is automatically deployed to STAGE;
+however, QA may need to be notified if testing is required.
+Once QA signs off, a bug should be filed to promote the server to PRODUCTION.

 ## Troubleshooting

 - `rm Cargo.lock; cargo clean;` - Try this if you're having problems compiling.
-- Some versions of OpenSSL 1.1.1 can conflict with grpcio's built in BoringSSL. These errors can cause syncstorage to fail to run or compile.
-If you see a problem related to `libssl` you may need to specify the `cargo` option `--features grpcio/openssl` to force grpcio to use OpenSSL.
+- Some versions of OpenSSL 1.1.1 can conflict with grpcio's built-in BoringSSL.
+These errors can cause syncstorage to fail to run or compile.
+If you see a problem related to `libssl`, you may need to specify the `cargo` option
+`--features grpcio/openssl` to force grpcio to use OpenSSL.

 ### Sentry

-- If you're having trouble working with Sentry to create releases, try authenticating using their self hosted server option that's outlined [here](https://docs.sentry.io/product/cli/configuration/) Ie, `sentry-cli --url https://selfhosted.url.com/ login`. It's also recommended to create a `.sentryclirc` config file. See [this example](https://github.com/mozilla-services/syncstorage-rs/blob/master/.sentryclirc.example) for the config values you'll need.
+- If you're having trouble working with Sentry to create releases,
+try authenticating using their self-hosted server option that's outlined
+[here](https://docs.sentry.io/product/cli/configuration/), e.g.
+`sentry-cli --url https://selfhosted.url.com/ login`.
+It's also recommended to create a `.sentryclirc` config file.
+See [this example](https://github.com/mozilla-services/syncstorage-rs/blob/master/.sentryclirc.example)
+for the config values you'll need.
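+For reference, a minimal `.sentryclirc` for a self-hosted Sentry instance usually looks
+something like the sketch below; the values here are illustrative placeholders, and the
+linked `.sentryclirc.example` remains the authoritative reference for this project.
+
+```ini
+[defaults]
+; placeholder: URL of the Sentry instance you authenticate against
+url = https://selfhosted.url.com/
+org = your-org
+project = syncstorage-local
+
+[auth]
+; placeholder: an API token generated from your Sentry account settings
+token = YOUR_SENTRY_API_TOKEN
+```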
## Related Documentation diff --git a/config/local.example.toml b/config/local.example.toml index f845b5c99e..1f3fdfd2f4 100644 --- a/config/local.example.toml +++ b/config/local.example.toml @@ -8,6 +8,9 @@ human_logs = 1 syncstorage.database_url = "mysql://sample_user:sample_password@localhost/syncstorage_rs" # Example Spanner DSN: # database_url="spanner://projects/SAMPLE_GCP_PROJECT/instances/SAMPLE_SPANNER_INSTANCE/databases/SAMPLE_SPANNER_DB" +# Example SQLite DSN: +# database_url="sqlite://PATH_TO_FILE/FILE.sqlite" + # enable quota limits syncstorage.enable_quota = 0 # set the quota limit to 2GB. @@ -26,5 +29,5 @@ tokenserver.fxa_browserid_issuer = "https://api-accounts.stage.mozaws.net" tokenserver.fxa_browserid_server_url = "https://verifier.stage.mozaws.net/v2" # cors settings -# cors_allowed_origin = "localhost" +cors_allowed_origin = "localhost" # cors_max_age = 86400 diff --git a/docker-compose.e2e.sqlite.yaml b/docker-compose.e2e.sqlite.yaml new file mode 100644 index 0000000000..b2fac5c943 --- /dev/null +++ b/docker-compose.e2e.sqlite.yaml @@ -0,0 +1,39 @@ +version: "3" +services: + syncserver: + entrypoint: + /bin/sh -c " + sleep 15; + /app/bin/syncserver; + " + sqlite-e2e-tests: + depends_on: + - mock-fxa-server + - syncserver + image: app:build + privileged: true + user: root + environment: + MOCK_FXA_SERVER_URL: http://mock-fxa-server:6000 + SYNC_HOST: 0.0.0.0 + SYNC_MASTER_SECRET: secret0 + SYNC_SYNCSTORAGE__DATABASE_URL: sqlite:///data/syncstoragedb.sqlite + SYNC_TOKENSERVER__DATABASE_URL: sqlite:///data/tokenserverdb.sqlite + SYNC_TOKENSERVER__ENABLED: "true" + SYNC_TOKENSERVER__FXA_BROWSERID_AUDIENCE: "https://token.stage.mozaws.net/" + SYNC_TOKENSERVER__FXA_BROWSERID_ISSUER: "api-accounts.stage.mozaws.net" + SYNC_TOKENSERVER__FXA_EMAIL_DOMAIN: api-accounts.stage.mozaws.net + SYNC_TOKENSERVER__FXA_METRICS_HASH_SECRET: secret0 + SYNC_TOKENSERVER__RUN_MIGRATIONS: "true" + SYNC_TOKENSERVER__FXA_OAUTH_PRIMARY_JWK__KTY: "RSA" + SYNC_TOKENSERVER__FXA_OAUTH_PRIMARY_JWK__ALG: "RS256" + SYNC_TOKENSERVER__FXA_OAUTH_PRIMARY_JWK__KID: "20190730-15e473fd" + SYNC_TOKENSERVER__FXA_OAUTH_PRIMARY_JWK__FXA_CREATED_AT: "1564502400" + SYNC_TOKENSERVER__FXA_OAUTH_PRIMARY_JWK__USE: "sig" + SYNC_TOKENSERVER__FXA_OAUTH_PRIMARY_JWK__N: "15OpVGC7ws_SlU0gRbRh1Iwo8_gR8ElX2CDnbN5blKyXLg-ll0ogktoDXc-tDvTabRTxi7AXU0wWQ247odhHT47y5uz0GASYXdfPponynQ_xR9CpNn1eEL1gvDhQN9rfPIzfncl8FUi9V4WMd5f600QC81yDw9dX-Z8gdkru0aDaoEKF9-wU2TqrCNcQdiJCX9BISotjz_9cmGwKXFEekQNJWBeRQxH2bUmgwUK0HaqwW9WbYOs-zstNXXWFsgK9fbDQqQeGehXLZM4Cy5Mgl_iuSvnT3rLzPo2BmlxMLUvRqBx3_v8BTtwmNGA0v9O0FJS_mnDq0Iue0Dz8BssQCQ" + SYNC_TOKENSERVER__FXA_OAUTH_PRIMARY_JWK__E: "AQAB" + TOKENSERVER_HOST: http://localhost:8000 + entrypoint: + /bin/sh -c " + sleep 28; python3 /app/tools/integration_tests/run.py 'http://localhost:8000#secret0' + " diff --git a/docker-compose.sqlite.yaml b/docker-compose.sqlite.yaml new file mode 100644 index 0000000000..ab81cb464b --- /dev/null +++ b/docker-compose.sqlite.yaml @@ -0,0 +1,42 @@ +# NOTE: This docker-compose file was constructed to create a base for +# use by the End-to-end tests. It has not been fully tested for use in +# constructing a true, stand-alone sync server. +# If you're interested in doing that, please join our community in the +# github issues and comments. +# +# Application runs off of port 8000. 
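+# A typical local invocation (assuming the image has already been built, e.g. with
+# `docker build -t app:build --build-arg DATABASE_BACKEND=sqlite .`) would be
+# something like:
+#   SYNCSTORAGE_RS_IMAGE=app:build docker-compose -f docker-compose.sqlite.yaml up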
+# you can test if it's available with +# curl "http://localhost:8000/__heartbeat__" + +version: "3" +services: + mock-fxa-server: + image: app:build + restart: "no" + entrypoint: "sh scripts/start_mock_fxa_server.sh" + environment: + MOCK_FXA_SERVER_HOST: 0.0.0.0 + MOCK_FXA_SERVER_PORT: 6000 + + syncserver: + # NOTE: The naming in the rest of this repository has been updated to reflect the fact + # that Syncstorage and Tokenserver are now part of one repository/server called + # "Syncserver" (updated from "syncstorage-rs"). We keep the legacy naming below for + # backwards compatibility with previous Docker images. + image: ${SYNCSTORAGE_RS_IMAGE:-syncstorage-rs:latest} + restart: always + ports: + - "8000:8000" + volumes: + - sqlite_data:/data/ + environment: + SYNC_HOST: 0.0.0.0 + SYNC_MASTER_SECRET: secret0 + SYNC_SYNCSTORAGE__DATABASE_URL: sqlite:///data/syncdb.sqlite + SYNC_SYNCSTORAGE__DATABASE_POOL_MIN_IDLE: 0 + SYNC_TOKENSERVER__DATABASE_URL: sqlite:///data/tokenserverdb.sqlite + SYNC_TOKENSERVER__DATABASE_POOL_MIN_IDLE: 0 + SYNC_TOKENSERVER__RUN_MIGRATIONS: "true" + +volumes: + sqlite_data: diff --git a/syncserver-db-common/Cargo.toml b/syncserver-db-common/Cargo.toml index 499aa274d5..813522e732 100644 --- a/syncserver-db-common/Cargo.toml +++ b/syncserver-db-common/Cargo.toml @@ -12,6 +12,11 @@ futures.workspace = true http.workspace = true thiserror.workspace = true -diesel = { workspace = true, features = ["mysql", "r2d2"] } -diesel_migrations = { workspace = true, features = ["mysql"] } +diesel = { workspace = true, features = ["mysql", "sqlite","r2d2"] } +diesel_migrations = { workspace = true, features = ["mysql", "sqlite"] } syncserver-common = { path = "../syncserver-common" } + +[features] +sql = [] +mysql = ["sql"] +sqlite = ["sql"] diff --git a/syncserver-db-common/src/lib.rs b/syncserver-db-common/src/lib.rs index 5e2273760a..d3259bfea5 100644 --- a/syncserver-db-common/src/lib.rs +++ b/syncserver-db-common/src/lib.rs @@ -1,3 +1,4 @@ +#[cfg(feature = "sql")] pub mod error; pub mod test; diff --git a/syncserver-db-common/src/test.rs b/syncserver-db-common/src/test.rs index 351888f3fb..6db3dd0376 100644 --- a/syncserver-db-common/src/test.rs +++ b/syncserver-db-common/src/test.rs @@ -1,14 +1,26 @@ use diesel::{ - mysql::MysqlConnection, r2d2::{CustomizeConnection, Error as PoolError}, Connection, }; +#[cfg(feature = "mysql")] +use diesel::mysql::MysqlConnection; +#[cfg(feature = "sqlite")] +use diesel::sqlite::SqliteConnection; + #[derive(Debug)] pub struct TestTransactionCustomizer; +#[cfg(feature = "mysql")] impl CustomizeConnection for TestTransactionCustomizer { fn on_acquire(&self, conn: &mut MysqlConnection) -> Result<(), PoolError> { conn.begin_test_transaction().map_err(PoolError::QueryError) } } + +#[cfg(feature = "sqlite")] +impl CustomizeConnection for TestTransactionCustomizer { + fn on_acquire(&self, conn: &mut SqliteConnection) -> Result<(), PoolError> { + conn.begin_test_transaction().map_err(PoolError::QueryError) + } +} diff --git a/syncserver/Cargo.toml b/syncserver/Cargo.toml index a920ee107b..702ce87823 100644 --- a/syncserver/Cargo.toml +++ b/syncserver/Cargo.toml @@ -61,8 +61,10 @@ validator_derive = "0.18" woothee = "0.13" [features] -default = ["mysql", "py_verifier"] +default = ["py_verifier"] no_auth = [] py_verifier = ["tokenserver-auth/py"] -mysql = ["syncstorage-db/mysql"] -spanner = ["syncstorage-db/spanner"] +mysql = ["syncstorage-db/mysql", "tokenserver-db/mysql"] +# Spanner backend rely on MySQL for token server database +spanner = 
["syncstorage-db/spanner", "tokenserver-db/mysql"] +sqlite = ["syncstorage-db/sqlite", "tokenserver-db/sqlite"] diff --git a/syncserver/src/db/mod.rs b/syncserver/src/db/mod.rs index a7d89e69bf..b0f18f6631 100644 --- a/syncserver/src/db/mod.rs +++ b/syncserver/src/db/mod.rs @@ -1,8 +1,12 @@ //! Generic db abstration. pub mod mock; +#[cfg(feature = "mysql")] pub mod mysql; +#[cfg(feature = "spanner")] pub mod spanner; +#[cfg(feature = "sqlite")] +pub mod sqlite; #[cfg(test)] mod tests; pub mod transaction; @@ -27,14 +31,22 @@ pub async fn pool_from_settings( let url = Url::parse(&settings.database_url).map_err(|e| DbErrorKind::InvalidUrl(e.to_string()))?; Ok(match url.scheme() { + #[cfg(feature = "mysql")] "mysql" => Box::new(mysql::pool::MysqlDbPool::new( settings, metrics, blocking_threadpool, )?), + #[cfg(feature = "spanner")] "spanner" => Box::new( spanner::pool::SpannerDbPool::new(settings, metrics, blocking_threadpool).await?, ), + #[cfg(feature = "sqlite")] + "sqlite" => Box::new(sqlite::pool::SqliteDbPool::new( + settings, + metrics, + blocking_threadpool, + )?), _ => Err(DbErrorKind::InvalidUrl(settings.database_url.to_owned()))?, }) } diff --git a/syncserver/src/server/test.rs b/syncserver/src/server/test.rs index 869052666c..2152c2f4dc 100644 --- a/syncserver/src/server/test.rs +++ b/syncserver/src/server/test.rs @@ -62,15 +62,33 @@ fn get_test_settings() -> Settings { .as_str(), ) .expect("Could not get pool_size in get_test_settings"); - if cfg!(feature = "mysql") && settings.syncstorage.uses_spanner() { + if cfg!(feature = "mysql") + && !&settings + .syncstorage + .database_url + .as_str() + .starts_with("mysql://") + { panic!( - "Spanner database_url specified for MySQL feature, please correct.\n\t{}", + "Spanner or SQLite database_url specified for MySQL feature, please correct.\n\t{}", &settings.syncstorage.database_url ) } if cfg!(feature = "spanner") && !&settings.syncstorage.uses_spanner() { panic!( - "MySQL database_url specified for Spanner feature, please correct.\n\t{}", + "MySQL or SQLite database_url specified for Spanner feature, please correct.\n\t{}", + &settings.syncstorage.database_url + ) + } + if cfg!(feature = "sqlite") + && !&settings + .syncstorage + .database_url + .as_str() + .starts_with("sqlite://") + { + panic!( + "Spanner or MySQL database_url specified for SQLite feature, please correct.\n\t{}", &settings.syncstorage.database_url ) } diff --git a/syncstorage-db-common/Cargo.toml b/syncstorage-db-common/Cargo.toml index 28ef3504e0..69ad52e437 100644 --- a/syncstorage-db-common/Cargo.toml +++ b/syncstorage-db-common/Cargo.toml @@ -16,8 +16,7 @@ serde_json.workspace = true thiserror.workspace = true async-trait = "0.1.40" -# diesel = 1.4 diesel = { workspace = true, features = ["mysql", "r2d2"] } diesel_migrations = { workspace = true, features = ["mysql"] } syncserver-common = { path = "../syncserver-common" } -syncserver-db-common = { path = "../syncserver-db-common" } +syncserver-db-common = { path = "../syncserver-db-common", features = ["sql"] } diff --git a/syncstorage-db/Cargo.toml b/syncstorage-db/Cargo.toml index eb7fdd4181..49803a037a 100644 --- a/syncstorage-db/Cargo.toml +++ b/syncstorage-db/Cargo.toml @@ -20,14 +20,16 @@ log = { version = "0.4", features = [ "release_max_level_info", ] } syncserver-common = { path = "../syncserver-common" } -syncserver-db-common = { path = "../syncserver-db-common" } +syncserver-db-common = { path = "../syncserver-db-common", features = ["sql"] } syncserver-settings = { path = "../syncserver-settings" } 
syncstorage-db-common = { path = "../syncstorage-db-common" } syncstorage-mysql = { path = "../syncstorage-mysql", optional = true } -syncstorage-settings = { path = "../syncstorage-settings" } +syncstorage-sqlite = { path = "../syncstorage-sqlite/", optional = true} syncstorage-spanner = { path = "../syncstorage-spanner", optional = true } +syncstorage-settings = { path = "../syncstorage-settings" } tokio = { workspace = true, features = ["macros", "sync"] } [features] mysql = ['syncstorage-mysql'] spanner = ['syncstorage-spanner'] +sqlite = ['syncstorage-sqlite'] diff --git a/syncstorage-db/src/lib.rs b/syncstorage-db/src/lib.rs index cfd0ee8fea..763557833d 100644 --- a/syncstorage-db/src/lib.rs +++ b/syncstorage-db/src/lib.rs @@ -15,6 +15,13 @@ pub use syncstorage_mysql::DbError; #[cfg(feature = "mysql")] pub type DbImpl = syncstorage_mysql::MysqlDb; +#[cfg(feature = "sqlite")] +pub type DbPoolImpl = syncstorage_sqlite::SqliteDbPool; +#[cfg(feature = "sqlite")] +pub use syncstorage_sqlite::DbError; +#[cfg(feature = "sqlite")] +pub type DbImpl = syncstorage_sqlite::SqliteDb; + #[cfg(feature = "spanner")] pub type DbPoolImpl = syncstorage_spanner::SpannerDbPool; #[cfg(feature = "spanner")] @@ -31,8 +38,14 @@ pub use syncstorage_db_common::{ Db, DbPool, Sorting, UserIdentifier, }; -#[cfg(all(feature = "mysql", feature = "spanner"))] -compile_error!("only one of the \"mysql\" and \"spanner\" features can be enabled at a time"); - -#[cfg(not(any(feature = "mysql", feature = "spanner")))] -compile_error!("exactly one of the \"mysql\" and \"spanner\" features must be enabled"); +#[cfg(any( + all(feature = "mysql", feature = "spanner"), + all(feature = "mysql", feature = "sqlite"), + all(feature = "spanner", feature = "sqlite") +))] +compile_error!( + "only one of the \"mysql\", \"spanner\" and \"sqlite\" features can be enabled at a time" +); + +#[cfg(not(any(feature = "mysql", feature = "spanner", feature = "sqlite")))] +compile_error!("exactly one of the \"mysql\", \"spanner\" and \"sqlite\" features must be enabled"); diff --git a/syncstorage-mysql/Cargo.toml b/syncstorage-mysql/Cargo.toml index 7d2a7c3421..659f612d9b 100644 --- a/syncstorage-mysql/Cargo.toml +++ b/syncstorage-mysql/Cargo.toml @@ -14,14 +14,14 @@ slog-scope.workspace = true thiserror.workspace = true async-trait = "0.1.40" -# There appears to be a compilation error with diesel diesel = { workspace = true, features = ["mysql", "r2d2"] } diesel_logger = { workspace = true } diesel_migrations = { workspace = true, features = ["mysql"] } syncserver-common = { path = "../syncserver-common" } -syncserver-db-common = { path = "../syncserver-db-common" } +syncserver-db-common = { path = "../syncserver-db-common", features = ["mysql"] } syncstorage-db-common = { path = "../syncstorage-db-common" } syncstorage-settings = { path = "../syncstorage-settings" } +syncstorage-sql-db-common = { path = "../syncstorage-sql-db-common" } url = "2.1" [dev-dependencies] diff --git a/syncstorage-mysql/src/batch.rs b/syncstorage-mysql/src/batch.rs index 1e487440b5..d3fa31af36 100644 --- a/syncstorage-mysql/src/batch.rs +++ b/syncstorage-mysql/src/batch.rs @@ -11,9 +11,9 @@ use diesel::{ ExpressionMethods, OptionalExtension, QueryDsl, RunQueryDsl, }; use syncstorage_db_common::{params, results, UserIdentifier, BATCH_LIFETIME}; +use syncstorage_sql_db_common::error::DbError; use super::{ - error::DbError, models::MysqlDb, schema::{batch_upload_items, batch_uploads}, DbResult, diff --git a/syncstorage-mysql/src/lib.rs b/syncstorage-mysql/src/lib.rs 
index 4a93390070..83e9d5f7a9 100644 --- a/syncstorage-mysql/src/lib.rs +++ b/syncstorage-mysql/src/lib.rs @@ -8,15 +8,14 @@ extern crate slog_scope; #[macro_use] mod batch; mod diesel_ext; -mod error; mod models; mod pool; mod schema; #[cfg(test)] mod test; -pub use error::DbError; pub use models::MysqlDb; pub use pool::MysqlDbPool; +pub use syncstorage_sql_db_common::error::DbError; -pub(crate) type DbResult = Result; +pub(crate) type DbResult = Result; diff --git a/syncstorage-mysql/src/models.rs b/syncstorage-mysql/src/models.rs index 5598b479c9..34b211959b 100644 --- a/syncstorage-mysql/src/models.rs +++ b/syncstorage-mysql/src/models.rs @@ -22,11 +22,11 @@ use syncstorage_db_common::{ DEFAULT_BSO_TTL, }; use syncstorage_settings::{Quota, DEFAULT_MAX_TOTAL_RECORDS}; +use syncstorage_sql_db_common::error::DbError; use super::{ batch, diesel_ext::LockInShareModeDsl, - error::DbError, pool::CollectionCache, schema::{bso, collections, user_collections}, DbResult, diff --git a/syncstorage-mysql/src/pool.rs b/syncstorage-mysql/src/pool.rs index ea6030b9b5..088f2c90c4 100644 --- a/syncstorage-mysql/src/pool.rs +++ b/syncstorage-mysql/src/pool.rs @@ -20,8 +20,9 @@ use syncserver_db_common::test::TestTransactionCustomizer; use syncserver_db_common::{GetPoolState, PoolState}; use syncstorage_db_common::{Db, DbPool, STD_COLLS}; use syncstorage_settings::{Quota, Settings}; +use syncstorage_sql_db_common::error::DbError; -use super::{error::DbError, models::MysqlDb, DbResult}; +use super::{models::MysqlDb, DbResult}; embed_migrations!(); diff --git a/syncstorage-sql-db-common/Cargo.toml b/syncstorage-sql-db-common/Cargo.toml new file mode 100644 index 0000000000..c15b81c722 --- /dev/null +++ b/syncstorage-sql-db-common/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "syncstorage-sql-db-common" +version.workspace = true +authors.workspace = true +edition.workspace = true +license.workspace = true + +[dependencies] +backtrace.workspace=true +base64.workspace=true +futures.workspace=true +http.workspace=true +slog-scope.workspace=true + +async-trait = "0.1.40" +diesel = { version = "1.4", features = ["sqlite", "r2d2"] } +diesel_logger = "0.1.1" +diesel_migrations = { version = "1.4.0", features = ["sqlite"] } +syncserver-common = { path = "../syncserver-common" } +syncserver-db-common = { path = "../syncserver-db-common" } +syncstorage-db-common = { path = "../syncstorage-db-common" } +syncstorage-settings = { path = "../syncstorage-settings" } +thiserror = "1.0.26" +url = "2.1" + +[dev-dependencies] +env_logger.workspace=true +syncserver-settings = { path = "../syncserver-settings" } diff --git a/syncstorage-mysql/src/error.rs b/syncstorage-sql-db-common/src/error.rs similarity index 89% rename from syncstorage-mysql/src/error.rs rename to syncstorage-sql-db-common/src/error.rs index d9a66d96ad..a2d5ec8726 100644 --- a/syncstorage-mysql/src/error.rs +++ b/syncstorage-sql-db-common/src/error.rs @@ -49,7 +49,7 @@ enum DbErrorKind { Common(SyncstorageDbError), #[error("{}", _0)] - Mysql(SqlError), + Sql(SqlError), } impl From for DbError { @@ -95,35 +95,35 @@ impl ReportableError for DbError { fn reportable_source(&self) -> Option<&(dyn ReportableError + 'static)> { Some(match &self.kind { DbErrorKind::Common(e) => e, - DbErrorKind::Mysql(e) => e, + DbErrorKind::Sql(e) => e, }) } fn is_sentry_event(&self) -> bool { match &self.kind { DbErrorKind::Common(e) => e.is_sentry_event(), - DbErrorKind::Mysql(e) => e.is_sentry_event(), + DbErrorKind::Sql(e) => e.is_sentry_event(), } } fn metric_label(&self) -> 
Option { match &self.kind { DbErrorKind::Common(e) => e.metric_label(), - DbErrorKind::Mysql(e) => e.metric_label(), + DbErrorKind::Sql(e) => e.metric_label(), } } fn backtrace(&self) -> Option<&Backtrace> { match &self.kind { DbErrorKind::Common(e) => e.backtrace(), - DbErrorKind::Mysql(e) => e.backtrace(), + DbErrorKind::Sql(e) => e.backtrace(), } } fn tags(&self) -> Vec<(&str, String)> { match &self.kind { DbErrorKind::Common(e) => e.tags(), - DbErrorKind::Mysql(e) => e.tags(), + DbErrorKind::Sql(e) => e.tags(), } } } @@ -140,24 +140,22 @@ from_error!(SyncstorageDbError, DbError, DbErrorKind::Common); from_error!( diesel::result::Error, DbError, - |error: diesel::result::Error| DbError::from(DbErrorKind::Mysql(SqlError::from(error))) + |error: diesel::result::Error| DbError::from(DbErrorKind::Sql(SqlError::from(error))) ); from_error!( diesel::result::ConnectionError, DbError, - |error: diesel::result::ConnectionError| DbError::from(DbErrorKind::Mysql(SqlError::from( - error - ))) + |error: diesel::result::ConnectionError| DbError::from(DbErrorKind::Sql(SqlError::from(error))) ); from_error!( diesel::r2d2::PoolError, DbError, - |error: diesel::r2d2::PoolError| DbError::from(DbErrorKind::Mysql(SqlError::from(error))) + |error: diesel::r2d2::PoolError| DbError::from(DbErrorKind::Sql(SqlError::from(error))) ); from_error!( diesel_migrations::RunMigrationsError, DbError, - |error: diesel_migrations::RunMigrationsError| DbError::from(DbErrorKind::Mysql( - SqlError::from(error) - )) + |error: diesel_migrations::RunMigrationsError| DbError::from(DbErrorKind::Sql(SqlError::from( + error + ))) ); diff --git a/syncstorage-sql-db-common/src/lib.rs b/syncstorage-sql-db-common/src/lib.rs new file mode 100644 index 0000000000..a91e735174 --- /dev/null +++ b/syncstorage-sql-db-common/src/lib.rs @@ -0,0 +1 @@ +pub mod error; diff --git a/syncstorage-sqlite/Cargo.toml b/syncstorage-sqlite/Cargo.toml new file mode 100644 index 0000000000..22a9eccfb6 --- /dev/null +++ b/syncstorage-sqlite/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "syncstorage-sqlite" +version.workspace=true +license.workspace=true +authors.workspace=true +edition.workspace=true + +[dependencies] +backtrace.workspace=true +base64.workspace=true +futures.workspace=true +http.workspace=true +slog-scope.workspace=true +thiserror.workspace = true +diesel_logger.workspace = true + +async-trait = "0.1.40" +diesel = { workspace = true, features = ["sqlite", "r2d2"] } +diesel_migrations = { workspace = true, features = ["sqlite"] } +syncserver-common = { path = "../syncserver-common" } +syncserver-db-common = { path = "../syncserver-db-common", features = ["sqlite"] } +syncstorage-db-common = { path = "../syncstorage-db-common" } +syncstorage-settings = { path = "../syncstorage-settings" } +syncstorage-sql-db-common = { path = "../syncstorage-sql-db-common" } +url = "2.1" + +[dev-dependencies] +env_logger.workspace=true +syncserver-settings = { path = "../syncserver-settings" } diff --git a/syncstorage-sqlite/migrations/2024-01-19-131212_Init/down.sql b/syncstorage-sqlite/migrations/2024-01-19-131212_Init/down.sql new file mode 100644 index 0000000000..63a99dd2eb --- /dev/null +++ b/syncstorage-sqlite/migrations/2024-01-19-131212_Init/down.sql @@ -0,0 +1,8 @@ +-- DROP INDEX IF EXISTS `bso_expiry_idx`; +-- DROP INDEX IF EXISTS `bso_usr_col_mod_idx`; + +-- DROP TABLE IF EXISTS `bso`; +-- DROP TABLE IF EXISTS `collections`; +-- DROP TABLE IF EXISTS `user_collections`; +-- DROP TABLE IF EXISTS `batch_uploads`; +-- DROP TABLE IF EXISTS 
`batch_upload_items`; diff --git a/syncstorage-sqlite/migrations/2024-01-19-131212_Init/up.sql b/syncstorage-sqlite/migrations/2024-01-19-131212_Init/up.sql new file mode 100644 index 0000000000..2fd4aeb2ca --- /dev/null +++ b/syncstorage-sqlite/migrations/2024-01-19-131212_Init/up.sql @@ -0,0 +1,79 @@ +-- XXX: bsov1, etc +-- We use Bigint for some fields instead of Integer, even though Sqlite does not have the concept of Bigint, +-- to allow diesel to assume that integer can be mapped to i64. See https://github.com/diesel-rs/diesel/issues/852 + + +CREATE TABLE IF NOT EXISTS `bso` +( + `userid` BIGINT NOT NULL, + `collection` INTEGER NOT NULL, + `id` TEXT NOT NULL, + + `sortindex` INTEGER, + + `payload` TEXT NOT NULL, + `payload_size` BIGINT DEFAULT 0, + + -- last modified time in milliseconds since epoch + `modified` BIGINT NOT NULL, + -- expiration in milliseconds since epoch + `ttl` BIGINT DEFAULT '3153600000000' NOT NULL, + + PRIMARY KEY (`userid`, `collection`, `id`) +); +CREATE INDEX IF NOT EXISTS `bso_expiry_idx` ON `bso` (`ttl`); +CREATE INDEX IF NOT EXISTS `bso_usr_col_mod_idx` ON `bso` (`userid`, `collection`, `modified`); + +CREATE TABLE IF NOT EXISTS `collections` +( + `id` INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + `name` TEXT UNIQUE NOT NULL +); +INSERT INTO collections (id, name) +VALUES (1, 'clients'), + (2, 'crypto'), + (3, 'forms'), + (4, 'history'), + (5, 'keys'), + (6, 'meta'), + (7, 'bookmarks'), + (8, 'prefs'), + (9, 'tabs'), + (10, 'passwords'), + (11, 'addons'), + (12, 'addresses'), + (13, 'creditcards'), + -- Reserve space for additions to the standard collections + (100, ''); + + +CREATE TABLE IF NOT EXISTS `user_collections` +( + `userid` BIGINT NOT NULL, + `collection` INTEGER NOT NULL, + -- last modified time in milliseconds since epoch + `last_modified` BIGINT NOT NULL, + `total_bytes` BIGINT, + `count` INTEGER, + PRIMARY KEY (`userid`, `collection`) +); + +CREATE TABLE IF NOT EXISTS `batch_uploads` +( + `batch` BIGINT NOT NULL, + `userid` BIGINT NOT NULL, + `collection` INTEGER NOT NULL, + PRIMARY KEY (`batch`, `userid`) +); + +CREATE TABLE IF NOT EXISTS `batch_upload_items` +( + `batch` BIGINT NOT NULL, + `userid` BIGINT NOT NULL, + `id` TEXT NOT NULL, + `sortindex` INTEGER DEFAULT NULL, + `payload` TEXT, + `payload_size` BIGINT DEFAULT NULL, + `ttl_offset` INTEGER DEFAULT NULL, + PRIMARY KEY (`batch`, `userid`, `id`) +); diff --git a/syncstorage-sqlite/src/batch.rs b/syncstorage-sqlite/src/batch.rs new file mode 100644 index 0000000000..d452b78222 --- /dev/null +++ b/syncstorage-sqlite/src/batch.rs @@ -0,0 +1,278 @@ +use base64::Engine; +use std::collections::HashSet; + +use diesel::{ + self, + dsl::sql, + insert_into, + result::{DatabaseErrorKind::UniqueViolation, Error as DieselError}, + sql_query, + sql_types::{BigInt, Integer}, + ExpressionMethods, OptionalExtension, QueryDsl, RunQueryDsl, +}; +use syncstorage_db_common::{params, results, UserIdentifier, BATCH_LIFETIME}; +use syncstorage_sql_db_common::error::DbError; + +use super::{ + models::SqliteDb, + schema::{batch_upload_items, batch_uploads}, + DbResult, +}; + +const MAXTTL: i32 = 2_100_000_000; + +pub fn create(db: &SqliteDb, params: params::CreateBatch) -> DbResult { + let user_id = params.user_id.legacy_id as i64; + let collection_id = db.get_collection_id(¶ms.collection)?; + // Careful, there's some weirdness here! + // + // Sync timestamps are in seconds and quantized to two decimal places, so + // when we convert one to a bigint in milliseconds, the final digit is + // always zero. 
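+    // (For example, a quantized timestamp of 1700000000.12 seconds becomes
+    // 1_700_000_000_120 ms, which ends in zero.)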
But we want to use the lower digits of the batchid for + // sharding writes via (batchid % num_tables), and leaving it as zero would + // skew the sharding distribution. + // + // So we mix in the lowest digit of the uid to improve the distribution + // while still letting us treat these ids as millisecond timestamps. It's + // yuck, but it works and it keeps the weirdness contained to this single + // line of code. + let batch_id = db.timestamp().as_i64() + (user_id % 10); + insert_into(batch_uploads::table) + .values(( + batch_uploads::batch_id.eq(&batch_id), + batch_uploads::user_id.eq(&user_id), + batch_uploads::collection_id.eq(&collection_id), + )) + .execute(&db.conn) + .map_err(|e| -> DbError { + match e { + // The user tried to create two batches with the same timestamp + DieselError::DatabaseError(UniqueViolation, _) => DbError::conflict(), + _ => e.into(), + } + })?; + + do_append(db, batch_id, params.user_id, collection_id, params.bsos)?; + Ok(results::CreateBatch { + id: encode_id(batch_id), + size: None, + }) +} + +pub fn validate(db: &SqliteDb, params: params::ValidateBatch) -> DbResult { + let batch_id = decode_id(¶ms.id)?; + // Avoid hitting the db for batches that are obviously too old. Recall + // that the batchid is a millisecond timestamp. + if (batch_id + BATCH_LIFETIME) < db.timestamp().as_i64() { + return Ok(false); + } + + let user_id = params.user_id.legacy_id as i64; + let collection_id = db.get_collection_id(¶ms.collection)?; + let exists = batch_uploads::table + .select(sql::("1")) + .filter(batch_uploads::batch_id.eq(&batch_id)) + .filter(batch_uploads::user_id.eq(&user_id)) + .filter(batch_uploads::collection_id.eq(&collection_id)) + .get_result::(&db.conn) + .optional()?; + Ok(exists.is_some()) +} + +pub fn append(db: &SqliteDb, params: params::AppendToBatch) -> DbResult<()> { + let exists = validate( + db, + params::ValidateBatch { + user_id: params.user_id.clone(), + collection: params.collection.clone(), + id: params.batch.id.clone(), + }, + )?; + + if !exists { + return Err(DbError::batch_not_found()); + } + + let batch_id = decode_id(¶ms.batch.id)?; + let collection_id = db.get_collection_id(¶ms.collection)?; + do_append(db, batch_id, params.user_id, collection_id, params.bsos)?; + Ok(()) +} + +pub fn get(db: &SqliteDb, params: params::GetBatch) -> DbResult> { + let is_valid = validate( + db, + params::ValidateBatch { + user_id: params.user_id, + collection: params.collection, + id: params.id.clone(), + }, + )?; + let batch = if is_valid { + Some(results::GetBatch { id: params.id }) + } else { + None + }; + Ok(batch) +} + +pub fn delete(db: &SqliteDb, params: params::DeleteBatch) -> DbResult<()> { + let batch_id = decode_id(¶ms.id)?; + let user_id = params.user_id.legacy_id as i64; + let collection_id = db.get_collection_id(¶ms.collection)?; + diesel::delete(batch_uploads::table) + .filter(batch_uploads::batch_id.eq(&batch_id)) + .filter(batch_uploads::user_id.eq(&user_id)) + .filter(batch_uploads::collection_id.eq(&collection_id)) + .execute(&db.conn)?; + diesel::delete(batch_upload_items::table) + .filter(batch_upload_items::batch_id.eq(&batch_id)) + .filter(batch_upload_items::user_id.eq(&user_id)) + .execute(&db.conn)?; + Ok(()) +} + +/// Commits a batch to the bsos table, deleting the batch when succesful +pub fn commit(db: &SqliteDb, params: params::CommitBatch) -> DbResult { + let batch_id = decode_id(¶ms.batch.id)?; + let user_id = params.user_id.legacy_id as i64; + let collection_id = db.get_collection_id(¶ms.collection)?; + let timestamp = 
db.timestamp(); + sql_query(include_str!("batch_commit.sql")) + .bind::(user_id) + .bind::(&collection_id) + .bind::(&db.timestamp().as_i64()) + .bind::(&db.timestamp().as_i64()) + .bind::((MAXTTL as i64) * 1000) // XXX: + .bind::(&batch_id) + .bind::(user_id) + .bind::(&db.timestamp().as_i64()) + .execute(&db.conn)?; + + db.update_collection(user_id as u32, collection_id)?; + + delete( + db, + params::DeleteBatch { + user_id: params.user_id, + collection: params.collection, + id: params.batch.id, + }, + )?; + Ok(timestamp) +} + +pub fn do_append( + db: &SqliteDb, + batch_id: i64, + user_id: UserIdentifier, + _collection_id: i32, + bsos: Vec, +) -> DbResult<()> { + fn exist_idx(user_id: u64, batch_id: i64, bso_id: &str) -> String { + // Construct something that matches the key for batch_upload_items + format!( + "{batch_id}-{user_id}-{bso_id}", + batch_id = batch_id, + user_id = user_id, + bso_id = bso_id, + ) + } + + // It's possible for the list of items to contain a duplicate key entry. + // This means that we can't really call `ON CONFLICT` here, because that's + // more about inserting one item at a time. (e.g. it works great if the + // values contain a key that's already in the database, less so if the + // the duplicate is in the value set we're inserting. + #[derive(Debug, QueryableByName)] + #[table_name = "batch_upload_items"] + struct ExistsResult { + batch_id: i64, + id: String, + } + + #[derive(AsChangeset)] + #[table_name = "batch_upload_items"] + struct UpdateBatches { + payload: Option, + payload_size: Option, + ttl_offset: Option, + } + + let mut existing = HashSet::new(); + + // pre-load the "existing" hashset with any batched uploads that are already in the table. + for item in sql_query( + "SELECT userid as user_id, batch as batch_id, id FROM batch_upload_items WHERE userid=? AND batch=?;", + ) + .bind::(user_id.legacy_id as i64) + .bind::(batch_id) + .get_results::(&db.conn)? + { + existing.insert(exist_idx( + user_id.legacy_id, + item.batch_id, + &item.id.to_string(), + )); + } + + for bso in bsos { + let payload_size = bso.payload.as_ref().map(|p| p.len() as i64); + let exist_idx = exist_idx(user_id.legacy_id, batch_id, &bso.id); + + if existing.contains(&exist_idx) { + diesel::update( + batch_upload_items::table + .filter(batch_upload_items::user_id.eq(user_id.legacy_id as i64)) + .filter(batch_upload_items::batch_id.eq(batch_id)), + ) + .set(&UpdateBatches { + payload: bso.payload, + payload_size, + ttl_offset: bso.ttl.map(|ttl| ttl as i32), + }) + .execute(&db.conn)?; + } else { + diesel::insert_into(batch_upload_items::table) + .values(( + batch_upload_items::batch_id.eq(&batch_id), + batch_upload_items::user_id.eq(user_id.legacy_id as i64), + batch_upload_items::id.eq(bso.id.clone()), + batch_upload_items::sortindex.eq(bso.sortindex), + batch_upload_items::payload.eq(bso.payload), + batch_upload_items::payload_size.eq(payload_size), + )) + .execute(&db.conn)?; + // make sure to include the key into our table check. 
+ existing.insert(exist_idx); + } + } + + Ok(()) +} + +pub fn validate_batch_id(id: &str) -> DbResult<()> { + decode_id(id).map(|_| ()) +} + +fn encode_id(id: i64) -> String { + base64::engine::general_purpose::STANDARD.encode(id.to_string()) +} + +fn decode_id(id: &str) -> DbResult { + let bytes = base64::engine::general_purpose::STANDARD + .decode(id) + .unwrap_or_else(|_| id.as_bytes().to_vec()); + let decoded = std::str::from_utf8(&bytes).unwrap_or(id); + decoded + .parse::() + .map_err(|e| DbError::internal(format!("Invalid batch_id: {}", e))) +} + +macro_rules! batch_db_method { + ($name:ident, $batch_name:ident, $type:ident) => { + pub fn $name(&self, params: params::$type) -> DbResult { + batch::$batch_name(self, params) + } + }; +} diff --git a/syncstorage-sqlite/src/batch_commit.sql b/syncstorage-sqlite/src/batch_commit.sql new file mode 100644 index 0000000000..63728e5f8a --- /dev/null +++ b/syncstorage-sqlite/src/batch_commit.sql @@ -0,0 +1,19 @@ +INSERT INTO bso (userid, collection, id, modified, sortindex, ttl, payload, payload_size) +SELECT + ?, + ?, + id, + ?, + sortindex, + COALESCE((ttl_offset * 1000) + ?, ?) as ttl, + COALESCE(payload, '') as payload, + COALESCE(payload_size, 0) as payload_size + FROM batch_upload_items + WHERE batch = ? + AND userid = ? + ON CONFLICT(userid, collection, id) DO UPDATE SET + modified = ?, + sortindex = COALESCE(excluded.sortindex, bso.sortindex), + ttl = COALESCE(excluded.ttl, bso.ttl), + payload = COALESCE(NULLIF(excluded.payload, ''), bso.payload), + payload_size = COALESCE(excluded.payload_size, bso.payload_size) diff --git a/syncstorage-sqlite/src/diesel_ext.rs b/syncstorage-sqlite/src/diesel_ext.rs new file mode 100644 index 0000000000..153a4cfd5f --- /dev/null +++ b/syncstorage-sqlite/src/diesel_ext.rs @@ -0,0 +1,50 @@ +use core::fmt; + +use diesel::{ + backend::Backend, + insertable::CanInsertInSingleQuery, + query_builder::{AstPass, InsertStatement, QueryFragment, QueryId}, + result::QueryResult, + sqlite::Sqlite, + Expression, RunQueryDsl, Table, +}; + +#[derive(Debug, Clone, Copy, QueryId)] +pub struct LockInShareMode; + +impl QueryFragment for LockInShareMode { + fn walk_ast(&self, mut out: AstPass<'_, Sqlite>) -> QueryResult<()> { + out.push_sql(" LOCK IN SHARE MODE"); + Ok(()) + } +} + +#[derive(Debug, Clone)] +pub struct OnDuplicateKeyUpdate(Box>, X); + +impl QueryFragment for OnDuplicateKeyUpdate +where + DB: Backend, + T: Table, + T::FromClause: QueryFragment, + U: QueryFragment + CanInsertInSingleQuery, + Op: QueryFragment, + Ret: QueryFragment, + X: Expression + fmt::Debug, +{ + fn walk_ast(&self, mut out: AstPass<'_, DB>) -> QueryResult<()> { + self.0.walk_ast(out.reborrow())?; + out.push_sql(" ON CONFLICT({user_id}, {collection_id}) DO UPDATE SET "); + //self.1.walk_ast(out.reborrow())?; + debug!("{:?}", self.1); + Ok(()) + } +} + +impl RunQueryDsl for OnDuplicateKeyUpdate {} + +impl QueryId for OnDuplicateKeyUpdate { + type QueryId = (); + + const HAS_STATIC_QUERY_ID: bool = false; +} diff --git a/syncstorage-sqlite/src/lib.rs b/syncstorage-sqlite/src/lib.rs new file mode 100644 index 0000000000..30b5c305d7 --- /dev/null +++ b/syncstorage-sqlite/src/lib.rs @@ -0,0 +1,21 @@ +#[macro_use] +extern crate diesel; +#[macro_use] +extern crate diesel_migrations; +#[macro_use] +extern crate slog_scope; + +#[macro_use] +mod batch; +mod diesel_ext; +mod models; +mod pool; +mod schema; +#[cfg(test)] +mod test; + +pub use models::SqliteDb; +pub use pool::SqliteDbPool; +pub use syncstorage_sql_db_common::error::DbError; + 
+pub(crate) type DbResult = Result; diff --git a/syncstorage-sqlite/src/models.rs b/syncstorage-sqlite/src/models.rs new file mode 100644 index 0000000000..97609ad8de --- /dev/null +++ b/syncstorage-sqlite/src/models.rs @@ -0,0 +1,1147 @@ +use futures::future::TryFutureExt; + +use std::{self, cell::RefCell, collections::HashMap, fmt, ops::Deref, sync::Arc}; + +use diesel::{ + connection::TransactionManager, + delete, + dsl::max, + expression::sql_literal::sql, + r2d2::{ConnectionManager, PooledConnection}, + sql_query, + sql_types::{BigInt, Integer, Nullable, Text}, + sqlite::SqliteConnection, + Connection, ExpressionMethods, GroupByDsl, OptionalExtension, QueryDsl, RunQueryDsl, +}; +#[cfg(debug_assertions)] +use diesel_logger::LoggingConnection; +use syncserver_common::{BlockingThreadpool, Metrics}; +use syncserver_db_common::{sync_db_method, DbFuture}; +use syncstorage_db_common::{ + error::DbErrorIntrospect, params, results, util::SyncTimestamp, Db, Sorting, UserIdentifier, + DEFAULT_BSO_TTL, +}; +use syncstorage_settings::{Quota, DEFAULT_MAX_TOTAL_RECORDS}; +use syncstorage_sql_db_common::error::DbError; + +use super::{ + batch, + pool::CollectionCache, + schema::{bso, collections, user_collections}, + DbResult, +}; + +type Conn = PooledConnection>; + +// this is the max number of records we will return. +static DEFAULT_LIMIT: u32 = DEFAULT_MAX_TOTAL_RECORDS; + +const TOMBSTONE: i32 = 0; +/// SQL Variable remapping +/// These names are the legacy values mapped to the new names. +const COLLECTION_ID: &str = "collection"; +const USER_ID: &str = "userid"; +const MODIFIED: &str = "modified"; +const EXPIRY: &str = "ttl"; +const LAST_MODIFIED: &str = "last_modified"; +const COUNT: &str = "count"; +const TOTAL_BYTES: &str = "total_bytes"; + +#[derive(Debug)] +enum CollectionLock { + Read, + Write, +} + +/// Per session Db metadata +#[derive(Debug, Default)] +struct SqliteDbSession { + /// The "current time" on the server used for this session's operations + timestamp: SyncTimestamp, + /// Cache of collection modified timestamps per (user_id, collection_id) + coll_modified_cache: HashMap<(u32, i32), SyncTimestamp>, + /// Currently locked collections + coll_locks: HashMap<(u32, i32), CollectionLock>, + /// Whether a transaction was started (begin() called) + in_transaction: bool, + in_write_transaction: bool, +} + +#[derive(Clone, Debug)] +pub struct SqliteDb { + /// Synchronous Diesel calls are executed in actix_web::web::block to satisfy + /// the Db trait's asynchronous interface. + /// + /// Arc provides a Clone impl utilized for safely moving to + /// the thread pool but does not provide Send as the underlying db + /// conn. structs are !Sync (Arc requires both for Send). See the Send impl + /// below. + pub(super) inner: Arc, + + /// Pool level cache of collection_ids and their names + coll_cache: Arc, + + pub metrics: Metrics, + pub quota: Quota, + blocking_threadpool: Arc, +} + +/// Despite the db conn structs being !Sync (see Arc above) we +/// don't spawn multiple SqliteDb calls at a time in the thread pool. Calls are +/// queued to the thread pool via Futures, naturally serialized. 
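+/// NOTE: the `unsafe impl Send` below is a manual promise to the compiler and is
+/// only sound for as long as that serialization guarantee holds.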
+unsafe impl Send for SqliteDb {} + +pub struct SqliteDbInner { + #[cfg(not(debug_assertions))] + pub(super) conn: Conn, + #[cfg(debug_assertions)] + pub(super) conn: LoggingConnection, // display SQL when RUST_LOG="diesel_logger=trace" + + session: RefCell, +} + +impl fmt::Debug for SqliteDbInner { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "SqliteDbInner {{ session: {:?} }}", self.session) + } +} + +impl Deref for SqliteDb { + type Target = SqliteDbInner; + + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +impl SqliteDb { + pub(super) fn new( + conn: Conn, + coll_cache: Arc, + metrics: &Metrics, + quota: &Quota, + blocking_threadpool: Arc, + ) -> Self { + let inner = SqliteDbInner { + #[cfg(not(debug_assertions))] + conn, + #[cfg(debug_assertions)] + conn: LoggingConnection::new(conn), + session: RefCell::new(Default::default()), + }; + // https://github.com/mozilla-services/syncstorage-rs/issues/1480 + #[allow(clippy::arc_with_non_send_sync)] + SqliteDb { + inner: Arc::new(inner), + coll_cache, + metrics: metrics.clone(), + quota: *quota, + blocking_threadpool, + } + } + + /// APIs for collection-level locking + /// + /// Explicitly lock the matching row in the user_collections table. Read + /// locks do SELECT ... LOCK IN SHARE MODE and write locks do SELECT + /// ... FOR UPDATE. + /// + /// In theory it would be possible to use serializable transactions rather + /// than explicit locking, but our ops team have expressed concerns about + /// the efficiency of that approach at scale. + pub fn lock_for_read_sync(&self, params: params::LockCollection) -> DbResult<()> { + let user_id = params.user_id.legacy_id as i64; + let collection_id = self.get_collection_id(¶ms.collection).or_else(|e| { + if e.is_collection_not_found() { + // If the collection doesn't exist, we still want to start a + // transaction so it will continue to not exist. + Ok(0) + } else { + Err(e) + } + })?; + // If we already have a read or write lock then it's safe to + // use it as-is. + if self + .session + .borrow() + .coll_locks + .contains_key(&(user_id as u32, collection_id)) + { + return Ok(()); + } + + // Lock the db + self.begin(false)?; + let modified = user_collections::table + .select(user_collections::modified) + .filter(user_collections::user_id.eq(user_id)) + .filter(user_collections::collection_id.eq(collection_id)) + //.lock_in_share_mode() + .first(&self.conn) + .optional()?; + if let Some(modified) = modified { + let modified = SyncTimestamp::from_i64(modified)?; + self.session + .borrow_mut() + .coll_modified_cache + .insert((user_id as u32, collection_id), modified); // why does it still expect a u32 int? 
+ } + // XXX: who's responsible for unlocking (removing the entry) + self.session + .borrow_mut() + .coll_locks + .insert((user_id as u32, collection_id), CollectionLock::Read); + Ok(()) + } + + pub fn lock_for_write_sync(&self, params: params::LockCollection) -> DbResult<()> { + let user_id = params.user_id.legacy_id as i64; + let collection_id = self.get_or_create_collection_id(¶ms.collection)?; + if let Some(CollectionLock::Read) = self + .session + .borrow() + .coll_locks + .get(&(user_id as u32, collection_id)) + { + return Err(DbError::internal( + "Can't escalate read-lock to write-lock".to_owned(), + )); + } + + // Lock the db + self.begin(true)?; + let modified = user_collections::table + .select(user_collections::modified) + .filter(user_collections::user_id.eq(user_id)) + .filter(user_collections::collection_id.eq(collection_id)) + // .for_update() + .first(&self.conn) + .optional()?; + if let Some(modified) = modified { + let modified = SyncTimestamp::from_i64(modified)?; + // Forbid the write if it would not properly incr the timestamp + if modified >= self.timestamp() { + return Err(DbError::conflict()); + } + self.session + .borrow_mut() + .coll_modified_cache + .insert((user_id as u32, collection_id), modified); + } + self.session + .borrow_mut() + .coll_locks + .insert((user_id as u32, collection_id), CollectionLock::Write); + Ok(()) + } + + pub(super) fn begin(&self, for_write: bool) -> DbResult<()> { + self.conn + .transaction_manager() + .begin_transaction(&self.conn)?; + self.session.borrow_mut().in_transaction = true; + if for_write { + self.session.borrow_mut().in_write_transaction = true; + } + Ok(()) + } + + pub async fn begin_async(&self, for_write: bool) -> DbResult<()> { + self.begin(for_write) + } + + pub fn commit_sync(&self) -> DbResult<()> { + if self.session.borrow().in_transaction { + self.conn + .transaction_manager() + .commit_transaction(&self.conn)?; + } + Ok(()) + } + + pub fn rollback_sync(&self) -> DbResult<()> { + if self.session.borrow().in_transaction { + self.conn + .transaction_manager() + .rollback_transaction(&self.conn)?; + } + Ok(()) + } + + fn erect_tombstone(&self, user_id: i32) -> DbResult<()> { + sql_query(format!( + r#"INSERT INTO user_collections ({user_id}, {collection_id}, {modified}) + VALUES (?, ?, ?) + ON CONFLICT({user_id}, {collection_id}) DO UPDATE SET + {modified} = excluded.{modified}"#, + user_id = USER_ID, + collection_id = COLLECTION_ID, + modified = LAST_MODIFIED + )) + .bind::(user_id as i64) + .bind::(TOMBSTONE) + .bind::(self.timestamp().as_i64()) + .execute(&self.conn)?; + Ok(()) + } + + pub fn delete_storage_sync(&self, user_id: UserIdentifier) -> DbResult<()> { + let user_id = user_id.legacy_id as i64; + // Delete user data. + delete(bso::table) + .filter(bso::user_id.eq(user_id)) + .execute(&self.conn)?; + // Delete user collections. 
+ delete(user_collections::table) + .filter(user_collections::user_id.eq(user_id)) + .execute(&self.conn)?; + Ok(()) + } + + // Deleting the collection should result in: + // - collection does not appear in /info/collections + // - X-Last-Modified timestamp at the storage level changing + pub fn delete_collection_sync( + &self, + params: params::DeleteCollection, + ) -> DbResult { + let user_id = params.user_id.legacy_id as i64; + let collection_id = self.get_collection_id(¶ms.collection)?; + let mut count = delete(bso::table) + .filter(bso::user_id.eq(user_id)) + .filter(bso::collection_id.eq(&collection_id)) + .execute(&self.conn)?; + count += delete(user_collections::table) + .filter(user_collections::user_id.eq(user_id)) + .filter(user_collections::collection_id.eq(&collection_id)) + .execute(&self.conn)?; + if count == 0 { + return Err(DbError::collection_not_found()); + } else { + self.erect_tombstone(user_id as i32)?; + } + self.get_storage_timestamp_sync(params.user_id) + } + + pub(super) fn get_or_create_collection_id(&self, name: &str) -> DbResult { + if let Some(id) = self.coll_cache.get_id(name)? { + return Ok(id); + } + + let id = self.conn.transaction(|| { + diesel::insert_or_ignore_into(collections::table) + .values(collections::name.eq(name)) + .execute(&self.conn)?; + + collections::table + .select(collections::id) + .filter(collections::name.eq(name)) + .first(&self.conn) + })?; + + if !self.session.borrow().in_write_transaction { + self.coll_cache.put(id, name.to_owned())?; + } + + Ok(id) + } + + pub(super) fn get_collection_id(&self, name: &str) -> DbResult { + if let Some(id) = self.coll_cache.get_id(name)? { + return Ok(id); + } + + let id = sql_query( + "SELECT id + FROM collections + WHERE name = ?", + ) + .bind::(name) + .get_result::(&self.conn) + .optional()? + .ok_or_else(DbError::collection_not_found)? + .id; + if !self.session.borrow().in_write_transaction { + self.coll_cache.put(id, name.to_owned())?; + } + Ok(id) + } + + fn _get_collection_name(&self, id: i32) -> DbResult { + let name = if let Some(name) = self.coll_cache.get_name(id)? { + name + } else { + sql_query( + "SELECT name + FROM collections + WHERE id = ?", + ) + .bind::(&id) + .get_result::(&self.conn) + .optional()? + .ok_or_else(DbError::collection_not_found)? 
+ .name + }; + Ok(name) + } + + pub fn put_bso_sync(&self, bso: params::PutBso) -> DbResult { + /* + if bso.payload.is_none() && bso.sortindex.is_none() && bso.ttl.is_none() { + // XXX: go returns an error here (ErrNothingToDo), and is treated + // as other errors + return Ok(()); + } + */ + + let collection_id = self.get_or_create_collection_id(&bso.collection)?; + let user_id: u64 = bso.user_id.legacy_id; + let timestamp = self.timestamp().as_i64(); + if self.quota.enabled { + let usage = self.get_quota_usage_sync(params::GetQuotaUsage { + user_id: bso.user_id.clone(), + collection: bso.collection.clone(), + collection_id, + })?; + if usage.total_bytes >= self.quota.size { + let mut tags = HashMap::default(); + tags.insert("collection".to_owned(), bso.collection.clone()); + self.metrics.incr_with_tags("storage.quota.at_limit", tags); + if self.quota.enforced { + return Err(DbError::quota()); + } else { + warn!("Quota at limit for user's collection ({} bytes)", usage.total_bytes; "collection"=>bso.collection.clone()); + } + } + } + + self.conn.transaction(|| { + let payload = bso.payload.as_deref().unwrap_or_default(); + let sortindex = bso.sortindex; + let ttl = bso.ttl.map_or(DEFAULT_BSO_TTL, |ttl| ttl); + let q = format!(r#" + INSERT INTO bso ({user_id}, {collection_id}, id, sortindex, payload, {modified}, {expiry}) + VALUES (?, ?, ?, ?, ?, ?, ?) + ON CONFLICT({user_id}, {collection_id}, id) DO UPDATE SET + {user_id} = excluded.{user_id}, + {collection_id} = excluded.{collection_id}, + id = excluded.id + "#, user_id=USER_ID, modified=MODIFIED, collection_id=COLLECTION_ID, expiry=EXPIRY); + let q = format!( + "{}{}", + q, + if bso.sortindex.is_some() { + ", sortindex = excluded.sortindex" + } else { + "" + }, + ); + let q = format!( + "{}{}", + q, + if bso.payload.is_some() { + ", payload = excluded.payload" + } else { + "" + }, + ); + let q = format!( + "{}{}", + q, + if bso.ttl.is_some() { + format!(", {expiry} = excluded.{expiry}", expiry=EXPIRY) + } else { + "".to_owned() + }, + ); + let q = format!( + "{}{}", + q, + if bso.payload.is_some() || bso.sortindex.is_some() { + format!(", {modified} = excluded.{modified}", modified=MODIFIED) + } else { + "".to_owned() + }, + ); + sql_query(q) + .bind::(user_id as i64) // XXX: + .bind::(&collection_id) + .bind::(&bso.id) + .bind::, _>(sortindex) + .bind::(payload) + .bind::(timestamp) + .bind::(timestamp + (i64::from(ttl) * 1000)) // remember: this is in millis + .execute(&self.conn)?; + self.update_collection(user_id as u32, collection_id) + }) + } + + pub fn get_bsos_sync(&self, params: params::GetBsos) -> DbResult { + let user_id = params.user_id.legacy_id as i64; + let collection_id = self.get_collection_id(¶ms.collection)?; + let now = self.timestamp().as_i64(); + let mut query = bso::table + .select(( + bso::id, + bso::modified, + bso::payload, + bso::sortindex, + bso::expiry, + )) + .filter(bso::user_id.eq(user_id)) + .filter(bso::collection_id.eq(collection_id)) // XXX: + .filter(bso::expiry.gt(now)) + .into_boxed(); + + if let Some(older) = params.older { + query = query.filter(bso::modified.lt(older.as_i64())); + } + if let Some(newer) = params.newer { + query = query.filter(bso::modified.gt(newer.as_i64())); + } + + if !params.ids.is_empty() { + query = query.filter(bso::id.eq_any(params.ids)); + } + + // it's possible for two BSOs to be inserted with the same `modified` date, + // since there's no guarantee of order when doing a get, pagination can return + // an error. 
We "fudge" a bit here by taking the id order as a secondary, since + // that is guaranteed to be unique by the client. + query = match params.sort { + // issue559: Revert to previous sorting + /* + Sorting::Index => query.order(bso::id.desc()).order(bso::sortindex.desc()), + Sorting::Newest | Sorting::None => { + query.order(bso::id.desc()).order(bso::modified.desc()) + } + Sorting::Oldest => query.order(bso::id.asc()).order(bso::modified.asc()), + */ + Sorting::Index => query.order(bso::sortindex.desc()), + Sorting::Newest => query.order((bso::modified.desc(), bso::id.desc())), + Sorting::Oldest => query.order((bso::modified.asc(), bso::id.asc())), + _ => query, + }; + + let limit = params + .limit + .map(i64::from) + .unwrap_or(DEFAULT_LIMIT as i64) + .max(0); + // fetch an extra row to detect if there are more rows that + // match the query conditions + query = query.limit(if limit > 0 { limit + 1 } else { limit }); + + let numeric_offset = params.offset.map_or(0, |offset| offset.offset as i64); + + if numeric_offset > 0 { + // XXX: copy over this optimization: + // https://github.com/mozilla-services/server-syncstorage/blob/a0f8117/syncstorage/storage/sql/__init__.py#L404 + query = query.offset(numeric_offset); + } + let mut bsos = query.load::(&self.conn)?; + + // XXX: an additional get_collection_timestamp is done here in + // python to trigger potential CollectionNotFoundErrors + //if bsos.len() == 0 { + //} + + let next_offset = if limit >= 0 && bsos.len() > limit as usize { + bsos.pop(); + Some((limit + numeric_offset).to_string()) + } else { + // if an explicit "limit=0" is sent, return the offset of "0" + // Otherwise, this would break at least the db::tests::db::get_bsos_limit_offset + // unit test. + if limit == 0 { + Some(0.to_string()) + } else { + None + } + }; + + Ok(results::GetBsos { + items: bsos, + offset: next_offset, + }) + } + + pub fn get_bso_ids_sync(&self, params: params::GetBsos) -> DbResult { + let user_id = params.user_id.legacy_id as i64; + let collection_id = self.get_collection_id(¶ms.collection)?; + let mut query = bso::table + .select(bso::id) + .filter(bso::user_id.eq(user_id)) + .filter(bso::collection_id.eq(collection_id)) // XXX: + .filter(bso::expiry.gt(self.timestamp().as_i64())) + .into_boxed(); + + if let Some(older) = params.older { + query = query.filter(bso::modified.lt(older.as_i64())); + } + if let Some(newer) = params.newer { + query = query.filter(bso::modified.gt(newer.as_i64())); + } + + if !params.ids.is_empty() { + query = query.filter(bso::id.eq_any(params.ids)); + } + + query = match params.sort { + Sorting::Index => query.order(bso::sortindex.desc()), + Sorting::Newest => query.order(bso::modified.desc()), + Sorting::Oldest => query.order(bso::modified.asc()), + _ => query, + }; + + // negative limits are no longer allowed by mysql. + let limit = params + .limit + .map(i64::from) + .unwrap_or(DEFAULT_LIMIT as i64) + .max(0); + // fetch an extra row to detect if there are more rows that + // match the query conditions. Negative limits will cause an error. 
+ query = query.limit(if limit == 0 { limit } else { limit + 1 }); + let numeric_offset = params.offset.map_or(0, |offset| offset.offset as i64); + if numeric_offset != 0 { + // XXX: copy over this optimization: + // https://github.com/mozilla-services/server-syncstorage/blob/a0f8117/syncstorage/storage/sql/__init__.py#L404 + query = query.offset(numeric_offset); + } + let mut ids = query.load::(&self.conn)?; + + // XXX: an additional get_collection_timestamp is done here in + // python to trigger potential CollectionNotFoundErrors + //if bsos.len() == 0 { + //} + + let next_offset = if limit >= 0 && ids.len() > limit as usize { + ids.pop(); + Some((limit + numeric_offset).to_string()) + } else { + None + }; + + Ok(results::GetBsoIds { + items: ids, + offset: next_offset, + }) + } + + pub fn get_bso_sync(&self, params: params::GetBso) -> DbResult> { + let user_id = params.user_id.legacy_id as i64; + let collection_id = self.get_collection_id(¶ms.collection)?; + Ok(bso::table + .select(( + bso::id, + bso::modified, + bso::payload, + bso::sortindex, + bso::expiry, + )) + .filter(bso::user_id.eq(user_id)) + .filter(bso::collection_id.eq(&collection_id)) + .filter(bso::id.eq(¶ms.id)) + .filter(bso::expiry.ge(self.timestamp().as_i64())) + .get_result::(&self.conn) + .optional()?) + } + + pub fn delete_bso_sync(&self, params: params::DeleteBso) -> DbResult { + let user_id = params.user_id.legacy_id; + let collection_id = self.get_collection_id(¶ms.collection)?; + let affected_rows = delete(bso::table) + .filter(bso::user_id.eq(user_id as i64)) + .filter(bso::collection_id.eq(&collection_id)) + .filter(bso::id.eq(params.id)) + .filter(bso::expiry.gt(&self.timestamp().as_i64())) + .execute(&self.conn)?; + if affected_rows == 0 { + return Err(DbError::bso_not_found()); + } + self.update_collection(user_id as u32, collection_id) + } + + pub fn delete_bsos_sync(&self, params: params::DeleteBsos) -> DbResult { + let user_id = params.user_id.legacy_id as i64; + let collection_id = self.get_collection_id(¶ms.collection)?; + delete(bso::table) + .filter(bso::user_id.eq(user_id)) + .filter(bso::collection_id.eq(&collection_id)) + .filter(bso::id.eq_any(params.ids)) + .execute(&self.conn)?; + self.update_collection(user_id as u32, collection_id) + } + + pub fn post_bsos_sync(&self, input: params::PostBsos) -> DbResult { + let collection_id = self.get_or_create_collection_id(&input.collection)?; + let mut result = results::PostBsos { + modified: self.timestamp(), + success: Default::default(), + failed: input.failed, + }; + + for pbso in input.bsos { + let id = pbso.id; + let put_result = self.put_bso_sync(params::PutBso { + user_id: input.user_id.clone(), + collection: input.collection.clone(), + id: id.clone(), + payload: pbso.payload, + sortindex: pbso.sortindex, + ttl: pbso.ttl, + }); + // XXX: python version doesn't report failures from db + // layer.. (wouldn't db failures abort the entire transaction + // anyway?) + // XXX: sanitize to.to_string()? + match put_result { + Ok(_) => result.success.push(id), + Err(e) => { + result.failed.insert(id, e.to_string()); + } + } + } + self.update_collection(input.user_id.legacy_id as u32, collection_id)?; + Ok(result) + } + + pub fn get_storage_timestamp_sync(&self, user_id: UserIdentifier) -> DbResult { + let user_id = user_id.legacy_id as i64; + let modified = user_collections::table + .select(max(user_collections::modified)) + .filter(user_collections::user_id.eq(user_id)) + .first::>(&self.conn)? 
+ .unwrap_or_default(); + SyncTimestamp::from_i64(modified).map_err(Into::into) + } + + pub fn get_collection_timestamp_sync( + &self, + params: params::GetCollectionTimestamp, + ) -> DbResult { + let user_id = params.user_id.legacy_id as u32; + let collection_id = self.get_collection_id(¶ms.collection)?; + if let Some(modified) = self + .session + .borrow() + .coll_modified_cache + .get(&(user_id, collection_id)) + { + return Ok(*modified); + } + user_collections::table + .select(user_collections::modified) + .filter(user_collections::user_id.eq(user_id as i64)) + .filter(user_collections::collection_id.eq(collection_id)) + .first(&self.conn) + .optional()? + .ok_or_else(DbError::collection_not_found) + } + + pub fn get_bso_timestamp_sync( + &self, + params: params::GetBsoTimestamp, + ) -> DbResult { + let user_id = params.user_id.legacy_id as i64; + let collection_id = self.get_collection_id(¶ms.collection)?; + let modified = bso::table + .select(bso::modified) + .filter(bso::user_id.eq(user_id)) + .filter(bso::collection_id.eq(&collection_id)) + .filter(bso::id.eq(¶ms.id)) + .first::(&self.conn) + .optional()? + .unwrap_or_default(); + SyncTimestamp::from_i64(modified).map_err(Into::into) + } + + pub fn get_collection_timestamps_sync( + &self, + user_id: UserIdentifier, + ) -> DbResult { + let modifieds = sql_query(format!( + "SELECT {collection_id}, {modified} + FROM user_collections + WHERE {user_id} = ? + AND {collection_id} != ?", + collection_id = COLLECTION_ID, + user_id = USER_ID, + modified = LAST_MODIFIED + )) + .bind::(user_id.legacy_id as i64) + .bind::(TOMBSTONE) + .load::(&self.conn)? + .into_iter() + .map(|cr| { + SyncTimestamp::from_i64(cr.last_modified) + .map(|ts| (cr.collection, ts)) + .map_err(Into::into) + }) + .collect::>>()?; + self.map_collection_names(modifieds) + } + + fn check_sync(&self) -> DbResult { + // Check if querying works + sql_query("SELECT 1").execute(&self.conn)?; + Ok(true) + } + + fn map_collection_names(&self, by_id: HashMap) -> DbResult> { + let mut names = self.load_collection_names(by_id.keys())?; + by_id + .into_iter() + .map(|(id, value)| { + names.remove(&id).map(|name| (name, value)).ok_or_else(|| { + DbError::internal("load_collection_names unknown collection id".to_owned()) + }) + }) + .collect() + } + + fn load_collection_names<'a>( + &self, + collection_ids: impl Iterator, + ) -> DbResult> { + let mut names = HashMap::new(); + let mut uncached = Vec::new(); + for &id in collection_ids { + if let Some(name) = self.coll_cache.get_name(id)? { + names.insert(id, name); + } else { + uncached.push(id); + } + } + + if !uncached.is_empty() { + let result = collections::table + .select((collections::id, collections::name)) + .filter(collections::id.eq_any(uncached)) + .load::<(i32, String)>(&self.conn)?; + + for (id, name) in result { + names.insert(id, name.clone()); + if !self.session.borrow().in_write_transaction { + self.coll_cache.put(id, name)?; + } + } + } + + Ok(names) + } + + pub(super) fn update_collection( + &self, + user_id: u32, + collection_id: i32, + ) -> DbResult { + let quota = if self.quota.enabled { + self.calc_quota_usage_sync(user_id, collection_id)? + } else { + results::GetQuotaUsage { + count: 0, + total_bytes: 0, + } + }; + let upsert = format!( + r#" + INSERT INTO user_collections ({user_id}, {collection_id}, {modified}, {total_bytes}, {count}) + VALUES (?, ?, ?, ?, ?) + ON CONFLICT({user_id}, {collection_id}) DO UPDATE SET + {modified} = ?, + {total_bytes} = ?, + {count} = ? 
+ "#, + user_id = USER_ID, + collection_id = COLLECTION_ID, + modified = LAST_MODIFIED, + count = COUNT, + total_bytes = TOTAL_BYTES, + ); + let total_bytes = quota.total_bytes as i64; + sql_query(upsert) + .bind::(user_id as i64) + .bind::(&collection_id) + .bind::(&self.timestamp().as_i64()) + .bind::(&total_bytes) + .bind::("a.count) + .bind::(&self.timestamp().as_i64()) + .bind::(&total_bytes) + .bind::("a.count) + .execute(&self.conn)?; + Ok(self.timestamp()) + } + + // Perform a lighter weight "read only" storage size check + pub fn get_storage_usage_sync( + &self, + user_id: UserIdentifier, + ) -> DbResult { + let uid = user_id.legacy_id as i64; + let total_bytes = bso::table + .select(sql::>("SUM(LENGTH(payload))")) + .filter(bso::user_id.eq(uid)) + .filter(bso::expiry.gt(&self.timestamp().as_i64())) + .get_result::>(&self.conn)?; + Ok(total_bytes.unwrap_or_default() as u64) + } + + // Perform a lighter weight "read only" quota storage check + pub fn get_quota_usage_sync( + &self, + params: params::GetQuotaUsage, + ) -> DbResult { + let uid = params.user_id.legacy_id as i64; + let (total_bytes, count): (i64, i32) = user_collections::table + .select(( + sql::("COALESCE(SUM(COALESCE(total_bytes, 0)), 0)"), + sql::("COALESCE(SUM(COALESCE(count, 0)), 0)"), + )) + .filter(user_collections::user_id.eq(uid)) + .filter(user_collections::collection_id.eq(params.collection_id)) + .get_result(&self.conn) + .optional()? + .unwrap_or_default(); + Ok(results::GetQuotaUsage { + total_bytes: total_bytes as usize, + count, + }) + } + + // perform a heavier weight quota calculation + pub fn calc_quota_usage_sync( + &self, + user_id: u32, + collection_id: i32, + ) -> DbResult { + let (total_bytes, count): (i64, i32) = bso::table + .select(( + sql::(r#"COALESCE(SUM(LENGTH(COALESCE(payload, ""))),0)"#), + sql::("COALESCE(COUNT(*),0)"), + )) + .filter(bso::user_id.eq(user_id as i64)) + .filter(bso::expiry.gt(self.timestamp().as_i64())) + .filter(bso::collection_id.eq(collection_id)) + .get_result(&self.conn) + .optional()? + .unwrap_or_default(); + Ok(results::GetQuotaUsage { + total_bytes: total_bytes as usize, + count, + }) + } + + pub fn get_collection_usage_sync( + &self, + user_id: UserIdentifier, + ) -> DbResult { + let counts = bso::table + .select((bso::collection_id, sql::("SUM(LENGTH(payload))"))) + .filter(bso::user_id.eq(user_id.legacy_id as i64)) + .filter(bso::expiry.gt(&self.timestamp().as_i64())) + .group_by(bso::collection_id) + .load(&self.conn)? + .into_iter() + .collect(); + self.map_collection_names(counts) + } + + pub fn get_collection_counts_sync( + &self, + user_id: UserIdentifier, + ) -> DbResult { + let counts = bso::table + .select(( + bso::collection_id, + sql::(&format!( + "COUNT({collection_id})", + collection_id = COLLECTION_ID + )), + )) + .filter(bso::user_id.eq(user_id.legacy_id as i64)) + .filter(bso::expiry.gt(&self.timestamp().as_i64())) + .group_by(bso::collection_id) + .load(&self.conn)? 
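The statement assembled above relies on SQLite's ON CONFLICT upsert. With the column-name constants substituted (they are assumed to resolve to the userid/collection/last_modified/total_bytes/count names declared in schema.rs), it reads roughly as below; note that the timestamp, byte total and item count are bound twice, once for the INSERT arm and once for the DO UPDATE arm:

    // Illustrative expansion only; the real code builds this string with
    // format!() from the shared column-name constants.
    const USER_COLLECTIONS_UPSERT: &str = r#"
    INSERT INTO user_collections (userid, collection, last_modified, total_bytes, count)
    VALUES (?, ?, ?, ?, ?)
    ON CONFLICT(userid, collection) DO UPDATE SET
        last_modified = ?,
        total_bytes = ?,
        count = ?
    "#;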
+ .into_iter() + .collect(); + self.map_collection_names(counts) + } + + batch_db_method!(create_batch_sync, create, CreateBatch); + batch_db_method!(validate_batch_sync, validate, ValidateBatch); + batch_db_method!(append_to_batch_sync, append, AppendToBatch); + batch_db_method!(commit_batch_sync, commit, CommitBatch); + batch_db_method!(delete_batch_sync, delete, DeleteBatch); + + pub fn get_batch_sync(&self, params: params::GetBatch) -> DbResult> { + batch::get(self, params) + } + + pub fn timestamp(&self) -> SyncTimestamp { + self.session.borrow().timestamp + } +} + +impl Db for SqliteDb { + type Error = DbError; + + fn commit(&self) -> DbFuture<'_, (), Self::Error> { + let db = self.clone(); + Box::pin(self.blocking_threadpool.spawn(move || db.commit_sync())) + } + + fn rollback(&self) -> DbFuture<'_, (), Self::Error> { + let db = self.clone(); + Box::pin(self.blocking_threadpool.spawn(move || db.rollback_sync())) + } + + fn begin(&self, for_write: bool) -> DbFuture<'_, (), Self::Error> { + let db = self.clone(); + Box::pin(async move { db.begin_async(for_write).map_err(Into::into).await }) + } + + fn check(&self) -> DbFuture<'_, results::Check, Self::Error> { + let db = self.clone(); + Box::pin(self.blocking_threadpool.spawn(move || db.check_sync())) + } + + sync_db_method!(lock_for_read, lock_for_read_sync, LockCollection); + sync_db_method!(lock_for_write, lock_for_write_sync, LockCollection); + sync_db_method!( + get_collection_timestamps, + get_collection_timestamps_sync, + GetCollectionTimestamps + ); + sync_db_method!( + get_collection_timestamp, + get_collection_timestamp_sync, + GetCollectionTimestamp + ); + sync_db_method!( + get_collection_counts, + get_collection_counts_sync, + GetCollectionCounts + ); + sync_db_method!( + get_collection_usage, + get_collection_usage_sync, + GetCollectionUsage + ); + sync_db_method!( + get_storage_timestamp, + get_storage_timestamp_sync, + GetStorageTimestamp + ); + sync_db_method!(get_storage_usage, get_storage_usage_sync, GetStorageUsage); + sync_db_method!(get_quota_usage, get_quota_usage_sync, GetQuotaUsage); + sync_db_method!(delete_storage, delete_storage_sync, DeleteStorage); + sync_db_method!(delete_collection, delete_collection_sync, DeleteCollection); + sync_db_method!(delete_bsos, delete_bsos_sync, DeleteBsos); + sync_db_method!(get_bsos, get_bsos_sync, GetBsos); + sync_db_method!(get_bso_ids, get_bso_ids_sync, GetBsoIds); + sync_db_method!(post_bsos, post_bsos_sync, PostBsos); + sync_db_method!(delete_bso, delete_bso_sync, DeleteBso); + sync_db_method!(get_bso, get_bso_sync, GetBso, Option); + sync_db_method!( + get_bso_timestamp, + get_bso_timestamp_sync, + GetBsoTimestamp, + results::GetBsoTimestamp + ); + sync_db_method!(put_bso, put_bso_sync, PutBso); + sync_db_method!(create_batch, create_batch_sync, CreateBatch); + sync_db_method!(validate_batch, validate_batch_sync, ValidateBatch); + sync_db_method!(append_to_batch, append_to_batch_sync, AppendToBatch); + sync_db_method!( + get_batch, + get_batch_sync, + GetBatch, + Option + ); + sync_db_method!(commit_batch, commit_batch_sync, CommitBatch); + + fn get_collection_id(&self, name: String) -> DbFuture<'_, i32, Self::Error> { + let db = self.clone(); + Box::pin( + self.blocking_threadpool + .spawn(move || db.get_collection_id(&name)), + ) + } + + fn get_connection_info(&self) -> results::ConnectionInfo { + results::ConnectionInfo::default() + } + + fn create_collection(&self, name: String) -> DbFuture<'_, i32, Self::Error> { + let db = self.clone(); + Box::pin( + 
self.blocking_threadpool + .spawn(move || db.get_or_create_collection_id(&name)), + ) + } + + fn update_collection( + &self, + param: params::UpdateCollection, + ) -> DbFuture<'_, SyncTimestamp, Self::Error> { + let db = self.clone(); + Box::pin(self.blocking_threadpool.spawn(move || { + db.update_collection(param.user_id.legacy_id as u32, param.collection_id) + })) + } + + fn timestamp(&self) -> SyncTimestamp { + self.timestamp() + } + + fn set_timestamp(&self, timestamp: SyncTimestamp) { + self.session.borrow_mut().timestamp = timestamp; + } + + sync_db_method!(delete_batch, delete_batch_sync, DeleteBatch); + + fn clear_coll_cache(&self) -> DbFuture<'_, (), Self::Error> { + let db = self.clone(); + Box::pin(self.blocking_threadpool.spawn(move || { + db.coll_cache.clear(); + Ok(()) + })) + } + + fn set_quota(&mut self, enabled: bool, limit: usize, enforced: bool) { + self.quota = Quota { + size: limit, + enabled, + enforced, + } + } + + fn box_clone(&self) -> Box> { + Box::new(self.clone()) + } +} + +#[derive(Debug, QueryableByName)] +struct IdResult { + #[sql_type = "Integer"] + id: i32, +} + +#[allow(dead_code)] // Not really dead, Rust can't see the use above +#[derive(Debug, QueryableByName)] +struct NameResult { + #[sql_type = "Text"] + name: String, +} + +#[derive(Debug, QueryableByName)] +struct UserCollectionsResult { + // Can't substitute column names here. + #[sql_type = "Integer"] + collection: i32, // COLLECTION_ID + #[sql_type = "BigInt"] + last_modified: i64, // LAST_MODIFIED +} diff --git a/syncstorage-sqlite/src/pool.rs b/syncstorage-sqlite/src/pool.rs new file mode 100644 index 0000000000..4a1326ca97 --- /dev/null +++ b/syncstorage-sqlite/src/pool.rs @@ -0,0 +1,234 @@ +use async_trait::async_trait; + +use std::{ + collections::HashMap, + fmt, + sync::{Arc, RwLock}, + time::Duration, +}; + +use diesel::{ + r2d2::{ConnectionManager, Pool}, + sqlite::SqliteConnection, + Connection, +}; +#[cfg(debug_assertions)] +use diesel_logger::LoggingConnection; +use syncserver_common::{BlockingThreadpool, Metrics}; +#[cfg(debug_assertions)] +use syncserver_db_common::test::TestTransactionCustomizer; +use syncserver_db_common::{GetPoolState, PoolState}; +use syncstorage_db_common::{Db, DbPool, STD_COLLS}; +use syncstorage_settings::{Quota, Settings}; +use syncstorage_sql_db_common::error::DbError; + +use super::{models::SqliteDb, DbResult}; + +embed_migrations!(); + +/// Run the diesel embedded migrations +/// +/// Sqlite DDL statements implicitly commit which could disrupt SqlitePool's +/// begin_test_transaction during tests. So this runs on its own separate conn. +fn run_embedded_migrations(database_url: &str) -> DbResult<()> { + let path = database_url + .strip_prefix("sqlite://") + .unwrap_or(database_url); + let conn = SqliteConnection::establish(path)?; + #[cfg(debug_assertions)] + // XXX: this doesn't show the DDL statements + // https://github.com/shssoichiro/diesel-logger/issues/1 + embedded_migrations::run(&LoggingConnection::new(conn))?; + #[cfg(not(debug_assertions))] + embedded_migrations::run(&conn)?; + Ok(()) +} + +#[derive(Clone)] +pub struct SqliteDbPool { + /// Pool of db connections + pool: Pool>, + /// Thread Pool for running synchronous db calls + /// In-memory cache of collection_ids and their names + coll_cache: Arc, + + metrics: Metrics, + quota: Quota, + blocking_threadpool: Arc, +} + +impl SqliteDbPool { + /// Creates a new pool of Sqlite db connections. + /// + /// Also initializes the Sqlite db, ensuring all migrations are ran. 
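The sync_db_method! entries in the Db impl above presumably expand to the same shape as the hand-written commit/check/create_collection methods: clone the handle, run the *_sync body on the blocking threadpool, and box the resulting future. A rough sketch of what one such entry stands for (not the actual macro output):

    fn get_bso_timestamp(
        &self,
        params: params::GetBsoTimestamp,
    ) -> DbFuture<'_, results::GetBsoTimestamp, Self::Error> {
        let db = self.clone();
        Box::pin(
            self.blocking_threadpool
                .spawn(move || db.get_bso_timestamp_sync(params)),
        )
    }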
+ pub fn new( + settings: &Settings, + metrics: &Metrics, + blocking_threadpool: Arc, + ) -> DbResult { + run_embedded_migrations(&settings.database_url)?; + Self::new_without_migrations(settings, metrics, blocking_threadpool) + } + + pub fn new_without_migrations( + settings: &Settings, + metrics: &Metrics, + blocking_threadpool: Arc, + ) -> DbResult { + let path = settings + .database_url + .strip_prefix("sqlite://") + .unwrap_or(&settings.database_url); + let manager = ConnectionManager::::new(path); + let builder = Pool::builder() + .max_size(settings.database_pool_max_size) + .connection_timeout(Duration::from_secs( + settings.database_pool_connection_timeout.unwrap_or(30) as u64, + )) + .idle_timeout(Some(Duration::from_secs(1))) // FIXME: This one should only be enabled in testing sqlite + .min_idle(settings.database_pool_min_idle); + + #[cfg(debug_assertions)] + let builder = if settings.database_use_test_transactions { + builder.connection_customizer(Box::new(TestTransactionCustomizer)) + } else { + builder + }; + + Ok(Self { + pool: builder.build(manager)?, + coll_cache: Default::default(), + metrics: metrics.clone(), + quota: Quota { + size: settings.limits.max_quota_limit as usize, + enabled: settings.enable_quota, + enforced: settings.enforce_quota, + }, + blocking_threadpool, + }) + } + + /// Spawn a task to periodically evict idle connections. Calls wrapper sweeper fn + /// to use pool.retain, retaining objects only if they are shorter in duration than + /// defined max_idle. Noop for mysql impl. + pub fn spawn_sweeper(&self, _interval: Duration) { + sweeper() + } + + pub fn get_sync(&self) -> DbResult { + Ok(SqliteDb::new( + self.pool.get()?, + Arc::clone(&self.coll_cache), + &self.metrics, + &self.quota, + self.blocking_threadpool.clone(), + )) + } +} + +/// Sweeper to retain only the objects specified within the closure. +/// In this context, if a Spanner connection is unutilized, we want it +/// to release the given connections. +/// See: https://docs.rs/deadpool/latest/deadpool/managed/struct.Pool.html#method.retain +/// Noop for mysql impl +fn sweeper() {} + +#[async_trait] +impl DbPool for SqliteDbPool { + type Error = DbError; + + async fn get<'a>(&'a self) -> DbResult>> { + let pool = self.clone(); + self.blocking_threadpool + .spawn(move || pool.get_sync()) + .await + .map(|db| Box::new(db) as Box>) + } + + fn validate_batch_id(&self, id: String) -> DbResult<()> { + super::batch::validate_batch_id(&id) + } + + fn box_clone(&self) -> Box> { + Box::new(self.clone()) + } +} + +impl fmt::Debug for SqliteDbPool { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("SqliteDbPool") + .field("coll_cache", &self.coll_cache) + .finish() + } +} + +impl GetPoolState for SqliteDbPool { + fn state(&self) -> PoolState { + self.pool.state().into() + } +} + +#[derive(Debug)] +pub(super) struct CollectionCache { + pub by_name: RwLock>, + pub by_id: RwLock>, +} + +impl CollectionCache { + pub fn put(&self, id: i32, name: String) -> DbResult<()> { + // XXX: should this emit a metric? + // XXX: should probably either lock both simultaneously during + // writes or use an RwLock alternative + self.by_name + .write() + .map_err(|_| DbError::internal("by_name write".to_owned()))? + .insert(name.clone(), id); + self.by_id + .write() + .map_err(|_| DbError::internal("by_id write".to_owned()))? 
+ .insert(id, name); + Ok(()) + } + + pub fn get_id(&self, name: &str) -> DbResult> { + Ok(self + .by_name + .read() + .map_err(|_| DbError::internal("by_name read".to_owned()))? + .get(name) + .cloned()) + } + + pub fn get_name(&self, id: i32) -> DbResult> { + Ok(self + .by_id + .read() + .map_err(|_| DbError::internal("by_id read".to_owned()))? + .get(&id) + .cloned()) + } + + pub fn clear(&self) { + self.by_name.write().expect("by_name write").clear(); + self.by_id.write().expect("by_id write").clear(); + } +} + +impl Default for CollectionCache { + fn default() -> Self { + Self { + by_name: RwLock::new( + STD_COLLS + .iter() + .map(|(k, v)| ((*v).to_owned(), *k)) + .collect(), + ), + by_id: RwLock::new( + STD_COLLS + .iter() + .map(|(k, v)| (*k, (*v).to_owned())) + .collect(), + ), + } + } +} diff --git a/syncstorage-sqlite/src/schema.rs b/syncstorage-sqlite/src/schema.rs new file mode 100644 index 0000000000..9fb0be6a5c --- /dev/null +++ b/syncstorage-sqlite/src/schema.rs @@ -0,0 +1,71 @@ +table! { + batch_uploads (batch_id, user_id) { + #[sql_name="batch"] + batch_id -> Bigint, + #[sql_name="userid"] + user_id -> Bigint, + #[sql_name="collection"] + collection_id -> Integer, + } +} + +table! { + batch_upload_items (batch_id, user_id, id) { + #[sql_name="batch"] + batch_id -> Bigint, + #[sql_name="userid"] + user_id -> Bigint, + id -> Varchar, + sortindex -> Nullable, + payload -> Nullable, + payload_size -> Nullable, + ttl_offset -> Nullable, + } +} + +table! { + bso (user_id, collection_id, id) { + #[sql_name="userid"] + user_id -> BigInt, + #[sql_name="collection"] + collection_id -> Integer, + id -> Varchar, + sortindex -> Nullable, + payload -> Mediumtext, + // not used, but legacy + payload_size -> Bigint, + modified -> Bigint, + #[sql_name="ttl"] + expiry -> Bigint, + } +} + +table! { + collections (id) { + id -> Integer, + name -> Varchar, + } +} + +table! 
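CollectionCache above is pre-seeded from STD_COLLS, so lookups for the well-known collections never touch SQLite, and ids allocated for new collections are cached after the fact. An illustrative round-trip; the fixed id for "bookmarks" is assumed to match the one asserted in the static_collection_id test further down, which also expects the first user-created collection id to be at least 100:

    // Illustrative only; CollectionCache is crate-private, so this would live
    // inside syncstorage-sqlite (e.g. in a unit test).
    fn cache_round_trip() -> DbResult<()> {
        let cache = CollectionCache::default();
        // pre-seeded from STD_COLLS: no database lookup needed
        assert_eq!(cache.get_id("bookmarks")?, Some(7));
        // a newly created collection is cached once the db has allocated its id
        cache.put(101, "col1".to_owned())?;
        assert_eq!(cache.get_name(101)?, Some("col1".to_owned()));
        Ok(())
    }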
{ + user_collections (user_id, collection_id) { + #[sql_name="userid"] + user_id -> BigInt, + #[sql_name="collection"] + collection_id -> Integer, + #[sql_name="last_modified"] + modified -> Bigint, + #[sql_name="count"] + count -> Integer, + #[sql_name="total_bytes"] + total_bytes -> BigInt, + } +} + +allow_tables_to_appear_in_same_query!( + batch_uploads, + batch_upload_items, + bso, + collections, + user_collections, +); diff --git a/syncstorage-sqlite/src/test.rs b/syncstorage-sqlite/src/test.rs new file mode 100644 index 0000000000..cbb85fbff7 --- /dev/null +++ b/syncstorage-sqlite/src/test.rs @@ -0,0 +1,80 @@ +use std::{collections::HashMap, sync::Arc}; + +use diesel::{ + // expression_methods::TextExpressionMethods, // See note below about `not_like` becoming swedish + ExpressionMethods, + QueryDsl, + RunQueryDsl, +}; +use syncserver_common::{BlockingThreadpool, Metrics}; +use syncserver_settings::Settings as SyncserverSettings; +use syncstorage_settings::Settings as SyncstorageSettings; +use url::Url; + +use crate::{models::SqliteDb, pool::SqliteDbPool, schema::collections, DbResult}; + +pub fn db(settings: &SyncstorageSettings) -> DbResult { + let _ = env_logger::try_init(); + // inherit SYNC_SYNCSTORAGE__DATABASE_URL from the env + + let pool = SqliteDbPool::new( + settings, + &Metrics::noop(), + Arc::new(BlockingThreadpool::new(1)), + )?; + pool.get_sync() +} + +#[test] +fn static_collection_id() -> DbResult<()> { + let settings = SyncserverSettings::test_settings().syncstorage; + if Url::parse(&settings.database_url).unwrap().scheme() != "sqlite" { + // Skip this test if we're not using mysql + return Ok(()); + } + let db = db(&settings)?; + + // ensure DB actually has predefined common collections + let cols: Vec<(i32, _)> = vec![ + (1, "clients"), + (2, "crypto"), + (3, "forms"), + (4, "history"), + (5, "keys"), + (6, "meta"), + (7, "bookmarks"), + (8, "prefs"), + (9, "tabs"), + (10, "passwords"), + (11, "addons"), + (12, "addresses"), + (13, "creditcards"), + ]; + // The integration tests can create collections that start + // with `xxx%`. We should not include those in our counts for local + // unit tests. + // Note: not sure why but as of 11/02/20, `.not_like("xxx%")` is apparently + // swedish-ci. Commenting that out for now. + let results: HashMap = collections::table + .select((collections::id, collections::name)) + .filter(collections::name.ne("")) + //.filter(collections::name.not_like("xxx%")) // from most integration tests + .filter(collections::name.ne("xxx_col2")) // from server::test + .filter(collections::name.ne("col2")) // from older intergration tests + .load(&db.inner.conn)? 
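The #[sql_name] attributes in the schema above let the Rust-side identifiers keep the names the rest of the codebase uses (expiry, modified, collection_id) while the SQLite tables keep the legacy column names (ttl, last_modified, collection). For instance, a filter written against bso::expiry is rendered against the ttl column. One way to inspect the generated SQL during development, illustrative only and assuming the schema module above is in scope:

    use diesel::debug_query;
    use diesel::prelude::*;
    use diesel::sqlite::Sqlite;

    // `crate::schema::bso` is the module path assumed for the table! above.
    use crate::schema::bso;

    fn print_expiry_filter_sql() {
        let query = bso::table.select(bso::id).filter(bso::expiry.gt(0i64));
        // The printed SQL refers to `ttl`, not `expiry`.
        println!("{}", debug_query::<Sqlite, _>(&query));
    }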
+ .into_iter() + .collect(); + assert_eq!(results.len(), cols.len(), "mismatched columns"); + for (id, name) in &cols { + assert_eq!(results.get(id).unwrap(), name); + } + + for (id, name) in &cols { + let result = db.get_collection_id(name)?; + assert_eq!(result, *id); + } + + let cid = db.get_or_create_collection_id("col1")?; + assert!(cid >= 100); + Ok(()) +} diff --git a/tokenserver-common/src/lib.rs b/tokenserver-common/src/lib.rs index c03ea32964..48146e6ff1 100644 --- a/tokenserver-common/src/lib.rs +++ b/tokenserver-common/src/lib.rs @@ -10,6 +10,8 @@ pub enum NodeType { MySql, #[serde(rename = "spanner")] Spanner, + #[serde(rename = "sqlite")] + Sqlite, } impl NodeType { diff --git a/tokenserver-db-common/Cargo.toml b/tokenserver-db-common/Cargo.toml new file mode 100644 index 0000000000..94eb3d1c42 --- /dev/null +++ b/tokenserver-db-common/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = "tokenserver-db-common" +version.workspace = true +authors.workspace = true +edition.workspace = true +license.workspace = true + +[dependencies] +backtrace.workspace = true +futures.workspace = true +http.workspace = true +serde.workspace = true +serde_derive.workspace = true +serde_json.workspace = true +slog-scope.workspace = true + +async-trait = "0.1.40" +diesel = { version = "1.4", features = ["mysql", "r2d2"] } +diesel_logger = "0.1.1" +diesel_migrations = { version = "1.4.0", features = ["mysql"] } +syncserver-common = { path = "../syncserver-common" } +syncserver-db-common = { path = "../syncserver-db-common", features = ["sql"] } +thiserror = "1.0.26" +tokenserver-common = { path = "../tokenserver-common" } +tokenserver-settings = { path = "../tokenserver-settings" } +tokio = { workspace = true, features = ["macros", "sync"] } + +[dev-dependencies] +env_logger.workspace = true + +syncserver-settings = { path = "../syncserver-settings" } diff --git a/tokenserver-db/src/error.rs b/tokenserver-db-common/src/error.rs similarity index 94% rename from tokenserver-db/src/error.rs rename to tokenserver-db-common/src/error.rs index bff809f5d7..b6153565d3 100644 --- a/tokenserver-db/src/error.rs +++ b/tokenserver-db-common/src/error.rs @@ -7,8 +7,8 @@ use syncserver_db_common::error::SqlError; use thiserror::Error; use tokenserver_common::TokenserverError; -pub(crate) type DbFuture<'a, T> = syncserver_db_common::DbFuture<'a, T, DbError>; -pub(crate) type DbResult = Result; +pub type DbFuture<'a, T> = syncserver_db_common::DbFuture<'a, T, DbError>; +pub type DbResult = Result; /// An error type that represents any database-related errors that may occur while processing a /// tokenserver request. 
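With DbResult and DbFuture now exported from the new tokenserver-db-common crate (and DbError::internal made public just below), the per-backend crates and tokenserver-db itself can share a single error type. A minimal consumer, for illustration only; the helper is hypothetical:

    use tokenserver_db_common::error::{DbError, DbResult};

    // Hypothetical helper showing the shared error type in use.
    fn parse_node_id(raw: &str) -> DbResult<i64> {
        raw.parse::<i64>()
            .map_err(|e| DbError::internal(format!("invalid node id: {}", e)))
    }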
@@ -20,7 +20,7 @@ pub struct DbError { } impl DbError { - pub(crate) fn internal(msg: String) -> Self { + pub fn internal(msg: String) -> Self { DbErrorKind::Internal(msg).into() } } diff --git a/tokenserver-db-common/src/lib.rs b/tokenserver-db-common/src/lib.rs new file mode 100644 index 0000000000..a91e735174 --- /dev/null +++ b/tokenserver-db-common/src/lib.rs @@ -0,0 +1 @@ +pub mod error; diff --git a/tokenserver-db-mysql/Cargo.toml b/tokenserver-db-mysql/Cargo.toml new file mode 100644 index 0000000000..e7db08636f --- /dev/null +++ b/tokenserver-db-mysql/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "tokenserver-db-mysql" +version.workspace = true +authors.workspace = true +edition.workspace = true +license.workspace = true + +[dependencies] +backtrace.workspace = true +futures.workspace = true +http.workspace = true +serde.workspace = true +serde_derive.workspace = true +serde_json.workspace = true +slog-scope.workspace = true + +async-trait = "0.1.40" +diesel = { version = "1.4", features = ["mysql", "r2d2"] } +diesel_logger = "0.1.1" +diesel_migrations = { version = "1.4.0", features = ["mysql"] } +syncserver-common = { path = "../syncserver-common" } +syncserver-db-common = { path = "../syncserver-db-common", features = ["sql"] } +thiserror = "1.0.26" +tokenserver-common = { path = "../tokenserver-common" } +tokenserver-db-common = { path = "../tokenserver-db-common" } +tokenserver-settings = { path = "../tokenserver-settings" } +tokio = { workspace = true, features = ["macros", "sync"] } + +[dev-dependencies] +env_logger.workspace = true + +syncserver-settings = { path = "../syncserver-settings" } diff --git a/tokenserver-db/migrations/2021-07-16-001122_init/down.sql b/tokenserver-db-mysql/migrations/2021-07-16-001122_init/down.sql similarity index 100% rename from tokenserver-db/migrations/2021-07-16-001122_init/down.sql rename to tokenserver-db-mysql/migrations/2021-07-16-001122_init/down.sql diff --git a/tokenserver-db/migrations/2021-07-16-001122_init/up.sql b/tokenserver-db-mysql/migrations/2021-07-16-001122_init/up.sql similarity index 100% rename from tokenserver-db/migrations/2021-07-16-001122_init/up.sql rename to tokenserver-db-mysql/migrations/2021-07-16-001122_init/up.sql diff --git a/tokenserver-db/migrations/2021-08-03-234845_populate_services/down.sql b/tokenserver-db-mysql/migrations/2021-08-03-234845_populate_services/down.sql similarity index 100% rename from tokenserver-db/migrations/2021-08-03-234845_populate_services/down.sql rename to tokenserver-db-mysql/migrations/2021-08-03-234845_populate_services/down.sql diff --git a/tokenserver-db/migrations/2021-08-03-234845_populate_services/up.sql b/tokenserver-db-mysql/migrations/2021-08-03-234845_populate_services/up.sql similarity index 100% rename from tokenserver-db/migrations/2021-08-03-234845_populate_services/up.sql rename to tokenserver-db-mysql/migrations/2021-08-03-234845_populate_services/up.sql diff --git a/tokenserver-db/migrations/2021-09-30-142643_remove_foreign_key_constraints/down.sql b/tokenserver-db-mysql/migrations/2021-09-30-142643_remove_foreign_key_constraints/down.sql similarity index 100% rename from tokenserver-db/migrations/2021-09-30-142643_remove_foreign_key_constraints/down.sql rename to tokenserver-db-mysql/migrations/2021-09-30-142643_remove_foreign_key_constraints/down.sql diff --git a/tokenserver-db/migrations/2021-09-30-142643_remove_foreign_key_constraints/up.sql b/tokenserver-db-mysql/migrations/2021-09-30-142643_remove_foreign_key_constraints/up.sql similarity index 100% 
rename from tokenserver-db/migrations/2021-09-30-142643_remove_foreign_key_constraints/up.sql rename to tokenserver-db-mysql/migrations/2021-09-30-142643_remove_foreign_key_constraints/up.sql diff --git a/tokenserver-db/migrations/2021-09-30-142654_remove_node_defaults/down.sql b/tokenserver-db-mysql/migrations/2021-09-30-142654_remove_node_defaults/down.sql similarity index 100% rename from tokenserver-db/migrations/2021-09-30-142654_remove_node_defaults/down.sql rename to tokenserver-db-mysql/migrations/2021-09-30-142654_remove_node_defaults/down.sql diff --git a/tokenserver-db/migrations/2021-09-30-142654_remove_node_defaults/up.sql b/tokenserver-db-mysql/migrations/2021-09-30-142654_remove_node_defaults/up.sql similarity index 100% rename from tokenserver-db/migrations/2021-09-30-142654_remove_node_defaults/up.sql rename to tokenserver-db-mysql/migrations/2021-09-30-142654_remove_node_defaults/up.sql diff --git a/tokenserver-db/migrations/2021-09-30-142746_add_indexes/down.sql b/tokenserver-db-mysql/migrations/2021-09-30-142746_add_indexes/down.sql similarity index 100% rename from tokenserver-db/migrations/2021-09-30-142746_add_indexes/down.sql rename to tokenserver-db-mysql/migrations/2021-09-30-142746_add_indexes/down.sql diff --git a/tokenserver-db/migrations/2021-09-30-142746_add_indexes/up.sql b/tokenserver-db-mysql/migrations/2021-09-30-142746_add_indexes/up.sql similarity index 100% rename from tokenserver-db/migrations/2021-09-30-142746_add_indexes/up.sql rename to tokenserver-db-mysql/migrations/2021-09-30-142746_add_indexes/up.sql diff --git a/tokenserver-db/migrations/2021-09-30-144043_remove_nodes_service_key/down.sql b/tokenserver-db-mysql/migrations/2021-09-30-144043_remove_nodes_service_key/down.sql similarity index 100% rename from tokenserver-db/migrations/2021-09-30-144043_remove_nodes_service_key/down.sql rename to tokenserver-db-mysql/migrations/2021-09-30-144043_remove_nodes_service_key/down.sql diff --git a/tokenserver-db/migrations/2021-09-30-144043_remove_nodes_service_key/up.sql b/tokenserver-db-mysql/migrations/2021-09-30-144043_remove_nodes_service_key/up.sql similarity index 100% rename from tokenserver-db/migrations/2021-09-30-144043_remove_nodes_service_key/up.sql rename to tokenserver-db-mysql/migrations/2021-09-30-144043_remove_nodes_service_key/up.sql diff --git a/tokenserver-db/migrations/2021-09-30-144225_remove_users_nodeid_key/down.sql b/tokenserver-db-mysql/migrations/2021-09-30-144225_remove_users_nodeid_key/down.sql similarity index 100% rename from tokenserver-db/migrations/2021-09-30-144225_remove_users_nodeid_key/down.sql rename to tokenserver-db-mysql/migrations/2021-09-30-144225_remove_users_nodeid_key/down.sql diff --git a/tokenserver-db/migrations/2021-09-30-144225_remove_users_nodeid_key/up.sql b/tokenserver-db-mysql/migrations/2021-09-30-144225_remove_users_nodeid_key/up.sql similarity index 100% rename from tokenserver-db/migrations/2021-09-30-144225_remove_users_nodeid_key/up.sql rename to tokenserver-db-mysql/migrations/2021-09-30-144225_remove_users_nodeid_key/up.sql diff --git a/tokenserver-db/migrations/2021-12-22-160451_remove_services/down.sql b/tokenserver-db-mysql/migrations/2021-12-22-160451_remove_services/down.sql similarity index 100% rename from tokenserver-db/migrations/2021-12-22-160451_remove_services/down.sql rename to tokenserver-db-mysql/migrations/2021-12-22-160451_remove_services/down.sql diff --git a/tokenserver-db/migrations/2021-12-22-160451_remove_services/up.sql 
b/tokenserver-db-mysql/migrations/2021-12-22-160451_remove_services/up.sql similarity index 100% rename from tokenserver-db/migrations/2021-12-22-160451_remove_services/up.sql rename to tokenserver-db-mysql/migrations/2021-12-22-160451_remove_services/up.sql diff --git a/tokenserver-db-mysql/src/lib.rs b/tokenserver-db-mysql/src/lib.rs new file mode 100644 index 0000000000..2e19539531 --- /dev/null +++ b/tokenserver-db-mysql/src/lib.rs @@ -0,0 +1,6 @@ +extern crate diesel; +#[macro_use] +extern crate diesel_migrations; + +pub mod models; +pub mod pool; diff --git a/tokenserver-db-mysql/src/models.rs b/tokenserver-db-mysql/src/models.rs new file mode 100644 index 0000000000..091c68de1a --- /dev/null +++ b/tokenserver-db-mysql/src/models.rs @@ -0,0 +1,129 @@ +pub const LAST_INSERT_ID_QUERY: &str = "SELECT LAST_INSERT_ID() AS id"; + +pub const GET_NODE_ID_SYNC_QUERY: &str = r#" +SELECT id +FROM nodes +WHERE service = ? +AND node = ?"#; + +pub const REPLACE_USERS_SYNC_QUERY: &str = r#" +UPDATE users +SET replaced_at = ? +WHERE service = ? +AND email = ? +AND replaced_at IS NULL +AND created_at < ?"#; + +pub const REPLACE_USER_SYNC_QUERY: &str = r#" +UPDATE users +SET replaced_at = ? +WHERE service = ? +AND uid = ?"#; + +// The `where` clause on this statement is designed as an extra layer of +// protection, to ensure that concurrent updates don't accidentally move +// timestamp fields backwards in time. The handling of `keys_changed_at` +// is additionally weird because we want to treat the default `NULL` value +// as zero. +pub const PUT_USER_SYNC_QUERY: &str = r#" +UPDATE users +SET generation = ?, +keys_changed_at = ? +WHERE service = ? +AND email = ? +AND generation <= ? +AND COALESCE(keys_changed_at, 0) <= COALESCE(?, keys_changed_at, 0) +AND replaced_at IS NULL"#; + +pub const POST_USER_SYNC_QUERY: &str = r#" +INSERT INTO users (service, email, generation, client_state, created_at, nodeid, keys_changed_at, replaced_at) +VALUES (?, ?, ?, ?, ?, ?, ?, NULL);"#; + +pub const CHECK_SYNC_QUERY: &str = "SHOW STATUS LIKE \"Uptime\""; + +pub const GET_BEST_NODE_QUERY: &str = r#" +SELECT id, node +FROM nodes +WHERE service = ? +AND available > 0 +AND capacity > current_load +AND downed = 0 +AND backoff = 0 +ORDER BY LOG(current_load) / LOG(capacity) +LIMIT 1"#; + +pub const GET_BEST_NODE_RELEASE_CAPACITY_QUERY: &str = r#" +UPDATE nodes +SET available = LEAST(capacity * ?, capacity - current_load) +WHERE service = ? +AND available <= 0 +AND capacity > current_load +AND downed = 0"#; + +pub const GET_BEST_NODE_SPANNER_QUERY: &str = r#" +SELECT id, node +FROM nodes +WHERE id = ? +LIMIT 1"#; + +pub const ADD_USER_TO_NODE_SYNC_QUERY: &str = r#" +UPDATE nodes +SET current_load = current_load + 1, +available = GREATEST(available - 1, 0) +WHERE service = ? +AND node = ?"#; + +pub const ADD_USER_TO_NODE_SYNC_SPANNER_QUERY: &str = r#" +UPDATE nodes +SET current_load = current_load + 1 +WHERE service = ? +AND node = ?"#; + +pub const GET_USERS_SYNC_QUERY: &str = r#" +SELECT uid, nodes.node, generation, keys_changed_at, client_state, created_at, replaced_at +FROM users +LEFT OUTER JOIN nodes ON users.nodeid = nodes.id +WHERE email = ? +AND users.service = ? +ORDER BY created_at DESC, uid DESC +LIMIT 20"#; + +pub const GET_SERVICE_ID_SYNC_QUERY: &str = r#" +SELECT id +FROM services +WHERE service = ?"#; + +pub const SET_USER_CREATED_AT_SYNC_QUERY: &str = r#" +UPDATE users +SET created_at = ? +WHERE uid = ?"#; + +pub const SET_USER_REPLACED_AT_SYNC_QUERY: &str = r#" +UPDATE users +SET replaced_at = ? 
+WHERE uid = ?"#; + +pub const GET_USER_SYNC_QUERY: &str = r#" +SELECT service, email, generation, client_state, replaced_at, nodeid, keys_changed_at +FROM users +WHERE uid = ?"#; + +pub const POST_NODE_SYNC_QUERY: &str = r#" +INSERT INTO nodes (service, node, available, current_load, capacity, downed, backoff) +VALUES (?, ?, ?, ?, ?, ?, ?)"#; + +pub const GET_NODE_SYNC_QUERY: &str = r#" +SELECT * +FROM nodes +WHERE id = ?"#; + +pub const UNASSIGNED_NODE_SYNC_QUERY: &str = r#" +UPDATE users +SET replaced_at = ? +WHERE nodeid = ?"#; + +pub const REMOVE_NODE_SYNC_QUERY: &str = "DELETE FROM nodes WHERE id = ?"; + +pub const POST_SERVICE_INSERT_SERVICE_QUERY: &str = r#" +INSERT INTO services (service, pattern) +VALUES (?, ?)"#; diff --git a/tokenserver-db-mysql/src/pool.rs b/tokenserver-db-mysql/src/pool.rs new file mode 100644 index 0000000000..685de5d423 --- /dev/null +++ b/tokenserver-db-mysql/src/pool.rs @@ -0,0 +1,20 @@ +use diesel::{mysql::MysqlConnection, Connection}; +use diesel_logger::LoggingConnection; +use tokenserver_db_common::error::DbResult; + +embed_migrations!(); + +/// Run the diesel embedded migrations +/// +/// Mysql DDL statements implicitly commit which could disrupt MysqlPool's +/// begin_test_transaction during tests. So this runs on its own separate conn. +pub fn run_embedded_migrations(database_url: &str) -> DbResult<()> { + let conn = MysqlConnection::establish(database_url)?; + #[cfg(debug_assertions)] + // XXX: this doesn't show the DDL statements + // https://github.com/shssoichiro/diesel-logger/issues/1 + embedded_migrations::run(&LoggingConnection::new(conn))?; + #[cfg(not(debug_assertions))] + embedded_migrations::run(&conn)?; + Ok(()) +} diff --git a/tokenserver-db-sqlite/Cargo.toml b/tokenserver-db-sqlite/Cargo.toml new file mode 100644 index 0000000000..c157eebed2 --- /dev/null +++ b/tokenserver-db-sqlite/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "tokenserver-db-sqlite" +version.workspace = true +authors.workspace = true +edition.workspace = true +license.workspace = true + +[dependencies] +backtrace.workspace = true +futures.workspace = true +http.workspace = true +serde.workspace = true +serde_derive.workspace = true +serde_json.workspace = true +slog-scope.workspace = true + +async-trait = "0.1.40" +diesel = { version = "1.4", features = ["sqlite", "r2d2"] } +diesel_logger = "0.1.1" +diesel_migrations = { version = "1.4.0", features = ["sqlite"] } +syncserver-common = { path = "../syncserver-common" } +syncserver-db-common = { path = "../syncserver-db-common", features = ["sql"] } +thiserror = "1.0.26" +tokenserver-common = { path = "../tokenserver-common" } +tokenserver-db-common = { path = "../tokenserver-db-common"} +tokenserver-settings = { path = "../tokenserver-settings" } +tokio = { workspace = true, features = ["macros", "sync"] } + +[dev-dependencies] +env_logger.workspace = true + +syncserver-settings = { path = "../syncserver-settings" } diff --git a/tokenserver-db-sqlite/migrations/2024-01-28-211312_init/down.sql b/tokenserver-db-sqlite/migrations/2024-01-28-211312_init/down.sql new file mode 100644 index 0000000000..da49bf74a9 --- /dev/null +++ b/tokenserver-db-sqlite/migrations/2024-01-28-211312_init/down.sql @@ -0,0 +1,3 @@ +DROP TABLE IF EXISTS `users`; +DROP TABLE IF EXISTS `nodes`; +DROP TABLE IF EXISTS `services`; diff --git a/tokenserver-db-sqlite/migrations/2024-01-28-211312_init/up.sql b/tokenserver-db-sqlite/migrations/2024-01-28-211312_init/up.sql new file mode 100644 index 0000000000..78ee3929ce --- /dev/null +++ 
b/tokenserver-db-sqlite/migrations/2024-01-28-211312_init/up.sql @@ -0,0 +1,34 @@ +CREATE TABLE IF NOT EXISTS `services` ( + `id` INTEGER PRIMARY KEY, + `service` varchar(30) DEFAULT NULL UNIQUE, + `pattern` varchar(128) DEFAULT NULL +); + +CREATE TABLE IF NOT EXISTS `nodes` ( + `id` INTEGER PRIMARY KEY, + `service` int NOT NULL, + `node` varchar(64) NOT NULL, + `available` int NOT NULL, + `current_load` int NOT NULL, + `capacity` int NOT NULL, + `downed` int NOT NULL, + `backoff` int NOT NULL +); + +CREATE UNIQUE INDEX `unique_idx` ON `nodes` (`service`, `node`); + +CREATE TABLE IF NOT EXISTS `users` ( + `uid` INTEGER PRIMARY KEY, + `service` int NOT NULL, + `email` varchar(255) NOT NULL, + `generation` bigint NOT NULL, + `client_state` varchar(32) NOT NULL, + `created_at` bigint NOT NULL, + `replaced_at` bigint DEFAULT NULL, + `nodeid` bigint NOT NULL, + `keys_changed_at` bigint DEFAULT NULL +); + +CREATE INDEX `lookup_idx` ON `users` (`email`, `service`, `created_at`); +CREATE INDEX `replaced_at_idx` ON `users` (`service`, `replaced_at`); +CREATE INDEX `node_idx` ON `users` (`nodeid`); diff --git a/tokenserver-db-sqlite/src/lib.rs b/tokenserver-db-sqlite/src/lib.rs new file mode 100644 index 0000000000..2e19539531 --- /dev/null +++ b/tokenserver-db-sqlite/src/lib.rs @@ -0,0 +1,6 @@ +extern crate diesel; +#[macro_use] +extern crate diesel_migrations; + +pub mod models; +pub mod pool; diff --git a/tokenserver-db-sqlite/src/models.rs b/tokenserver-db-sqlite/src/models.rs new file mode 100644 index 0000000000..6c96173721 --- /dev/null +++ b/tokenserver-db-sqlite/src/models.rs @@ -0,0 +1,130 @@ +pub const LAST_INSERT_ID_QUERY: &str = "SELECT LAST_INSERT_ROWID() AS id"; + +pub const GET_NODE_ID_SYNC_QUERY: &str = r#" +SELECT rowid as id +FROM nodes +WHERE service = ? +AND node = ?"#; + +pub const REPLACE_USERS_SYNC_QUERY: &str = r#" +UPDATE users +SET replaced_at = ? +WHERE service = ? +AND email = ? +AND replaced_at IS NULL +AND created_at < ?"#; + +pub const REPLACE_USER_SYNC_QUERY: &str = r#" +UPDATE users +SET replaced_at = ? +WHERE service = ? +AND uid = ?"#; + +// The `where` clause on this statement is designed as an extra layer of +// protection, to ensure that concurrent updates don't accidentally move +// timestamp fields backwards in time. The handling of `keys_changed_at` +// is additionally weird because we want to treat the default `NULL` value +// as zero. +pub const PUT_USER_SYNC_QUERY: &str = r#" +UPDATE users +SET generation = ?, +keys_changed_at = ? +WHERE service = ? +AND email = ? +AND generation <= ? +AND COALESCE(keys_changed_at, 0) <= COALESCE(?, keys_changed_at, 0) +AND replaced_at IS NULL"#; + +pub const POST_USER_SYNC_QUERY: &str = r#" +INSERT INTO users (service, email, generation, client_state, created_at, nodeid, keys_changed_at, replaced_at) +VALUES (?, ?, ?, ?, ?, ?, ?, NULL);"#; + +pub const CHECK_SYNC_QUERY: &str = "SHOW STATUS LIKE \"Uptime\""; + +pub const GET_BEST_NODE_QUERY: &str = r#" +SELECT id, node +FROM nodes +WHERE service = ? +AND available > 0 +AND capacity > current_load +AND downed = 0 +AND backoff = 0 +ORDER BY LOG(current_load) / LOG(capacity) +LIMIT 1"#; + +pub const GET_BEST_NODE_RELEASE_CAPACITY_QUERY: &str = r#" +UPDATE nodes +SET available = MIN(capacity * ?, capacity - current_load) +WHERE service = ? +AND available <= 0 +AND capacity > current_load +AND downed = 0"#; + +// FIXME: MySQL specific +pub const GET_BEST_NODE_SPANNER_QUERY: &str = r#" +SELECT id, node +FROM nodes +WHERE id = ? 
+LIMIT 1"#; + +pub const ADD_USER_TO_NODE_SYNC_QUERY: &str = r#" +UPDATE nodes +SET current_load = current_load + 1, +available = MAX(available - 1, 0) +WHERE service = ? +AND node = ?"#; + +pub const ADD_USER_TO_NODE_SYNC_SPANNER_QUERY: &str = r#" +UPDATE nodes +SET current_load = current_load + 1 +WHERE service = ? +AND node = ?"#; + +pub const GET_USERS_SYNC_QUERY: &str = r#" +SELECT uid, nodes.node, generation, keys_changed_at, client_state, created_at, replaced_at +FROM users +LEFT OUTER JOIN nodes ON users.nodeid = nodes.id +WHERE email = ? +AND users.service = ? +ORDER BY created_at DESC, uid DESC +LIMIT 20"#; + +pub const GET_SERVICE_ID_SYNC_QUERY: &str = r#" +SELECT id +FROM services +WHERE service = ?"#; + +pub const SET_USER_CREATED_AT_SYNC_QUERY: &str = r#" +UPDATE users +SET created_at = ? +WHERE uid = ?"#; + +pub const SET_USER_REPLACED_AT_SYNC_QUERY: &str = r#" +UPDATE users +SET replaced_at = ? +WHERE uid = ?"#; + +pub const GET_USER_SYNC_QUERY: &str = r#" +SELECT service, email, generation, client_state, replaced_at, nodeid, keys_changed_at +FROM users +WHERE uid = ?"#; + +pub const POST_NODE_SYNC_QUERY: &str = r#" +INSERT INTO nodes (service, node, available, current_load, capacity, downed, backoff) +VALUES (?, ?, ?, ?, ?, ?, ?)"#; + +pub const GET_NODE_SYNC_QUERY: &str = r#" +SELECT * +FROM nodes +WHERE id = ?"#; + +pub const UNASSIGNED_NODE_SYNC_QUERY: &str = r#" +UPDATE users +SET replaced_at = ? +WHERE nodeid = ?"#; + +pub const REMOVE_NODE_SYNC_QUERY: &str = "DELETE FROM nodes WHERE id = ?"; + +pub const POST_SERVICE_INSERT_SERVICE_QUERY: &str = r#" +INSERT INTO services (service, pattern) +VALUES (?, ?)"#; diff --git a/tokenserver-db-sqlite/src/pool.rs b/tokenserver-db-sqlite/src/pool.rs new file mode 100644 index 0000000000..678e9ea427 --- /dev/null +++ b/tokenserver-db-sqlite/src/pool.rs @@ -0,0 +1,23 @@ +use diesel::{sqlite::SqliteConnection, Connection}; +use diesel_logger::LoggingConnection; +use tokenserver_db_common::error::DbResult; + +embed_migrations!(); + +/// Run the diesel embedded migrations +pub fn run_embedded_migrations(database_url: &str) -> DbResult<()> { + let path = database_url + .strip_prefix("sqlite://") + .unwrap_or(database_url); + + let conn = SqliteConnection::establish(path)?; + + #[cfg(debug_assertions)] + // XXX: this doesn't show the DDL statements + // https://github.com/shssoichiro/diesel-logger/issues/1 + embedded_migrations::run(&LoggingConnection::new(conn))?; + #[cfg(not(debug_assertions))] + embedded_migrations::run(&conn)?; + + Ok(()) +} diff --git a/tokenserver-db/Cargo.toml b/tokenserver-db/Cargo.toml index bd881a3ab8..c6b75227b9 100644 --- a/tokenserver-db/Cargo.toml +++ b/tokenserver-db/Cargo.toml @@ -21,12 +21,20 @@ diesel = { workspace = true, features = ["mysql", "r2d2"] } diesel_logger = { workspace = true } diesel_migrations = { workspace = true, features = ["mysql"] } syncserver-common = { path = "../syncserver-common" } -syncserver-db-common = { path = "../syncserver-db-common" } +syncserver-db-common = { path = "../syncserver-db-common", features = ["mysql", "sqlite"] } tokenserver-common = { path = "../tokenserver-common" } tokenserver-settings = { path = "../tokenserver-settings" } +tokenserver-db-common = { path = "../tokenserver-db-common" } +tokenserver-db-mysql = { path = "../tokenserver-db-mysql", optional = true} +tokenserver-db-sqlite = { path = "../tokenserver-db-sqlite", optional = true} tokio = { workspace = true, features = ["macros", "sync"] } [dev-dependencies] env_logger.workspace = true 
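The two models.rs files above keep the statement shapes identical and only swap functions that MySQL and SQLite spell differently; trimmed to the relevant clause for illustration:

    // MySQL backend (tokenserver-db-mysql):
    const MYSQL_RELEASE_CAPACITY: &str =
        "UPDATE nodes SET available = LEAST(capacity * ?, capacity - current_load) ...";
    // SQLite backend (tokenserver-db-sqlite): SQLite's two-argument MIN()/MAX()
    // scalar functions play the role of MySQL's LEAST()/GREATEST(), and
    // LAST_INSERT_ROWID() replaces LAST_INSERT_ID().
    const SQLITE_RELEASE_CAPACITY: &str =
        "UPDATE nodes SET available = MIN(capacity * ?, capacity - current_load) ...";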
syncserver-settings = { path = "../syncserver-settings" } + +[features] +default = [] +mysql = ["tokenserver-db-mysql"] +sqlite = ["tokenserver-db-sqlite"] diff --git a/tokenserver-db/src/lib.rs b/tokenserver-db/src/lib.rs index 34447c3d59..4f5ae825b6 100644 --- a/tokenserver-db/src/lib.rs +++ b/tokenserver-db/src/lib.rs @@ -1,10 +1,14 @@ extern crate diesel; -#[macro_use] extern crate diesel_migrations; #[macro_use] extern crate slog_scope; -mod error; +use diesel::r2d2::{ConnectionManager, PooledConnection}; +#[cfg(feature = "mysql")] +use diesel::MysqlConnection; +#[cfg(feature = "sqlite")] +use diesel::SqliteConnection; + pub mod mock; mod models; pub mod params; @@ -13,3 +17,15 @@ pub mod results; pub use models::{Db, TokenserverDb}; pub use pool::{DbPool, TokenserverPool}; + +#[cfg(feature = "mysql")] +type Conn = MysqlConnection; +#[cfg(feature = "sqlite")] +type Conn = SqliteConnection; +type PooledConn = PooledConnection>; + +#[cfg(all(feature = "mysql", feature = "sqlite"))] +compile_error!("only one of the \"mysql\" and \"sqlite\" features can be enabled at a time"); + +#[cfg(not(any(feature = "mysql", feature = "sqlite")))] +compile_error!("exactly one of the \"mysql\", \"spanner\" and \"sqlite\" features must be enabled"); diff --git a/tokenserver-db/src/mock.rs b/tokenserver-db/src/mock.rs index 29041091d7..1e2b783f3e 100644 --- a/tokenserver-db/src/mock.rs +++ b/tokenserver-db/src/mock.rs @@ -3,8 +3,8 @@ use async_trait::async_trait; use futures::future; use syncserver_db_common::{GetPoolState, PoolState}; +use tokenserver_db_common::error::{DbError, DbFuture}; -use super::error::{DbError, DbFuture}; use super::models::Db; use super::params; use super::pool::DbPool; diff --git a/tokenserver-db/src/models.rs b/tokenserver-db/src/models.rs index 2e6ba32c19..177e0ebf1f 100644 --- a/tokenserver-db/src/models.rs +++ b/tokenserver-db/src/models.rs @@ -1,6 +1,9 @@ +use std::{ + sync::Arc, + time::{Duration, SystemTime, UNIX_EPOCH}, +}; + use diesel::{ - mysql::MysqlConnection, - r2d2::{ConnectionManager, PooledConnection}, sql_types::{Bigint, Float, Integer, Nullable, Text}, OptionalExtension, RunQueryDsl, }; @@ -9,23 +12,18 @@ use diesel_logger::LoggingConnection; use http::StatusCode; use syncserver_common::{BlockingThreadpool, Metrics}; use syncserver_db_common::{sync_db_method, DbFuture}; +use tokenserver_db_common::error::{DbError, DbResult}; +#[cfg(feature = "mysql")] +use tokenserver_db_mysql::models::*; +#[cfg(feature = "sqlite")] +use tokenserver_db_sqlite::models::*; -use std::{ - sync::Arc, - time::{Duration, SystemTime, UNIX_EPOCH}, -}; - -use super::{ - error::{DbError, DbResult}, - params, results, -}; +use super::{params, results, PooledConn}; /// The maximum possible generation number. Used as a tombstone to mark users that have been /// "retired" from the db. const MAX_GENERATION: i64 = i64::MAX; -type Conn = PooledConnection>; - #[derive(Clone)] pub struct TokenserverDb { /// Synchronous Diesel calls are executed on a blocking threadpool to satisfy @@ -50,21 +48,20 @@ unsafe impl Send for TokenserverDb {} struct DbInner { #[cfg(not(test))] - pub(super) conn: Conn, + pub(super) conn: PooledConn, #[cfg(test)] - pub(super) conn: LoggingConnection, // display SQL when RUST_LOG="diesel_logger=trace" + pub(super) conn: LoggingConnection, // display SQL when RUST_LOG="diesel_logger=trace" } impl TokenserverDb { // Note that this only works because an instance of `TokenserverDb` has *exclusive access* to - // a connection from the r2d2 pool for its lifetime. 
`LAST_INSERT_ID()` returns the ID of the - // most recently-inserted record *for a given connection*. If connections were shared across - // requests, using this function would introduce a race condition, as we could potentially - // get IDs from records created during other requests. - const LAST_INSERT_ID_QUERY: &'static str = "SELECT LAST_INSERT_ID() AS id"; + // a connection from the r2d2 pool for its lifetime. `LAST_INSERT_ID_QUERY` + // returns the ID of the most recently-inserted record *for a given connection*. + // If connections were shared across requests, using this function would introduce a race condition, + // as we could potentially get IDs from records created during other requests. pub fn new( - conn: Conn, + conn: PooledConn, metrics: &Metrics, service_id: Option, spanner_node_id: Option, @@ -91,20 +88,13 @@ impl TokenserverDb { } fn get_node_id_sync(&self, params: params::GetNodeId) -> DbResult { - const QUERY: &str = r#" - SELECT id - FROM nodes - WHERE service = ? - AND node = ? - "#; - if let Some(id) = self.spanner_node_id { Ok(results::GetNodeId { id: id as i64 }) } else { let mut metrics = self.metrics.clone(); metrics.start_timer("storage.get_node_id", None); - diesel::sql_query(QUERY) + diesel::sql_query(GET_NODE_ID_SYNC_QUERY) .bind::(params.service_id) .bind::(¶ms.node) .get_result(&self.inner.conn) @@ -114,19 +104,10 @@ impl TokenserverDb { /// Mark users matching the given email and service ID as replaced. fn replace_users_sync(&self, params: params::ReplaceUsers) -> DbResult { - const QUERY: &str = r#" - UPDATE users - SET replaced_at = ? - WHERE service = ? - AND email = ? - AND replaced_at IS NULL - AND created_at < ? - "#; - let mut metrics = self.metrics.clone(); metrics.start_timer("storage.replace_users", None); - diesel::sql_query(QUERY) + diesel::sql_query(REPLACE_USERS_SYNC_QUERY) .bind::(params.replaced_at) .bind::(¶ms.service_id) .bind::(¶ms.email) @@ -138,14 +119,7 @@ impl TokenserverDb { /// Mark the user with the given uid and service ID as being replaced. fn replace_user_sync(&self, params: params::ReplaceUser) -> DbResult { - const QUERY: &str = r#" - UPDATE users - SET replaced_at = ? - WHERE service = ? - AND uid = ? - "#; - - diesel::sql_query(QUERY) + diesel::sql_query(REPLACE_USER_SYNC_QUERY) .bind::(params.replaced_at) .bind::(params.service_id) .bind::(params.uid) @@ -157,26 +131,10 @@ impl TokenserverDb { /// Update the user with the given email and service ID with the given `generation` and /// `keys_changed_at`. fn put_user_sync(&self, params: params::PutUser) -> DbResult { - // The `where` clause on this statement is designed as an extra layer of - // protection, to ensure that concurrent updates don't accidentally move - // timestamp fields backwards in time. The handling of `keys_changed_at` - // is additionally weird because we want to treat the default `NULL` value - // as zero. - const QUERY: &str = r#" - UPDATE users - SET generation = ?, - keys_changed_at = ? - WHERE service = ? - AND email = ? - AND generation <= ? - AND COALESCE(keys_changed_at, 0) <= COALESCE(?, keys_changed_at, 0) - AND replaced_at IS NULL - "#; - let mut metrics = self.metrics.clone(); metrics.start_timer("storage.put_user", None); - diesel::sql_query(QUERY) + diesel::sql_query(PUT_USER_SYNC_QUERY) .bind::(params.generation) .bind::, _>(params.keys_changed_at) .bind::(¶ms.service_id) @@ -190,15 +148,10 @@ impl TokenserverDb { /// Create a new user. 
fn post_user_sync(&self, user: params::PostUser) -> DbResult { - const QUERY: &str = r#" - INSERT INTO users (service, email, generation, client_state, created_at, nodeid, keys_changed_at, replaced_at) - VALUES (?, ?, ?, ?, ?, ?, ?, NULL); - "#; - let mut metrics = self.metrics.clone(); metrics.start_timer("storage.post_user", None); - diesel::sql_query(QUERY) + diesel::sql_query(POST_USER_SYNC_QUERY) .bind::(user.service_id) .bind::(&user.email) .bind::(user.generation) @@ -208,52 +161,26 @@ impl TokenserverDb { .bind::, _>(user.keys_changed_at) .execute(&self.inner.conn)?; - diesel::sql_query(Self::LAST_INSERT_ID_QUERY) - .bind::(&user.email) + diesel::sql_query(LAST_INSERT_ID_QUERY) .get_result::(&self.inner.conn) .map_err(Into::into) } fn check_sync(&self) -> DbResult { // has the database been up for more than 0 seconds? - let result = diesel::sql_query("SHOW STATUS LIKE \"Uptime\"").execute(&self.inner.conn)?; + let result = diesel::sql_query(CHECK_SYNC_QUERY).execute(&self.inner.conn)?; Ok(result as u64 > 0) } /// Gets the least-loaded node that has available slots. fn get_best_node_sync(&self, params: params::GetBestNode) -> DbResult { const DEFAULT_CAPACITY_RELEASE_RATE: f32 = 0.1; - const GET_BEST_NODE_QUERY: &str = r#" - SELECT id, node - FROM nodes - WHERE service = ? - AND available > 0 - AND capacity > current_load - AND downed = 0 - AND backoff = 0 - ORDER BY LOG(current_load) / LOG(capacity) - LIMIT 1 - "#; - const RELEASE_CAPACITY_QUERY: &str = r#" - UPDATE nodes - SET available = LEAST(capacity * ?, capacity - current_load) - WHERE service = ? - AND available <= 0 - AND capacity > current_load - AND downed = 0 - "#; - const SPANNER_QUERY: &str = r#" - SELECT id, node - FROM nodes - WHERE id = ? - LIMIT 1 - "#; let mut metrics = self.metrics.clone(); metrics.start_timer("storage.get_best_node", None); if let Some(spanner_node_id) = self.spanner_node_id { - diesel::sql_query(SPANNER_QUERY) + diesel::sql_query(GET_BEST_NODE_SPANNER_QUERY) .bind::(spanner_node_id) .get_result::(&self.inner.conn) .map_err(|e| { @@ -277,7 +204,7 @@ impl TokenserverDb { // There were no available nodes. Try to release additional capacity from any nodes // that are not fully occupied. - let affected_rows = diesel::sql_query(RELEASE_CAPACITY_QUERY) + let affected_rows = diesel::sql_query(GET_BEST_NODE_RELEASE_CAPACITY_QUERY) .bind::( params .capacity_release_rate @@ -305,24 +232,10 @@ impl TokenserverDb { let mut metrics = self.metrics.clone(); metrics.start_timer("storage.add_user_to_node", None); - const QUERY: &str = r#" - UPDATE nodes - SET current_load = current_load + 1, - available = GREATEST(available - 1, 0) - WHERE service = ? - AND node = ? - "#; - const SPANNER_QUERY: &str = r#" - UPDATE nodes - SET current_load = current_load + 1 - WHERE service = ? - AND node = ? - "#; - let query = if self.spanner_node_id.is_some() { - SPANNER_QUERY + ADD_USER_TO_NODE_SYNC_SPANNER_QUERY } else { - QUERY + ADD_USER_TO_NODE_SYNC_QUERY }; diesel::sql_query(query) @@ -337,18 +250,7 @@ impl TokenserverDb { let mut metrics = self.metrics.clone(); metrics.start_timer("storage.get_users", None); - const QUERY: &str = r#" - SELECT uid, nodes.node, generation, keys_changed_at, client_state, created_at, - replaced_at - FROM users - LEFT OUTER JOIN nodes ON users.nodeid = nodes.id - WHERE email = ? - AND users.service = ? 
- ORDER BY created_at DESC, uid DESC - LIMIT 20 - "#; - - diesel::sql_query(QUERY) + diesel::sql_query(GET_USERS_SYNC_QUERY) .bind::(¶ms.email) .bind::(params.service_id) .load::(&self.inner.conn) @@ -519,16 +421,10 @@ impl TokenserverDb { &self, params: params::GetServiceId, ) -> DbResult { - const QUERY: &str = r#" - SELECT id - FROM services - WHERE service = ? - "#; - if let Some(id) = self.service_id { Ok(results::GetServiceId { id }) } else { - diesel::sql_query(QUERY) + diesel::sql_query(GET_SERVICE_ID_SYNC_QUERY) .bind::(params.service) .get_result::(&self.inner.conn) .map_err(Into::into) @@ -540,12 +436,7 @@ impl TokenserverDb { &self, params: params::SetUserCreatedAt, ) -> DbResult { - const QUERY: &str = r#" - UPDATE users - SET created_at = ? - WHERE uid = ? - "#; - diesel::sql_query(QUERY) + diesel::sql_query(SET_USER_CREATED_AT_SYNC_QUERY) .bind::(params.created_at) .bind::(¶ms.uid) .execute(&self.inner.conn) @@ -558,12 +449,7 @@ impl TokenserverDb { &self, params: params::SetUserReplacedAt, ) -> DbResult { - const QUERY: &str = r#" - UPDATE users - SET replaced_at = ? - WHERE uid = ? - "#; - diesel::sql_query(QUERY) + diesel::sql_query(SET_USER_REPLACED_AT_SYNC_QUERY) .bind::(params.replaced_at) .bind::(¶ms.uid) .execute(&self.inner.conn) @@ -573,13 +459,7 @@ impl TokenserverDb { #[cfg(test)] fn get_user_sync(&self, params: params::GetUser) -> DbResult { - const QUERY: &str = r#" - SELECT service, email, generation, client_state, replaced_at, nodeid, keys_changed_at - FROM users - WHERE uid = ? - "#; - - diesel::sql_query(QUERY) + diesel::sql_query(GET_USER_SYNC_QUERY) .bind::(params.id) .get_result::(&self.inner.conn) .map_err(Into::into) @@ -587,11 +467,7 @@ impl TokenserverDb { #[cfg(test)] fn post_node_sync(&self, params: params::PostNode) -> DbResult { - const QUERY: &str = r#" - INSERT INTO nodes (service, node, available, current_load, capacity, downed, backoff) - VALUES (?, ?, ?, ?, ?, ?, ?) - "#; - diesel::sql_query(QUERY) + diesel::sql_query(POST_NODE_SYNC_QUERY) .bind::(params.service_id) .bind::(¶ms.node) .bind::(params.available) @@ -601,20 +477,14 @@ impl TokenserverDb { .bind::(params.backoff) .execute(&self.inner.conn)?; - diesel::sql_query(Self::LAST_INSERT_ID_QUERY) + diesel::sql_query(LAST_INSERT_ID_QUERY) .get_result::(&self.inner.conn) .map_err(Into::into) } #[cfg(test)] fn get_node_sync(&self, params: params::GetNode) -> DbResult { - const QUERY: &str = r#" - SELECT * - FROM nodes - WHERE id = ? - "#; - - diesel::sql_query(QUERY) + diesel::sql_query(GET_NODE_SYNC_QUERY) .bind::(params.id) .get_result::(&self.inner.conn) .map_err(Into::into) @@ -622,18 +492,12 @@ impl TokenserverDb { #[cfg(test)] fn unassign_node_sync(&self, params: params::UnassignNode) -> DbResult { - const QUERY: &str = r#" - UPDATE users - SET replaced_at = ? - WHERE nodeid = ? 
-        "#;
-
         let current_time = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap()
            .as_millis() as i64;

-        diesel::sql_query(QUERY)
+        diesel::sql_query(UNASSIGNED_NODE_SYNC_QUERY)
            .bind::(current_time)
            .bind::(params.node_id)
            .execute(&self.inner.conn)
@@ -643,9 +507,7 @@ impl TokenserverDb {

     #[cfg(test)]
     fn remove_node_sync(&self, params: params::RemoveNode) -> DbResult {
-        const QUERY: &str = "DELETE FROM nodes WHERE id = ?";
-
-        diesel::sql_query(QUERY)
+        diesel::sql_query(REMOVE_NODE_SYNC_QUERY)
            .bind::(params.node_id)
            .execute(&self.inner.conn)
            .map(|_| ())
@@ -654,17 +516,12 @@ impl TokenserverDb {

     #[cfg(test)]
     fn post_service_sync(&self, params: params::PostService) -> DbResult {
-        const INSERT_SERVICE_QUERY: &str = r#"
-            INSERT INTO services (service, pattern)
-            VALUES (?, ?)
-        "#;
-
-        diesel::sql_query(INSERT_SERVICE_QUERY)
+        diesel::sql_query(POST_SERVICE_INSERT_SERVICE_QUERY)
            .bind::(&params.service)
            .bind::(&params.pattern)
            .execute(&self.inner.conn)?;

-        diesel::sql_query(Self::LAST_INSERT_ID_QUERY)
+        diesel::sql_query(LAST_INSERT_ID_QUERY)
            .get_result::(&self.inner.conn)
            .map(|result| results::PostService {
                id: result.id as i32,
diff --git a/tokenserver-db/src/pool.rs b/tokenserver-db/src/pool.rs
index 5c8d9b5e96..fa4bc91f4d 100644
--- a/tokenserver-db/src/pool.rs
+++ b/tokenserver-db/src/pool.rs
@@ -1,41 +1,27 @@
 use std::{sync::Arc, time::Duration};

 use async_trait::async_trait;
-use diesel::{
-    mysql::MysqlConnection,
-    r2d2::{ConnectionManager, Pool},
-    Connection,
-};
-use diesel_logger::LoggingConnection;
+use diesel::r2d2::{ConnectionManager, Pool};
 use syncserver_common::{BlockingThreadpool, Metrics};
 #[cfg(debug_assertions)]
 use syncserver_db_common::test::TestTransactionCustomizer;
 use syncserver_db_common::{GetPoolState, PoolState};
+use tokenserver_db_common::error::{DbError, DbResult};
+#[cfg(feature = "mysql")]
+use tokenserver_db_mysql::pool::run_embedded_migrations;
+#[cfg(feature = "sqlite")]
+use tokenserver_db_sqlite::pool::run_embedded_migrations;
 use tokenserver_settings::Settings;

 use super::{
-    error::{DbError, DbResult},
     models::{Db, TokenserverDb},
+    Conn,
 };

-embed_migrations!();
-
-/// Run the diesel embedded migrations
-///
-/// Mysql DDL statements implicitly commit which could disrupt MysqlPool's
-/// begin_test_transaction during tests. So this runs on its own separate conn.
-fn run_embedded_migrations(database_url: &str) -> DbResult<()> {
-    let conn = MysqlConnection::establish(database_url)?;
-
-    embedded_migrations::run(&LoggingConnection::new(conn))?;
-
-    Ok(())
-}
-
 #[derive(Clone)]
 pub struct TokenserverPool {
     /// Pool of db connections
-    inner: Pool>,
+    inner: Pool>,
     metrics: Metrics,
     // This field is public so the service ID can be set after the pool is created
     pub service_id: Option,
@@ -55,12 +41,19 @@ impl TokenserverPool {
             run_embedded_migrations(&settings.database_url)?;
         }

-        let manager = ConnectionManager::::new(settings.database_url.clone());
+        // SQLite can't handle its uri prefix
+        let database_url = settings
+            .database_url
+            .strip_prefix("sqlite://")
+            .unwrap_or(&settings.database_url);
+
+        let manager = ConnectionManager::::new(database_url);
         let builder = Pool::builder()
            .max_size(settings.database_pool_max_size)
            .connection_timeout(Duration::from_secs(
                settings.database_pool_connection_timeout.unwrap_or(30) as u64,
            ))
+            .idle_timeout(Some(Duration::from_secs(1))) // FIXME: This one should only be enabled in testing sqlite
            .min_idle(settings.database_pool_min_idle);

         #[cfg(debug_assertions)]
diff --git a/tools/integration_tests/tokenserver/test_node_assignment.py b/tools/integration_tests/tokenserver/test_node_assignment.py
index c8b4132649..61d574777f 100644
--- a/tools/integration_tests/tokenserver/test_node_assignment.py
+++ b/tools/integration_tests/tokenserver/test_node_assignment.py
@@ -43,9 +43,11 @@ def test_user_creation(self):
         self.assertEqual(self._count_users(), 1)

     def test_new_user_allocation(self):
+        self._db_connect()
         # Start with a clean database
         cursor = self._execute_sql('DELETE FROM nodes', ())
         cursor.close()
+        self.database.close()

         self._add_node(available=100, current_load=0, capacity=100,
                        backoff=1, node='https://node1')
@@ -73,9 +75,11 @@ def test_new_user_allocation(self):
         self.assertEqual(node['available'], 98)

     def test_successfully_releasing_node_capacity(self):
+        self._db_connect()
         # Start with a clean database
         cursor = self._execute_sql('DELETE FROM nodes', ())
         cursor.close()
+        self.database.close()

         node_id1 = self._add_node(available=0, current_load=99, capacity=100,
                                   node='https://node1')
@@ -116,9 +120,12 @@ def test_successfully_releasing_node_capacity(self):
         self.assertEqual(node5['available'], 0)

     def test_unsuccessfully_releasing_node_capacity(self):
+        self._db_connect()
+
         # Start with a clean database
         cursor = self._execute_sql('DELETE FROM nodes', ())
         cursor.close()
+        self.database.close()

         self._add_node(available=0, current_load=100, capacity=100,
                        node='https://node1')
diff --git a/tools/integration_tests/tokenserver/test_support.py b/tools/integration_tests/tokenserver/test_support.py
index 982066dab1..c459466d83 100644
--- a/tools/integration_tests/tokenserver/test_support.py
+++ b/tools/integration_tests/tokenserver/test_support.py
@@ -10,6 +10,7 @@ import urllib.parse as urlparse

 from sqlalchemy import create_engine
+from sqlalchemy.pool import NullPool
 from tokenlib.utils import decode_token_bytes
 from webtest import TestApp

@@ -29,10 +30,7 @@ def setUpClass(cls):
             cls._build_auth_headers = cls._build_oauth_headers

     def setUp(self):
-        engine = create_engine(os.environ['SYNC_TOKENSERVER__DATABASE_URL'])
-        self.database = engine. \
-            execution_options(isolation_level='AUTOCOMMIT'). \
-            connect()
+        self._db_connect()

         host_url = urlparse.urlparse(self.TOKENSERVER_HOST)
         self.app = TestApp(self.TOKENSERVER_HOST, extra_environ={
@@ -54,8 +52,11 @@ def setUp(self):

         # Ensure we have a node with enough capacity to run the tests.
         self._add_node(capacity=100, node=self.NODE_URL, id=self.NODE_ID)
+        self.database.close()

     def tearDown(self):
+        self._db_connect()
+
         # And clean up at the end, for good measure.
         cursor = self._execute_sql(('DELETE FROM users'), ())
         cursor.close()
@@ -97,6 +98,7 @@ def _build_oauth_headers(self, generation=None, user='test',

     def _add_node(self, capacity=100, available=100, node=NODE_URL, id=None,
                   current_load=0, backoff=0, downed=0):
+        self._db_connect()
         query = 'INSERT INTO nodes (service, node, available, capacity, \
                 current_load, backoff, downed'
         data = (self.service_id, node, available, capacity, current_load,
@@ -110,15 +112,18 @@ def _add_node(self, capacity=100, available=100, node=NODE_URL, id=None,

         cursor = self._execute_sql(query, data)
         cursor.close()
+        self.database.close()

         return self._last_insert_id()

     def _get_node(self, id):
+        self._db_connect()
         query = 'SELECT * FROM nodes WHERE id=%s'
         cursor = self._execute_sql(query, (id,))
         (id, service, node, available, current_load, capacity, downed,
             backoff) = cursor.fetchone()
         cursor.close()
+        self.database.close()

         return {
             'id': id,
@@ -132,23 +137,28 @@ def _get_node(self, id):
         }

     def _last_insert_id(self):
+        self._db_connect()
         cursor = self._execute_sql('SELECT LAST_INSERT_ID()', ())
         (id,) = cursor.fetchone()
         cursor.close()
+        self.database.close()

         return id

     def _add_service(self, service_name, pattern):
+        self._db_connect()
         query = 'INSERT INTO services (service, pattern) \
                 VALUES(%s, %s)'
         cursor = self._execute_sql(query, (service_name, pattern))
         cursor.close()
+        self.database.close()

         return self._last_insert_id()

     def _add_user(self, email=None, generation=1234, client_state='aaaa',
                   created_at=None, nodeid=NODE_ID, keys_changed_at=1234,
                   replaced_at=None):
+        self._db_connect()
         query = '''
            INSERT INTO users (service, email, generation, client_state, \
                created_at, nodeid, keys_changed_at, replaced_at)
@@ -162,16 +172,19 @@ def _add_user(self, email=None, generation=1234, client_state='aaaa',
                created_at, nodeid, keys_changed_at, replaced_at))
         cursor.close()
+        self.database.close()

         return self._last_insert_id()

     def _get_user(self, uid):
+        self._db_connect()
         query = 'SELECT * FROM users WHERE uid = %s'
         cursor = self._execute_sql(query, (uid,))
         (uid, service, email, generation, client_state, created_at,
             replaced_at, nodeid, keys_changed_at) = cursor.fetchone()
         cursor.close()
+        self.database.close()

         return {
             'uid': uid,
@@ -186,6 +199,7 @@ def _get_user(self, uid):
         }

     def _get_replaced_users(self, service_id, email):
+        self._db_connect()
         query = 'SELECT * FROM users WHERE service = %s AND email = %s AND \
                replaced_at IS NOT NULL'
         cursor = self._execute_sql(query, (service_id, email))
@@ -209,21 +223,26 @@ def _get_replaced_users(self, service_id, email):
             users.append(user_dict)

         cursor.close()
+        self.database.close()

         return users

     def _get_service_id(self, service):
+        self._db_connect()
         query = 'SELECT id FROM services WHERE service = %s'
         cursor = self._execute_sql(query, (service,))
         (service_id,) = cursor.fetchone()
         cursor.close()
+        self.database.close()

         return service_id

     def _count_users(self):
+        self._db_connect()
         query = 'SELECT COUNT(DISTINCT(uid)) FROM users'
         cursor = self._execute_sql(query, ())
         (count,) = cursor.fetchone()
         cursor.close()
+        self.database.close()

         return count

@@ -232,6 +251,13 @@ def _execute_sql(self, query, args):

         return cursor

+    def _db_connect(self):
+        time.sleep(2) # FIXME: Don't waste time like that
+        engine = create_engine(os.environ['SYNC_TOKENSERVER__DATABASE_URL'], poolclass=NullPool)
+        self.database = engine. \
+            execution_options(isolation_level='AUTOCOMMIT'). \
+            connect()
+
     def unsafelyParseToken(self, token):
         # For testing purposes, don't check HMAC or anything...
         return json.loads(decode_token_bytes(token)[:-32].decode('utf8'))