diff --git a/CHANGELOG.md b/CHANGELOG.md index 4377e6f725..b8231674c2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,38 @@ + +## 0.5.6 (2020-08-11) + + +#### Features + +* More purge_ttl features (#776) ([59aa28a4](https://github.com/mozilla-services/syncstorage-rs/commit/59aa28a4e5fdcfe2acc3f767487066d30b998af0), closes [#735](https://github.com/mozilla-services/syncstorage-rs/issues/735), [#743](https://github.com/mozilla-services/syncstorage-rs/issues/743)) + +#### Bug Fixes + +* remove ubuntu target for grpcio (#775) ([7d1061f7](https://github.com/mozilla-services/syncstorage-rs/commit/7d1061f7197a56936a6cff9a438997640892d6c6), closes [#774](https://github.com/mozilla-services/syncstorage-rs/issues/774)) +* Return WeaveError::OverQuota for over quota responses (#773) ([38cd5ddd](https://github.com/mozilla-services/syncstorage-rs/commit/38cd5dddc36ae0aeda159fea88ba6128a8e85181), closes [#769](https://github.com/mozilla-services/syncstorage-rs/issues/769)) +* ensure an X-Last-Modified for /info/configuration (#761) ([36533f85](https://github.com/mozilla-services/syncstorage-rs/commit/36533f8566c39e8c82ccb5a2bc8ae62fb254129a), closes [#759](https://github.com/mozilla-services/syncstorage-rs/issues/759)) + + + + +### 0.5.5 (2020-08-06) + +#### Bug Fixes + +* set config env separator to double underscore. (#763) ([f1d88fea](https://github.com/mozilla-services/syncstorage-rs/commit/f1d88feae60d7fea15b7575ac2108f0f80ff42b4), closes [#762](https://github.com/mozilla-services/syncstorage-rs/issues/762)) + + + + +### 0.5.4 (2020-08-04) + + +#### Features + +* add debug_client check to BsoBodies for batch operations. ([1370df9d](https://github.com/mozilla-services/syncstorage-rs/commit/1370df9d7c2e6d656f50332b3f8615faafacead0)) + + + ## 0.5.3 (2020-07-31) diff --git a/Cargo.lock b/Cargo.lock index 74b2a80ba7..7549eeea6c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -777,6 +777,20 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "deadpool" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4aaff9a7a1de9893f4004fa08527b31cb2ae4121c44e053cf53f29203c73bd23" +dependencies = [ + "async-trait", + "config", + "crossbeam-queue", + "num_cpus", + "serde 1.0.114", + "tokio", +] + [[package]] name = "debugid" version = "0.7.2" @@ -1994,9 +2008,9 @@ dependencies = [ [[package]] name = "protobuf" -version = "2.16.2" +version = "2.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d883f78645c21b7281d21305181aa1f4dd9e9363e7cf2566c93121552cff003e" +checksum = "cb14183cc7f213ee2410067e1ceeadba2a7478a59432ff0747a335202798b1e2" [[package]] name = "quick-error" @@ -2706,7 +2720,7 @@ dependencies = [ [[package]] name = "syncstorage" -version = "0.5.3" +version = "0.5.5" dependencies = [ "actix-cors", "actix-http", @@ -2719,6 +2733,7 @@ dependencies = [ "cadence", "chrono", "config", + "deadpool", "diesel", "diesel_logger", "diesel_migrations", @@ -2755,6 +2770,7 @@ dependencies = [ "time 0.2.16", "tokio", "url 2.1.1", + "urlencoding", "uuid", "validator", "validator_derive", @@ -3139,6 +3155,12 @@ dependencies = [ "serde 1.0.114", ] +[[package]] +name = "urlencoding" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9232eb53352b4442e40d7900465dfc534e8cb2dc8f18656fcb2ac16112b5593" + [[package]] name = "uuid" version = "0.8.1" diff --git a/Cargo.toml b/Cargo.toml index eb7ca5ac7c..102cb80688 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "syncstorage"
-version = "0.5.3" +version = "0.5.5" license = "MPL-2.0" authors = [ "Ben Bangert ", @@ -26,6 +26,7 @@ bytes = "0.5" cadence = "0.20.0" chrono = "0.4" config = "0.10" +deadpool = "0.5.2" diesel = { version = "1.4.4", features = ["mysql", "r2d2"] } diesel_logger = "0.1.1" diesel_migrations = { version = "1.4.0", features = ["mysql"] } @@ -34,6 +35,9 @@ env_logger = "0.7.1" failure = "0.1.8" futures = { version = "0.3", features = ["compat"] } googleapis-raw = { version = "0", path = "vendor/mozilla-rust-sdk/googleapis-raw" } +# Some versions of OpenSSL 1.1.1 conflict with grpcio's built-in boringssl which can cause +# syncserver to either fail to either compile, or start. In those cases, try +# `cargo build --features grpcio/openssl ...` grpcio = { version = "0.6.0" } lazy_static = "1.4.0" hawk = "3.2" @@ -44,7 +48,7 @@ log = { version = "0.4.8", features = ["max_level_info", "release_max_level_info mime = "0.3" num_cpus = "1" # must match what's used by googleapis-raw -protobuf = "2.15" +protobuf = "2.17.0" rand = "0.7" regex = "1.3" sentry = { version = "0.19", features = ["with_curl_transport"] } @@ -62,6 +66,7 @@ slog-stdlog = "4.0" slog-term = "2.6" time = "0.2" url = "2.1" +urlencoding = "1.1" uuid = { version = "0.8.1", features = ["serde", "v4"] } validator = "0.10" validator_derive = "0.10" diff --git a/README.md b/README.md index e14433c665..256098474d 100644 --- a/README.md +++ b/README.md @@ -122,7 +122,7 @@ This requires access to the mozilla-rust-sdk which is now available at `/vendor/ ### Connecting to Firefox -This will walk you through the steps to connect this project to your local copy of Firefox. +This will walk you through the steps to connect this project to your local copy of Firefox. 1. Follow the steps outlined above for running this project using [MySQL](https://github.com/mozilla-services/syncstorage-rs#mysql). @@ -201,11 +201,13 @@ Open a PR after doing the following: Once your PR merges, then go ahead and create an official [GitHub release](https://github.com/mozilla-services/syncstorage-rs/releases). - ## Troubleshooting - `rm Cargo.lock; cargo clean;` - Try this if you're having problems compiling. +- Some versions of OpenSSL 1.1.1 can conflict with grpcio's built in BoringSSL. These errors can cause syncstorage to fail to run or compile. +If you see a problem related to `libssl` you may need to specify the `cargo` option `--features grpcio/openssl` to force grpcio to use OpenSSL. 
+ ## Related Documentation - [API docs](https://mozilla-services.readthedocs.io/en/latest/storage/apis-1.5.html) diff --git a/src/db/results.rs b/src/db/results.rs index 3b063f44cf..93f49f706c 100644 --- a/src/db/results.rs +++ b/src/db/results.rs @@ -94,6 +94,15 @@ impl From<bb8::State> for PoolState { } } +impl From<deadpool::Status> for PoolState { + fn from(status: deadpool::Status) -> PoolState { + PoolState { + connections: status.size as u32, + idle_connections: status.available as u32, + } + } +} + #[cfg(test)] pub type GetCollectionId = i32; diff --git a/src/db/spanner/batch.rs b/src/db/spanner/batch.rs index 6869e4805d..66ba26b41f 100644 --- a/src/db/spanner/batch.rs +++ b/src/db/spanner/batch.rs @@ -18,7 +18,7 @@ use crate::{ }; pub async fn create_async( - db: &SpannerDb<'_>, + db: &SpannerDb, params: params::CreateBatch, ) -> Result<results::CreateBatch> { let batch_id = Uuid::new_v4().to_simple().to_string(); @@ -57,7 +57,7 @@ pub async fn create_async( Ok(batch_id) } -pub async fn validate_async(db: &SpannerDb<'_>, params: params::ValidateBatch) -> Result<bool> { +pub async fn validate_async(db: &SpannerDb, params: params::ValidateBatch) -> Result<bool> { let collection_id = db.get_collection_id_async(&params.collection).await?; let exists = db .sql( @@ -81,7 +81,7 @@ pub async fn validate_async(db: &SpannerDb<'_>, params: params::ValidateBatch) - Ok(exists.is_some()) } -pub async fn append_async(db: &SpannerDb<'_>, params: params::AppendToBatch) -> Result<()> { +pub async fn append_async(db: &SpannerDb, params: params::AppendToBatch) -> Result<()> { let mut metrics = db.metrics.clone(); metrics.start_timer("storage.spanner.append_items_to_batch", None); @@ -106,7 +106,7 @@ pub async fn append_async(db: &SpannerDb<'_>, params: params::AppendToBatch) -> } pub async fn get_async( - db: &SpannerDb<'_>, + db: &SpannerDb, params: params::GetBatch, ) -> Result<Option<results::GetBatch>> { let collection_id = db.get_collection_id_async(&params.collection).await?; @@ -142,7 +142,7 @@ pub async fn get_async( Ok(batch) } -pub async fn delete_async(db: &SpannerDb<'_>, params: params::DeleteBatch) -> Result<()> { +pub async fn delete_async(db: &SpannerDb, params: params::DeleteBatch) -> Result<()> { let collection_id = db.get_collection_id_async(&params.collection).await?; // Also deletes child batch_bsos rows (INTERLEAVE IN PARENT batches ON // DELETE CASCADE) @@ -165,7 +165,7 @@ pub async fn delete_async(db: &SpannerDb<'_>, params: params::DeleteBatch) -> Re } pub async fn commit_async( - db: &SpannerDb<'_>, + db: &SpannerDb, params: params::CommitBatch, ) -> Result<results::CommitBatch> { let mut metrics = db.metrics.clone(); @@ -239,7 +239,7 @@ pub async fn commit_async( } pub async fn do_append_async( - db: &SpannerDb<'_>, + db: &SpannerDb, user_id: HawkIdentifier, collection_id: i32, batch_id: String, @@ -335,7 +335,7 @@ pub async fn do_append_async( /// For the special case of a user creating a batch for a collection with no /// prior data.
async fn pretouch_collection_async( - db: &SpannerDb<'_>, + db: &SpannerDb, user_id: &HawkIdentifier, collection_id: i32, ) -> Result<()> { diff --git a/src/db/spanner/manager.rs b/src/db/spanner/manager/bb8.rs similarity index 95% rename from src/db/spanner/manager.rs rename to src/db/spanner/manager/bb8.rs index 344625a968..b154889059 100644 --- a/src/db/spanner/manager.rs +++ b/src/db/spanner/manager/bb8.rs @@ -18,7 +18,7 @@ use crate::{ settings::Settings, }; -const SPANNER_ADDRESS: &str = "spanner.googleapis.com:443"; +pub const SPANNER_ADDRESS: &str = "spanner.googleapis.com:443"; pub struct SpannerConnectionManager<T> { database_name: String, @@ -33,6 +33,7 @@ impl<_T> fmt::Debug for SpannerConnectionManager<_T> { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct("SpannerConnectionManager") .field("database_name", &self.database_name) + .field("test_transactions", &self.test_transactions) .finish() } } @@ -65,7 +66,7 @@ pub struct SpannerSession { pub client: SpannerClient, pub session: Session, - pub(super) use_test_transactions: bool, + pub(in crate::db::spanner) use_test_transactions: bool, } #[async_trait] @@ -130,7 +131,7 @@ impl ManageConnection } } -async fn create_session( +pub async fn create_session( client: &SpannerClient, database_name: &str, ) -> Result<Session, grpcio::Error> { diff --git a/src/db/spanner/manager/deadpool.rs b/src/db/spanner/manager/deadpool.rs new file mode 100644 index 0000000000..e75e7dd3ad --- /dev/null +++ b/src/db/spanner/manager/deadpool.rs @@ -0,0 +1,115 @@ +use std::{fmt, sync::Arc}; + +use actix_web::web::block; +use async_trait::async_trait; +use deadpool::managed::{RecycleError, RecycleResult}; +use googleapis_raw::spanner::v1::{spanner::GetSessionRequest, spanner_grpc::SpannerClient}; +use grpcio::{ChannelBuilder, ChannelCredentials, EnvBuilder, Environment}; + +use crate::{ + db::error::{DbError, DbErrorKind}, + server::metrics::Metrics, + settings::Settings, +}; + +use super::bb8::{create_session, SpannerSession, SPANNER_ADDRESS}; + +pub struct Manager { + database_name: String, + /// The gRPC environment + env: Arc<Environment>, + metrics: Metrics, + test_transactions: bool, +} + +impl fmt::Debug for Manager { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("Manager") + .field("database_name", &self.database_name) + .field("test_transactions", &self.test_transactions) + .finish() + } +} + +impl Manager { + pub fn new(settings: &Settings, metrics: &Metrics) -> Result<Self, DbError> { + let url = &settings.database_url; + if !url.starts_with("spanner://") { + Err(DbErrorKind::InvalidUrl(url.to_owned()))?; + } + let database_name = url["spanner://".len()..].to_owned(); + let env = Arc::new(EnvBuilder::new().build()); + + #[cfg(not(test))] + let test_transactions = false; + #[cfg(test)] + let test_transactions = settings.database_use_test_transactions; + + Ok(Manager { + database_name, + env, + metrics: metrics.clone(), + test_transactions, + }) + } +} + +#[async_trait] +impl deadpool::managed::Manager<SpannerSession, DbError> for Manager { + async fn create(&self) -> Result<SpannerSession, DbError> { + let env = self.env.clone(); + let mut metrics = self.metrics.clone(); + // XXX: issue732: Could google_default_credentials (or + // ChannelBuilder::secure_connect) block?!
+ let chan = block(move || -> Result<grpcio::Channel, grpcio::Error> { + metrics.start_timer("storage.pool.grpc_auth", None); + // Requires + // GOOGLE_APPLICATION_CREDENTIALS=/path/to/service-account.json + let creds = ChannelCredentials::google_default_credentials()?; + Ok(ChannelBuilder::new(env) + .max_send_message_len(100 << 20) + .max_receive_message_len(100 << 20) + .secure_connect(SPANNER_ADDRESS, creds)) + }) + .await + .map_err(|e| match e { + actix_web::error::BlockingError::Error(e) => e.into(), + actix_web::error::BlockingError::Canceled => { + DbError::internal("web::block Manager operation canceled") + } + })?; + let client = SpannerClient::new(chan); + + // Connect to the instance and create a Spanner session. + let session = create_session(&client, &self.database_name).await?; + + Ok(SpannerSession { + client, + session, + use_test_transactions: self.test_transactions, + }) + } + + async fn recycle(&self, conn: &mut SpannerSession) -> RecycleResult<DbError> { + let mut req = GetSessionRequest::new(); + req.set_name(conn.session.get_name().to_owned()); + if let Err(e) = conn + .client + .get_session_async(&req) + .map_err(|e| RecycleError::Backend(e.into()))? + .await + { + match e { + grpcio::Error::RpcFailure(ref status) + if status.status == grpcio::RpcStatusCode::NOT_FOUND => + { + conn.session = create_session(&conn.client, &self.database_name) + .await + .map_err(|e| RecycleError::Backend(e.into()))?; + } + _ => return Err(RecycleError::Backend(e.into())), + } + } + Ok(()) + } +} diff --git a/src/db/spanner/manager/mod.rs b/src/db/spanner/manager/mod.rs new file mode 100644 index 0000000000..d29dca6f4b --- /dev/null +++ b/src/db/spanner/manager/mod.rs @@ -0,0 +1,2 @@ +pub mod bb8; +pub mod deadpool; diff --git a/src/db/spanner/models.rs b/src/db/spanner/models.rs index b45d1b9e31..cba4bbb8ee 100644 --- a/src/db/spanner/models.rs +++ b/src/db/spanner/models.rs @@ -7,47 +7,46 @@ use std::{ sync::Arc, }; -use bb8::PooledConnection; use futures::future::TryFutureExt; -use googleapis_raw::spanner::v1::transaction::{ - self, TransactionOptions, TransactionOptions_ReadOnly, TransactionOptions_ReadWrite, -}; use googleapis_raw::spanner::v1::{ mutation::{Mutation, Mutation_Write}, spanner::{BeginTransactionRequest, CommitRequest, ExecuteSqlRequest, RollbackRequest}, + transaction::{ + TransactionOptions, TransactionOptions_ReadOnly, TransactionOptions_ReadWrite, + TransactionSelector, + }, type_pb::TypeCode, }; #[allow(unused_imports)] use protobuf::{well_known_types::ListValue, Message, RepeatedField}; -use super::manager::{SpannerConnectionManager, SpannerSession}; -use super::pool::CollectionCache; - -use crate::db::{ - error::{DbError, DbErrorKind}, - params, results, - spanner::support::{as_type, StreamedResultSetAsync}, - util::SyncTimestamp, - Db, DbFuture, Sorting, FIRST_CUSTOM_COLLECTION_ID, +use crate::{ + db::{ + error::{DbError, DbErrorKind}, + params, results, + util::SyncTimestamp, + Db, DbFuture, Sorting, FIRST_CUSTOM_COLLECTION_ID, + }, + server::metrics::Metrics, + web::extractors::{BsoQueryParams, HawkIdentifier, Offset}, }; -use crate::server::metrics::Metrics; -use crate::web::extractors::{BsoQueryParams, HawkIdentifier, Offset}; -use super::support::{bso_to_insert_row, bso_to_update_row}; use super::{ batch, - support::{as_list_value, as_value, bso_from_row, ExecuteSqlRequestBuilder}, + pool::{CollectionCache, Conn}, + support::{ + as_list_value, as_type, as_value, bso_from_row, ExecuteSqlRequestBuilder, + StreamedResultSetAsync, + }, + support::{bso_to_insert_row, bso_to_update_row}, }; -pub
type TransactionSelector = transaction::TransactionSelector; - #[derive(Debug, Eq, PartialEq)] pub enum CollectionLock { Read, Write, } -pub(super) type Conn<'a> = PooledConnection<'a, SpannerConnectionManager<SpannerSession>>; pub type Result<T> = std::result::Result<T, DbError>; /// The ttl to use for rows that are never supposed to expire (in seconds) @@ -81,8 +80,8 @@ struct SpannerDbSession { } #[derive(Clone, Debug)] -pub struct SpannerDb<'a> { - pub(super) inner: Arc<SpannerDbInner<'a>>, +pub struct SpannerDb { + pub(super) inner: Arc<SpannerDbInner>, /// Pool level cache of collection_ids and their names coll_cache: Arc<CollectionCache>, @@ -90,28 +89,28 @@ pub metrics: Metrics, } -pub struct SpannerDbInner<'a> { - pub(super) conn: Conn<'a>, +pub struct SpannerDbInner { + pub(super) conn: Conn, session: RefCell<SpannerDbSession>, } -impl fmt::Debug for SpannerDbInner<'_> { +impl fmt::Debug for SpannerDbInner { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "SpannerDbInner") } } -impl<'a> Deref for SpannerDb<'a> { - type Target = SpannerDbInner<'a>; +impl Deref for SpannerDb { + type Target = SpannerDbInner; fn deref(&self) -> &Self::Target { &self.inner } } -impl<'a> SpannerDb<'a> { - pub fn new(conn: Conn<'a>, coll_cache: Arc<CollectionCache>, metrics: &Metrics) -> Self { +impl SpannerDb { + pub fn new(conn: Conn, coll_cache: Arc<CollectionCache>, metrics: &Metrics) -> Self { let inner = SpannerDbInner { conn, session: RefCell::new(Default::default()), @@ -1605,7 +1604,7 @@ impl<'a> SpannerDb<'a> { } } -impl<'a> Db<'a> for SpannerDb<'a> { +impl<'a> Db<'a> for SpannerDb { fn commit(&self) -> DbFuture<'_, ()> { let db = self.clone(); Box::pin(async move { db.commit_async().map_err(Into::into).await }) diff --git a/src/db/spanner/pool.rs b/src/db/spanner/pool.rs index 1d0311595d..74150e09dd 100644 --- a/src/db/spanner/pool.rs +++ b/src/db/spanner/pool.rs @@ -1,5 +1,5 @@ use async_trait::async_trait; -use bb8::Pool; +use bb8::ErrorSink; use std::{ collections::HashMap, @@ -12,10 +12,12 @@ use crate::db::{error::DbError, results, Db, DbPool, STD_COLLS}; use crate::server::metrics::Metrics; use crate::settings::Settings; -use super::manager::{SpannerConnectionManager, SpannerSession}; +use super::manager::bb8::SpannerSession; use super::models::SpannerDb; use crate::error::ApiResult; +pub(super) type Conn = deadpool::managed::Object<SpannerSession, DbError>; + embed_migrations!(); /// Run the diesel embedded migrations @@ -30,7 +32,8 @@ embed_migrations!(); #[derive(Clone)] pub struct SpannerDbPool { /// Pool of db connections - pool: Pool<SpannerConnectionManager<SpannerSession>>, + //pool: Pool<SpannerConnectionManager<SpannerSession>>, + pool: deadpool::managed::Pool<SpannerSession, DbError>, /// In-memory cache of collection_ids and their names coll_cache: Arc<CollectionCache>, @@ -45,23 +48,24 @@ impl SpannerDbPool { } pub async fn new_without_migrations(settings: &Settings, metrics: &Metrics) -> Result<Self> { - let manager = SpannerConnectionManager::<SpannerSession>::new(settings, metrics)?; let max_size = settings.database_pool_max_size.unwrap_or(10); - let builder = bb8::Pool::builder() - .max_size(max_size) - .min_idle(settings.database_pool_min_idle); + let manager = super::manager::deadpool::Manager::new(settings, metrics)?; + let config = deadpool::managed::PoolConfig::new(max_size as usize); + let pool = deadpool::managed::Pool::from_config(manager, config); Ok(Self { - pool: builder.build(manager).await?, + pool, coll_cache: Default::default(), metrics: metrics.clone(), }) } - pub async fn get_async(&self) -> Result<SpannerDb<'_>> { + pub async fn get_async(&self) -> Result<SpannerDb> { let conn = self.pool.get().await.map_err(|e| match e { - bb8::RunError::User(dbe) => dbe, - bb8::RunError::TimedOut => DbError::internal("bb8:TimedOut"), +
deadpool::managed::PoolError::Backend(dbe) => dbe, + deadpool::managed::PoolError::Timeout(timeout_type) => { + DbError::internal(&format!("deadpool Timeout: {:?}", timeout_type)) + } })?; Ok(SpannerDb::new( conn, @@ -81,7 +85,7 @@ impl DbPool for SpannerDbPool { } fn state(&self) -> results::PoolState { - self.pool.state().into() + self.pool.status().into() } fn validate_batch_id(&self, id: String) -> Result<()> { @@ -164,3 +168,19 @@ impl Default for CollectionCache { } } } + +/// Logs internal bb8 errors +#[derive(Debug, Clone, Copy)] +pub struct LoggingErrorSink; + +impl<E: failure::Fail> ErrorSink<E> for LoggingErrorSink { + fn sink(&self, e: E) { + error!("bb8 Error: {}", e); + let event = sentry::integrations::failure::event_from_fail(&e); + sentry::capture_event(event); + } + + fn boxed_clone(&self) -> Box<dyn ErrorSink<E>> { + Box::new(*self) + } +} diff --git a/src/db/spanner/support.rs b/src/db/spanner/support.rs index 2a9b1715b5..6c6ca0ccfd 100644 --- a/src/db/spanner/support.rs +++ b/src/db/spanner/support.rs @@ -16,14 +16,16 @@ use protobuf::{ RepeatedField, }; -use super::models::{Conn, Result}; -use crate::db::{results, util::SyncTimestamp, DbError, DbErrorKind}; - use crate::{ - db::{params, spanner::models::DEFAULT_BSO_TTL, util::to_rfc3339}, + db::{ + params, results, spanner::models::DEFAULT_BSO_TTL, util::to_rfc3339, util::SyncTimestamp, + DbError, DbErrorKind, + }, web::extractors::HawkIdentifier, }; +use super::{models::Result, pool::Conn}; + pub fn as_value(string_value: String) -> Value { let mut value = Value::new(); value.set_string_value(string_value); @@ -86,7 +88,7 @@ impl ExecuteSqlRequestBuilder { self } - fn prepare_request(self, conn: &Conn<'_>) -> ExecuteSqlRequest { + fn prepare_request(self, conn: &Conn) -> ExecuteSqlRequest { let mut request = self.execute_sql; request.set_session(conn.session.get_name().to_owned()); if let Some(params) = self.params { @@ -101,7 +103,7 @@ impl ExecuteSqlRequestBuilder { } /// Execute a SQL read statement but return a non-blocking streaming result - pub fn execute_async(self, conn: &Conn<'_>) -> Result<StreamedResultSetAsync> { + pub fn execute_async(self, conn: &Conn) -> Result<StreamedResultSetAsync> { let stream = conn .client .execute_streaming_sql(&self.prepare_request(conn))?; @@ -109,7 +111,7 @@ impl ExecuteSqlRequestBuilder { } /// Execute a DML statement, returning the exact count of modified rows - pub async fn execute_dml_async(self, conn: &Conn<'_>) -> Result<i64> { + pub async fn execute_dml_async(self, conn: &Conn) -> Result<i64> { let rs = conn .client .execute_sql_async(&self.prepare_request(conn))?
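Taken together, the changes above replace bb8's `ManageConnection` with deadpool 0.5's two-method `Manager` trait and its `Object`/`PoolConfig` types. The following is a minimal, self-contained sketch of that pattern, not the project's actual code — `MyConn`, `MyError`, and `MyManager` are invented stand-ins (and a tokio runtime with the `macros` feature is assumed), while the real manager yields `SpannerSession`s and `DbError`s as shown in the diff:

```rust
use async_trait::async_trait;
use deadpool::managed::{Pool, PoolConfig, RecycleError, RecycleResult};

#[derive(Debug)]
struct MyError(String);

struct MyConn {
    healthy: bool,
}

struct MyManager;

#[async_trait]
impl deadpool::managed::Manager<MyConn, MyError> for MyManager {
    // Called whenever the pool needs a brand-new object.
    async fn create(&self) -> Result<MyConn, MyError> {
        Ok(MyConn { healthy: true })
    }

    // Called when an idle object is checked out again; returning an Err
    // evicts it and `create` is used to replace it.
    async fn recycle(&self, conn: &mut MyConn) -> RecycleResult<MyError> {
        if conn.healthy {
            Ok(())
        } else {
            Err(RecycleError::Backend(MyError("connection went bad".to_owned())))
        }
    }
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Mirrors SpannerDbPool::new_without_migrations: size the pool via
    // PoolConfig, build it from the manager, then check objects out.
    let pool = Pool::from_config(MyManager, PoolConfig::new(10));
    let conn = pool.get().await.map_err(|_| "pool checkout failed")?;
    assert!(conn.healthy);
    drop(conn); // hands the object back to the pool for reuse

    // `status()` backs the From<deadpool::Status> conversion added in results.rs.
    let status = pool.status();
    println!("pool size: {}, available: {}", status.size, status.available);
    Ok(())
}
```

The `recycle` hook is what lets the real manager re-run `create_session` when Spanner reports `NOT_FOUND` for a cached session, rather than handing a dead session to `SpannerDb`.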
diff --git a/src/db/transaction.rs b/src/db/transaction.rs index f4b2154da1..e5f9e9eb48 100644 --- a/src/db/transaction.rs +++ b/src/db/transaction.rs @@ -21,7 +21,6 @@ use std::future::Future; #[derive(Clone)] pub struct DbTransactionPool { pool: Box<dyn DbPool>, - lock_collection: Option<params::LockCollection>, is_read: bool, tags: Tags, user_id: HawkIdentifier, @@ -48,10 +47,10 @@ impl DbTransactionPool { let db2 = db.clone(); // Lock for transaction - let result = match (self.lock_collection.clone(), self.is_read) { + let result = match (self.get_lock_collection(), self.is_read) { (Some(lc), true) => db.lock_for_read(lc).await, (Some(lc), false) => db.lock_for_write(lc).await, - _ => Ok(()), + (None, is_read) => db.begin(!is_read).await, }; // Handle lock error @@ -156,6 +155,16 @@ impl DbTransactionPool { }; Ok(resp) } + + /// Create a lock collection if there is a collection to lock + fn get_lock_collection(&self) -> Option<params::LockCollection> { + self.collection + .clone() + .map(|collection| params::LockCollection { + collection, + user_id: self.user_id.clone(), + }) + } } impl FromRequest for DbTransactionPool { @@ -193,7 +202,7 @@ impl FromRequest for DbTransactionPool { } }; let collection = match col_result { - Ok(v) => v, + Ok(v) => v.map(|collection| collection.collection), Err(e) => { // Semi-example to show how to use metrics inside of middleware. Metrics::from(state.as_ref()).incr("sync.error.collectionParam"); @@ -212,25 +221,13 @@ impl FromRequest for DbTransactionPool { let bso = BsoParam::extrude(req.head(), &mut req.extensions_mut()).ok(); let bso_opt = bso.map(|b| b.bso); - let (lc, is_read) = if let Some(collection) = collection { - let lc = params::LockCollection { - user_id: user_id.clone(), - collection: collection.collection, - }; - let is_read = match method { - Method::GET | Method::HEAD => true, - _ => false, - }; - - (Some(lc), is_read) - } else { - (None, true) + let is_read = match method { + Method::GET | Method::HEAD => true, + _ => false, }; - let collection = lc.as_ref().map(|c| c.collection.clone()); let precondition = PreConditionHeaderOpt::extrude(&req.headers(), Some(tags.clone()))?; let pool = Self { pool: state.db_pool.clone(), - lock_collection: lc, is_read, tags, user_id, diff --git a/src/error.rs b/src/error.rs index 68eef6dc74..061f5f417f 100644 --- a/src/error.rs +++ b/src/error.rs @@ -13,7 +13,6 @@ use actix_web::{ error::ResponseError, http::StatusCode, middleware::errhandlers::ErrorHandlerResponse, - web::Data, HttpResponse, Result, }; use failure::{Backtrace, Context, Fail}; @@ -25,7 +24,7 @@ use serde::{ use crate::db::error::{DbError, DbErrorKind}; use crate::server::metrics::Metrics; use crate::server::ServerState; -use crate::web::error::{HawkError, ValidationError, ValidationErrorKind}; +use crate::web::error::{HawkError, HawkErrorKind, ValidationError, ValidationErrorKind}; use crate::web::extractors::RequestErrorLocation; /// Legacy Sync 1.1 error codes, which Sync 1.5 also returns by replacing the descriptive JSON @@ -125,14 +124,19 @@ impl ApiError { DbErrorKind::Conflict => return false, _ => (), }, + ApiErrorKind::Hawk(hawke) => match hawke.kind() { + HawkErrorKind::MissingHeader => return false, + HawkErrorKind::InvalidHeader => return false, + _ => (), + }, _ => (), } true } - pub fn on_response(&self, state: &Data<ServerState>) { + pub fn on_response(&self, state: &ServerState) { if self.is_conflict() { - Metrics::from(state.as_ref()).incr("storage.confict") + Metrics::from(state).incr("storage.confict") } } @@ -145,8 +149,10 @@ impl ApiError { name, ref _tags, ) => { - if description ==
"size-limit-exceeded" { - return WeaveError::SizeLimitExceeded; + match description.as_ref() { + "over-quota" => return WeaveError::OverQuota, + "size-limit-exceeded" => return WeaveError::SizeLimitExceeded, + _ => {} } let name = name.clone().unwrap_or_else(|| "".to_owned()); if *location == RequestErrorLocation::Body diff --git a/src/server/mod.rs b/src/server/mod.rs index 1cc2887c41..d9644ef299 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -2,11 +2,6 @@ use std::{sync::Arc, time::Duration}; -use crate::db::{pool_from_settings, spawn_pool_periodic_reporter, DbPool}; -use crate::error::ApiError; -use crate::server::metrics::Metrics; -use crate::settings::{Secrets, ServerLimits, Settings}; -use crate::web::{handlers, middleware, tokenserver}; use actix_cors::Cors; use actix_web::{ dev, http::StatusCode, middleware::errhandlers::ErrorHandlers, web, App, HttpRequest, @@ -14,6 +9,12 @@ use actix_web::{ }; use cadence::StatsdClient; +use crate::db::{pool_from_settings, spawn_pool_periodic_reporter, DbPool}; +use crate::error::ApiError; +use crate::server::metrics::Metrics; +use crate::settings::{Secrets, ServerLimits, Settings}; +use crate::web::{handlers, middleware, tokenserver}; + pub const BSO_ID_REGEX: &str = r"[ -~]{1,64}"; pub const COLLECTION_ID_REGEX: &str = r"[a-zA-Z0-9._-]{1,32}"; const MYSQL_UID_REGEX: &str = r"[0-9]{1,10}"; @@ -32,6 +33,9 @@ pub struct ServerState { /// Server-enforced limits for request payloads. pub limits: Arc, + /// limits rendered as JSON + pub limits_json: String, + /// Secrets used during Hawk authentication. pub secrets: Arc, @@ -154,26 +158,33 @@ impl Server { let metrics = metrics::metrics_from_opts(&settings)?; let db_pool = pool_from_settings(&settings, &Metrics::from(&metrics)).await?; let limits = Arc::new(settings.limits); + let limits_json = + serde_json::to_string(&*limits).expect("ServerLimits failed to serialize"); let secrets = Arc::new(settings.master_secret); let port = settings.port; spawn_pool_periodic_reporter(Duration::from_secs(10), metrics.clone(), db_pool.clone())?; - let server = HttpServer::new(move || { + let mut server = HttpServer::new(move || { // Setup the server state let state = ServerState { db_pool: db_pool.clone(), limits: Arc::clone(&limits), + limits_json: limits_json.clone(), secrets: Arc::clone(&secrets), metrics: Box::new(metrics.clone()), port, }; build_app!(state, limits) - }) - .bind(format!("{}:{}", settings.host, settings.port)) - .expect("Could not get Server in Server::with_settings") - .run(); + }); + if let Some(keep_alive) = settings.actix_keep_alive { + server = server.keep_alive(keep_alive as usize); + } + let server = server + .bind(format!("{}:{}", settings.host, settings.port)) + .expect("Could not get Server in Server::with_settings") + .run(); Ok(server) } } diff --git a/src/server/test.rs b/src/server/test.rs index 061ac4356a..e3619ad46f 100644 --- a/src/server/test.rs +++ b/src/server/test.rs @@ -23,8 +23,7 @@ use crate::db::pool_from_settings; use crate::db::results::{DeleteBso, GetBso, PostBsos, PutBso}; use crate::db::util::SyncTimestamp; use crate::settings::{Secrets, ServerLimits}; -use crate::web::auth::HawkPayload; -use crate::web::extractors::BsoBody; +use crate::web::{auth::HawkPayload, extractors::BsoBody, X_LAST_MODIFIED}; lazy_static! 
{ static ref SERVER_LIMITS: Arc<ServerLimits> = Arc::new(ServerLimits::default()); @@ -70,6 +69,7 @@ async fn get_test_state(settings: &Settings) -> ServerState { .await .expect("Could not get db_pool in get_test_state"), limits: Arc::clone(&SERVER_LIMITS), + limits_json: serde_json::to_string(&**SERVER_LIMITS).unwrap(), secrets: Arc::clone(&SECRETS), metrics: Box::new(metrics), port: settings.port, @@ -604,3 +604,20 @@ async fn reject_old_ios() { let body = String::from_utf8(test::read_body(response).await.to_vec()).unwrap(); assert_eq!(body, "0"); } + +#[actix_rt::test] +async fn info_configuration_xlm() { + let mut app = init_app!().await; + let req = + create_request(http::Method::GET, "/1.5/42/info/configuration", None, None).to_request(); + let response = app.call(req).await.unwrap(); + assert_eq!(response.status(), StatusCode::OK); + let xlm = response.headers().get(X_LAST_MODIFIED); + assert!(xlm.is_some()); + assert_eq!( + xlm.unwrap() + .to_str() + .expect("Couldn't parse X-Last-Modified"), + "0.00" + ); +} diff --git a/src/settings.rs b/src/settings.rs index ba9d9b2748..17b22d35b3 100644 --- a/src/settings.rs +++ b/src/settings.rs @@ -32,6 +32,8 @@ pub struct Settings { #[cfg(test)] pub database_use_test_transactions: bool, + pub actix_keep_alive: Option<u32>, + /// Server-enforced limits for request payloads. pub limits: ServerLimits, @@ -57,6 +59,7 @@ impl Default for Settings { database_pool_min_idle: None, #[cfg(test)] database_use_test_transactions: false, + actix_keep_alive: None, limits: ServerLimits::default(), master_secret: Secrets::default(), statsd_host: None, @@ -108,7 +111,12 @@ impl Settings { } // Merge the environment overrides - s.merge(Environment::with_prefix(PREFIX))?; + // While the prefix is currently case-insensitive, environment variables + // are traditionally UPPERCASE; this ensures that will continue to work + // should Environment ever change its policy about case insensitivity. + // This will accept environment variables specified as + // `SYNC_FOO__BAR_VALUE="gorp"` as `foo.bar_value = "gorp"` + s.merge(Environment::with_prefix(&PREFIX.to_uppercase()).separator("__"))?; Ok(match s.try_into::<Settings>() { Ok(s) => { diff --git a/src/web/error.rs b/src/web/error.rs index 7bb21f0c32..a675211b28 100644 --- a/src/web/error.rs +++ b/src/web/error.rs @@ -24,6 +24,12 @@ pub struct HawkError { inner: Context<HawkErrorKind>, } +impl HawkError { + pub fn kind(&self) -> &HawkErrorKind { + self.inner.get_context() + } +} + /// Causes of HAWK errors.
#[derive(Debug, Fail)] pub enum HawkErrorKind { diff --git a/src/web/extractors.rs b/src/web/extractors.rs index bc8bac010c..4a0896671f 100644 --- a/src/web/extractors.rs +++ b/src/web/extractors.rs @@ -28,9 +28,9 @@ use validator::{Validate, ValidationError}; use crate::db::transaction::DbTransactionPool; use crate::db::{util::SyncTimestamp, DbPool, Sorting}; -use crate::error::ApiError; +use crate::error::{ApiError, ApiErrorKind}; use crate::server::{metrics, ServerState, BSO_ID_REGEX, COLLECTION_ID_REGEX}; -use crate::settings::{Secrets, ServerLimits}; +use crate::settings::Secrets; use crate::web::{ auth::HawkPayload, error::{HawkErrorKind, ValidationErrorKind}, @@ -66,6 +66,14 @@ pub struct UidParam { uid: u64, } +fn urldecode(s: &str) -> Result<String, ApiError> { + let decoded: String = urlencoding::decode(s).map_err(|e| { + debug!("unclean entry: {:?} {:?}", s, e); + ApiErrorKind::Internal(e.to_string()) + })?; + Ok(decoded) +} + #[derive(Clone, Debug, Deserialize, Validate)] pub struct BatchBsoBody { #[validate(custom = "validate_body_bso_id")] @@ -227,6 +235,34 @@ impl FromRequest for BsoBodies { )); } }; + + // ### debug_client + if let Some(uids) = &state.limits.debug_client { + for uid in uids.split(',') { + debug!("### checking uaid: {:?}", &uid); + match u64::from_str(uid.trim()) { + Ok(v) => { + if v == HawkIdentifier::uid_from_path(req.uri(), None).unwrap_or(0) { + debug!("### returning quota exceeded."); + error!("Returning over quota for {:?}", v); + return Box::pin(future::err( + ValidationErrorKind::FromDetails( + "over-quota".to_owned(), + RequestErrorLocation::Unknown, + Some("over-quota".to_owned()), + None, + ) + .into(), + )); + } + } + Err(_) => { + debug!("{:?} is not a u64", uid); + } + }; + } + } + let max_payload_size = state.limits.max_record_payload_bytes as usize; let max_post_bytes = state.limits.max_post_bytes as usize; @@ -403,9 +439,9 @@ impl FromRequest for BsoBody { error!("Returning over quota for {:?}", v); return Box::pin(future::err( ValidationErrorKind::FromDetails( - "size-limit-exceeded".to_owned(), + "over-quota".to_owned(), RequestErrorLocation::Unknown, - Some("size-limit-exceeded".to_owned()), + Some("over-quota".to_owned()), None, ) .into(), @@ -475,13 +511,12 @@ pub struct BsoParam { } impl BsoParam { - pub fn bsoparam_from_path(uri: &Uri, tags: &Tags) -> Result<Self, Error> { + fn bsoparam_from_path(uri: &Uri, tags: &Tags) -> Result<Self, Error> { // TODO: replace with proper path parser // path: "/1.5/{uid}/storage/{collection}/{bso}" let elements: Vec<&str> = uri.path().split('/').collect(); let elem = elements.get(3); if elem.is_none() || elem != Some(&"storage") || elements.len() != 6 { - warn!("⚠️ Unexpected BSO URI: {:?}", uri.path(); tags); return Err(ValidationErrorKind::FromDetails( "Invalid BSO".to_owned(), RequestErrorLocation::Path, @@ -490,8 +525,17 @@ impl BsoParam { ))?; } if let Some(v) = elements.get(5) { - let sv = String::from_str(v).map_err(|_| { - warn!("⚠️ Invalid BsoParam Error: {:?}", v; tags); + let sv = urldecode(&String::from_str(v).map_err(|e| { + warn!("⚠️ Invalid BsoParam Error: {:?} {:?}", v, e; tags); + ValidationErrorKind::FromDetails( + "Invalid BSO".to_owned(), + RequestErrorLocation::Path, + Some("bso".to_owned()), + Some(tags.clone()), + ) + })?)
+ .map_err(|e| { + warn!("⚠️ Invalid BsoParam Error: {:?} {:?}", v, e; tags); ValidationErrorKind::FromDetails( "Invalid BSO".to_owned(), RequestErrorLocation::Path, Some("bso".to_owned()), Some(tags.clone()), ) @@ -557,7 +601,7 @@ impl CollectionParam { return Ok(None); } if let Some(v) = elements.get(4) { - let sv = String::from_str(v).map_err(|_e| { + let mut sv = String::from_str(v).map_err(|_e| { ValidationErrorKind::FromDetails( "Missing Collection".to_owned(), RequestErrorLocation::Path, Some("collection".to_owned()), Some(tags.clone()), ) })?; @@ -565,6 +609,14 @@ ... + sv = urldecode(&sv).map_err(|_e| { + ValidationErrorKind::FromDetails( + "Invalid Collection".to_owned(), + RequestErrorLocation::Path, + Some("collection".to_owned()), + Some(tags.clone()), + ) + })?; Ok(Some(Self { collection: sv })) } else { Err(ValidationErrorKind::FromDetails( @@ -714,7 +766,7 @@ impl FromRequest for CollectionRequest { "application/json" | "" => ReplyFormat::Json, _ => { return Err(ValidationErrorKind::FromDetails( - "Invalid accept".to_string(), + format!("Invalid Accept header specified: {:?}", accept), RequestErrorLocation::Header, Some("accept".to_string()), Some(tags), @@ -927,56 +979,6 @@ impl FromRequest for BsoPutRequest { } } -#[derive(Debug, Default, Serialize)] -pub struct ConfigRequest { - pub limits: ServerLimits, -} - -impl FromRequest for ConfigRequest { - type Config = (); - type Error = Error; - type Future = LocalBoxFuture<'static, Result<Self, Self::Error>>; - - fn from_request(req: &HttpRequest, _: &mut Payload) -> Self::Future { - let tags = { - let exts = req.extensions(); - match exts.get::<Tags>() { - Some(t) => t.clone(), - None => Tags::from_request_head(req.head()), - } - }; - - let state = match req.app_data::<Data<ServerState>>() { - Some(s) => s, - None => { - error!("⚠️ Could not load the app state"); - return Box::pin(future::err( - ValidationErrorKind::FromDetails( - "Internal error".to_owned(), - RequestErrorLocation::Unknown, - Some("state".to_owned()), - Some(tags), - ) - .into(), - )); - } - }; - - let data = &state.limits; - Box::pin(future::ok(Self { - limits: ServerLimits { - max_post_bytes: data.max_post_bytes, - max_post_records: data.max_post_records, - max_record_payload_bytes: data.max_record_payload_bytes, - max_request_bytes: data.max_request_bytes, - max_total_bytes: data.max_total_bytes, - max_total_records: data.max_total_records, - debug_client: None, - }, - })) - } -} - #[derive(Clone, Debug)] pub struct HeartbeatRequest { pub headers: HeaderMap, @@ -1085,7 +1087,20 @@ impl HawkIdentifier { // path: "/1.5/{uid}" let elements: Vec<&str> = uri.path().split('/').collect(); if let Some(v) = elements.get(2) { - u64::from_str(v).map_err(|e| { + let clean = match urldecode(v) { + Err(e) => { + warn!("⚠️ HawkIdentifier Error invalid UID {:?} {:?}", v, e); + return Err(ValidationErrorKind::FromDetails( + "Invalid UID".to_owned(), + RequestErrorLocation::Path, + Some("uid".to_owned()), + tags, + ) + .into()); + } + Ok(v) => v, + }; + u64::from_str(&clean).map_err(|e| { + warn!("⚠️ HawkIdentifier Error invalid UID {:?} {:?}", v, e); ValidationErrorKind::FromDetails( "Invalid UID".to_owned(), RequestErrorLocation::Path, Some("uid".to_owned()), tags, ) @@ -1812,6 +1827,7 @@ mod tests { ServerState { db_pool: Box::new(MockDbPool::new()), limits: Arc::clone(&SERVER_LIMITS), + limits_json: serde_json::to_string(&**SERVER_LIMITS).unwrap(), secrets: Arc::clone(&SECRETS), port: 8000, metrics: Box::new(metrics::metrics_from_opts(&settings).unwrap()), } @@ -2110,6 +2126,31 @@ mod tests { assert_eq!(&result.collection, "tabs"); } + #[test] + fn test_quoted_bso() { + let payload = HawkPayload::test_default(*USER_ID); + let
altered_bso = format!("\"{{{}}}\"", *USER_ID); + let state = make_state(); + let uri = format!( + "/1.5/{}/storage/tabs/{}", + *USER_ID, + urlencoding::encode(&altered_bso) + ); + let header = create_valid_hawk_header(&payload, &state, "GET", &uri, TEST_HOST, TEST_PORT); + let req = TestRequest::with_uri(&uri) + .data(state) + .header("authorization", header) + .header("accept", "application/json,text/plain:q=0.5") + .method(Method::GET) + .to_http_request(); + req.extensions_mut().insert(make_db()); + let result = block_on(BsoRequest::extract(&req)) + .expect("Could not get result in test_valid_collection_request"); + // make sure the altered bso id matches the unaltered one, without the quotes and curly braces. + assert_eq!(result.user_id.legacy_id, *USER_ID); + assert_eq!(altered_bso.as_str(), result.bso); + } + #[test] fn test_invalid_collection_request() { let hawk_payload = HawkPayload::test_default(*USER_ID); diff --git a/src/web/handlers.rs b/src/web/handlers.rs index 92a4b5b453..6b8194bd3e 100644 --- a/src/web/handlers.rs +++ b/src/web/handlers.rs @@ -1,18 +1,25 @@ //! API Handlers use std::collections::HashMap; -use actix_web::{http::StatusCode, Error, HttpRequest, HttpResponse}; +use actix_web::{http::StatusCode, web::Data, Error, HttpRequest, HttpResponse}; use serde::Serialize; use serde_json::{json, Value}; -use crate::db::transaction::DbTransactionPool; -use crate::db::{params, results::Paginated, util::SyncTimestamp, Db, DbError, DbErrorKind}; -use crate::error::{ApiError, ApiErrorKind, ApiResult}; -use crate::web::extractors::{ - BsoPutRequest, BsoRequest, CollectionPostRequest, CollectionRequest, ConfigRequest, - HeartbeatRequest, MetaRequest, ReplyFormat, TestErrorRequest, +use crate::{ + db::{ + params, results::Paginated, transaction::DbTransactionPool, util::SyncTimestamp, Db, + DbError, DbErrorKind, + }, + error::{ApiError, ApiErrorKind, ApiResult}, + server::ServerState, + web::{ + extractors::{ + BsoPutRequest, BsoRequest, CollectionPostRequest, CollectionRequest, HeartbeatRequest, + MetaRequest, ReplyFormat, TestErrorRequest, + }, + X_LAST_MODIFIED, X_WEAVE_NEXT_OFFSET, X_WEAVE_RECORDS, + }, }; -use crate::web::{X_LAST_MODIFIED, X_WEAVE_NEXT_OFFSET, X_WEAVE_RECORDS}; pub const ONE_KB: f64 = 1024.0; @@ -89,11 +96,6 @@ pub async fn delete_all( db_pool .transaction_http(|db| async move { meta.metrics.incr("request.delete_all"); - // transaction_http won't implicitly begin a write transaction - // for DELETE /storage because it lacks a collection. So it's done - // manually here, partly to not further complicate the unit test's - // transactions - db.begin(true).await?; Ok(HttpResponse::Ok().json(db.delete_storage(meta.user_id).await?)) }) .await @@ -443,8 +445,17 @@ pub async fn put_bso( .await } -pub async fn get_configuration(creq: ConfigRequest) -> Result<HttpResponse, Error> { - Ok(HttpResponse::Ok().json(creq.limits)) +pub fn get_configuration(state: Data<ServerState>) -> HttpResponse { + // With no DbConnection (via a `transaction_http` call) needed here, we + // miss out on a couple things it does: + // 1. Ensuring an X-Last-Modified (always 0.00) is returned + // 2.
Handling precondition checks + // The precondition checks don't make sense against the hardcoded + // service limits data + a 0.00 timestamp, so just ensure #1 is handled + HttpResponse::Ok() + .header(X_LAST_MODIFIED, "0.00") + .content_type("application/json") + .body(&state.limits_json) } /** Returns a status message indicating the state of the current server diff --git a/src/web/middleware/sentry.rs b/src/web/middleware/sentry.rs index 4746133613..20c5d741a0 100644 --- a/src/web/middleware/sentry.rs +++ b/src/web/middleware/sentry.rs @@ -100,7 +100,6 @@ where fn call(&mut self, sreq: ServiceRequest) -> Self::Future { let mut tags = Tags::from_request_head(sreq.head()); - let uri = sreq.head().uri.to_string(); sreq.extensions_mut().insert(tags.clone()); Box::pin(self.service.call(sreq).and_then(move |mut sresp| { @@ -119,8 +118,6 @@ where tags.tags.insert(k, v); } }; - // add the uri.path (which can cause influx to puke) - tags.extra.insert("uri.path".to_owned(), uri); match sresp.response().error() { None => { // Middleware errors are eaten by current versions of Actix. Errors are now added @@ -149,7 +146,7 @@ where Some(e) => { if let Some(apie) = e.as_error::<ApiError>() { if let Some(state) = sresp.request().app_data::<Data<ServerState>>() { - apie.on_response(state); + apie.on_response(state.as_ref()); }; if !apie.is_reportable() { debug!("Not reporting error to sentry: {:?}", apie); diff --git a/src/web/tags.rs b/src/web/tags.rs index f38401003d..d5923331ad 100644 --- a/src/web/tags.rs +++ b/src/web/tags.rs @@ -57,6 +57,7 @@ impl Tags { // Return an Option<> type because the later consumers (ApiErrors) presume that // tags are optional and wrapped by an Option<> type. let mut tags = HashMap::new(); + let mut extra = HashMap::new(); if let Some(ua) = req_head.headers().get(USER_AGENT) { if let Ok(uas) = ua.to_str() { let (ua_result, metrics_os, metrics_browser) = parse_user_agent(uas); @@ -65,14 +66,14 @@ impl Tags { insert_if_not_empty("ua.name", ua_result.name, &mut tags); insert_if_not_empty("ua.os.ver", &ua_result.os_version.to_owned(), &mut tags); insert_if_not_empty("ua.browser.ver", ua_result.version, &mut tags); + extra.insert("ua".to_owned(), uas.to_string()); } } - // `uri.path` causes too much cardinality for influx. tags.insert("uri.method".to_owned(), req_head.method.to_string()); - Tags { - tags, - extra: HashMap::new(), - } + // `uri.path` causes too much cardinality for influx but keep it in + // extra for sentry + extra.insert("uri.path".to_owned(), req_head.uri.to_string()); + Tags { tags, extra } } pub fn with_tags(tags: HashMap<String, String>) -> Tags { diff --git a/tools/spanner/purge_ttl.py b/tools/spanner/purge_ttl.py index 24a0821bb3..2ee8e1d27a 100644 --- a/tools/spanner/purge_ttl.py +++ b/tools/spanner/purge_ttl.py @@ -5,15 +5,16 @@ # file, You can obtain one at https://mozilla.org/MPL/2.0/.
import argparse -import json +import logging import os import sys -import logging from datetime import datetime -from statsd.defaults.env import statsd +from typing import List, Optional from urllib import parse from google.cloud import spanner +from google.cloud.spanner_v1.database import Database +from statsd.defaults.env import statsd # set up logger logging.basicConfig( @@ -41,18 +42,25 @@ def use_dsn(args): return args -def deleter(database, name, query): +def deleter(database: Database, name: str, query: str, prefix: Optional[str]): with statsd.timer("syncstorage.purge_ttl.{}_duration".format(name)): logging.info("Running: {}".format(query)) start = datetime.now() result = database.execute_partitioned_dml(query) end = datetime.now() logging.info( - "{name}: removed {result} rows, {name}_duration: {time}".format( - name=name, result=result, time=end - start)) - - -def add_conditions(args, query): + "{name}: removed {result} rows, {name}_duration: {time}, prefix: {prefix}".format( + name=name, result=result, time=end - start, prefix=prefix)) + + +def add_conditions(args, query: str, prefix: Optional[str]): + """ + Add SQL conditions to a query. + :param args: The program arguments + :param query: The SQL query + :param prefix: The current prefix, if given + :return: The updated SQL query + """ if args.collection_ids: query += " AND collection_id" if len(args.collection_ids) == 1: @@ -60,40 +68,58 @@ ... else: query += " in ({})".format( ', '.join(map(str, args.collection_ids))) - if args.uid_starts: - query += " AND fxa_uid LIKE \"{}%\"".format(args.uid_starts) + if prefix: + query += ' AND REGEXP_CONTAINS(fxa_uid, r"{}")'.format(prefix) return query -def spanner_purge(args, request=None): - instance = client.instance(args.instance_id) - database = instance.database(args.database_id) - - logging.info("For {}:{}".format(args.instance_id, args.database_id)) - batch_query = ( - 'DELETE FROM batches WHERE ' - 'expiry < TIMESTAMP_TRUNC(CURRENT_TIMESTAMP(), DAY, "UTC")' - ) - bso_query = add_conditions( - args, - 'DELETE FROM bsos WHERE ' - 'expiry < TIMESTAMP_TRUNC(CURRENT_TIMESTAMP(), DAY, "UTC")' - ) +def get_expiry_condition(args): + """ + Get the expiry SQL WHERE condition to use + :param args: The program arguments + :return: A SQL snippet to use in the WHERE clause + """ + if args.expiry_mode == "now": + return 'expiry < CURRENT_TIMESTAMP()' + elif args.expiry_mode == "midnight": + return 'expiry < TIMESTAMP_TRUNC(CURRENT_TIMESTAMP(), DAY, "UTC")' + else: + raise Exception("Invalid expiry mode: {}".format(args.expiry_mode)) - # Delete Batches. Also deletes child batch_bsos rows (INTERLEAVE - # IN PARENT batches ON DELETE CASCADE) - deleter( - database, - name="batches", - query=batch_query - ) - # Delete BSOs - deleter( - database, - name="bso", - query=bso_query - ) +def spanner_purge(args): + instance = client.instance(args.instance_id) + database = instance.database(args.database_id) + expiry_condition = get_expiry_condition(args) + prefixes = args.uid_prefixes if args.uid_prefixes else [None] + + for prefix in prefixes: + logging.info("For {}:{}, prefix = {}".format(args.instance_id, args.database_id, prefix)) + + if args.mode in ["batches", "both"]: + # Delete Batches.
Also deletes child batch_bsos rows (INTERLEAVE + # IN PARENT batches ON DELETE CASCADE) + batch_query = 'DELETE FROM batches WHERE {}'.format(expiry_condition) + deleter( + database, + name="batches", + query=batch_query, + prefix=prefix + ) + + if args.mode in ["bsos", "both"]: + # Delete BSOs + bso_query = add_conditions( + args, + 'DELETE FROM bsos WHERE {}'.format(expiry_condition), + prefix + ) + deleter( + database, + name="bso", + query=bso_query, + prefix=prefix + ) def get_args(): @@ -120,25 +146,54 @@ def get_args(): ) parser.add_argument( "--collection_ids", + type=parse_args_list, default=os.environ.get("COLLECTION_IDS", "[]"), - help="JSON array of collection IDs to purge" + help="Array of collection IDs to purge" + ) + parser.add_argument( + "--uid_prefixes", + type=parse_args_list, + default=os.environ.get("PURGE_UID_PREFIXES", "[]"), + help="Array of regex strings used to limit purges based on UID. " + "Each entry is a separate purge run." + ) + parser.add_argument( + "--mode", + type=str, + choices=["batches", "bsos", "both"], + default=os.environ.get("PURGE_MODE", "both"), + help="Purge TTLs in batches, bsos, or both" ) parser.add_argument( - "--uid_starts", + "--expiry_mode", type=str, - help="Limit to UIDs starting with specified characters" + choices=["now", "midnight"], + default=os.environ.get("PURGE_EXPIRY_MODE", "midnight"), + help="Choose the timestamp used to check if an entry is expired" ) args = parser.parse_args() - collections = json.loads(args.collection_ids) - if not isinstance(collections, list): - collections = [collections] - args.collection_ids = collections + # override using the DSN URL: if args.sync_database_url: args = use_dsn(args) + return args +def parse_args_list(args_list: str) -> List[str]: + """ + Parse a list of items (or a single string) into a list of strings. + Example input: [item1,item2,item3] + :param args_list: The list/string + :return: A list of strings + """ + if args_list[0] != "[" or args_list[-1] != "]": + # Assume it's a single item + return [args_list] + + return args_list[1:-1].split(",") + + if __name__ == "__main__": args = get_args() with statsd.timer("syncstorage.purge_ttl.total_duration"): diff --git a/vendor/mozilla-rust-sdk/googleapis-raw/Cargo.toml b/vendor/mozilla-rust-sdk/googleapis-raw/Cargo.toml index 36151512a4..ccffc229b7 100644 --- a/vendor/mozilla-rust-sdk/googleapis-raw/Cargo.toml +++ b/vendor/mozilla-rust-sdk/googleapis-raw/Cargo.toml @@ -7,7 +7,7 @@ edition = "2018" [dependencies] futures = "0.3.5" grpcio = "0.6.0" -protobuf = "2.16.2" +protobuf = "2.17.0" [dev-dependencies] slog = "2.5" diff --git a/vendor/mozilla-rust-sdk/googleapis-raw/src/empty.rs b/vendor/mozilla-rust-sdk/googleapis-raw/src/empty.rs index b919d9ec86..6722a969cc 100644 --- a/vendor/mozilla-rust-sdk/googleapis-raw/src/empty.rs +++ b/vendor/mozilla-rust-sdk/googleapis-raw/src/empty.rs @@ -1,4 +1,4 @@ -// This file is generated by rust-protobuf 2.16.2. Do not edit +// This file is generated by rust-protobuf 2.17.0. Do not edit // @generated // https://github.com/rust-lang/rust-clippy/issues/702 @@ -21,7 +21,7 @@ /// Generated files are compatible only with the same version /// of protobuf runtime. 
-// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_16_2; +// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_17_0; #[derive(PartialEq,Clone,Default)] pub struct Empty { diff --git a/vendor/mozilla-rust-sdk/googleapis-raw/src/iam/v1/iam_policy.rs b/vendor/mozilla-rust-sdk/googleapis-raw/src/iam/v1/iam_policy.rs index a1303cc17e..b0fd32cf3b 100644 --- a/vendor/mozilla-rust-sdk/googleapis-raw/src/iam/v1/iam_policy.rs +++ b/vendor/mozilla-rust-sdk/googleapis-raw/src/iam/v1/iam_policy.rs @@ -1,4 +1,4 @@ -// This file is generated by rust-protobuf 2.16.2. Do not edit +// This file is generated by rust-protobuf 2.17.0. Do not edit // @generated // https://github.com/rust-lang/rust-clippy/issues/702 @@ -21,7 +21,7 @@ /// Generated files are compatible only with the same version /// of protobuf runtime. -// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_16_2; +// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_17_0; #[derive(PartialEq,Clone,Default)] pub struct SetIamPolicyRequest { diff --git a/vendor/mozilla-rust-sdk/googleapis-raw/src/iam/v1/policy.rs b/vendor/mozilla-rust-sdk/googleapis-raw/src/iam/v1/policy.rs index 827baba5d2..a8b55f9aab 100644 --- a/vendor/mozilla-rust-sdk/googleapis-raw/src/iam/v1/policy.rs +++ b/vendor/mozilla-rust-sdk/googleapis-raw/src/iam/v1/policy.rs @@ -1,4 +1,4 @@ -// This file is generated by rust-protobuf 2.16.2. Do not edit +// This file is generated by rust-protobuf 2.17.0. Do not edit // @generated // https://github.com/rust-lang/rust-clippy/issues/702 @@ -21,7 +21,7 @@ /// Generated files are compatible only with the same version /// of protobuf runtime. -// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_16_2; +// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_17_0; #[derive(PartialEq,Clone,Default)] pub struct Policy { diff --git a/vendor/mozilla-rust-sdk/googleapis-raw/src/lib.rs b/vendor/mozilla-rust-sdk/googleapis-raw/src/lib.rs index 627927eb5d..69e4a3e819 100644 --- a/vendor/mozilla-rust-sdk/googleapis-raw/src/lib.rs +++ b/vendor/mozilla-rust-sdk/googleapis-raw/src/lib.rs @@ -3,7 +3,7 @@ // This appears as a comment in each generated file. Add it once here // to save a bit of time and effort. -const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_16_2; +const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_17_0; pub mod empty; pub(crate) mod iam; diff --git a/vendor/mozilla-rust-sdk/googleapis-raw/src/longrunning/operations.rs b/vendor/mozilla-rust-sdk/googleapis-raw/src/longrunning/operations.rs index 83585aa78f..3d36a6cf3c 100644 --- a/vendor/mozilla-rust-sdk/googleapis-raw/src/longrunning/operations.rs +++ b/vendor/mozilla-rust-sdk/googleapis-raw/src/longrunning/operations.rs @@ -1,4 +1,4 @@ -// This file is generated by rust-protobuf 2.16.2. Do not edit +// This file is generated by rust-protobuf 2.17.0. Do not edit // @generated // https://github.com/rust-lang/rust-clippy/issues/702 @@ -21,7 +21,7 @@ /// Generated files are compatible only with the same version /// of protobuf runtime. 
-// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_16_2; +// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_17_0; #[derive(PartialEq,Clone,Default)] pub struct Operation { diff --git a/vendor/mozilla-rust-sdk/googleapis-raw/src/rpc/code.rs b/vendor/mozilla-rust-sdk/googleapis-raw/src/rpc/code.rs index a68c1b7580..2162890d9f 100644 --- a/vendor/mozilla-rust-sdk/googleapis-raw/src/rpc/code.rs +++ b/vendor/mozilla-rust-sdk/googleapis-raw/src/rpc/code.rs @@ -1,4 +1,4 @@ -// This file is generated by rust-protobuf 2.16.2. Do not edit +// This file is generated by rust-protobuf 2.17.0. Do not edit // @generated // https://github.com/rust-lang/rust-clippy/issues/702 @@ -21,7 +21,7 @@ /// Generated files are compatible only with the same version /// of protobuf runtime. -// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_16_2; +// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_17_0; #[derive(Clone,PartialEq,Eq,Debug,Hash)] pub enum Code { diff --git a/vendor/mozilla-rust-sdk/googleapis-raw/src/rpc/error_details.rs b/vendor/mozilla-rust-sdk/googleapis-raw/src/rpc/error_details.rs index 5510510e6f..6f8094738e 100644 --- a/vendor/mozilla-rust-sdk/googleapis-raw/src/rpc/error_details.rs +++ b/vendor/mozilla-rust-sdk/googleapis-raw/src/rpc/error_details.rs @@ -1,4 +1,4 @@ -// This file is generated by rust-protobuf 2.16.2. Do not edit +// This file is generated by rust-protobuf 2.17.0. Do not edit // @generated // https://github.com/rust-lang/rust-clippy/issues/702 @@ -21,7 +21,7 @@ /// Generated files are compatible only with the same version /// of protobuf runtime. -// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_16_2; +// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_17_0; #[derive(PartialEq,Clone,Default)] pub struct RetryInfo { diff --git a/vendor/mozilla-rust-sdk/googleapis-raw/src/rpc/status.rs b/vendor/mozilla-rust-sdk/googleapis-raw/src/rpc/status.rs index 7e6545a65c..ec37a504c2 100644 --- a/vendor/mozilla-rust-sdk/googleapis-raw/src/rpc/status.rs +++ b/vendor/mozilla-rust-sdk/googleapis-raw/src/rpc/status.rs @@ -1,4 +1,4 @@ -// This file is generated by rust-protobuf 2.16.2. Do not edit +// This file is generated by rust-protobuf 2.17.0. Do not edit // @generated // https://github.com/rust-lang/rust-clippy/issues/702 @@ -21,7 +21,7 @@ /// Generated files are compatible only with the same version /// of protobuf runtime. -// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_16_2; +// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_17_0; #[derive(PartialEq,Clone,Default)] pub struct Status { diff --git a/vendor/mozilla-rust-sdk/googleapis-raw/src/spanner/admin/database/v1/spanner_database_admin.rs b/vendor/mozilla-rust-sdk/googleapis-raw/src/spanner/admin/database/v1/spanner_database_admin.rs index 4e5e661e7c..f67efcbeb8 100644 --- a/vendor/mozilla-rust-sdk/googleapis-raw/src/spanner/admin/database/v1/spanner_database_admin.rs +++ b/vendor/mozilla-rust-sdk/googleapis-raw/src/spanner/admin/database/v1/spanner_database_admin.rs @@ -1,4 +1,4 @@ -// This file is generated by rust-protobuf 2.16.2. Do not edit +// This file is generated by rust-protobuf 2.17.0. Do not edit // @generated // https://github.com/rust-lang/rust-clippy/issues/702 @@ -21,7 +21,7 @@ /// Generated files are compatible only with the same version /// of protobuf runtime. 
-// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_16_2; +// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_17_0; #[derive(PartialEq,Clone,Default)] pub struct Database { diff --git a/vendor/mozilla-rust-sdk/googleapis-raw/src/spanner/admin/instance/v1/spanner_instance_admin.rs b/vendor/mozilla-rust-sdk/googleapis-raw/src/spanner/admin/instance/v1/spanner_instance_admin.rs index 4e28f92181..a8786d3909 100644 --- a/vendor/mozilla-rust-sdk/googleapis-raw/src/spanner/admin/instance/v1/spanner_instance_admin.rs +++ b/vendor/mozilla-rust-sdk/googleapis-raw/src/spanner/admin/instance/v1/spanner_instance_admin.rs @@ -1,4 +1,4 @@ -// This file is generated by rust-protobuf 2.16.2. Do not edit +// This file is generated by rust-protobuf 2.17.0. Do not edit // @generated // https://github.com/rust-lang/rust-clippy/issues/702 @@ -21,7 +21,7 @@ /// Generated files are compatible only with the same version /// of protobuf runtime. -// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_16_2; +// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_17_0; #[derive(PartialEq,Clone,Default)] pub struct InstanceConfig { diff --git a/vendor/mozilla-rust-sdk/googleapis-raw/src/spanner/v1/keys.rs b/vendor/mozilla-rust-sdk/googleapis-raw/src/spanner/v1/keys.rs index 4b3dad4040..7060cb910e 100644 --- a/vendor/mozilla-rust-sdk/googleapis-raw/src/spanner/v1/keys.rs +++ b/vendor/mozilla-rust-sdk/googleapis-raw/src/spanner/v1/keys.rs @@ -1,4 +1,4 @@ -// This file is generated by rust-protobuf 2.16.2. Do not edit +// This file is generated by rust-protobuf 2.17.0. Do not edit // @generated // https://github.com/rust-lang/rust-clippy/issues/702 @@ -21,7 +21,7 @@ /// Generated files are compatible only with the same version /// of protobuf runtime. -// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_16_2; +// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_17_0; #[derive(PartialEq,Clone,Default)] pub struct KeyRange { diff --git a/vendor/mozilla-rust-sdk/googleapis-raw/src/spanner/v1/mutation.rs b/vendor/mozilla-rust-sdk/googleapis-raw/src/spanner/v1/mutation.rs index fd3836ff00..a1c4b6a95c 100644 --- a/vendor/mozilla-rust-sdk/googleapis-raw/src/spanner/v1/mutation.rs +++ b/vendor/mozilla-rust-sdk/googleapis-raw/src/spanner/v1/mutation.rs @@ -1,4 +1,4 @@ -// This file is generated by rust-protobuf 2.16.2. Do not edit +// This file is generated by rust-protobuf 2.17.0. Do not edit // @generated // https://github.com/rust-lang/rust-clippy/issues/702 @@ -21,7 +21,7 @@ /// Generated files are compatible only with the same version /// of protobuf runtime. -// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_16_2; +// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_17_0; #[derive(PartialEq,Clone,Default)] pub struct Mutation { diff --git a/vendor/mozilla-rust-sdk/googleapis-raw/src/spanner/v1/query_plan.rs b/vendor/mozilla-rust-sdk/googleapis-raw/src/spanner/v1/query_plan.rs index 6a550be61f..cc7ad4a38f 100644 --- a/vendor/mozilla-rust-sdk/googleapis-raw/src/spanner/v1/query_plan.rs +++ b/vendor/mozilla-rust-sdk/googleapis-raw/src/spanner/v1/query_plan.rs @@ -1,4 +1,4 @@ -// This file is generated by rust-protobuf 2.16.2. Do not edit +// This file is generated by rust-protobuf 2.17.0. Do not edit // @generated // https://github.com/rust-lang/rust-clippy/issues/702 @@ -21,7 +21,7 @@ /// Generated files are compatible only with the same version /// of protobuf runtime. 
-// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_16_2;
+// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_17_0;
 
 #[derive(PartialEq,Clone,Default)]
 pub struct PlanNode {
diff --git a/vendor/mozilla-rust-sdk/googleapis-raw/src/spanner/v1/result_set.rs b/vendor/mozilla-rust-sdk/googleapis-raw/src/spanner/v1/result_set.rs
index 576bcf9543..00e1351c95 100644
--- a/vendor/mozilla-rust-sdk/googleapis-raw/src/spanner/v1/result_set.rs
+++ b/vendor/mozilla-rust-sdk/googleapis-raw/src/spanner/v1/result_set.rs
@@ -1,4 +1,4 @@
-// This file is generated by rust-protobuf 2.16.2. Do not edit
+// This file is generated by rust-protobuf 2.17.0. Do not edit
 // @generated
 
 // https://github.com/rust-lang/rust-clippy/issues/702
@@ -21,7 +21,7 @@
 
 /// Generated files are compatible only with the same version
 /// of protobuf runtime.
-// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_16_2;
+// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_17_0;
 
 #[derive(PartialEq,Clone,Default)]
 pub struct ResultSet {
diff --git a/vendor/mozilla-rust-sdk/googleapis-raw/src/spanner/v1/spanner.rs b/vendor/mozilla-rust-sdk/googleapis-raw/src/spanner/v1/spanner.rs
index c529de34ca..8e1d849ee5 100644
--- a/vendor/mozilla-rust-sdk/googleapis-raw/src/spanner/v1/spanner.rs
+++ b/vendor/mozilla-rust-sdk/googleapis-raw/src/spanner/v1/spanner.rs
@@ -1,4 +1,4 @@
-// This file is generated by rust-protobuf 2.16.2. Do not edit
+// This file is generated by rust-protobuf 2.17.0. Do not edit
 // @generated
 
 // https://github.com/rust-lang/rust-clippy/issues/702
@@ -21,7 +21,7 @@
 
 /// Generated files are compatible only with the same version
 /// of protobuf runtime.
-// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_16_2;
+// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_17_0;
 
 #[derive(PartialEq,Clone,Default)]
 pub struct CreateSessionRequest {
diff --git a/vendor/mozilla-rust-sdk/googleapis-raw/src/spanner/v1/transaction.rs b/vendor/mozilla-rust-sdk/googleapis-raw/src/spanner/v1/transaction.rs
index 7ea86d8e03..3a31f33e8e 100644
--- a/vendor/mozilla-rust-sdk/googleapis-raw/src/spanner/v1/transaction.rs
+++ b/vendor/mozilla-rust-sdk/googleapis-raw/src/spanner/v1/transaction.rs
@@ -1,4 +1,4 @@
-// This file is generated by rust-protobuf 2.16.2. Do not edit
+// This file is generated by rust-protobuf 2.17.0. Do not edit
 // @generated
 
 // https://github.com/rust-lang/rust-clippy/issues/702
@@ -21,7 +21,7 @@
 
 /// Generated files are compatible only with the same version
 /// of protobuf runtime.
-// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_16_2;
+// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_17_0;
 
 #[derive(PartialEq,Clone,Default)]
 pub struct TransactionOptions {
diff --git a/vendor/mozilla-rust-sdk/googleapis-raw/src/spanner/v1/type_pb.rs b/vendor/mozilla-rust-sdk/googleapis-raw/src/spanner/v1/type_pb.rs
index 9515febd14..be4eb2b86b 100644
--- a/vendor/mozilla-rust-sdk/googleapis-raw/src/spanner/v1/type_pb.rs
+++ b/vendor/mozilla-rust-sdk/googleapis-raw/src/spanner/v1/type_pb.rs
@@ -1,4 +1,4 @@
-// This file is generated by rust-protobuf 2.16.2. Do not edit
+// This file is generated by rust-protobuf 2.17.0. Do not edit
 // @generated
 
 // https://github.com/rust-lang/rust-clippy/issues/702
@@ -21,7 +21,7 @@
 
 /// Generated files are compatible only with the same version
 /// of protobuf runtime.
-// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_16_2;
+// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_17_0;
 
 #[derive(PartialEq,Clone,Default)]
 pub struct Type {
diff --git a/vendor/mozilla-rust-sdk/googleapis/src/spanner.rs b/vendor/mozilla-rust-sdk/googleapis/src/spanner.rs
index fd8f8ce989..a75cfa2d5f 100644
--- a/vendor/mozilla-rust-sdk/googleapis/src/spanner.rs
+++ b/vendor/mozilla-rust-sdk/googleapis/src/spanner.rs
@@ -36,7 +36,7 @@ impl Client {
         let creds = ChannelCredentials::google_default_credentials()?;
 
         // Create a Spanner client.
-        let chan = ChannelBuilder::new(env.clone())
+        let chan = ChannelBuilder::new(env)
            .max_send_message_len(100 << 20)
            .max_receive_message_len(100 << 20)
            .secure_connect(&endpoint, creds);
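
Note on the churn above: everything under vendor/mozilla-rust-sdk/googleapis-raw/ is mechanical, produced by regenerating the vendored bindings with rust-protobuf 2.17.0. The runtime pinning the touched comments refer to works through a version-named unit constant. A minimal sketch of the mechanism; the VERSION_2_17_0 const is a real export of the protobuf crate, but enabling the check (which the generated files ship commented out) is our illustration, not something this diff does:

    // Each rust-protobuf runtime release exports a unit const named after its
    // version. Generated files reference it, so compiling them against a
    // different runtime fails (the named const does not exist there),
    // surfacing generated-code/runtime skew at build time rather than at run time.
    const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_17_0;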
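
The one hand-written change is the final hunk in vendor/mozilla-rust-sdk/googleapis/src/spanner.rs: grpcio's ChannelBuilder::new takes its Arc<Environment> by value, so when env is not used afterwards it can be moved rather than cloned; the dropped .clone() only bumped a refcount that was immediately decremented (the pattern clippy's redundant_clone lint flags). A minimal sketch of the same pattern, assuming the grpcio 0.x API and using an insecure connect for brevity; build_channel is a hypothetical helper, not the project's code:

    use std::sync::Arc;

    use grpcio::{Channel, ChannelBuilder, EnvBuilder};

    // Hypothetical helper illustrating why the clone was redundant.
    fn build_channel(endpoint: &str) -> Channel {
        let env = Arc::new(EnvBuilder::new().build());
        // `env` is moved into the builder; `env.clone()` would only bump the
        // Arc refcount on a handle that is never touched again.
        ChannelBuilder::new(env)
            .max_send_message_len(100 << 20) // 100 MiB cap, as in the diff
            .max_receive_message_len(100 << 20)
            .connect(endpoint)
    }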