Skip to content

Commit

Permalink
feat: run unit tests against the bigtable emulator (#547)
Browse files Browse the repository at this point in the history
also make BigTableError more verbose and use the try! operator in the
run_gauntlet test

Closes: SYNC-4059
  • Loading branch information
pjenvey authored Jan 17, 2024
1 parent 42775f9 commit 21e0d40
Show file tree
Hide file tree
Showing 8 changed files with 96 additions and 84 deletions.
26 changes: 21 additions & 5 deletions .circleci/config.yml
Original file line number Diff line number Diff line change
Expand Up @@ -83,13 +83,21 @@ jobs:
auth:
username: $DOCKER_USER
password: $DOCKER_PASS
environment:
RUST_BACKTRACE: 1
- image: amazon/dynamodb-local:latest
auth:
username: $DOCKER_USER
password: $DOCKER_PASS
command: -jar DynamoDBLocal.jar -sharedDb
- image: google/cloud-sdk:latest
auth:
username: $DOCKER_USER
password: $DOCKER_PASS
command: gcloud beta emulators bigtable start --host-port=localhost:8086
resource_class: xlarge
environment:
BIGTABLE_EMULATOR_HOST: localhost:8086
AWS_LOCAL_DYNAMODB: http://localhost:8000
steps:
- checkout
Expand All @@ -105,7 +113,14 @@ jobs:
name: Set up system
command: |
apt update
apt install libssl-dev -y
apt install libssl-dev apt-transport-https ca-certificates gnupg curl -y
- run:
name: Set up cbt
command: |
echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] https://packages.cloud.google.com/apt cloud-sdk main" | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list
curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | gpg --dearmor -o /usr/share/keyrings/cloud.google.gpg
apt-get update -y
apt install google-cloud-cli-cbt -y
- run:
name: Set up Python
command: |
Expand All @@ -122,22 +137,23 @@ jobs:
export PATH=$PATH:$HOME/.cargo/bin
echo 'export PATH=$PATH:$HOME/.cargo/bin' >> $BASH_ENV
rustc --version
cargo build --features=bigtable
cargo build --features=emulator
- run:
name: Check formatting
command: |
cargo fmt -- --check
cargo clippy --all --all-targets --all-features -- -D warnings --deny=clippy::dbg_macro
- run:
name: Setup Bigtable
command: scripts/setup_bt.sh
- run:
name: Rust tests
environment:
BIGTABLE_EMULATOR_HOST: localhost:8086
# Note: This build can potentially exceed the amount of memory available to the CircleCI instance.
# We've seen that limiting the number of jobs helps reduce the frequency of this. (Note that
# when doing discovery, we found that the docker image `meminfo` and `cpuinfo` often report
# the machine level memory and CPU which are far higher than the memory allocated to the docker
# instance. This may be causing rust to be overly greedy triggering the VM to OOM the process.)
command: cargo test --features=bigtable --jobs=2
command: cargo test --features=emulator --jobs=2
- run:
name: Integration tests (Autopush Legacy)
command: make integration-test-legacy
Expand Down
5 changes: 3 additions & 2 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ SHELL := /bin/sh
CARGO = cargo
TESTS_DIR := tests
TEST_RESULTS_DIR ?= workspace/test-results
PYTEST_ARGS ?=
INTEGRATION_TEST_FILE := $(TESTS_DIR)/integration/test_integration_all_rust.py
LOAD_TEST_DIR := $(TESTS_DIR)/load
POETRY := poetry --directory $(TESTS_DIR)
Expand All @@ -27,7 +28,7 @@ integration-test-legacy:
$(POETRY) install --without dev,load --no-root
$(POETRY) run pytest $(INTEGRATION_TEST_FILE) \
--junit-xml=$(TEST_RESULTS_DIR)/integration_test_legacy_results.xml \
-v
-v $(PYTEST_ARGS)

integration-test:
$(POETRY) -V
Expand All @@ -36,7 +37,7 @@ integration-test:
CONNECTION_SETTINGS_PREFIX=autoconnect__ \
$(POETRY) run pytest $(INTEGRATION_TEST_FILE) \
--junit-xml=$(TEST_RESULTS_DIR)/integration_test_results.xml \
-v
-v $(PYTEST_ARGS)

lint:
$(POETRY) -V
Expand Down
33 changes: 17 additions & 16 deletions autopush-common/src/db/bigtable/bigtable_client/error.rs
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
use std::fmt::{self, Display};

use backtrace::Backtrace;
use thiserror::Error;

Expand Down Expand Up @@ -55,9 +57,9 @@ impl From<i32> for MutateRowStatus {
}
}

impl ToString for MutateRowStatus {
fn to_string(&self) -> String {
match self {
impl Display for MutateRowStatus {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(match self {
MutateRowStatus::OK => "Ok",
MutateRowStatus::Cancelled => "Cancelled",
MutateRowStatus::Unknown => "Unknown",
Expand All @@ -75,41 +77,40 @@ impl ToString for MutateRowStatus {
MutateRowStatus::Unavailable => "Unavailable",
MutateRowStatus::DataLoss => "Data Loss",
MutateRowStatus::Unauthenticated => "Unauthenticated",
}
.to_owned()
})
}
}

#[derive(Debug, Error)]
pub enum BigTableError {
#[error("Invalid Row Response")]
InvalidRowResponse(grpcio::Error),
#[error("Invalid Row Response: {0}")]
InvalidRowResponse(#[source] grpcio::Error),

#[error("Invalid Chunk")]
InvalidChunk(String),

#[error("BigTable read error")]
Read(grpcio::Error),
#[error("BigTable read error: {0}")]
Read(#[source] grpcio::Error),

#[error("BigTable write timestamp error")]
WriteTime(std::time::SystemTimeError),
#[error("BigTable write timestamp error: {0}")]
WriteTime(#[source] std::time::SystemTimeError),

#[error("Bigtable write error")]
Write(grpcio::Error),
#[error("Bigtable write error: {0}")]
Write(#[source] grpcio::Error),

/// Return a GRPC status code and any message.
/// See https://grpc.github.io/grpc/core/md_doc_statuscodes.html
#[error("Bigtable status response")]
#[error("Bigtable status response: {0:?}")]
Status(MutateRowStatus, String),

#[error("BigTable Admin Error")]
#[error("BigTable Admin Error: {0}")]
Admin(String, Option<String>),

#[error("Bigtable Recycle request")]
Recycle,

/// General Pool builder errors.
#[error("Pool Error")]
#[error("Pool Error: {0}")]
Pool(String),
}

Expand Down
56 changes: 24 additions & 32 deletions autopush-common/src/db/bigtable/bigtable_client/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1298,11 +1298,11 @@ mod tests {
//!
use std::sync::Arc;
use std::time::SystemTime;
use uuid;

use super::*;
use cadence::StatsdClient;
use uuid;

use super::*;
use crate::db::DbSettings;

const TEST_USER: &str = "DEADBEEF-0000-0000-0000-0123456789AB";
Expand All @@ -1324,11 +1324,11 @@ mod tests {
let settings = DbSettings {
// this presumes the table was created with
// ```
// cbt -project test -instance test createtable autopush
// scripts/setup_bt.sh
// ```
// with `message`, `router`, and `message_topic` families
dsn: Some(env_dsn),
db_settings: json!({"table_name":"projects/test/instances/test/tables/autopush"})
db_settings: json!({"table_name": "projects/test/instances/test/tables/autopush"})
.to_string(),
};

Expand Down Expand Up @@ -1358,8 +1358,8 @@ mod tests {
/// run a gauntlet of testing. These are a bit linear because they need
/// to run in sequence.
#[actix_rt::test]
async fn run_gauntlet() {
let client = new_client().unwrap();
async fn run_gauntlet() -> DbResult<()> {
let client = new_client()?;

let connected_at = SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
Expand Down Expand Up @@ -1390,29 +1390,25 @@ mod tests {
let _ = client.remove_user(&uaid).await;

// can we add the user?
let user = client.add_user(&test_user).await;
assert!(user.is_ok());
let fetched = client.get_user(&uaid).await.unwrap();
client.add_user(&test_user).await?;
let fetched = client.get_user(&uaid).await?;
assert!(fetched.is_some());
let fetched = fetched.unwrap();
assert_eq!(fetched.router_type, "webpush".to_owned());

// can we add channels?
client.add_channel(&uaid, &chid).await.unwrap();
let channels = client.get_channels(&uaid).await;
assert!(channels.unwrap().contains(&chid));
client.add_channel(&uaid, &chid).await?;
let channels = client.get_channels(&uaid).await?;
assert!(channels.contains(&chid));

// can we add lots of channels?
let mut new_channels: HashSet<Uuid> = HashSet::new();
new_channels.insert(chid);
for _ in 1..10 {
new_channels.insert(uuid::Uuid::new_v4());
}
client
.add_channels(&uaid, new_channels.clone())
.await
.unwrap();
let channels = client.get_channels(&uaid).await.unwrap();
client.add_channels(&uaid, new_channels.clone()).await?;
let channels = client.get_channels(&uaid).await?;
assert_eq!(channels, new_channels);

// now ensure that we can update a user that's after the time we set prior.
Expand All @@ -1426,7 +1422,7 @@ mod tests {
assert!(!result.unwrap());

// Make sure that the `connected_at` wasn't modified
let fetched2 = client.get_user(&fetched.uaid).await.unwrap().unwrap();
let fetched2 = client.get_user(&fetched.uaid).await?.unwrap();
assert_eq!(fetched.connected_at, fetched2.connected_at);

// and make sure we can update a record with a later connected_at time.
Expand All @@ -1439,7 +1435,7 @@ mod tests {
assert!(result.unwrap());
assert_ne!(
test_user.connected_at,
client.get_user(&uaid).await.unwrap().unwrap().connected_at
client.get_user(&uaid).await?.unwrap().connected_at
);

let test_data = "An_encrypted_pile_of_crap".to_owned();
Expand All @@ -1458,10 +1454,7 @@ mod tests {
let res = client.save_message(&uaid, test_notification.clone()).await;
assert!(res.is_ok());

let mut fetched = client
.fetch_timestamp_messages(&uaid, None, 999)
.await
.unwrap();
let mut fetched = client.fetch_timestamp_messages(&uaid, None, 999).await?;
assert_ne!(fetched.messages.len(), 0);
let fm = fetched.messages.pop().unwrap();
assert_eq!(fm.channel_id, test_notification.channel_id);
Expand All @@ -1470,15 +1463,13 @@ mod tests {
// Grab all 1 of the messages that were submmited within the past 10 seconds.
let fetched = client
.fetch_timestamp_messages(&uaid, Some(timestamp - 10), 999)
.await
.unwrap();
.await?;
assert_ne!(fetched.messages.len(), 0);

// Try grabbing a message for 10 seconds from now.
let fetched = client
.fetch_timestamp_messages(&uaid, Some(timestamp + 10), 999)
.await
.unwrap();
.await?;
assert_eq!(fetched.messages.len(), 0);

// can we clean up our toys?
Expand Down Expand Up @@ -1509,14 +1500,14 @@ mod tests {
.await
.is_ok());

let mut fetched = client.fetch_topic_messages(&uaid, 999).await.unwrap();
let mut fetched = client.fetch_topic_messages(&uaid, 999).await?;
assert_ne!(fetched.messages.len(), 0);
let fm = fetched.messages.pop().unwrap();
assert_eq!(fm.channel_id, test_notification.channel_id);
assert_eq!(fm.data, Some(test_data));

// Grab the message that was submmited.
let fetched = client.fetch_topic_messages(&uaid, 999).await.unwrap();
let fetched = client.fetch_topic_messages(&uaid, 999).await?;
assert_ne!(fetched.messages.len(), 0);

// can we clean up our toys?
Expand All @@ -1534,14 +1525,15 @@ mod tests {
// did we remove it?
let msgs = client
.fetch_timestamp_messages(&uaid, None, 999)
.await
.unwrap()
.await?
.messages;
assert!(msgs.is_empty());

assert!(client.remove_user(&uaid).await.is_ok());

assert!(client.get_user(&uaid).await.unwrap().is_none());
assert!(client.get_user(&uaid).await?.is_none());

Ok(())
}

// #[actix_rt::test]
Expand Down
8 changes: 1 addition & 7 deletions docs/bigtable.md
Original file line number Diff line number Diff line change
Expand Up @@ -34,13 +34,7 @@ you can then use the following commands:

```bash
BIGTABLE_EMULATOR_HOST=localhost:8086 \
cbt -project test -instance test createtable autopush && \
cbt -project test -instance test createfamily autopush router && \
cbt -project test -instance test createfamily autopush message && \
cbt -project test -instance test createfamily autopush message_topic && \
cbt -project test -instance test setgcpolicy autopush router maxversions=1 && \
cbt -project test -instance test setgcpolicy autopush message maxversions=1 and maxage=1s && \
cbt -project test -instance test setgcpolicy autopush message_topic maxversions=1 and maxage=1s
scripts/setup_bt.sh
```

This will create a new project named `test`, a new instance named `test` and a new table named `autopush`, along with column family definitions for `message`, `message_topic`, and `router`.
Expand Down
13 changes: 4 additions & 9 deletions docs/src/install.md
Original file line number Diff line number Diff line change
Expand Up @@ -172,16 +172,11 @@ export BIGTABLE_EMULATOR_HOST=localhost:8086
Bigtable is memory only and does not maintain information between restarts. This
means that you will need to create the table, column families, and policies.

You can initialize these using the `cbt` command from the SDK:
You can initialize these via the `setup_bt.sh` script which uses the `cbt`
command from the SDK:

```bash
cbt -project test -instance test createtable autopush && \
cbt -project test -instance test createfamily autopush message && \
cbt -project test -instance test createfamily autopush message_topic && \
cbt -project test -instance test createfamily autopush router && \
cbt -project test -instance test setgcpolicy autopush message maxage=1s && \
cbt -project test -instance test setgcpolicy autopush router maxversions=1 && \
cbt -project test -instance test setgcpolicy autopush message_topic maxversions=1
scripts/setup_bt.sh
```

The `db_dsn` to access this data store with Autoendpoint would be:
Expand Down Expand Up @@ -234,4 +229,4 @@ our applications.
How we connect and use these systems is described in the following documents:

* [Apple Push Notification service (APNs)](apns.md)
* [Google's Fire Cloud Messaging service (FCM)](fcm.md)
* [Google's Fire Cloud Messaging service (FCM)](fcm.md)
22 changes: 22 additions & 0 deletions scripts/setup_bt.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
#!/bin/sh -eu
# Usage: setup_bt.sh [PROJECT] [INSTANCE]
#
# Arguments:
#   [PROJECT]  (default: test)
#   [INSTANCE] (default: test)
#
# Creates the autopush Bigtable table and its column families (via the
# `cbt` CLI, typically against the emulator), then applies the
# garbage-collection policies the tests expect. Table/family names may be
# overridden through the environment variables below.

PROJECT=${1:-"test"}
INSTANCE=${2:-"test"}

TABLE_NAME=${TABLE_NAME:-"autopush"}
MESSAGE_FAMILY=${MESSAGE_FAMILY:-"message"}
MESSAGE_TOPIC_FAMILY=${MESSAGE_TOPIC_FAMILY:-"message_topic"}
ROUTER_FAMILY=${ROUTER_FAMILY:-"router"}

# Run cbt against the configured project/instance. Quoting every expansion
# keeps the script safe for names containing spaces or glob characters.
bt() {
    cbt -project "$PROJECT" -instance "$INSTANCE" "$@"
}

bt createtable "$TABLE_NAME"
bt createfamily "$TABLE_NAME" "$MESSAGE_FAMILY"
bt createfamily "$TABLE_NAME" "$MESSAGE_TOPIC_FAMILY"
bt createfamily "$TABLE_NAME" "$ROUTER_FAMILY"
# Messages expire quickly; topic messages and router records keep only the
# most recent version.
bt setgcpolicy "$TABLE_NAME" "$MESSAGE_FAMILY" maxage=1s
bt setgcpolicy "$TABLE_NAME" "$MESSAGE_TOPIC_FAMILY" maxversions=1
bt setgcpolicy "$TABLE_NAME" "$ROUTER_FAMILY" maxversions=1
Loading

0 comments on commit 21e0d40

Please sign in to comment.