From 8fb68a25e4ccf34625ee3aaf032f5a013ada8292 Mon Sep 17 00:00:00 2001 From: Rain Date: Thu, 28 Sep 2023 16:32:53 -0700 Subject: [PATCH 01/35] [meta] add a workspace-hack package managed by hakari (#4154) Similar to https://github.com/oxidecomputer/crucible/pull/956, this adds a workspace-hack package managed by cargo-hakari. This should massively cut down on incremental build times. `cargo hakari` doesn't yet support writing out `workspace-hack` dependency lines using workspace inheritance (https://github.com/guppy-rs/guppy/issues/7), so skip over it in the lint. Reverting this change can be done by following the instructions at https://docs.rs/cargo-hakari/latest/cargo_hakari/#disabling-and-uninstalling. --- .config/hakari.toml | 29 +++ .github/workflows/hakari.yml | 37 ++++ Cargo.lock | 169 +++++++++++++++ Cargo.toml | 1 + README.adoc | 29 +-- api_identity/Cargo.toml | 1 + bootstore/Cargo.toml | 1 + bootstrap-agent-client/Cargo.toml | 1 + caboose-util/Cargo.toml | 1 + certificates/Cargo.toml | 1 + common/Cargo.toml | 1 + crdb-seed/Cargo.toml | 1 + ddm-admin-client/Cargo.toml | 1 + deploy/Cargo.toml | 1 + dev-tools/omdb/Cargo.toml | 1 + dev-tools/omicron-dev/Cargo.toml | 1 + dev-tools/xtask/src/main.rs | 14 ++ dns-server/Cargo.toml | 1 + dns-service-client/Cargo.toml | 1 + dpd-client/Cargo.toml | 1 + end-to-end-tests/Cargo.toml | 1 + gateway-cli/Cargo.toml | 1 + gateway-client/Cargo.toml | 1 + gateway-test-utils/Cargo.toml | 1 + gateway/Cargo.toml | 1 + illumos-utils/Cargo.toml | 1 + installinator-artifact-client/Cargo.toml | 1 + installinator-artifactd/Cargo.toml | 1 + installinator-common/Cargo.toml | 1 + installinator/Cargo.toml | 1 + internal-dns-cli/Cargo.toml | 1 + internal-dns/Cargo.toml | 1 + ipcc-key-value/Cargo.toml | 1 + key-manager/Cargo.toml | 1 + nexus-client/Cargo.toml | 1 + nexus/Cargo.toml | 1 + nexus/authz-macros/Cargo.toml | 1 + nexus/db-macros/Cargo.toml | 1 + nexus/db-model/Cargo.toml | 1 + nexus/db-queries/Cargo.toml | 1 + 
nexus/defaults/Cargo.toml | 1 + nexus/test-interface/Cargo.toml | 1 + nexus/test-utils-macros/Cargo.toml | 1 + nexus/test-utils/Cargo.toml | 1 + nexus/types/Cargo.toml | 1 + oxide-client/Cargo.toml | 1 + oximeter-client/Cargo.toml | 1 + oximeter/collector/Cargo.toml | 1 + oximeter/db/Cargo.toml | 1 + oximeter/instruments/Cargo.toml | 1 + oximeter/oximeter-macro-impl/Cargo.toml | 1 + oximeter/oximeter/Cargo.toml | 1 + oximeter/producer/Cargo.toml | 1 + package/Cargo.toml | 1 + passwords/Cargo.toml | 1 + rpaths/Cargo.toml | 1 + sled-agent-client/Cargo.toml | 1 + sled-agent/Cargo.toml | 1 + sled-hardware/Cargo.toml | 1 + sp-sim/Cargo.toml | 1 + test-utils/Cargo.toml | 1 + tufaceous-lib/Cargo.toml | 1 + tufaceous/Cargo.toml | 1 + update-engine/Cargo.toml | 1 + wicket-common/Cargo.toml | 1 + wicket-dbg/Cargo.toml | 1 + wicket/Cargo.toml | 1 + wicketd-client/Cargo.toml | 1 + wicketd/Cargo.toml | 1 + workspace-hack/.gitattributes | 4 + workspace-hack/Cargo.toml | 261 +++++++++++++++++++++++ workspace-hack/build.rs | 2 + workspace-hack/src/lib.rs | 1 + 73 files changed, 589 insertions(+), 21 deletions(-) create mode 100644 .config/hakari.toml create mode 100644 .github/workflows/hakari.yml create mode 100644 workspace-hack/.gitattributes create mode 100644 workspace-hack/Cargo.toml create mode 100644 workspace-hack/build.rs create mode 100644 workspace-hack/src/lib.rs diff --git a/.config/hakari.toml b/.config/hakari.toml new file mode 100644 index 0000000000..9562f92300 --- /dev/null +++ b/.config/hakari.toml @@ -0,0 +1,29 @@ +# This file contains settings for `cargo hakari`. +# See https://docs.rs/cargo-hakari/latest/cargo_hakari/config for a full list of options. + +hakari-package = "omicron-workspace-hack" + +# Format for `workspace-hack = ...` lines in other Cargo.tomls. Requires cargo-hakari 0.9.8 or above. +dep-format-version = "4" + +# Setting workspace.resolver = "2" in the root Cargo.toml is HIGHLY recommended. 
+# Hakari works much better with the new feature resolver. +# For more about the new feature resolver, see: +# https://blog.rust-lang.org/2021/03/25/Rust-1.51.0.html#cargos-new-feature-resolver +resolver = "2" + +# Add triples corresponding to platforms commonly used by developers here. +# https://doc.rust-lang.org/rustc/platform-support.html +platforms = [ + "x86_64-unknown-linux-gnu", + "x86_64-apple-darwin", + "aarch64-apple-darwin", + "x86_64-unknown-illumos", + # "x86_64-pc-windows-msvc", +] + +[traversal-excludes] +workspace-members = ["xtask"] + +# Write out exact versions rather than a semver range. (Defaults to false.) +# exact-versions = true diff --git a/.github/workflows/hakari.yml b/.github/workflows/hakari.yml new file mode 100644 index 0000000000..d79196d318 --- /dev/null +++ b/.github/workflows/hakari.yml @@ -0,0 +1,37 @@ +# This workflow file serves as an example for cargo-hakari CI integration. + +on: + push: + branches: + - main + pull_request: + branches: + - main + +name: cargo hakari + +jobs: + workspace-hack-check: + name: Check workspace-hack + runs-on: ubuntu-latest + env: + RUSTFLAGS: -D warnings + steps: + - uses: actions/checkout@v4 + - uses: actions-rs/toolchain@v1 + with: + toolchain: stable + - name: Install cargo-hakari + uses: taiki-e/install-action@v2 + with: + tool: cargo-hakari + - name: Check workspace-hack Cargo.toml is up-to-date + uses: actions-rs/cargo@v1 + with: + command: hakari + args: generate --diff + - name: Check all crates depend on workspace-hack + uses: actions-rs/cargo@v1 + with: + command: hakari + args: manage-deps --dry-run diff --git a/Cargo.lock b/Cargo.lock index e5130b6b33..5f073db250 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -184,6 +184,7 @@ dependencies = [ name = "api_identity" version = "0.1.0" dependencies = [ + "omicron-workspace-hack", "proc-macro2", "quote", "syn 2.0.32", @@ -370,6 +371,7 @@ name = "authz-macros" version = "0.1.0" dependencies = [ "heck 0.4.1", + "omicron-workspace-hack", 
"proc-macro2", "quote", "serde", @@ -658,6 +660,7 @@ dependencies = [ "omicron-common 0.1.0", "omicron-rpaths", "omicron-test-utils", + "omicron-workspace-hack", "pq-sys", "proptest", "rand 0.8.5", @@ -684,6 +687,7 @@ dependencies = [ "chrono", "ipnetwork", "omicron-common 0.1.0", + "omicron-workspace-hack", "progenitor", "regress", "reqwest", @@ -789,6 +793,7 @@ version = "0.1.0" dependencies = [ "anyhow", "hubtools", + "omicron-workspace-hack", ] [[package]] @@ -1245,6 +1250,7 @@ dependencies = [ "dropshot", "hex", "omicron-test-utils", + "omicron-workspace-hack", "ring", "slog", "tokio", @@ -1726,6 +1732,7 @@ name = "db-macros" version = "0.1.0" dependencies = [ "heck 0.4.1", + "omicron-workspace-hack", "proc-macro2", "quote", "rustfmt-wrapper", @@ -1741,6 +1748,7 @@ dependencies = [ "anyhow", "either", "omicron-common 0.1.0", + "omicron-workspace-hack", "omicron-zone-package", "progenitor", "progenitor-client", @@ -2009,6 +2017,7 @@ dependencies = [ "expectorate", "http", "omicron-test-utils", + "omicron-workspace-hack", "openapi-lint", "openapiv3", "pretty-hex 0.3.0", @@ -2039,6 +2048,7 @@ version = "0.1.0" dependencies = [ "chrono", "http", + "omicron-workspace-hack", "progenitor", "reqwest", "schemars", @@ -2095,6 +2105,7 @@ dependencies = [ "futures", "http", "ipnetwork", + "omicron-workspace-hack", "omicron-zone-package", "progenitor", "progenitor-client", @@ -2280,6 +2291,7 @@ dependencies = [ "http", "omicron-sled-agent", "omicron-test-utils", + "omicron-workspace-hack", "oxide-client", "rand 0.8.5", "reqwest", @@ -2720,6 +2732,7 @@ dependencies = [ "hex", "libc", "omicron-common 0.1.0", + "omicron-workspace-hack", "reqwest", "serde", "serde_json", @@ -2738,6 +2751,7 @@ version = "0.1.0" dependencies = [ "base64 0.21.4", "chrono", + "omicron-workspace-hack", "progenitor", "rand 0.8.5", "reqwest", @@ -2802,6 +2816,7 @@ dependencies = [ "gateway-messages", "omicron-gateway", "omicron-test-utils", + "omicron-workspace-hack", "slog", "sp-sim", "tokio", @@ 
-3379,6 +3394,7 @@ dependencies = [ "macaddr", "mockall", "omicron-common 0.1.0", + "omicron-workspace-hack", "opte-ioctl", "oxide-vpc", "regress", @@ -3488,6 +3504,7 @@ dependencies = [ "libc", "omicron-common 0.1.0", "omicron-test-utils", + "omicron-workspace-hack", "once_cell", "partial-io", "progenitor-client", @@ -3517,6 +3534,7 @@ name = "installinator-artifact-client" version = "0.1.0" dependencies = [ "installinator-common", + "omicron-workspace-hack", "progenitor", "regress", "reqwest", @@ -3542,6 +3560,7 @@ dependencies = [ "installinator-common", "omicron-common 0.1.0", "omicron-test-utils", + "omicron-workspace-hack", "openapi-lint", "openapiv3", "schemars", @@ -3561,6 +3580,7 @@ dependencies = [ "camino", "illumos-utils", "omicron-common 0.1.0", + "omicron-workspace-hack", "schemars", "serde", "serde_json", @@ -3593,6 +3613,7 @@ dependencies = [ "hyper", "omicron-common 0.1.0", "omicron-test-utils", + "omicron-workspace-hack", "progenitor", "reqwest", "serde", @@ -3635,6 +3656,7 @@ dependencies = [ "dropshot", "internal-dns 0.1.0", "omicron-common 0.1.0", + "omicron-workspace-hack", "slog", "tokio", "trust-dns-resolver", @@ -3658,6 +3680,7 @@ dependencies = [ "ciborium", "libc", "omicron-common 0.1.0", + "omicron-workspace-hack", "proptest", "serde", "test-strategy", @@ -3762,6 +3785,7 @@ dependencies = [ "async-trait", "hkdf", "omicron-common 0.1.0", + "omicron-workspace-hack", "secrecy", "sha3", "slog", @@ -4252,6 +4276,7 @@ dependencies = [ "ipnetwork", "omicron-common 0.1.0", "omicron-passwords 0.1.0", + "omicron-workspace-hack", "progenitor", "regress", "reqwest", @@ -4301,6 +4326,7 @@ dependencies = [ "omicron-common 0.1.0", "omicron-passwords 0.1.0", "omicron-rpaths", + "omicron-workspace-hack", "parse-display", "pq-sys", "rand 0.8.5", @@ -4356,6 +4382,7 @@ dependencies = [ "omicron-rpaths", "omicron-sled-agent", "omicron-test-utils", + "omicron-workspace-hack", "once_cell", "openapiv3", "openssl", @@ -4401,6 +4428,7 @@ dependencies = [ 
"ipnetwork", "lazy_static", "omicron-common 0.1.0", + "omicron-workspace-hack", "rand 0.8.5", "serde_json", ] @@ -4414,6 +4442,7 @@ dependencies = [ "internal-dns 0.1.0", "nexus-types", "omicron-common 0.1.0", + "omicron-workspace-hack", "slog", "uuid", ] @@ -4442,6 +4471,7 @@ dependencies = [ "omicron-passwords 0.1.0", "omicron-sled-agent", "omicron-test-utils", + "omicron-workspace-hack", "oximeter 0.1.0", "oximeter-client", "oximeter-collector", @@ -4461,6 +4491,7 @@ dependencies = [ name = "nexus-test-utils-macros" version = "0.1.0" dependencies = [ + "omicron-workspace-hack", "proc-macro2", "quote", "syn 2.0.32", @@ -4479,6 +4510,7 @@ dependencies = [ "newtype_derive", "omicron-common 0.1.0", "omicron-passwords 0.1.0", + "omicron-workspace-hack", "openssl", "openssl-probe", "openssl-sys", @@ -4785,6 +4817,7 @@ dependencies = [ "foreign-types 0.3.2", "omicron-common 0.1.0", "omicron-test-utils", + "omicron-workspace-hack", "openssl", "openssl-sys", "rcgen", @@ -4812,6 +4845,7 @@ dependencies = [ "lazy_static", "libc", "macaddr", + "omicron-workspace-hack", "parse-display", "progenitor", "proptest", @@ -4885,6 +4919,7 @@ dependencies = [ "clap 4.4.3", "crossbeam", "omicron-package", + "omicron-workspace-hack", "serde", "serde_derive", "thiserror", @@ -4910,6 +4945,7 @@ dependencies = [ "omicron-rpaths", "omicron-sled-agent", "omicron-test-utils", + "omicron-workspace-hack", "openssl", "oxide-client", "pq-sys", @@ -4943,6 +4979,7 @@ dependencies = [ "ipcc-key-value", "omicron-common 0.1.0", "omicron-test-utils", + "omicron-workspace-hack", "once_cell", "openapi-lint", "openapiv3", @@ -5020,6 +5057,7 @@ dependencies = [ "omicron-rpaths", "omicron-sled-agent", "omicron-test-utils", + "omicron-workspace-hack", "once_cell", "openapi-lint", "openapiv3", @@ -5099,6 +5137,7 @@ dependencies = [ "omicron-nexus", "omicron-rpaths", "omicron-test-utils", + "omicron-workspace-hack", "pq-sys", "regex", "serde", @@ -5125,6 +5164,7 @@ dependencies = [ "illumos-utils", 
"indicatif", "omicron-common 0.1.0", + "omicron-workspace-hack", "omicron-zone-package", "petgraph", "rayon", @@ -5154,6 +5194,7 @@ version = "0.1.0" dependencies = [ "argon2", "criterion", + "omicron-workspace-hack", "rand 0.8.5", "rust-argon2", "schemars", @@ -5178,6 +5219,9 @@ dependencies = [ [[package]] name = "omicron-rpaths" version = "0.1.0" +dependencies = [ + "omicron-workspace-hack", +] [[package]] name = "omicron-sled-agent" @@ -5223,6 +5267,7 @@ dependencies = [ "nexus-client 0.1.0", "omicron-common 0.1.0", "omicron-test-utils", + "omicron-workspace-hack", "once_cell", "openapi-lint", "openapiv3", @@ -5278,6 +5323,7 @@ dependencies = [ "http", "libc", "omicron-common 0.1.0", + "omicron-workspace-hack", "pem", "rcgen", "regex", @@ -5292,6 +5338,110 @@ dependencies = [ "usdt", ] +[[package]] +name = "omicron-workspace-hack" +version = "0.1.0" +dependencies = [ + "anyhow", + "bit-set", + "bit-vec", + "bitflags 1.3.2", + "bitflags 2.4.0", + "bitvec", + "bstr 0.2.17", + "bstr 1.6.0", + "bytes", + "cc", + "chrono", + "cipher", + "clap 4.4.3", + "clap_builder", + "console", + "const-oid", + "crossbeam-epoch", + "crossbeam-utils", + "crypto-common", + "diesel", + "digest", + "either", + "flate2", + "futures", + "futures-channel", + "futures-core", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", + "gateway-messages", + "generic-array", + "getrandom 0.2.10", + "hashbrown 0.13.2", + "hashbrown 0.14.0", + "hex", + "hyper", + "hyper-rustls", + "indexmap 2.0.0", + "inout", + "ipnetwork", + "itertools 0.10.5", + "lalrpop-util", + "lazy_static", + "libc", + "log", + "managed", + "memchr", + "mio", + "num-bigint", + "num-integer", + "num-iter", + "num-traits", + "once_cell", + "openapiv3", + "petgraph", + "postgres-types", + "ppv-lite86", + "predicates 3.0.3", + "rand 0.8.5", + "rand_chacha 0.3.1", + "regex", + "regex-automata 0.3.8", + "regex-syntax 0.7.5", + "reqwest", + "ring", + "rustix 0.38.9", + "schemars", + "semver 1.0.18", + "serde", + 
"sha2", + "signature 2.1.0", + "similar", + "slog", + "spin 0.9.8", + "string_cache", + "subtle", + "syn 1.0.109", + "syn 2.0.32", + "textwrap 0.16.0", + "time", + "time-macros", + "tokio", + "tokio-postgres", + "tokio-stream", + "toml 0.7.8", + "toml_datetime", + "toml_edit 0.19.15", + "tracing", + "trust-dns-proto", + "unicode-bidi", + "unicode-normalization", + "unicode-xid", + "usdt", + "uuid", + "yasna", + "zeroize", + "zip", +] + [[package]] name = "omicron-zone-package" version = "0.8.3" @@ -5498,6 +5648,7 @@ dependencies = [ "futures", "http", "hyper", + "omicron-workspace-hack", "progenitor", "rand 0.8.5", "regress", @@ -5532,6 +5683,7 @@ dependencies = [ "chrono", "num", "omicron-common 0.1.0", + "omicron-workspace-hack", "oximeter-macro-impl 0.1.0", "rstest", "schemars", @@ -5563,6 +5715,7 @@ version = "0.1.0" dependencies = [ "chrono", "omicron-common 0.1.0", + "omicron-workspace-hack", "progenitor", "reqwest", "serde", @@ -5582,6 +5735,7 @@ dependencies = [ "nexus-client 0.1.0", "omicron-common 0.1.0", "omicron-test-utils", + "omicron-workspace-hack", "openapi-lint", "openapiv3", "oximeter 0.1.0", @@ -5610,6 +5764,7 @@ dependencies = [ "dropshot", "itertools 0.11.0", "omicron-test-utils", + "omicron-workspace-hack", "oximeter 0.1.0", "regex", "reqwest", @@ -5634,6 +5789,7 @@ dependencies = [ "dropshot", "futures", "http", + "omicron-workspace-hack", "oximeter 0.1.0", "tokio", "uuid", @@ -5643,6 +5799,7 @@ dependencies = [ name = "oximeter-macro-impl" version = "0.1.0" dependencies = [ + "omicron-workspace-hack", "proc-macro2", "quote", "syn 2.0.32", @@ -5666,6 +5823,7 @@ dependencies = [ "dropshot", "nexus-client 0.1.0", "omicron-common 0.1.0", + "omicron-workspace-hack", "oximeter 0.1.0", "reqwest", "schemars", @@ -7920,6 +8078,7 @@ dependencies = [ "chrono", "ipnetwork", "omicron-common 0.1.0", + "omicron-workspace-hack", "progenitor", "regress", "reqwest", @@ -7945,6 +8104,7 @@ dependencies = [ "nexus-client 0.1.0", "omicron-common 0.1.0", 
"omicron-test-utils", + "omicron-workspace-hack", "rand 0.8.5", "schemars", "serde", @@ -8171,6 +8331,7 @@ dependencies = [ "hex", "omicron-common 0.1.0", "omicron-gateway", + "omicron-workspace-hack", "serde", "slog", "slog-dtrace", @@ -9168,6 +9329,7 @@ dependencies = [ "humantime", "omicron-common 0.1.0", "omicron-test-utils", + "omicron-workspace-hack", "predicates 3.0.3", "slog", "slog-async", @@ -9196,6 +9358,7 @@ dependencies = [ "itertools 0.11.0", "omicron-common 0.1.0", "omicron-test-utils", + "omicron-workspace-hack", "rand 0.8.5", "ring", "serde", @@ -9422,6 +9585,7 @@ dependencies = [ "indicatif", "linear-map", "omicron-test-utils", + "omicron-workspace-hack", "owo-colors", "petgraph", "schemars", @@ -9799,6 +9963,7 @@ dependencies = [ "itertools 0.11.0", "omicron-common 0.1.0", "omicron-passwords 0.1.0", + "omicron-workspace-hack", "once_cell", "owo-colors", "proptest", @@ -9834,6 +9999,7 @@ dependencies = [ "anyhow", "gateway-client", "omicron-common 0.1.0", + "omicron-workspace-hack", "schemars", "serde", "serde_json", @@ -9851,6 +10017,7 @@ dependencies = [ "ciborium", "clap 4.4.3", "crossterm 0.27.0", + "omicron-workspace-hack", "ratatui", "reedline", "serde", @@ -9900,6 +10067,7 @@ dependencies = [ "omicron-common 0.1.0", "omicron-passwords 0.1.0", "omicron-test-utils", + "omicron-workspace-hack", "openapi-lint", "openapiv3", "rand 0.8.5", @@ -9936,6 +10104,7 @@ dependencies = [ "chrono", "installinator-common", "ipnetwork", + "omicron-workspace-hack", "progenitor", "regress", "reqwest", diff --git a/Cargo.toml b/Cargo.toml index 9498157b28..d660397d9e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -64,6 +64,7 @@ members = [ "wicket", "wicketd-client", "wicketd", + "workspace-hack", ] default-members = [ diff --git a/README.adoc b/README.adoc index 988c1276fd..93d1fa4fb8 100644 --- a/README.adoc +++ b/README.adoc @@ -152,30 +152,17 @@ Many of these components themselves are made up of other packages (e.g., `nexus- Use Cargo's `-p PACKAGE` to 
check/build/test only the package you're working on. Since people are usually only working on one or two components at a time, you can usually iterate faster this way. -=== Why is Cargo rebuilding stuff all the time? +=== Workspace management -People are often surprised to find Cargo rebuilding stuff that it seems like it's just built, even when the relevant source files haven't changed. +Omicron uses `cargo-hakari` to ensure that all workspace dependencies enable the same set of features. This dramatically improves compilation time when switching between different subsets of packages (e.g. `-p wicket` or `-p nexus-db-model`), because the sets of enabled features remain consistent. -* Say you're iterating on code, running `cargo build -p nexus-db-model` to build just that package. Great, it works. Let's run tests: `cargo nextest run -p nexus-db-model`. Now it's rebuilding some _dependency_ of `nexus-db-model` again?! -* Say you've just run `cargo nextest run -p nexus-db-model`. Now you go run `cargo nextest run -p omicron-nexus`, which uses `nexus-db-model`. You see Cargo building `nexus-db-model` again?! +`cargo hakari` status is checked in CI; if the CI check fails, then update the configuration locally with -This usually has to do with the way Cargo selects package https://doc.rust-lang.org/cargo/reference/features.html[features]. These are essentially tags that are used at build time to include specific code or dependencies. For example, the https://docs.rs/serde/latest/serde/[serde] crate defines a feature called https://docs.rs/crate/serde/latest/features["derive"] that controls whether the `Serialize`/`Deserialize` derive macros will be included. Let's look at how this affects builds. - -TIP: You can use `cargo tree` to inspect a package's dependencies, including features. This is useful for debugging feature-related build issues. 
- -==== Feature selection differs when building tests - -When you run `cargo build -p nexus-db-model`, Cargo looks at all the packages in the depencency tree of `nexus-db-model` and figures out what features it needs for each one. Let's take the `uuid` package as an example. Cargo takes https://doc.rust-lang.org/cargo/reference/features.html#feature-unification[union of the features required by any of the packages that depend on `uuid` in the whole dependency tree of `nexus-db-model`]. Let's say that's just the "v4" feature. Simple enough. - -When you then run `cargo nextest run -p nexus-db-model`, it does the same thing. Only this time, it's looking at the `dev-dependencies` tree. `nexus-db-model` 's dev-dependencies might include some other package that depends on `uuid` and requires the "v5" feature. Now, Cargo has to rebuild `uuid` -- and anything else that depends on it. - -This is why when using Cargo's check/build/clippy commands, we suggest using `--all-targets`. When you use `cargo build --all-targets`, it builds the tests as well. It's usually not much more time and avoids extra rebuilds when switching back and forth between the default targets and the targets with tests included. - -==== Feature selection differs when building different packages - -People run into a similar problem when switching packages within Omicron. Once you've got `cargo nextest run -p nexus-db-model` working, you may run `cargo nextest run -p omicron-nexus`, which uses `nexus-db-model`. And you may be surprised to see Cargo rebuilding some common dependency like `uuid`. It's the same as above: we're building a different package now. It has a different (larger) dependency tree. That may result in some crate deep in the dependency tree needing some new feature, causing it and all of its dependents to be rebuilt. - -NOTE: https://github.com/rust-lang/cargo/issues/4463[There is interest in changing the way feature selection works in workspaces like Omicron for exactly this reason.] 
It's been suggested to have an option for Cargo to always look at the features required for all packages in the workspace, rather than just the one you've selected. This could eliminate this particular problem. In the meantime, we mitigate this with heavy use of https://doc.rust-lang.org/cargo/reference/workspaces.html#the-dependencies-table[workspace dependencies], which helps make sure that different packages _within_ Omicron depend on the same set of features for a given dependency. +``` +cargo install cargo-hakari --locked # only needed on the first run +cargo hakari generate +cargo hakari manage-deps +``` === Why am I getting compile errors after I thought I'd already built everything? diff --git a/api_identity/Cargo.toml b/api_identity/Cargo.toml index 761e5e3017..9faf2a1878 100644 --- a/api_identity/Cargo.toml +++ b/api_identity/Cargo.toml @@ -14,3 +14,4 @@ proc-macro = true proc-macro2.workspace = true quote.workspace = true syn.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/bootstore/Cargo.toml b/bootstore/Cargo.toml index d916bf80c7..eefe05c8d6 100644 --- a/bootstore/Cargo.toml +++ b/bootstore/Cargo.toml @@ -36,6 +36,7 @@ zeroize.workspace = true # utils`. Unfortunately, it doesn't appear possible to put the `pq-sys` dep # only in `[dev-dependencies]`. 
pq-sys = "*" +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [dev-dependencies] assert_matches.workspace = true diff --git a/bootstrap-agent-client/Cargo.toml b/bootstrap-agent-client/Cargo.toml index f7d3ad2db6..17989a5c5f 100644 --- a/bootstrap-agent-client/Cargo.toml +++ b/bootstrap-agent-client/Cargo.toml @@ -17,3 +17,4 @@ serde.workspace = true sled-hardware.workspace = true slog.workspace = true uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/caboose-util/Cargo.toml b/caboose-util/Cargo.toml index 195bbfd5d7..253d54643d 100644 --- a/caboose-util/Cargo.toml +++ b/caboose-util/Cargo.toml @@ -7,3 +7,4 @@ license = "MPL-2.0" [dependencies] anyhow.workspace = true hubtools.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/certificates/Cargo.toml b/certificates/Cargo.toml index 29c4a8bc2e..d20d257e4c 100644 --- a/certificates/Cargo.toml +++ b/certificates/Cargo.toml @@ -12,6 +12,7 @@ openssl-sys.workspace = true thiserror.workspace = true omicron-common.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [dev-dependencies] omicron-test-utils.workspace = true diff --git a/common/Cargo.toml b/common/Cargo.toml index 492b6ca860..bda88d0d43 100644 --- a/common/Cargo.toml +++ b/common/Cargo.toml @@ -40,6 +40,7 @@ toml.workspace = true uuid.workspace = true parse-display.workspace = true progenitor.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [dev-dependencies] camino-tempfile.workspace = true diff --git a/crdb-seed/Cargo.toml b/crdb-seed/Cargo.toml index 01af7cb1d7..fa71fe7e8a 100644 --- a/crdb-seed/Cargo.toml +++ b/crdb-seed/Cargo.toml @@ -13,3 +13,4 @@ omicron-test-utils.workspace = true ring.workspace = true slog.workspace = true tokio.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git 
a/ddm-admin-client/Cargo.toml b/ddm-admin-client/Cargo.toml index 6e9ee930a0..3814446b3e 100644 --- a/ddm-admin-client/Cargo.toml +++ b/ddm-admin-client/Cargo.toml @@ -15,6 +15,7 @@ tokio.workspace = true omicron-common.workspace = true sled-hardware.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [build-dependencies] anyhow.workspace = true diff --git a/deploy/Cargo.toml b/deploy/Cargo.toml index 1b8e6a92d8..17bacd6354 100644 --- a/deploy/Cargo.toml +++ b/deploy/Cargo.toml @@ -14,6 +14,7 @@ serde.workspace = true serde_derive.workspace = true thiserror.workspace = true toml.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [[bin]] name = "thing-flinger" diff --git a/dev-tools/omdb/Cargo.toml b/dev-tools/omdb/Cargo.toml index c9ebbe35ad..5b2adde1b2 100644 --- a/dev-tools/omdb/Cargo.toml +++ b/dev-tools/omdb/Cargo.toml @@ -32,6 +32,7 @@ tabled.workspace = true textwrap.workspace = true tokio = { workspace = true, features = [ "full" ] } uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } [dev-dependencies] expectorate.workspace = true diff --git a/dev-tools/omicron-dev/Cargo.toml b/dev-tools/omicron-dev/Cargo.toml index 2061489cbb..95da4d42ef 100644 --- a/dev-tools/omicron-dev/Cargo.toml +++ b/dev-tools/omicron-dev/Cargo.toml @@ -28,6 +28,7 @@ signal-hook-tokio.workspace = true tokio = { workspace = true, features = [ "full" ] } tokio-postgres.workspace = true toml.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } [dev-dependencies] camino-tempfile.workspace = true diff --git a/dev-tools/xtask/src/main.rs b/dev-tools/xtask/src/main.rs index a64c87a570..3e52d742f5 100644 --- a/dev-tools/xtask/src/main.rs +++ b/dev-tools/xtask/src/main.rs @@ -78,6 +78,8 @@ fn cmd_clippy() -> Result<()> { Ok(()) } +const WORKSPACE_HACK_PACKAGE_NAME: &str = "omicron-workspace-hack"; + fn cmd_check_workspace_deps() 
-> Result<()> { // Ignore issues with "pq-sys". See the omicron-rpaths package for details. const EXCLUDED: &[&'static str] = &["pq-sys"]; @@ -97,6 +99,12 @@ fn cmd_check_workspace_deps() -> Result<()> { // Iterate the workspace packages and fill out the maps above. for pkg_info in workspace.workspace_packages() { + if pkg_info.name == WORKSPACE_HACK_PACKAGE_NAME { + // Skip over workspace-hack because hakari doesn't yet support + // workspace deps: https://github.com/guppy-rs/guppy/issues/7 + continue; + } + let manifest_path = &pkg_info.manifest_path; let manifest = read_cargo_toml(manifest_path)?; for tree in [ @@ -125,6 +133,12 @@ fn cmd_check_workspace_deps() -> Result<()> { } } + if name == WORKSPACE_HACK_PACKAGE_NAME { + // Skip over workspace-hack because hakari doesn't yet support + // workspace deps: https://github.com/guppy-rs/guppy/issues/7 + continue; + } + non_workspace_dependencies .entry(name.to_owned()) .or_insert_with(Vec::new) diff --git a/dns-server/Cargo.toml b/dns-server/Cargo.toml index 243876a5a2..d7606dcff5 100644 --- a/dns-server/Cargo.toml +++ b/dns-server/Cargo.toml @@ -30,6 +30,7 @@ trust-dns-proto.workspace = true trust-dns-resolver.workspace = true trust-dns-server.workspace = true uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [dev-dependencies] expectorate.workspace = true diff --git a/dns-service-client/Cargo.toml b/dns-service-client/Cargo.toml index 7f5cf63d6a..e351d90da2 100644 --- a/dns-service-client/Cargo.toml +++ b/dns-service-client/Cargo.toml @@ -14,3 +14,4 @@ serde.workspace = true serde_json.workspace = true slog.workspace = true uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/dpd-client/Cargo.toml b/dpd-client/Cargo.toml index fdbdcd07af..26807f7d79 100644 --- a/dpd-client/Cargo.toml +++ b/dpd-client/Cargo.toml @@ -17,6 +17,7 @@ ipnetwork.workspace = true http.workspace = true schemars.workspace = true 
rand.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [build-dependencies] anyhow.workspace = true diff --git a/end-to-end-tests/Cargo.toml b/end-to-end-tests/Cargo.toml index a0e099b756..5ff0f9b377 100644 --- a/end-to-end-tests/Cargo.toml +++ b/end-to-end-tests/Cargo.toml @@ -24,3 +24,4 @@ tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } toml.workspace = true trust-dns-resolver.workspace = true uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/gateway-cli/Cargo.toml b/gateway-cli/Cargo.toml index d5083d1999..0d179750ea 100644 --- a/gateway-cli/Cargo.toml +++ b/gateway-cli/Cargo.toml @@ -24,3 +24,4 @@ uuid.workspace = true gateway-client.workspace = true gateway-messages.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/gateway-client/Cargo.toml b/gateway-client/Cargo.toml index 81d1630a1d..96a1eb221f 100644 --- a/gateway-client/Cargo.toml +++ b/gateway-client/Cargo.toml @@ -15,3 +15,4 @@ serde_json.workspace = true schemars.workspace = true slog.workspace = true uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/gateway-test-utils/Cargo.toml b/gateway-test-utils/Cargo.toml index 8f6e14d68a..9d80e63f05 100644 --- a/gateway-test-utils/Cargo.toml +++ b/gateway-test-utils/Cargo.toml @@ -14,3 +14,4 @@ slog.workspace = true sp-sim.workspace = true tokio.workspace = true uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/gateway/Cargo.toml b/gateway/Cargo.toml index 307baa3f27..f5abce88e9 100644 --- a/gateway/Cargo.toml +++ b/gateway/Cargo.toml @@ -34,6 +34,7 @@ tokio-tungstenite.workspace = true tokio-util.workspace = true toml.workspace = true uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [dev-dependencies] expectorate.workspace = 
true diff --git a/illumos-utils/Cargo.toml b/illumos-utils/Cargo.toml index 3c0c2e7fc9..e292097bc5 100644 --- a/illumos-utils/Cargo.toml +++ b/illumos-utils/Cargo.toml @@ -29,6 +29,7 @@ zone.workspace = true # only enabled via the `testing` feature mockall = { workspace = true, optional = true } +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [target.'cfg(target_os = "illumos")'.dependencies] opte-ioctl.workspace = true diff --git a/installinator-artifact-client/Cargo.toml b/installinator-artifact-client/Cargo.toml index ddbc106ee8..18447b8e83 100644 --- a/installinator-artifact-client/Cargo.toml +++ b/installinator-artifact-client/Cargo.toml @@ -15,3 +15,4 @@ serde_json.workspace = true slog.workspace = true update-engine.workspace = true uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/installinator-artifactd/Cargo.toml b/installinator-artifactd/Cargo.toml index 3ce6112165..9318b725db 100644 --- a/installinator-artifactd/Cargo.toml +++ b/installinator-artifactd/Cargo.toml @@ -20,6 +20,7 @@ uuid.workspace = true installinator-common.workspace = true omicron-common.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [dev-dependencies] expectorate.workspace = true diff --git a/installinator-common/Cargo.toml b/installinator-common/Cargo.toml index ff664f28a3..0f1bf86901 100644 --- a/installinator-common/Cargo.toml +++ b/installinator-common/Cargo.toml @@ -15,3 +15,4 @@ serde_json.workspace = true serde_with.workspace = true thiserror.workspace = true update-engine.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/installinator/Cargo.toml b/installinator/Cargo.toml index c0e7625e6e..3b2f04c38f 100644 --- a/installinator/Cargo.toml +++ b/installinator/Cargo.toml @@ -42,6 +42,7 @@ toml.workspace = true tufaceous-lib.workspace = true update-engine.workspace = true uuid.workspace = true 
+omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [dev-dependencies] omicron-test-utils.workspace = true diff --git a/internal-dns-cli/Cargo.toml b/internal-dns-cli/Cargo.toml index d922544722..fb5780d22a 100644 --- a/internal-dns-cli/Cargo.toml +++ b/internal-dns-cli/Cargo.toml @@ -13,3 +13,4 @@ omicron-common.workspace = true slog.workspace = true tokio.workspace = true trust-dns-resolver.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/internal-dns/Cargo.toml b/internal-dns/Cargo.toml index 5ead1cc8a4..d680ab3ce1 100644 --- a/internal-dns/Cargo.toml +++ b/internal-dns/Cargo.toml @@ -17,6 +17,7 @@ thiserror.workspace = true trust-dns-proto.workspace = true trust-dns-resolver.workspace = true uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [dev-dependencies] assert_matches.workspace = true diff --git a/ipcc-key-value/Cargo.toml b/ipcc-key-value/Cargo.toml index a3f17cea52..128fde9a01 100644 --- a/ipcc-key-value/Cargo.toml +++ b/ipcc-key-value/Cargo.toml @@ -11,6 +11,7 @@ omicron-common.workspace = true serde.workspace = true thiserror.workspace = true uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [dev-dependencies] omicron-common = { workspace = true, features = ["testing"] } diff --git a/key-manager/Cargo.toml b/key-manager/Cargo.toml index 7954a977a3..69ae3b25bd 100644 --- a/key-manager/Cargo.toml +++ b/key-manager/Cargo.toml @@ -14,4 +14,5 @@ slog.workspace = true thiserror.workspace = true tokio.workspace = true zeroize.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/nexus-client/Cargo.toml b/nexus-client/Cargo.toml index 589562c930..d59c013992 100644 --- a/nexus-client/Cargo.toml +++ b/nexus-client/Cargo.toml @@ -18,3 +18,4 @@ serde.workspace = true serde_json.workspace = true slog.workspace = true uuid.workspace = true 
+omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/nexus/Cargo.toml b/nexus/Cargo.toml index 1a09f07f6c..91872e2c32 100644 --- a/nexus/Cargo.toml +++ b/nexus/Cargo.toml @@ -90,6 +90,7 @@ oximeter.workspace = true oximeter-instruments = { workspace = true, features = ["http-instruments"] } oximeter-producer.workspace = true rustls = { workspace = true } +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [dev-dependencies] async-bb8-diesel.workspace = true diff --git a/nexus/authz-macros/Cargo.toml b/nexus/authz-macros/Cargo.toml index 40303b2e34..3d55afa477 100644 --- a/nexus/authz-macros/Cargo.toml +++ b/nexus/authz-macros/Cargo.toml @@ -14,3 +14,4 @@ quote.workspace = true serde.workspace = true serde_tokenstream.workspace = true syn.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } diff --git a/nexus/db-macros/Cargo.toml b/nexus/db-macros/Cargo.toml index 3fb228f26c..ce206bb56e 100644 --- a/nexus/db-macros/Cargo.toml +++ b/nexus/db-macros/Cargo.toml @@ -15,6 +15,7 @@ quote.workspace = true serde.workspace = true serde_tokenstream.workspace = true syn = { workspace = true, features = ["extra-traits"] } +omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } [dev-dependencies] rustfmt-wrapper.workspace = true diff --git a/nexus/db-model/Cargo.toml b/nexus/db-model/Cargo.toml index dc83670725..aedbb9168b 100644 --- a/nexus/db-model/Cargo.toml +++ b/nexus/db-model/Cargo.toml @@ -36,6 +36,7 @@ nexus-defaults.workspace = true nexus-types.workspace = true omicron-passwords.workspace = true sled-agent-client.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } [dev-dependencies] expectorate.workspace = true diff --git a/nexus/db-queries/Cargo.toml b/nexus/db-queries/Cargo.toml index a8256cb60a..af01c1732b 100644 --- a/nexus/db-queries/Cargo.toml +++ b/nexus/db-queries/Cargo.toml @@ -63,6 +63,7 @@ 
nexus-types.workspace = true omicron-common.workspace = true omicron-passwords.workspace = true oximeter.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } [dev-dependencies] assert_matches.workspace = true diff --git a/nexus/defaults/Cargo.toml b/nexus/defaults/Cargo.toml index 910ae2afd6..09a95fa839 100644 --- a/nexus/defaults/Cargo.toml +++ b/nexus/defaults/Cargo.toml @@ -11,3 +11,4 @@ rand.workspace = true serde_json.workspace = true omicron-common.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } diff --git a/nexus/test-interface/Cargo.toml b/nexus/test-interface/Cargo.toml index 44c894411b..e0743e84bc 100644 --- a/nexus/test-interface/Cargo.toml +++ b/nexus/test-interface/Cargo.toml @@ -12,3 +12,4 @@ nexus-types.workspace = true omicron-common.workspace = true slog.workspace = true uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } diff --git a/nexus/test-utils-macros/Cargo.toml b/nexus/test-utils-macros/Cargo.toml index 4f9d3eca32..1bfa25017a 100644 --- a/nexus/test-utils-macros/Cargo.toml +++ b/nexus/test-utils-macros/Cargo.toml @@ -11,3 +11,4 @@ proc-macro = true proc-macro2.workspace = true quote.workspace = true syn = { workspace = true, features = [ "fold", "parsing" ] } +omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } diff --git a/nexus/test-utils/Cargo.toml b/nexus/test-utils/Cargo.toml index bad225516a..a2e7600e93 100644 --- a/nexus/test-utils/Cargo.toml +++ b/nexus/test-utils/Cargo.toml @@ -38,3 +38,4 @@ tempfile.workspace = true trust-dns-proto.workspace = true trust-dns-resolver.workspace = true uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } diff --git a/nexus/types/Cargo.toml b/nexus/types/Cargo.toml index c0f175cf31..f7ffafec52 100644 --- a/nexus/types/Cargo.toml +++ b/nexus/types/Cargo.toml @@ -25,3 +25,4 @@ api_identity.workspace = 
true dns-service-client.workspace = true omicron-common.workspace = true omicron-passwords.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } diff --git a/oxide-client/Cargo.toml b/oxide-client/Cargo.toml index 0602066e6d..df34ab9721 100644 --- a/oxide-client/Cargo.toml +++ b/oxide-client/Cargo.toml @@ -21,3 +21,4 @@ thiserror.workspace = true tokio = { workspace = true, features = [ "net" ] } trust-dns-resolver.workspace = true uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/oximeter-client/Cargo.toml b/oximeter-client/Cargo.toml index e4e68464d7..297dfb6c92 100644 --- a/oximeter-client/Cargo.toml +++ b/oximeter-client/Cargo.toml @@ -12,3 +12,4 @@ reqwest = { workspace = true, features = ["json", "rustls-tls", "stream"] } serde.workspace = true slog.workspace = true uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/oximeter/collector/Cargo.toml b/oximeter/collector/Cargo.toml index 1137651aa0..c8c4030dba 100644 --- a/oximeter/collector/Cargo.toml +++ b/oximeter/collector/Cargo.toml @@ -22,6 +22,7 @@ thiserror.workspace = true tokio.workspace = true toml.workspace = true uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } [dev-dependencies] expectorate.workspace = true diff --git a/oximeter/db/Cargo.toml b/oximeter/db/Cargo.toml index 9ff4ac5c06..77bce09db9 100644 --- a/oximeter/db/Cargo.toml +++ b/oximeter/db/Cargo.toml @@ -25,6 +25,7 @@ thiserror.workspace = true tokio = { workspace = true, features = [ "rt-multi-thread", "macros" ] } usdt.workspace = true uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } [dev-dependencies] itertools.workspace = true diff --git a/oximeter/instruments/Cargo.toml b/oximeter/instruments/Cargo.toml index 98f8f3b5b2..4adff0463a 100644 --- a/oximeter/instruments/Cargo.toml +++ 
b/oximeter/instruments/Cargo.toml @@ -12,6 +12,7 @@ oximeter.workspace = true tokio.workspace = true http = { workspace = true, optional = true } uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } [features] default = ["http-instruments"] diff --git a/oximeter/oximeter-macro-impl/Cargo.toml b/oximeter/oximeter-macro-impl/Cargo.toml index c38d85ed2d..ff116e1c9d 100644 --- a/oximeter/oximeter-macro-impl/Cargo.toml +++ b/oximeter/oximeter-macro-impl/Cargo.toml @@ -12,3 +12,4 @@ proc-macro = true proc-macro2.workspace = true quote.workspace = true syn = { workspace = true, features = [ "full", "extra-traits" ] } +omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } diff --git a/oximeter/oximeter/Cargo.toml b/oximeter/oximeter/Cargo.toml index f0549548a6..b2aa15f85e 100644 --- a/oximeter/oximeter/Cargo.toml +++ b/oximeter/oximeter/Cargo.toml @@ -15,6 +15,7 @@ schemars = { workspace = true, features = [ "uuid1", "bytes", "chrono" ] } serde.workspace = true thiserror.workspace = true uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } [dev-dependencies] approx.workspace = true diff --git a/oximeter/producer/Cargo.toml b/oximeter/producer/Cargo.toml index e511294e52..f171f57e8a 100644 --- a/oximeter/producer/Cargo.toml +++ b/oximeter/producer/Cargo.toml @@ -19,3 +19,4 @@ slog-dtrace.workspace = true tokio.workspace = true thiserror.workspace = true uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } diff --git a/package/Cargo.toml b/package/Cargo.toml index 7c786b77ef..9fc4610020 100644 --- a/package/Cargo.toml +++ b/package/Cargo.toml @@ -34,6 +34,7 @@ tokio = { workspace = true, features = [ "full" ] } toml.workspace = true topological-sort.workspace = true walkdir.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [dev-dependencies] expectorate.workspace = true diff 
--git a/passwords/Cargo.toml b/passwords/Cargo.toml index 1731716101..cbd569ef4c 100644 --- a/passwords/Cargo.toml +++ b/passwords/Cargo.toml @@ -11,6 +11,7 @@ thiserror.workspace = true schemars.workspace = true serde.workspace = true serde_with.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [dev-dependencies] argon2alt = { package = "rust-argon2", version = "1.0" } diff --git a/rpaths/Cargo.toml b/rpaths/Cargo.toml index 829b4ffe28..7671be4968 100644 --- a/rpaths/Cargo.toml +++ b/rpaths/Cargo.toml @@ -5,3 +5,4 @@ edition = "2021" license = "MPL-2.0" [dependencies] +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/sled-agent-client/Cargo.toml b/sled-agent-client/Cargo.toml index f6c58fbf2b..01c1032a51 100644 --- a/sled-agent-client/Cargo.toml +++ b/sled-agent-client/Cargo.toml @@ -15,3 +15,4 @@ reqwest = { workspace = true, features = [ "json", "rustls-tls", "stream" ] } serde.workspace = true slog.workspace = true uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/sled-agent/Cargo.toml b/sled-agent/Cargo.toml index f172136726..b131698395 100644 --- a/sled-agent/Cargo.toml +++ b/sled-agent/Cargo.toml @@ -76,6 +76,7 @@ uuid.workspace = true zeroize.workspace = true zone.workspace = true static_assertions.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [target.'cfg(target_os = "illumos")'.dependencies] opte-ioctl.workspace = true diff --git a/sled-hardware/Cargo.toml b/sled-hardware/Cargo.toml index c6bc09f41e..880f93441c 100644 --- a/sled-hardware/Cargo.toml +++ b/sled-hardware/Cargo.toml @@ -24,6 +24,7 @@ thiserror.workspace = true tofino.workspace = true tokio.workspace = true uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [target.'cfg(target_os = "illumos")'.dependencies] illumos-devinfo = { git = 
"https://github.com/oxidecomputer/illumos-devinfo", branch = "main" } diff --git a/sp-sim/Cargo.toml b/sp-sim/Cargo.toml index 5a73f46d9e..2a1ae19468 100644 --- a/sp-sim/Cargo.toml +++ b/sp-sim/Cargo.toml @@ -21,6 +21,7 @@ sprockets-rot.workspace = true thiserror.workspace = true tokio = { workspace = true, features = [ "full" ] } toml.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [[bin]] name = "sp-sim" diff --git a/test-utils/Cargo.toml b/test-utils/Cargo.toml index 09ff12a806..a0227a4de2 100644 --- a/test-utils/Cargo.toml +++ b/test-utils/Cargo.toml @@ -25,6 +25,7 @@ usdt.workspace = true rcgen.workspace = true regex.workspace = true reqwest.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [dev-dependencies] expectorate.workspace = true diff --git a/tufaceous-lib/Cargo.toml b/tufaceous-lib/Cargo.toml index e4799a69a4..8b5c4fa7ca 100644 --- a/tufaceous-lib/Cargo.toml +++ b/tufaceous-lib/Cargo.toml @@ -32,6 +32,7 @@ toml.workspace = true tough.workspace = true url = "2.4.1" zip.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [dev-dependencies] omicron-test-utils.workspace = true diff --git a/tufaceous/Cargo.toml b/tufaceous/Cargo.toml index 09772daef4..f3e3b815d2 100644 --- a/tufaceous/Cargo.toml +++ b/tufaceous/Cargo.toml @@ -18,6 +18,7 @@ slog-async.workspace = true slog-envlogger.workspace = true slog-term.workspace = true tufaceous-lib.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [dev-dependencies] assert_cmd.workspace = true diff --git a/update-engine/Cargo.toml b/update-engine/Cargo.toml index 4c2841cf0f..25ade83f34 100644 --- a/update-engine/Cargo.toml +++ b/update-engine/Cargo.toml @@ -21,6 +21,7 @@ schemars = { workspace = true, features = ["uuid1"] } slog.workspace = true tokio = { workspace = true, features = ["macros", "sync", "time", "rt-multi-thread"] } uuid.workspace = true 
+omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [dev-dependencies] buf-list.workspace = true diff --git a/wicket-common/Cargo.toml b/wicket-common/Cargo.toml index 735f4a758e..229561cd38 100644 --- a/wicket-common/Cargo.toml +++ b/wicket-common/Cargo.toml @@ -13,3 +13,4 @@ serde.workspace = true serde_json.workspace = true thiserror.workspace = true update-engine.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/wicket-dbg/Cargo.toml b/wicket-dbg/Cargo.toml index 1aa8e10171..bc22424c69 100644 --- a/wicket-dbg/Cargo.toml +++ b/wicket-dbg/Cargo.toml @@ -22,6 +22,7 @@ wicket.workspace = true # used only by wicket-dbg binary reedline = "0.23.0" +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [[bin]] name = "wicket-dbg" diff --git a/wicket/Cargo.toml b/wicket/Cargo.toml index d2004e0a68..58605c8037 100644 --- a/wicket/Cargo.toml +++ b/wicket/Cargo.toml @@ -46,6 +46,7 @@ omicron-passwords.workspace = true update-engine.workspace = true wicket-common.workspace = true wicketd-client.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [dev-dependencies] assert_cmd.workspace = true diff --git a/wicketd-client/Cargo.toml b/wicketd-client/Cargo.toml index 69a7f8fae4..2d959f1f8d 100644 --- a/wicketd-client/Cargo.toml +++ b/wicketd-client/Cargo.toml @@ -18,3 +18,4 @@ slog.workspace = true update-engine.workspace = true uuid.workspace = true wicket-common.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/wicketd/Cargo.toml b/wicketd/Cargo.toml index 8f4faf6c40..a36344b6fb 100644 --- a/wicketd/Cargo.toml +++ b/wicketd/Cargo.toml @@ -53,6 +53,7 @@ sled-hardware.workspace = true tufaceous-lib.workspace = true update-engine.workspace = true wicket-common.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [[bin]] name = "wicketd" diff --git 
a/workspace-hack/.gitattributes b/workspace-hack/.gitattributes new file mode 100644 index 0000000000..3e9dba4b64 --- /dev/null +++ b/workspace-hack/.gitattributes @@ -0,0 +1,4 @@ +# Avoid putting conflict markers in the generated Cargo.toml file, since their presence breaks +# Cargo. +# Also do not check out the file as CRLF on Windows, as that's what hakari needs. +Cargo.toml merge=binary -crlf diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml new file mode 100644 index 0000000000..d3e00b1831 --- /dev/null +++ b/workspace-hack/Cargo.toml @@ -0,0 +1,261 @@ +# This file is generated by `cargo hakari`. +# To regenerate, run: +# cargo hakari generate + +[package] +name = "omicron-workspace-hack" +version = "0.1.0" +description = "workspace-hack package, managed by hakari" +# You can choose to publish this crate: see https://docs.rs/cargo-hakari/latest/cargo_hakari/publishing. +publish = false + +# The parts of the file between the BEGIN HAKARI SECTION and END HAKARI SECTION comments +# are managed by hakari. 
+ +### BEGIN HAKARI SECTION +[dependencies] +anyhow = { version = "1", features = ["backtrace"] } +bit-set = { version = "0.5" } +bit-vec = { version = "0.6" } +bitflags-dff4ba8e3ae991db = { package = "bitflags", version = "1" } +bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2", default-features = false, features = ["serde"] } +bitvec = { version = "1" } +bstr-6f8ce4dd05d13bba = { package = "bstr", version = "0.2" } +bstr-dff4ba8e3ae991db = { package = "bstr", version = "1" } +bytes = { version = "1", features = ["serde"] } +chrono = { version = "0.4", features = ["alloc", "serde"] } +cipher = { version = "0.4", default-features = false, features = ["block-padding", "zeroize"] } +clap = { version = "4", features = ["derive", "env", "wrap_help"] } +clap_builder = { version = "4", default-features = false, features = ["color", "env", "std", "suggestions", "usage", "wrap_help"] } +console = { version = "0.15" } +const-oid = { version = "0.9", default-features = false, features = ["db", "std"] } +crossbeam-epoch = { version = "0.9" } +crossbeam-utils = { version = "0.8" } +crypto-common = { version = "0.1", default-features = false, features = ["getrandom", "std"] } +diesel = { version = "2", features = ["chrono", "i-implement-a-third-party-backend-and-opt-into-breaking-changes", "network-address", "postgres", "r2d2", "serde_json", "uuid"] } +digest = { version = "0.10", features = ["mac", "oid", "std"] } +either = { version = "1" } +flate2 = { version = "1" } +futures = { version = "0.3" } +futures-channel = { version = "0.3", features = ["sink"] } +futures-core = { version = "0.3" } +futures-io = { version = "0.3", default-features = false, features = ["std"] } +futures-sink = { version = "0.3" } +futures-task = { version = "0.3", default-features = false, features = ["std"] } +futures-util = { version = "0.3", features = ["channel", "io", "sink"] } +gateway-messages = { git = "https://github.com/oxidecomputer/management-gateway-service", rev = 
"1e180ae55e56bd17af35cb868ffbd18ce487351d", features = ["std"] } +generic-array = { version = "0.14", default-features = false, features = ["more_lengths", "zeroize"] } +getrandom = { version = "0.2", default-features = false, features = ["js", "rdrand", "std"] } +hashbrown-582f2526e08bb6a0 = { package = "hashbrown", version = "0.14", features = ["raw"] } +hashbrown-594e8ee84c453af0 = { package = "hashbrown", version = "0.13" } +hex = { version = "0.4", features = ["serde"] } +hyper = { version = "0.14", features = ["full"] } +indexmap = { version = "2", features = ["serde"] } +inout = { version = "0.1", default-features = false, features = ["std"] } +ipnetwork = { version = "0.20", features = ["schemars"] } +itertools = { version = "0.10" } +lalrpop-util = { version = "0.19" } +lazy_static = { version = "1", default-features = false, features = ["spin_no_std"] } +libc = { version = "0.2", features = ["extra_traits"] } +log = { version = "0.4", default-features = false, features = ["std"] } +managed = { version = "0.8", default-features = false, features = ["alloc", "map"] } +memchr = { version = "2" } +num-bigint = { version = "0.4", features = ["rand"] } +num-integer = { version = "0.1", features = ["i128"] } +num-iter = { version = "0.1", default-features = false, features = ["i128"] } +num-traits = { version = "0.2", features = ["i128", "libm"] } +openapiv3 = { version = "1", default-features = false, features = ["skip_serializing_defaults"] } +petgraph = { version = "0.6", features = ["serde-1"] } +postgres-types = { version = "0.2", default-features = false, features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } +ppv-lite86 = { version = "0.2", default-features = false, features = ["simd", "std"] } +predicates = { version = "3" } +rand = { version = "0.8", features = ["min_const_gen"] } +rand_chacha = { version = "0.3" } +regex = { version = "1" } +regex-automata = { version = "0.3", default-features = false, features = ["dfa-onepass", 
"dfa-search", "hybrid", "meta", "nfa-backtrack", "perf-inline", "perf-literal", "unicode"] } +regex-syntax = { version = "0.7" } +reqwest = { version = "0.11", features = ["blocking", "json", "rustls-tls", "stream"] } +ring = { version = "0.16", features = ["std"] } +schemars = { version = "0.8", features = ["bytes", "chrono", "uuid1"] } +semver = { version = "1", features = ["serde"] } +serde = { version = "1", features = ["alloc", "derive", "rc"] } +sha2 = { version = "0.10", features = ["oid"] } +signature = { version = "2", default-features = false, features = ["digest", "rand_core", "std"] } +similar = { version = "2", features = ["inline", "unicode"] } +slog = { version = "2", features = ["dynamic-keys", "max_level_trace", "release_max_level_debug", "release_max_level_trace"] } +spin = { version = "0.9" } +string_cache = { version = "0.8" } +subtle = { version = "2" } +syn-dff4ba8e3ae991db = { package = "syn", version = "1", features = ["extra-traits", "fold", "full", "visit"] } +syn-f595c2ba2a3f28df = { package = "syn", version = "2", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } +textwrap = { version = "0.16" } +time = { version = "0.3", features = ["formatting", "local-offset", "macros", "parsing"] } +tokio = { version = "1", features = ["full", "test-util"] } +tokio-postgres = { version = "0.7", features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } +tokio-stream = { version = "0.1", features = ["net"] } +toml = { version = "0.7" } +toml_datetime = { version = "0.6", default-features = false, features = ["serde"] } +toml_edit = { version = "0.19", features = ["serde"] } +tracing = { version = "0.1", features = ["log"] } +trust-dns-proto = { version = "0.22" } +unicode-bidi = { version = "0.3" } +unicode-normalization = { version = "0.1" } +usdt = { version = "0.3" } +uuid = { version = "1", features = ["serde", "v4"] } +yasna = { version = "0.5", features = ["bit-vec", "num-bigint", "std", "time"] } +zeroize = { version 
= "1", features = ["std", "zeroize_derive"] } +zip = { version = "0.6", default-features = false, features = ["bzip2", "deflate"] } + +[build-dependencies] +anyhow = { version = "1", features = ["backtrace"] } +bit-set = { version = "0.5" } +bit-vec = { version = "0.6" } +bitflags-dff4ba8e3ae991db = { package = "bitflags", version = "1" } +bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2", default-features = false, features = ["serde"] } +bitvec = { version = "1" } +bstr-6f8ce4dd05d13bba = { package = "bstr", version = "0.2" } +bstr-dff4ba8e3ae991db = { package = "bstr", version = "1" } +bytes = { version = "1", features = ["serde"] } +cc = { version = "1", default-features = false, features = ["parallel"] } +chrono = { version = "0.4", features = ["alloc", "serde"] } +cipher = { version = "0.4", default-features = false, features = ["block-padding", "zeroize"] } +clap = { version = "4", features = ["derive", "env", "wrap_help"] } +clap_builder = { version = "4", default-features = false, features = ["color", "env", "std", "suggestions", "usage", "wrap_help"] } +console = { version = "0.15" } +const-oid = { version = "0.9", default-features = false, features = ["db", "std"] } +crossbeam-epoch = { version = "0.9" } +crossbeam-utils = { version = "0.8" } +crypto-common = { version = "0.1", default-features = false, features = ["getrandom", "std"] } +diesel = { version = "2", features = ["chrono", "i-implement-a-third-party-backend-and-opt-into-breaking-changes", "network-address", "postgres", "r2d2", "serde_json", "uuid"] } +digest = { version = "0.10", features = ["mac", "oid", "std"] } +either = { version = "1" } +flate2 = { version = "1" } +futures = { version = "0.3" } +futures-channel = { version = "0.3", features = ["sink"] } +futures-core = { version = "0.3" } +futures-io = { version = "0.3", default-features = false, features = ["std"] } +futures-sink = { version = "0.3" } +futures-task = { version = "0.3", default-features = false, features = 
["std"] } +futures-util = { version = "0.3", features = ["channel", "io", "sink"] } +gateway-messages = { git = "https://github.com/oxidecomputer/management-gateway-service", rev = "1e180ae55e56bd17af35cb868ffbd18ce487351d", features = ["std"] } +generic-array = { version = "0.14", default-features = false, features = ["more_lengths", "zeroize"] } +getrandom = { version = "0.2", default-features = false, features = ["js", "rdrand", "std"] } +hashbrown-582f2526e08bb6a0 = { package = "hashbrown", version = "0.14", features = ["raw"] } +hashbrown-594e8ee84c453af0 = { package = "hashbrown", version = "0.13" } +hex = { version = "0.4", features = ["serde"] } +hyper = { version = "0.14", features = ["full"] } +indexmap = { version = "2", features = ["serde"] } +inout = { version = "0.1", default-features = false, features = ["std"] } +ipnetwork = { version = "0.20", features = ["schemars"] } +itertools = { version = "0.10" } +lalrpop-util = { version = "0.19" } +lazy_static = { version = "1", default-features = false, features = ["spin_no_std"] } +libc = { version = "0.2", features = ["extra_traits"] } +log = { version = "0.4", default-features = false, features = ["std"] } +managed = { version = "0.8", default-features = false, features = ["alloc", "map"] } +memchr = { version = "2" } +num-bigint = { version = "0.4", features = ["rand"] } +num-integer = { version = "0.1", features = ["i128"] } +num-iter = { version = "0.1", default-features = false, features = ["i128"] } +num-traits = { version = "0.2", features = ["i128", "libm"] } +openapiv3 = { version = "1", default-features = false, features = ["skip_serializing_defaults"] } +petgraph = { version = "0.6", features = ["serde-1"] } +postgres-types = { version = "0.2", default-features = false, features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } +ppv-lite86 = { version = "0.2", default-features = false, features = ["simd", "std"] } +predicates = { version = "3" } +rand = { version = "0.8", features = 
["min_const_gen"] } +rand_chacha = { version = "0.3" } +regex = { version = "1" } +regex-automata = { version = "0.3", default-features = false, features = ["dfa-onepass", "dfa-search", "hybrid", "meta", "nfa-backtrack", "perf-inline", "perf-literal", "unicode"] } +regex-syntax = { version = "0.7" } +reqwest = { version = "0.11", features = ["blocking", "json", "rustls-tls", "stream"] } +ring = { version = "0.16", features = ["std"] } +schemars = { version = "0.8", features = ["bytes", "chrono", "uuid1"] } +semver = { version = "1", features = ["serde"] } +serde = { version = "1", features = ["alloc", "derive", "rc"] } +sha2 = { version = "0.10", features = ["oid"] } +signature = { version = "2", default-features = false, features = ["digest", "rand_core", "std"] } +similar = { version = "2", features = ["inline", "unicode"] } +slog = { version = "2", features = ["dynamic-keys", "max_level_trace", "release_max_level_debug", "release_max_level_trace"] } +spin = { version = "0.9" } +string_cache = { version = "0.8" } +subtle = { version = "2" } +syn-dff4ba8e3ae991db = { package = "syn", version = "1", features = ["extra-traits", "fold", "full", "visit"] } +syn-f595c2ba2a3f28df = { package = "syn", version = "2", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } +textwrap = { version = "0.16" } +time = { version = "0.3", features = ["formatting", "local-offset", "macros", "parsing"] } +time-macros = { version = "0.2", default-features = false, features = ["formatting", "parsing"] } +tokio = { version = "1", features = ["full", "test-util"] } +tokio-postgres = { version = "0.7", features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } +tokio-stream = { version = "0.1", features = ["net"] } +toml = { version = "0.7" } +toml_datetime = { version = "0.6", default-features = false, features = ["serde"] } +toml_edit = { version = "0.19", features = ["serde"] } +tracing = { version = "0.1", features = ["log"] } +trust-dns-proto = { version = 
"0.22" } +unicode-bidi = { version = "0.3" } +unicode-normalization = { version = "0.1" } +unicode-xid = { version = "0.2" } +usdt = { version = "0.3" } +uuid = { version = "1", features = ["serde", "v4"] } +yasna = { version = "0.5", features = ["bit-vec", "num-bigint", "std", "time"] } +zeroize = { version = "1", features = ["std", "zeroize_derive"] } +zip = { version = "0.6", default-features = false, features = ["bzip2", "deflate"] } + +[target.x86_64-unknown-linux-gnu.dependencies] +bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2", default-features = false, features = ["std"] } +hyper-rustls = { version = "0.24" } +mio = { version = "0.8", features = ["net", "os-ext"] } +once_cell = { version = "1", features = ["unstable"] } +rustix = { version = "0.38", features = ["fs", "termios"] } + +[target.x86_64-unknown-linux-gnu.build-dependencies] +bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2", default-features = false, features = ["std"] } +hyper-rustls = { version = "0.24" } +mio = { version = "0.8", features = ["net", "os-ext"] } +once_cell = { version = "1", features = ["unstable"] } +rustix = { version = "0.38", features = ["fs", "termios"] } + +[target.x86_64-apple-darwin.dependencies] +bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2", default-features = false, features = ["std"] } +hyper-rustls = { version = "0.24" } +mio = { version = "0.8", features = ["net", "os-ext"] } +once_cell = { version = "1", features = ["unstable"] } +rustix = { version = "0.38", features = ["fs", "termios"] } + +[target.x86_64-apple-darwin.build-dependencies] +bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2", default-features = false, features = ["std"] } +hyper-rustls = { version = "0.24" } +mio = { version = "0.8", features = ["net", "os-ext"] } +once_cell = { version = "1", features = ["unstable"] } +rustix = { version = "0.38", features = ["fs", "termios"] } + +[target.aarch64-apple-darwin.dependencies] 
+bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2", default-features = false, features = ["std"] } +hyper-rustls = { version = "0.24" } +mio = { version = "0.8", features = ["net", "os-ext"] } +once_cell = { version = "1", features = ["unstable"] } +rustix = { version = "0.38", features = ["fs", "termios"] } + +[target.aarch64-apple-darwin.build-dependencies] +bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2", default-features = false, features = ["std"] } +hyper-rustls = { version = "0.24" } +mio = { version = "0.8", features = ["net", "os-ext"] } +once_cell = { version = "1", features = ["unstable"] } +rustix = { version = "0.38", features = ["fs", "termios"] } + +[target.x86_64-unknown-illumos.dependencies] +bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2", default-features = false, features = ["std"] } +hyper-rustls = { version = "0.24" } +mio = { version = "0.8", features = ["net", "os-ext"] } +once_cell = { version = "1", features = ["unstable"] } +rustix = { version = "0.38", features = ["fs", "termios"] } + +[target.x86_64-unknown-illumos.build-dependencies] +bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2", default-features = false, features = ["std"] } +hyper-rustls = { version = "0.24" } +mio = { version = "0.8", features = ["net", "os-ext"] } +once_cell = { version = "1", features = ["unstable"] } +rustix = { version = "0.38", features = ["fs", "termios"] } + +### END HAKARI SECTION diff --git a/workspace-hack/build.rs b/workspace-hack/build.rs new file mode 100644 index 0000000000..92518ef04c --- /dev/null +++ b/workspace-hack/build.rs @@ -0,0 +1,2 @@ +// A build script is required for cargo to consider build dependencies. +fn main() {} diff --git a/workspace-hack/src/lib.rs b/workspace-hack/src/lib.rs new file mode 100644 index 0000000000..22489f632b --- /dev/null +++ b/workspace-hack/src/lib.rs @@ -0,0 +1 @@ +// This is a stub lib.rs. 
From b03dd6b740a2b6545a0de847b74847b3e2199800 Mon Sep 17 00:00:00 2001 From: Rain Date: Thu, 28 Sep 2023 20:13:43 -0700 Subject: [PATCH 02/35] [wicketd] allow starting multiple updates with one API call (#4039) Extend `post_start_update` to allow starting updates on several sleds at once. This is not currently used (the TUI always updates one sled at a time), but will be used for command-line driven mupdates. If we're issuing updates on several sleds at once, we can encounter different kinds of errors for each sled. So instead of returning immediately, we collect errors into a vector and then return them all at once. This also required some refactoring in `update_tracker.rs`. To take care of all possible situations: 1. Add a new `SpawnUpdateDriver` trait, which has two methods: one to perform a one-time setup, and one to perform a spawn operation for each SP. 2. Add three implementations of `SpawnUpdateDriver`: `RealUpdateDriver` which is the actual implementation, `FakeUpdateDriver` which is used for tests, and `NeverUpdateDriver` which is an uninhabited type (empty enum, can never be constructed) and is used to perform pre-update checks but not the update itself. Happy to hear suggestions about how to make this better. One path I went down but rejected is using a typestate to indicate that update checks had passed -- then the caller could decide whether to perform the update or not. The problem is that for the typestate to be valid it would have to hold on to the `MutexGuard` (otherwise something could come in between and replace the task that we thought was finished), and that seems a bit fraught as you could accidentally attempt to lock the update data again. A callback-like approach, which was the previous implementation and which has been retained in this PR, does not have that pitfall. I tested this by spinning up sp-sim, mgs, and wicketd, and it worked as expected. Errors (e.g. no inventory present) were caught as expected. 
--- Cargo.lock | 1 + openapi/wicketd.json | 97 ++-- wicket/src/wicketd.rs | 11 +- wicketd/Cargo.toml | 1 + wicketd/src/helpers.rs | 41 ++ wicketd/src/http_entrypoints.rs | 219 ++++++--- wicketd/src/lib.rs | 1 + wicketd/src/update_tracker.rs | 499 ++++++++++++++------- wicketd/tests/integration_tests/updates.rs | 27 +- 9 files changed, 595 insertions(+), 302 deletions(-) create mode 100644 wicketd/src/helpers.rs diff --git a/Cargo.lock b/Cargo.lock index 5f073db250..138080640e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10063,6 +10063,7 @@ dependencies = [ "installinator-artifact-client", "installinator-artifactd", "installinator-common", + "itertools 0.11.0", "omicron-certificates", "omicron-common 0.1.0", "omicron-passwords 0.1.0", diff --git a/openapi/wicketd.json b/openapi/wicketd.json index 40d798da00..d67fc79f7a 100644 --- a/openapi/wicketd.json +++ b/openapi/wicketd.json @@ -598,6 +598,33 @@ } } }, + "/update": { + "post": { + "summary": "An endpoint to start updating one or more sleds, switches and PSCs.", + "operationId": "post_start_update", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StartUpdateParams" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, "/update/{type}/{slot}": { "get": { "summary": "An endpoint to get the status of any update being performed or recently", @@ -641,51 +668,6 @@ "$ref": "#/components/responses/Error" } } - }, - "post": { - "summary": "An endpoint to start updating a sled.", - "operationId": "post_start_update", - "parameters": [ - { - "in": "path", - "name": "slot", - "required": true, - "schema": { - "type": "integer", - "format": "uint32", - "minimum": 0 - } - }, - { - "in": "path", - "name": "type", - "required": true, - "schema": { - "$ref": "#/components/schemas/SpType" - } - } - ], 
- "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/StartUpdateOptions" - } - } - }, - "required": true - }, - "responses": { - "204": { - "description": "resource updated" - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } } } }, @@ -2761,6 +2743,31 @@ "skip_sp_version_check" ] }, + "StartUpdateParams": { + "type": "object", + "properties": { + "options": { + "description": "Options for the update.", + "allOf": [ + { + "$ref": "#/components/schemas/StartUpdateOptions" + } + ] + }, + "targets": { + "description": "The SP identifiers to start the update with. Must be non-empty.", + "type": "array", + "items": { + "$ref": "#/components/schemas/SpIdentifier" + }, + "uniqueItems": true + } + }, + "required": [ + "options", + "targets" + ] + }, "StepComponentSummaryForGenericSpec": { "type": "object", "properties": { diff --git a/wicket/src/wicketd.rs b/wicket/src/wicketd.rs index 160bcb1c6a..2411542429 100644 --- a/wicket/src/wicketd.rs +++ b/wicket/src/wicketd.rs @@ -12,7 +12,7 @@ use tokio::time::{interval, Duration, MissedTickBehavior}; use wicketd_client::types::{ AbortUpdateOptions, ClearUpdateStateOptions, GetInventoryParams, GetInventoryResponse, GetLocationResponse, IgnitionCommand, SpIdentifier, - SpType, StartUpdateOptions, + SpType, StartUpdateOptions, StartUpdateParams, }; use crate::events::EventReportMap; @@ -164,10 +164,11 @@ impl WicketdManager { tokio::spawn(async move { let update_client = create_wicketd_client(&log, addr, WICKETD_TIMEOUT); - let sp: SpIdentifier = component_id.into(); - let response = match update_client - .post_start_update(sp.type_, sp.slot, &options) - .await + let params = StartUpdateParams { + targets: vec![component_id.into()], + options, + }; + let response = match update_client.post_start_update(¶ms).await { Ok(_) => Ok(()), Err(error) => Err(error.to_string()), diff --git a/wicketd/Cargo.toml 
b/wicketd/Cargo.toml index a36344b6fb..6df5e0e4e5 100644 --- a/wicketd/Cargo.toml +++ b/wicketd/Cargo.toml @@ -24,6 +24,7 @@ hubtools.workspace = true http.workspace = true hyper.workspace = true illumos-utils.workspace = true +itertools.workspace = true reqwest.workspace = true schemars.workspace = true serde.workspace = true diff --git a/wicketd/src/helpers.rs b/wicketd/src/helpers.rs new file mode 100644 index 0000000000..a8b47d4f12 --- /dev/null +++ b/wicketd/src/helpers.rs @@ -0,0 +1,41 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Helpers and utility functions for wicketd. + +use std::fmt; + +use gateway_client::types::{SpIdentifier, SpType}; +use itertools::Itertools; + +#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash)] +pub(crate) struct SpIdentifierDisplay(pub(crate) SpIdentifier); + +impl From for SpIdentifierDisplay { + fn from(id: SpIdentifier) -> Self { + SpIdentifierDisplay(id) + } +} + +impl<'a> From<&'a SpIdentifier> for SpIdentifierDisplay { + fn from(id: &'a SpIdentifier) -> Self { + SpIdentifierDisplay(*id) + } +} + +impl fmt::Display for SpIdentifierDisplay { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0.type_ { + SpType::Sled => write!(f, "sled {}", self.0.slot), + SpType::Switch => write!(f, "switch {}", self.0.slot), + SpType::Power => write!(f, "PSC {}", self.0.slot), + } + } +} + +pub(crate) fn sps_to_string>( + sps: impl IntoIterator, +) -> String { + sps.into_iter().map_into().join(", ") +} diff --git a/wicketd/src/http_entrypoints.rs b/wicketd/src/http_entrypoints.rs index 98cac8dc5d..72c3341334 100644 --- a/wicketd/src/http_entrypoints.rs +++ b/wicketd/src/http_entrypoints.rs @@ -4,6 +4,8 @@ //! 
HTTP entrypoint functions for wicketd +use crate::helpers::sps_to_string; +use crate::helpers::SpIdentifierDisplay; use crate::mgs::GetInventoryError; use crate::mgs::GetInventoryResponse; use crate::mgs::MgsHandle; @@ -44,7 +46,6 @@ use std::net::IpAddr; use std::net::Ipv6Addr; use std::time::Duration; use tokio::io::AsyncWriteExt; -use uuid::Uuid; use wicket_common::rack_setup::PutRssUserConfigInsensitive; use wicket_common::update_events::EventReport; @@ -652,6 +653,15 @@ async fn get_artifacts_and_event_reports( Ok(HttpResponseOk(response)) } +#[derive(Clone, Debug, JsonSchema, Deserialize)] +pub(crate) struct StartUpdateParams { + /// The SP identifiers to start the update with. Must be non-empty. + pub(crate) targets: BTreeSet, + + /// Options for the update. + pub(crate) options: StartUpdateOptions, +} + #[derive(Clone, Debug, JsonSchema, Deserialize)] pub(crate) struct StartUpdateOptions { /// If passed in, fails the update with a simulated error. @@ -730,19 +740,24 @@ impl UpdateTestError { log: &slog::Logger, reason: &str, ) -> HttpError { + let message = self.into_error_string(log, reason).await; + HttpError::for_bad_request(None, message) + } + + pub(crate) async fn into_error_string( + self, + log: &slog::Logger, + reason: &str, + ) -> String { match self { - UpdateTestError::Fail => HttpError::for_bad_request( - None, - format!("Simulated failure while {reason}"), - ), + UpdateTestError::Fail => { + format!("Simulated failure while {reason}") + } UpdateTestError::Timeout { secs } => { slog::info!(log, "Simulating timeout while {reason}"); // 15 seconds should be enough to cause a timeout. tokio::time::sleep(Duration::from_secs(secs)).await; - HttpError::for_bad_request( - None, - "XXX request should time out before this is hit".into(), - ) + "XXX request should time out before this is hit".into() } } } @@ -834,21 +849,27 @@ async fn get_location( })) } -/// An endpoint to start updating a sled. 
+/// An endpoint to start updating one or more sleds, switches and PSCs. #[endpoint { method = POST, - path = "/update/{type}/{slot}", + path = "/update", }] async fn post_start_update( rqctx: RequestContext, - target: Path, - opts: TypedBody, + params: TypedBody, ) -> Result { let log = &rqctx.log; let rqctx = rqctx.context(); - let target = target.into_inner(); + let params = params.into_inner(); + + if params.targets.is_empty() { + return Err(HttpError::for_bad_request( + None, + "No update targets specified".into(), + )); + } - // Can we update the target SP? We refuse to update if: + // Can we update the target SPs? We refuse to update if, for any target SP: // // 1. We haven't pulled its state in our inventory (most likely cause: the // cubby is empty; less likely cause: the SP is misbehaving, which will @@ -870,70 +891,136 @@ async fn post_start_update( } }; - // Next, do we have the state of the target SP? - let sp_state = match inventory { + // Error cases. + let mut inventory_absent = BTreeSet::new(); + let mut self_update = None; + let mut maybe_self_update = BTreeSet::new(); + + // Next, do we have the states of the target SP? + let sp_states = match inventory { GetInventoryResponse::Response { inventory, .. } => inventory .sps .into_iter() - .filter_map(|sp| if sp.id == target { sp.state } else { None }) - .next(), - GetInventoryResponse::Unavailable => None, - }; - let Some(sp_state) = sp_state else { - return Err(HttpError::for_bad_request( - None, - "cannot update target sled (no inventory state present)".into(), - )); + .filter_map(|sp| { + if params.targets.contains(&sp.id) { + if let Some(sp_state) = sp.state { + Some((sp.id, sp_state)) + } else { + None + } + } else { + None + } + }) + .collect(), + GetInventoryResponse::Unavailable => BTreeMap::new(), }; - // If we have the state of the SP, are we allowed to update it? We - // refuse to try to update our own sled. 
- match &rqctx.baseboard { - Some(baseboard) => { - if baseboard.identifier() == sp_state.serial_number - && baseboard.model() == sp_state.model - && baseboard.revision() == i64::from(sp_state.revision) - { - return Err(HttpError::for_bad_request( - None, - "cannot update sled where wicketd is running".into(), - )); + for target in ¶ms.targets { + let sp_state = match sp_states.get(target) { + Some(sp_state) => sp_state, + None => { + // The state isn't present, so add to inventory_absent. + inventory_absent.insert(*target); + continue; } - } - None => { - // We don't know our own baseboard, which is a very - // questionable state to be in! For now, we will hard-code - // the possibly locations where we could be running: - // scrimlets can only be in cubbies 14 or 16, so we refuse - // to update either of those. - let target_is_scrimlet = - matches!((target.type_, target.slot), (SpType::Sled, 14 | 16)); - if target_is_scrimlet { - return Err(HttpError::for_bad_request( - None, - "wicketd does not know its own baseboard details: \ - refusing to update either scrimlet" - .into(), - )); + }; + + // If we have the state of the SP, are we allowed to update it? We + // refuse to try to update our own sled. + match &rqctx.baseboard { + Some(baseboard) => { + if baseboard.identifier() == sp_state.serial_number + && baseboard.model() == sp_state.model + && baseboard.revision() == i64::from(sp_state.revision) + { + self_update = Some(*target); + continue; + } + } + None => { + // We don't know our own baseboard, which is a very questionable + // state to be in! For now, we will hard-code the possibly + // locations where we could be running: scrimlets can only be in + // cubbies 14 or 16, so we refuse to update either of those. 
+ let target_is_scrimlet = matches!( + (target.type_, target.slot), + (SpType::Sled, 14 | 16) + ); + if target_is_scrimlet { + maybe_self_update.insert(*target); + continue; + } } } } - let opts = opts.into_inner(); - if let Some(test_error) = opts.test_error { - return Err(test_error.into_http_error(log, "starting update").await); + // Do we have any errors? + let mut errors = Vec::new(); + if !inventory_absent.is_empty() { + errors.push(format!( + "cannot update sleds (no inventory state present for {})", + sps_to_string(&inventory_absent) + )); + } + if let Some(self_update) = self_update { + errors.push(format!( + "cannot update sled where wicketd is running ({})", + SpIdentifierDisplay(self_update) + )); + } + if !maybe_self_update.is_empty() { + errors.push(format!( + "wicketd does not know its own baseboard details: \ + refusing to update either scrimlet ({})", + sps_to_string(&inventory_absent) + )); } - // All pre-flight update checks look OK: start the update. - // - // Generate an ID for this update; the update tracker will send it to the - // sled as part of the InstallinatorImageId, and installinator will send it - // back to our artifact server with its progress reports. - let update_id = Uuid::new_v4(); + if let Some(test_error) = ¶ms.options.test_error { + errors.push(test_error.into_error_string(log, "starting update").await); + } - match rqctx.update_tracker.start(target, update_id, opts).await { - Ok(()) => Ok(HttpResponseUpdatedNoContent {}), - Err(err) => Err(err.to_http_error()), + let start_update_errors = if errors.is_empty() { + // No errors: we can try and proceed with this update. + match rqctx.update_tracker.start(params.targets, params.options).await { + Ok(()) => return Ok(HttpResponseUpdatedNoContent {}), + Err(errors) => errors, + } + } else { + // We've already found errors, so all we want to do is to check whether + // the update tracker thinks there are any errors as well. 
+ match rqctx.update_tracker.update_pre_checks(params.targets).await { + Ok(()) => Vec::new(), + Err(errors) => errors, + } + }; + + errors.extend(start_update_errors.iter().map(|error| error.to_string())); + + // If we get here, we have errors to report. + + match errors.len() { + 0 => { + unreachable!( + "we already returned Ok(_) above if there were no errors" + ) + } + 1 => { + return Err(HttpError::for_bad_request( + None, + errors.pop().unwrap(), + )); + } + _ => { + return Err(HttpError::for_bad_request( + None, + format!( + "multiple errors encountered:\n - {}", + itertools::join(errors, "\n - ") + ), + )); + } } } diff --git a/wicketd/src/lib.rs b/wicketd/src/lib.rs index 78209ea04a..e17c15642c 100644 --- a/wicketd/src/lib.rs +++ b/wicketd/src/lib.rs @@ -6,6 +6,7 @@ mod artifacts; mod bootstrap_addrs; mod config; mod context; +mod helpers; mod http_entrypoints; mod installinator_progress; mod inventory; diff --git a/wicketd/src/update_tracker.rs b/wicketd/src/update_tracker.rs index a95a98bd72..1bbda00158 100644 --- a/wicketd/src/update_tracker.rs +++ b/wicketd/src/update_tracker.rs @@ -7,6 +7,7 @@ use crate::artifacts::ArtifactIdData; use crate::artifacts::UpdatePlan; use crate::artifacts::WicketdArtifactStore; +use crate::helpers::sps_to_string; use crate::http_entrypoints::GetArtifactsAndEventReportsResponse; use crate::http_entrypoints::StartUpdateOptions; use crate::http_entrypoints::UpdateSimulatedResult; @@ -19,7 +20,6 @@ use anyhow::ensure; use anyhow::Context; use display_error_chain::DisplayErrorChain; use dropshot::HttpError; -use futures::Future; use gateway_client::types::HostPhase2Progress; use gateway_client::types::HostPhase2RecoveryImageId; use gateway_client::types::HostStartupOptions; @@ -156,146 +156,23 @@ impl UpdateTracker { pub(crate) async fn start( &self, - sp: SpIdentifier, - update_id: Uuid, + sps: BTreeSet, opts: StartUpdateOptions, - ) -> Result<(), StartUpdateError> { - self.start_impl(sp, |plan| async { - // Do we need to 
upload this plan's trampoline phase 2 to MGS? - let upload_trampoline_phase_2_to_mgs = { - let mut upload_trampoline_phase_2_to_mgs = - self.upload_trampoline_phase_2_to_mgs.lock().await; - - match upload_trampoline_phase_2_to_mgs.as_mut() { - Some(prev) => { - // We've previously started an upload - does it match - // this artifact? If not, cancel the old task (which - // might still be trying to upload) and start a new one - // with our current image. - if prev.status.borrow().hash - != plan.trampoline_phase_2.data.hash() - { - // It does _not_ match - we have a new plan with a - // different trampoline image. If the old task is - // still running, cancel it, and start a new one. - prev.task.abort(); - *prev = self - .spawn_upload_trampoline_phase_2_to_mgs(&plan); - } - } - None => { - *upload_trampoline_phase_2_to_mgs = Some( - self.spawn_upload_trampoline_phase_2_to_mgs(&plan), - ); - } - } - - // Both branches above leave `upload_trampoline_phase_2_to_mgs` - // with data, so we can unwrap here to clone the `watch` - // channel. - upload_trampoline_phase_2_to_mgs - .as_ref() - .unwrap() - .status - .clone() - }; - - let event_buffer = Arc::new(StdMutex::new(EventBuffer::new(16))); - let ipr_start_receiver = - self.ipr_update_tracker.register(update_id); - - let update_cx = UpdateContext { - update_id, - sp, - mgs_client: self.mgs_client.clone(), - upload_trampoline_phase_2_to_mgs, - log: self.log.new(o!( - "sp" => format!("{sp:?}"), - "update_id" => update_id.to_string(), - )), - }; - // TODO do we need `UpdateDriver` as a distinct type? - let update_driver = UpdateDriver {}; - - // Using a oneshot channel to communicate the abort handle isn't - // ideal, but it works and is the easiest way to send it without - // restructuring this code. 
- let (abort_handle_sender, abort_handle_receiver) = - oneshot::channel(); - let task = tokio::spawn(update_driver.run( - plan, - update_cx, - event_buffer.clone(), - ipr_start_receiver, - opts, - abort_handle_sender, - )); - - let abort_handle = abort_handle_receiver - .await - .expect("abort handle is sent immediately"); - - SpUpdateData { task, abort_handle, event_buffer } - }) - .await + ) -> Result<(), Vec> { + let imp = RealSpawnUpdateDriver { update_tracker: self, opts }; + self.start_impl(sps, Some(imp)).await } /// Starts a fake update that doesn't perform any steps, but simply waits - /// for a oneshot receiver to resolve. + /// for a watch receiver to resolve. #[doc(hidden)] pub async fn start_fake_update( &self, - sp: SpIdentifier, - oneshot_receiver: oneshot::Receiver<()>, - ) -> Result<(), StartUpdateError> { - self.start_impl(sp, |_plan| async move { - let (sender, mut receiver) = mpsc::channel(128); - let event_buffer = Arc::new(StdMutex::new(EventBuffer::new(16))); - let event_buffer_2 = event_buffer.clone(); - let log = self.log.clone(); - - let engine = UpdateEngine::new(&log, sender); - let abort_handle = engine.abort_handle(); - - let task = tokio::spawn(async move { - // The step component and ID have been chosen arbitrarily here -- - // they aren't important. - engine - .new_step( - UpdateComponent::Host, - UpdateStepId::RunningInstallinator, - "Fake step that waits for receiver to resolve", - move |_cx| async move { - _ = oneshot_receiver.await; - StepSuccess::new(()).into() - }, - ) - .register(); - - // Spawn a task to accept all events from the executing engine. - let event_receiving_task = tokio::spawn(async move { - while let Some(event) = receiver.recv().await { - event_buffer_2.lock().unwrap().add_event(event); - } - }); - - match engine.execute().await { - Ok(_cx) => (), - Err(err) => { - error!(log, "update failed"; "err" => %err); - } - } - - // Wait for all events to be received and written to the event - // buffer. 
- event_receiving_task - .await - .expect("event receiving task panicked"); - }); - - SpUpdateData { task, abort_handle, event_buffer } - }) - .await + sps: BTreeSet, + watch_receiver: watch::Receiver<()>, + ) -> Result<(), Vec> { + let imp = FakeUpdateDriver { watch_receiver, log: self.log.clone() }; + self.start_impl(sps, Some(imp)).await } pub(crate) async fn clear_update_state( @@ -315,40 +192,107 @@ impl UpdateTracker { update_data.abort_update(sp, message).await } - async fn start_impl( + /// Checks whether an update can be started for the given SPs, without + /// actually starting it. + /// + /// This should only be used in situations where starting the update is not + /// desired (for example, if we've already encountered errors earlier in the + /// process and we're just adding to the list of errors). In cases where the + /// start method *is* desired, prefer the [`Self::start`] method, which also + /// performs the same checks. + pub(crate) async fn update_pre_checks( &self, - sp: SpIdentifier, - spawn_update_driver: F, - ) -> Result<(), StartUpdateError> + sps: BTreeSet, + ) -> Result<(), Vec> { + self.start_impl::(sps, None).await + } + + async fn start_impl( + &self, + sps: BTreeSet, + spawn_update_driver: Option, + ) -> Result<(), Vec> where - F: FnOnce(UpdatePlan) -> Fut, - Fut: Future + Send, + Spawn: SpawnUpdateDriver, { let mut update_data = self.sp_update_data.lock().await; - let plan = update_data - .artifact_store - .current_plan() - .ok_or(StartUpdateError::TufRepositoryUnavailable)?; + let mut errors = Vec::new(); - match update_data.sp_update_data.entry(sp) { - // Vacant: this is the first time we've started an update to this - // sp. - Entry::Vacant(slot) => { - slot.insert(spawn_update_driver(plan).await); - Ok(()) - } - // Occupied: we've previously started an update to this sp; only - // allow this one if that update is no longer running. 
- Entry::Occupied(mut slot) => { - if slot.get().task.is_finished() { - slot.insert(spawn_update_driver(plan).await); - Ok(()) - } else { - Err(StartUpdateError::UpdateInProgress(sp)) + // Check that we're not already updating any of these SPs. + let update_in_progress: Vec<_> = sps + .iter() + .filter(|sp| { + // If we don't have any update data for this SP, it's not in + // progress. + // + // If we do, it's in progress if the task is not finished. + update_data + .sp_update_data + .get(sp) + .map_or(false, |data| !data.task.is_finished()) + }) + .copied() + .collect(); + + if !update_in_progress.is_empty() { + errors.push(StartUpdateError::UpdateInProgress(update_in_progress)); + } + + let plan = update_data.artifact_store.current_plan(); + if plan.is_none() { + // (1), referred to below. + errors.push(StartUpdateError::TufRepositoryUnavailable); + } + + // If there are any errors, return now. + if !errors.is_empty() { + return Err(errors); + } + + let plan = + plan.expect("we'd have returned an error at (1) if plan was None"); + + // Call the setup method now. + if let Some(mut spawn_update_driver) = spawn_update_driver { + let setup_data = spawn_update_driver.setup(&plan).await; + + for sp in sps { + match update_data.sp_update_data.entry(sp) { + // Vacant: this is the first time we've started an update to this + // sp. + Entry::Vacant(slot) => { + slot.insert( + spawn_update_driver + .spawn_update_driver( + sp, + plan.clone(), + &setup_data, + ) + .await, + ); + } + // Occupied: we've previously started an update to this sp. 
+ Entry::Occupied(mut slot) => { + assert!( + slot.get().task.is_finished(), + "we just checked that the task was finished" + ); + slot.insert( + spawn_update_driver + .spawn_update_driver( + sp, + plan.clone(), + &setup_data, + ) + .await, + ); + } } } } + + Ok(()) } fn spawn_upload_trampoline_phase_2_to_mgs( @@ -425,6 +369,226 @@ impl UpdateTracker { } } +/// A trait that represents a backend implementation for spawning the update +/// driver. +#[async_trait::async_trait] +trait SpawnUpdateDriver { + /// The type returned by the [`Self::setup`] method. This is passed in by + /// reference to [`Self::spawn_update_driver`]. + type Setup; + + /// Perform setup required to spawn the update driver. + /// + /// This is called *once*, before any calls to + /// [`Self::spawn_update_driver`]. + async fn setup(&mut self, plan: &UpdatePlan) -> Self::Setup; + + /// Spawn the update driver for the given SP. + /// + /// This is called once per SP. + async fn spawn_update_driver( + &mut self, + sp: SpIdentifier, + plan: UpdatePlan, + setup_data: &Self::Setup, + ) -> SpUpdateData; +} + +/// The production implementation of [`SpawnUpdateDriver`]. +/// +/// This implementation spawns real update drivers. +#[derive(Debug)] +struct RealSpawnUpdateDriver<'tr> { + update_tracker: &'tr UpdateTracker, + opts: StartUpdateOptions, +} + +#[async_trait::async_trait] +impl<'tr> SpawnUpdateDriver for RealSpawnUpdateDriver<'tr> { + type Setup = watch::Receiver; + + async fn setup(&mut self, plan: &UpdatePlan) -> Self::Setup { + // Do we need to upload this plan's trampoline phase 2 to MGS? + + let mut upload_trampoline_phase_2_to_mgs = + self.update_tracker.upload_trampoline_phase_2_to_mgs.lock().await; + + match upload_trampoline_phase_2_to_mgs.as_mut() { + Some(prev) => { + // We've previously started an upload - does it match + // this artifact? If not, cancel the old task (which + // might still be trying to upload) and start a new one + // with our current image. 
+ if prev.status.borrow().hash + != plan.trampoline_phase_2.data.hash() + { + // It does _not_ match - we have a new plan with a + // different trampoline image. If the old task is + // still running, cancel it, and start a new one. + prev.task.abort(); + *prev = self + .update_tracker + .spawn_upload_trampoline_phase_2_to_mgs(&plan); + } + } + None => { + *upload_trampoline_phase_2_to_mgs = Some( + self.update_tracker + .spawn_upload_trampoline_phase_2_to_mgs(&plan), + ); + } + } + + // Both branches above leave `upload_trampoline_phase_2_to_mgs` + // with data, so we can unwrap here to clone the `watch` + // channel. + upload_trampoline_phase_2_to_mgs.as_ref().unwrap().status.clone() + } + + async fn spawn_update_driver( + &mut self, + sp: SpIdentifier, + plan: UpdatePlan, + setup_data: &Self::Setup, + ) -> SpUpdateData { + // Generate an ID for this update; the update tracker will send it to the + // sled as part of the InstallinatorImageId, and installinator will send it + // back to our artifact server with its progress reports. + let update_id = Uuid::new_v4(); + + let event_buffer = Arc::new(StdMutex::new(EventBuffer::new(16))); + let ipr_start_receiver = + self.update_tracker.ipr_update_tracker.register(update_id); + + let update_cx = UpdateContext { + update_id, + sp, + mgs_client: self.update_tracker.mgs_client.clone(), + upload_trampoline_phase_2_to_mgs: setup_data.clone(), + log: self.update_tracker.log.new(o!( + "sp" => format!("{sp:?}"), + "update_id" => update_id.to_string(), + )), + }; + // TODO do we need `UpdateDriver` as a distinct type? + let update_driver = UpdateDriver {}; + + // Using a oneshot channel to communicate the abort handle isn't + // ideal, but it works and is the easiest way to send it without + // restructuring this code. 
+ let (abort_handle_sender, abort_handle_receiver) = oneshot::channel(); + let task = tokio::spawn(update_driver.run( + plan, + update_cx, + event_buffer.clone(), + ipr_start_receiver, + self.opts.clone(), + abort_handle_sender, + )); + + let abort_handle = abort_handle_receiver + .await + .expect("abort handle is sent immediately"); + + SpUpdateData { task, abort_handle, event_buffer } + } +} + +/// A fake implementation of [`SpawnUpdateDriver`]. +/// +/// This implementation is only used by tests. It contains a single step that +/// waits for a [`watch::Receiver`] to resolve. +#[derive(Debug)] +struct FakeUpdateDriver { + watch_receiver: watch::Receiver<()>, + log: Logger, +} + +#[async_trait::async_trait] +impl SpawnUpdateDriver for FakeUpdateDriver { + type Setup = (); + + async fn setup(&mut self, _plan: &UpdatePlan) -> Self::Setup {} + + async fn spawn_update_driver( + &mut self, + _sp: SpIdentifier, + _plan: UpdatePlan, + _setup_data: &Self::Setup, + ) -> SpUpdateData { + let (sender, mut receiver) = mpsc::channel(128); + let event_buffer = Arc::new(StdMutex::new(EventBuffer::new(16))); + let event_buffer_2 = event_buffer.clone(); + let log = self.log.clone(); + + let engine = UpdateEngine::new(&log, sender); + let abort_handle = engine.abort_handle(); + + let mut watch_receiver = self.watch_receiver.clone(); + + let task = tokio::spawn(async move { + // The step component and ID have been chosen arbitrarily here -- + // they aren't important. + engine + .new_step( + UpdateComponent::Host, + UpdateStepId::RunningInstallinator, + "Fake step that waits for receiver to resolve", + move |_cx| async move { + // This will resolve as soon as the watch sender + // (typically a test) sends a value over the watch + // channel. + _ = watch_receiver.changed().await; + StepSuccess::new(()).into() + }, + ) + .register(); + + // Spawn a task to accept all events from the executing engine. 
+ let event_receiving_task = tokio::spawn(async move { + while let Some(event) = receiver.recv().await { + event_buffer_2.lock().unwrap().add_event(event); + } + }); + + match engine.execute().await { + Ok(_cx) => (), + Err(err) => { + error!(log, "update failed"; "err" => %err); + } + } + + // Wait for all events to be received and written to the event + // buffer. + event_receiving_task.await.expect("event receiving task panicked"); + }); + + SpUpdateData { task, abort_handle, event_buffer } + } +} + +/// An implementation of [`SpawnUpdateDriver`] that cannot be constructed. +/// +/// This is an uninhabited type (an empty enum), and is only used to provide a +/// type parameter for the [`UpdateTracker::update_pre_checks`] method. +enum NeverUpdateDriver {} + +#[async_trait::async_trait] +impl SpawnUpdateDriver for NeverUpdateDriver { + type Setup = (); + + async fn setup(&mut self, _plan: &UpdatePlan) -> Self::Setup {} + + async fn spawn_update_driver( + &mut self, + _sp: SpIdentifier, + _plan: UpdatePlan, + _setup_data: &Self::Setup, + ) -> SpUpdateData { + unreachable!("this update driver cannot be constructed") + } +} + #[derive(Debug)] struct UpdateTrackerData { artifact_store: WicketdArtifactStore, @@ -518,21 +682,8 @@ impl UpdateTrackerData { pub enum StartUpdateError { #[error("no TUF repository available")] TufRepositoryUnavailable, - #[error("target is already being updated: {0:?}")] - UpdateInProgress(SpIdentifier), -} - -impl StartUpdateError { - pub(crate) fn to_http_error(&self) -> HttpError { - let message = DisplayErrorChain::new(self).to_string(); - - match self { - StartUpdateError::TufRepositoryUnavailable - | StartUpdateError::UpdateInProgress(_) => { - HttpError::for_bad_request(None, message) - } - } - } + #[error("targets are already being updated: {}", sps_to_string(.0))] + UpdateInProgress(Vec), } #[derive(Debug, Clone, Error, Eq, PartialEq)] diff --git a/wicketd/tests/integration_tests/updates.rs 
b/wicketd/tests/integration_tests/updates.rs index a4b330930a..a198068ef3 100644 --- a/wicketd/tests/integration_tests/updates.rs +++ b/wicketd/tests/integration_tests/updates.rs @@ -16,13 +16,13 @@ use omicron_common::{ api::internal::nexus::KnownArtifactKind, update::{ArtifactHashId, ArtifactKind}, }; -use tokio::sync::oneshot; +use tokio::sync::watch; use uuid::Uuid; use wicket_common::update_events::{StepEventKind, UpdateComponent}; use wicketd::{RunningUpdateState, StartUpdateError}; use wicketd_client::types::{ GetInventoryParams, GetInventoryResponse, SpIdentifier, SpType, - StartUpdateOptions, + StartUpdateOptions, StartUpdateParams, }; #[tokio::test] @@ -138,13 +138,11 @@ async fn test_updates() { } // Now, try starting the update on SP 0. + let options = StartUpdateOptions::default(); + let params = StartUpdateParams { targets: vec![target_sp], options }; wicketd_testctx .wicketd_client - .post_start_update( - target_sp.type_, - target_sp.slot, - &StartUpdateOptions::default(), - ) + .post_start_update(¶ms) .await .expect("update started successfully"); @@ -352,12 +350,13 @@ async fn test_update_races() { slot: 0, type_: gateway_client::types::SpType::Sled, }; + let sps: BTreeSet<_> = vec![sp].into_iter().collect(); - let (sender, receiver) = oneshot::channel(); + let (sender, receiver) = watch::channel(()); wicketd_testctx .server .update_tracker - .start_fake_update(sp, receiver) + .start_fake_update(sps.clone(), receiver) .await .expect("start_fake_update successful"); @@ -372,14 +371,18 @@ async fn test_update_races() { // Also try starting another fake update, which should fail -- we don't let // updates be started in the middle of other updates. 
{ - let (_, receiver) = oneshot::channel(); + let (_, receiver) = watch::channel(()); let err = wicketd_testctx .server .update_tracker - .start_fake_update(sp, receiver) + .start_fake_update(sps, receiver) .await .expect_err("start_fake_update failed while update is running"); - assert_eq!(err, StartUpdateError::UpdateInProgress(sp)); + assert_eq!(err.len(), 1, "one error returned: {err:?}"); + assert_eq!( + err.first().unwrap(), + &StartUpdateError::UpdateInProgress(vec![sp]) + ); } // Unblock the update, letting it run to completion. From 2a6ef48917d16fa4d45375998a1c2e5ff1a89136 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 29 Sep 2023 12:24:54 -0700 Subject: [PATCH 03/35] [db] Access DB via connections, not via pool (#4140) - Accesses the DB via connections explicitly, rather than using a pool - This change reduces generics slightly, and simplifies error handling. Callers can deal with "pool errors" when checking out the connection, and can deal with "query errors" separately when issuing the queries themselves. 
Depends on https://github.com/oxidecomputer/async-bb8-diesel/pull/52 Fixes https://github.com/oxidecomputer/omicron/issues/4132 --- Cargo.lock | 2 +- Cargo.toml | 2 +- dev-tools/omdb/src/bin/omdb/db.rs | 26 +-- nexus/db-macros/src/lookup.rs | 8 +- nexus/db-queries/src/db/collection_attach.rs | 167 ++++++++------- nexus/db-queries/src/db/collection_detach.rs | 114 +++++----- .../src/db/collection_detach_many.rs | 155 +++++++------- nexus/db-queries/src/db/collection_insert.rs | 47 ++-- .../src/db/datastore/address_lot.rs | 52 +++-- .../src/db/datastore/certificate.rs | 16 +- .../src/db/datastore/console_session.rs | 6 +- nexus/db-queries/src/db/datastore/dataset.rs | 22 +- .../src/db/datastore/db_metadata.rs | 27 ++- .../src/db/datastore/device_auth.rs | 16 +- nexus/db-queries/src/db/datastore/disk.rs | 50 ++--- nexus/db-queries/src/db/datastore/dns.rs | 202 +++++++----------- .../src/db/datastore/external_ip.rs | 37 ++-- .../src/db/datastore/identity_provider.rs | 12 +- nexus/db-queries/src/db/datastore/image.rs | 62 +++--- nexus/db-queries/src/db/datastore/instance.rs | 37 ++-- nexus/db-queries/src/db/datastore/ip_pool.rs | 95 ++++---- nexus/db-queries/src/db/datastore/mod.rs | 90 +++++--- .../src/db/datastore/network_interface.rs | 46 ++-- nexus/db-queries/src/db/datastore/oximeter.rs | 20 +- .../src/db/datastore/physical_disk.rs | 20 +- nexus/db-queries/src/db/datastore/project.rs | 42 ++-- nexus/db-queries/src/db/datastore/rack.rs | 59 ++--- nexus/db-queries/src/db/datastore/region.rs | 29 +-- .../src/db/datastore/region_snapshot.rs | 10 +- nexus/db-queries/src/db/datastore/role.rs | 56 ++--- nexus/db-queries/src/db/datastore/saga.rs | 26 +-- nexus/db-queries/src/db/datastore/service.rs | 38 ++-- nexus/db-queries/src/db/datastore/silo.rs | 168 +++++++-------- .../db-queries/src/db/datastore/silo_group.rs | 41 ++-- .../db-queries/src/db/datastore/silo_user.rs | 69 +++--- nexus/db-queries/src/db/datastore/sled.rs | 22 +- .../src/db/datastore/sled_instance.rs | 8 
+- nexus/db-queries/src/db/datastore/snapshot.rs | 27 +-- nexus/db-queries/src/db/datastore/ssh_key.rs | 14 +- nexus/db-queries/src/db/datastore/switch.rs | 12 +- .../src/db/datastore/switch_interface.rs | 52 ++--- .../src/db/datastore/switch_port.rs | 127 +++++------ nexus/db-queries/src/db/datastore/update.rs | 62 +++--- .../virtual_provisioning_collection.rs | 80 +++---- nexus/db-queries/src/db/datastore/volume.rs | 111 +++++----- nexus/db-queries/src/db/datastore/vpc.rs | 152 +++++++------ nexus/db-queries/src/db/datastore/zpool.rs | 22 +- nexus/db-queries/src/db/error.rs | 123 +++++------ nexus/db-queries/src/db/explain.rs | 18 +- nexus/db-queries/src/db/lookup.rs | 2 +- nexus/db-queries/src/db/pagination.rs | 21 +- .../db-queries/src/db/queries/external_ip.rs | 35 +-- .../src/db/queries/network_interface.rs | 107 +++++----- nexus/db-queries/src/db/queries/next_item.rs | 9 +- .../src/db/queries/region_allocation.rs | 4 +- nexus/db-queries/src/db/queries/vpc_subnet.rs | 33 ++- nexus/db-queries/src/db/true_or_cast_error.rs | 8 +- nexus/db-queries/src/db/update_and_check.rs | 11 +- nexus/src/app/background/dns_config.rs | 6 +- nexus/src/app/background/dns_servers.rs | 12 +- nexus/src/app/background/init.rs | 2 +- nexus/src/app/sagas/disk_create.rs | 12 +- nexus/src/app/sagas/instance_create.rs | 18 +- nexus/src/app/sagas/project_create.rs | 6 +- nexus/src/app/sagas/snapshot_create.rs | 6 +- nexus/src/app/sagas/test_helpers.rs | 2 +- nexus/src/app/sagas/vpc_create.rs | 16 +- 67 files changed, 1474 insertions(+), 1535 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 138080640e..b7296ea184 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -287,7 +287,7 @@ checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" [[package]] name = "async-bb8-diesel" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/async-bb8-diesel?rev=be3d9bce50051d8c0e0c06078e8066cc27db3001#be3d9bce50051d8c0e0c06078e8066cc27db3001" +source = 
"git+https://github.com/oxidecomputer/async-bb8-diesel?rev=da04c087f835a51e0441addb19c5ef4986e1fcf2#da04c087f835a51e0441addb19c5ef4986e1fcf2" dependencies = [ "async-trait", "bb8", diff --git a/Cargo.toml b/Cargo.toml index d660397d9e..0e194394f9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -135,7 +135,7 @@ api_identity = { path = "api_identity" } approx = "0.5.1" assert_matches = "1.5.0" assert_cmd = "2.0.12" -async-bb8-diesel = { git = "https://github.com/oxidecomputer/async-bb8-diesel", rev = "be3d9bce50051d8c0e0c06078e8066cc27db3001" } +async-bb8-diesel = { git = "https://github.com/oxidecomputer/async-bb8-diesel", rev = "da04c087f835a51e0441addb19c5ef4986e1fcf2" } async-trait = "0.1.73" authz-macros = { path = "nexus/authz-macros" } backoff = { version = "0.4.0", features = [ "tokio" ] } diff --git a/dev-tools/omdb/src/bin/omdb/db.rs b/dev-tools/omdb/src/bin/omdb/db.rs index 42f4d53730..93e5ef4301 100644 --- a/dev-tools/omdb/src/bin/omdb/db.rs +++ b/dev-tools/omdb/src/bin/omdb/db.rs @@ -367,7 +367,7 @@ async fn cmd_db_disk_list( .filter(dsl::time_deleted.is_null()) .limit(i64::from(u32::from(limit))) .select(Disk::as_select()) - .load_async(datastore.pool_for_tests().await?) + .load_async(&*datastore.pool_connection_for_tests().await?) .await .context("loading disks")?; @@ -421,11 +421,13 @@ async fn cmd_db_disk_info( use db::schema::disk::dsl as disk_dsl; + let conn = datastore.pool_connection_for_tests().await?; + let disk = disk_dsl::disk .filter(disk_dsl::id.eq(args.uuid)) .limit(1) .select(Disk::as_select()) - .load_async(datastore.pool_for_tests().await?) + .load_async(&*conn) .await .context("loading requested disk")?; @@ -445,7 +447,7 @@ async fn cmd_db_disk_info( .filter(instance_dsl::id.eq(instance_uuid)) .limit(1) .select(Instance::as_select()) - .load_async(datastore.pool_for_tests().await?) 
+ .load_async(&*conn) .await .context("loading requested instance")?; @@ -540,7 +542,7 @@ async fn cmd_db_disk_physical( .filter(zpool_dsl::time_deleted.is_null()) .filter(zpool_dsl::physical_disk_id.eq(args.uuid)) .select(Zpool::as_select()) - .load_async(datastore.pool_for_tests().await?) + .load_async(&*datastore.pool_connection_for_tests().await?) .await .context("loading zpool from pysical disk id")?; @@ -560,7 +562,7 @@ async fn cmd_db_disk_physical( .filter(dataset_dsl::time_deleted.is_null()) .filter(dataset_dsl::pool_id.eq(zp.id())) .select(Dataset::as_select()) - .load_async(datastore.pool_for_tests().await?) + .load_async(&*datastore.pool_connection_for_tests().await?) .await .context("loading dataset")?; @@ -595,7 +597,7 @@ async fn cmd_db_disk_physical( let regions = region_dsl::region .filter(region_dsl::dataset_id.eq(did)) .select(Region::as_select()) - .load_async(datastore.pool_for_tests().await?) + .load_async(&*datastore.pool_connection_for_tests().await?) .await .context("loading region")?; @@ -614,7 +616,7 @@ async fn cmd_db_disk_physical( .filter(dsl::volume_id.eq_any(volume_ids)) .limit(i64::from(u32::from(limit))) .select(Disk::as_select()) - .load_async(datastore.pool_for_tests().await?) + .load_async(&*datastore.pool_connection_for_tests().await?) .await .context("loading disks")?; @@ -642,7 +644,7 @@ async fn cmd_db_disk_physical( .filter(instance_dsl::id.eq(instance_uuid)) .limit(1) .select(Instance::as_select()) - .load_async(datastore.pool_for_tests().await?) + .load_async(&*datastore.pool_connection_for_tests().await?) .await .context("loading requested instance")?; @@ -877,7 +879,7 @@ async fn cmd_db_instances( let instances = dsl::instance .limit(i64::from(u32::from(limit))) .select(Instance::as_select()) - .load_async(datastore.pool_for_tests().await?) + .load_async(&*datastore.pool_connection_for_tests().await?) 
.await .context("loading instances")?; @@ -971,7 +973,7 @@ async fn load_zones_version( .filter(dsl::version.eq(nexus_db_model::Generation::from(version))) .limit(1) .select(DnsVersion::as_select()) - .load_async(datastore.pool_for_tests().await?) + .load_async(&*datastore.pool_connection_for_tests().await?) .await .context("loading requested version")?; @@ -1013,7 +1015,7 @@ async fn cmd_db_dns_diff( .filter(dsl::version_added.eq(version.version)) .limit(i64::from(u32::from(limit))) .select(DnsName::as_select()) - .load_async(datastore.pool_for_tests().await?) + .load_async(&*datastore.pool_connection_for_tests().await?) .await .context("loading added names")?; check_limit(&added, limit, || "loading added names"); @@ -1023,7 +1025,7 @@ async fn cmd_db_dns_diff( .filter(dsl::version_removed.eq(version.version)) .limit(i64::from(u32::from(limit))) .select(DnsName::as_select()) - .load_async(datastore.pool_for_tests().await?) + .load_async(&*datastore.pool_connection_for_tests().await?) .await .context("loading added names")?; check_limit(&added, limit, || "loading removed names"); diff --git a/nexus/db-macros/src/lookup.rs b/nexus/db-macros/src/lookup.rs index 93c2bd3652..38cab15e30 100644 --- a/nexus/db-macros/src/lookup.rs +++ b/nexus/db-macros/src/lookup.rs @@ -806,11 +806,11 @@ fn generate_database_functions(config: &Config) -> TokenStream { #lookup_filter .select(nexus_db_model::#resource_name::as_select()) .get_result_async( - datastore.pool_authorized(opctx).await? + &*datastore.pool_connection_authorized(opctx).await? ) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByLookup( ResourceType::#resource_name, @@ -891,10 +891,10 @@ fn generate_database_functions(config: &Config) -> TokenStream { #soft_delete_filter #(.filter(dsl::#pkey_column_names.eq(#pkey_names.clone())))* .select(nexus_db_model::#resource_name::as_select()) - .get_result_async(datastore.pool_authorized(opctx).await?) 
+ .get_result_async(&*datastore.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByLookup( ResourceType::#resource_name, diff --git a/nexus/db-queries/src/db/collection_attach.rs b/nexus/db-queries/src/db/collection_attach.rs index c88054795d..40ec659bf9 100644 --- a/nexus/db-queries/src/db/collection_attach.rs +++ b/nexus/db-queries/src/db/collection_attach.rs @@ -17,7 +17,7 @@ use super::cte_utils::{ QueryFromClause, QuerySqlType, TableDefaultWhereClause, }; use super::pool::DbConnection; -use async_bb8_diesel::{AsyncRunQueryDsl, PoolError}; +use async_bb8_diesel::{AsyncRunQueryDsl, ConnectionError}; use diesel::associations::HasTable; use diesel::expression::{AsExpression, Expression}; use diesel::helper_types::*; @@ -299,7 +299,7 @@ where /// Result of [`AttachToCollectionStatement`] when executed asynchronously pub type AsyncAttachToCollectionResult = - Result<(C, ResourceType), AttachError>; + Result<(C, ResourceType), AttachError>; /// Errors returned by [`AttachToCollectionStatement`]. #[derive(Debug)] @@ -332,10 +332,9 @@ where AttachToCollectionStatement: Send, { /// Issues the CTE asynchronously and parses the result. - pub async fn attach_and_get_result_async( + pub async fn attach_and_get_result_async( self, - conn: &(impl async_bb8_diesel::AsyncConnection - + Sync), + conn: &async_bb8_diesel::Connection, ) -> AsyncAttachToCollectionResult where // We require this bound to ensure that "Self" is runnable as query. @@ -344,13 +343,11 @@ where DbConnection, RawOutput, >, - ConnErr: From + Send + 'static, - PoolError: From, { self.get_result_async::>(conn) .await // If the database returns an error, propagate it right away. - .map_err(|e| AttachError::DatabaseError(PoolError::from(e))) + .map_err(|e| AttachError::DatabaseError(e)) // Otherwise, parse the output to determine if the CTE succeeded. 
.and_then(Self::parse_result) } @@ -570,6 +567,7 @@ mod test { }; use async_bb8_diesel::{ AsyncConnection, AsyncRunQueryDsl, AsyncSimpleConnection, + ConnectionManager, }; use chrono::Utc; use db_macros::Resource; @@ -605,7 +603,9 @@ mod test { } } - async fn setup_db(pool: &crate::db::Pool) { + async fn setup_db( + pool: &crate::db::Pool, + ) -> bb8::PooledConnection> { let connection = pool.pool().get().await.unwrap(); (*connection) .batch_execute_async( @@ -633,6 +633,7 @@ mod test { ) .await .unwrap(); + connection } /// Describes a resource within the database. @@ -669,7 +670,7 @@ mod test { async fn insert_collection( id: Uuid, name: &str, - pool: &db::Pool, + conn: &async_bb8_diesel::Connection, ) -> Collection { let create_params = IdentityMetadataCreateParams { name: Name::try_from(name.to_string()).unwrap(), @@ -680,18 +681,21 @@ mod test { diesel::insert_into(collection::table) .values(c) - .execute_async(pool.pool()) + .execute_async(conn) .await .unwrap(); - get_collection(id, &pool).await + get_collection(id, &conn).await } - async fn get_collection(id: Uuid, pool: &db::Pool) -> Collection { + async fn get_collection( + id: Uuid, + conn: &async_bb8_diesel::Connection, + ) -> Collection { collection::table .find(id) .select(Collection::as_select()) - .first_async(pool.pool()) + .first_async(conn) .await .unwrap() } @@ -699,7 +703,7 @@ mod test { async fn insert_resource( id: Uuid, name: &str, - pool: &db::Pool, + conn: &async_bb8_diesel::Connection, ) -> Resource { let create_params = IdentityMetadataCreateParams { name: Name::try_from(name.to_string()).unwrap(), @@ -712,18 +716,21 @@ mod test { diesel::insert_into(resource::table) .values(r) - .execute_async(pool.pool()) + .execute_async(conn) .await .unwrap(); - get_resource(id, &pool).await + get_resource(id, conn).await } - async fn get_resource(id: Uuid, pool: &db::Pool) -> Resource { + async fn get_resource( + id: Uuid, + conn: &async_bb8_diesel::Connection, + ) -> Resource { resource::table 
.find(id) .select(Resource::as_select()) - .first_async(pool.pool()) + .first_async(conn) .await .unwrap() } @@ -856,7 +863,7 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); @@ -869,7 +876,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(collection_id)), ) - .attach_and_get_result_async(pool.pool()) + .attach_and_get_result_async(&conn) .await; assert!(matches!(attach, Err(AttachError::CollectionNotFound))); @@ -885,14 +892,14 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); // Create the collection let collection = - insert_collection(collection_id, "collection", &pool).await; + insert_collection(collection_id, "collection", &conn).await; // Attempt to attach - even though the resource does not exist. let attach = Collection::attach_resource( @@ -904,12 +911,12 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(collection_id)), ) - .attach_and_get_result_async(pool.pool()) + .attach_and_get_result_async(&conn) .await; assert!(matches!(attach, Err(AttachError::ResourceNotFound))); // The collection should remain unchanged. 
- assert_eq!(collection, get_collection(collection_id, &pool).await); + assert_eq!(collection, get_collection(collection_id, &conn).await); db.cleanup().await.unwrap(); logctx.cleanup_successful(); @@ -922,15 +929,15 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); // Create the collection and resource. let _collection = - insert_collection(collection_id, "collection", &pool).await; - let _resource = insert_resource(resource_id, "resource", &pool).await; + insert_collection(collection_id, "collection", &conn).await; + let _resource = insert_resource(resource_id, "resource", &conn).await; // Attach the resource to the collection. let attach = Collection::attach_resource( @@ -942,7 +949,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(collection_id)), ) - .attach_and_get_result_async(pool.pool()) + .attach_and_get_result_async(&conn) .await; // "attach_and_get_result_async" should return the "attached" resource. @@ -955,9 +962,9 @@ mod test { // The returned value should be the latest value in the DB. assert_eq!( returned_collection, - get_collection(collection_id, &pool).await + get_collection(collection_id, &conn).await ); - assert_eq!(returned_resource, get_resource(resource_id, &pool).await); + assert_eq!(returned_resource, get_resource(resource_id, &conn).await); db.cleanup().await.unwrap(); logctx.cleanup_successful(); @@ -970,15 +977,15 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); // Create the collection and resource. 
let _collection = - insert_collection(collection_id, "collection", &pool).await; - let _resource = insert_resource(resource_id, "resource", &pool).await; + insert_collection(collection_id, "collection", &conn).await; + let _resource = insert_resource(resource_id, "resource", &conn).await; // Attach the resource to the collection. let attach_query = Collection::attach_resource( @@ -991,10 +998,10 @@ mod test { .set(resource::dsl::collection_id.eq(collection_id)), ); - type TxnError = - TransactionError>; - let result = pool - .pool() + type TxnError = TransactionError< + AttachError, + >; + let result = conn .transaction_async(|conn| async move { attach_query.attach_and_get_result_async(&conn).await.map_err( |e| match e { @@ -1015,9 +1022,9 @@ mod test { // The returned values should be the latest value in the DB. assert_eq!( returned_collection, - get_collection(collection_id, &pool).await + get_collection(collection_id, &conn).await ); - assert_eq!(returned_resource, get_resource(resource_id, &pool).await); + assert_eq!(returned_resource, get_resource(resource_id, &conn).await); db.cleanup().await.unwrap(); logctx.cleanup_successful(); @@ -1030,7 +1037,7 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; const RESOURCE_COUNT: u32 = 5; @@ -1038,12 +1045,12 @@ mod test { // Create the collection. let _collection = - insert_collection(collection_id, "collection", &pool).await; + insert_collection(collection_id, "collection", &conn).await; // Create each resource, attaching them to the collection. for i in 0..RESOURCE_COUNT { let resource_id = uuid::Uuid::new_v4(); - insert_resource(resource_id, &format!("resource{}", i), &pool) + insert_resource(resource_id, &format!("resource{}", i), &conn) .await; // Attach the resource to the collection. 
@@ -1056,7 +1063,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(collection_id)), ) - .attach_and_get_result_async(pool.pool()) + .attach_and_get_result_async(&conn) .await; // "attach_and_get_result_async" should return the "attached" resource. @@ -1071,7 +1078,7 @@ mod test { // The returned resource value should be the latest value in the DB. assert_eq!( returned_resource, - get_resource(resource_id, &pool).await + get_resource(resource_id, &conn).await ); } @@ -1086,15 +1093,15 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); // Attach a resource to a collection, as usual. let _collection = - insert_collection(collection_id, "collection", &pool).await; + insert_collection(collection_id, "collection", &conn).await; let resource_id1 = uuid::Uuid::new_v4(); - let _resource = insert_resource(resource_id1, "resource1", &pool).await; + let _resource = insert_resource(resource_id1, "resource1", &conn).await; let attach = Collection::attach_resource( collection_id, resource_id1, @@ -1104,7 +1111,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(collection_id)), ) - .attach_and_get_result_async(pool.pool()) + .attach_and_get_result_async(&conn) .await; assert_eq!( attach.expect("Attach should have worked").1.id(), @@ -1113,7 +1120,7 @@ mod test { // Let's try attaching a second resource, now that we're at capacity. 
let resource_id2 = uuid::Uuid::new_v4(); - let _resource = insert_resource(resource_id2, "resource2", &pool).await; + let _resource = insert_resource(resource_id2, "resource2", &conn).await; let attach = Collection::attach_resource( collection_id, resource_id2, @@ -1123,17 +1130,17 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(collection_id)), ) - .attach_and_get_result_async(pool.pool()) + .attach_and_get_result_async(&conn) .await; let err = attach.expect_err("Should have failed to attach"); match err { AttachError::NoUpdate { attached_count, resource, collection } => { assert_eq!(attached_count, 1); - assert_eq!(resource, get_resource(resource_id2, &pool).await); + assert_eq!(resource, get_resource(resource_id2, &conn).await); assert_eq!( collection, - get_collection(collection_id, &pool).await + get_collection(collection_id, &conn).await ); } _ => panic!("Unexpected error: {:?}", err), @@ -1150,15 +1157,15 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); // Attach a resource to a collection, as usual. 
let _collection = - insert_collection(collection_id, "collection", &pool).await; + insert_collection(collection_id, "collection", &conn).await; let resource_id = uuid::Uuid::new_v4(); - let _resource = insert_resource(resource_id, "resource", &pool).await; + let _resource = insert_resource(resource_id, "resource", &conn).await; let attach = Collection::attach_resource( collection_id, resource_id, @@ -1168,7 +1175,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(collection_id)), ) - .attach_and_get_result_async(pool.pool()) + .attach_and_get_result_async(&conn) .await; assert_eq!( attach.expect("Attach should have worked").1.id(), @@ -1185,7 +1192,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(collection_id)), ) - .attach_and_get_result_async(pool.pool()) + .attach_and_get_result_async(&conn) .await; let err = attach.expect_err("Should have failed to attach"); @@ -1203,10 +1210,10 @@ mod test { .expect("Should already be attached"), collection_id ); - assert_eq!(resource, get_resource(resource_id, &pool).await); + assert_eq!(resource, get_resource(resource_id, &conn).await); assert_eq!( collection, - get_collection(collection_id, &pool).await + get_collection(collection_id, &conn).await ); } _ => panic!("Unexpected error: {:?}", err), @@ -1222,7 +1229,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(collection_id)), ) - .attach_and_get_result_async(pool.pool()) + .attach_and_get_result_async(&conn) .await; let err = attach.expect_err("Should have failed to attach"); // Even when at capacity, the same information should be propagated back @@ -1237,10 +1244,10 @@ mod test { .expect("Should already be attached"), collection_id ); - assert_eq!(resource, get_resource(resource_id, &pool).await); + assert_eq!(resource, get_resource(resource_id, &conn).await); assert_eq!( collection, - get_collection(collection_id, &pool).await + get_collection(collection_id, 
&conn).await ); } _ => panic!("Unexpected error: {:?}", err), @@ -1257,15 +1264,15 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); // Create the collection and resource. let _collection = - insert_collection(collection_id, "collection", &pool).await; - let _resource = insert_resource(resource_id, "resource", &pool).await; + insert_collection(collection_id, "collection", &conn).await; + let _resource = insert_resource(resource_id, "resource", &conn).await; // Attach the resource to the collection. // @@ -1290,7 +1297,7 @@ mod test { resource::dsl::description.eq("new description".to_string()), )), ) - .attach_and_get_result_async(pool.pool()) + .attach_and_get_result_async(&conn) .await; let (_, returned_resource) = attach.expect("Attach should have worked"); @@ -1298,7 +1305,7 @@ mod test { returned_resource.collection_id.expect("Expected a collection ID"), collection_id ); - assert_eq!(returned_resource, get_resource(resource_id, &pool).await); + assert_eq!(returned_resource, get_resource(resource_id, &conn).await); assert_eq!(returned_resource.description(), "new description"); db.cleanup().await.unwrap(); @@ -1312,22 +1319,22 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); // Create the collection and resource. let _collection = - insert_collection(collection_id, "collection", &pool).await; - let _resource = insert_resource(resource_id, "resource", &pool).await; + insert_collection(collection_id, "collection", &conn).await; + let _resource = insert_resource(resource_id, "resource", &conn).await; // Immediately soft-delete the resource. 
diesel::update( resource::table.filter(resource::dsl::id.eq(resource_id)), ) .set(resource::dsl::time_deleted.eq(Utc::now())) - .execute_async(pool.pool()) + .execute_async(&*conn) .await .unwrap(); @@ -1342,7 +1349,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(collection_id)), ) - .attach_and_get_result_async(pool.pool()) + .attach_and_get_result_async(&conn) .await; assert!(matches!(attach, Err(AttachError::ResourceNotFound))); @@ -1357,19 +1364,19 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); // Create the collection and some resources. let _collection = - insert_collection(collection_id, "collection", &pool).await; + insert_collection(collection_id, "collection", &conn).await; let resource_id1 = uuid::Uuid::new_v4(); let resource_id2 = uuid::Uuid::new_v4(); let _resource1 = - insert_resource(resource_id1, "resource1", &pool).await; + insert_resource(resource_id1, "resource1", &conn).await; let _resource2 = - insert_resource(resource_id2, "resource2", &pool).await; + insert_resource(resource_id2, "resource2", &conn).await; // Attach the resource to the collection. // @@ -1384,7 +1391,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(collection_id)), ) - .attach_and_get_result_async(pool.pool()) + .attach_and_get_result_async(&conn) .await; let (_, returned_resource) = attach.expect("Attach should have worked"); @@ -1394,10 +1401,10 @@ mod test { // "resource2" should have automatically been filtered away from the // update statement, regardless of user input. 
assert_eq!( - get_resource(resource_id1, &pool).await.collection_id.unwrap(), + get_resource(resource_id1, &conn).await.collection_id.unwrap(), collection_id ); - assert!(get_resource(resource_id2, &pool) + assert!(get_resource(resource_id2, &conn) .await .collection_id .is_none()); diff --git a/nexus/db-queries/src/db/collection_detach.rs b/nexus/db-queries/src/db/collection_detach.rs index 04894ecb21..df157040e6 100644 --- a/nexus/db-queries/src/db/collection_detach.rs +++ b/nexus/db-queries/src/db/collection_detach.rs @@ -16,7 +16,7 @@ use super::cte_utils::{ QueryFromClause, QuerySqlType, }; use super::pool::DbConnection; -use async_bb8_diesel::{AsyncRunQueryDsl, PoolError}; +use async_bb8_diesel::{AsyncRunQueryDsl, ConnectionError}; use diesel::associations::HasTable; use diesel::expression::{AsExpression, Expression}; use diesel::helper_types::*; @@ -230,7 +230,7 @@ where /// Result of [`DetachFromCollectionStatement`] when executed asynchronously pub type AsyncDetachFromCollectionResult = - Result>; + Result>; /// Errors returned by [`DetachFromCollectionStatement`]. #[derive(Debug)] @@ -265,8 +265,7 @@ where /// Issues the CTE asynchronously and parses the result. pub async fn detach_and_get_result_async( self, - conn: &(impl async_bb8_diesel::AsyncConnection - + Sync), + conn: &async_bb8_diesel::Connection, ) -> AsyncDetachFromCollectionResult where // We require this bound to ensure that "Self" is runnable as query. 
@@ -482,7 +481,9 @@ mod test { use super::*; use crate::db::collection_attach::DatastoreAttachTarget; use crate::db::{self, identity::Resource as IdentityResource}; - use async_bb8_diesel::{AsyncRunQueryDsl, AsyncSimpleConnection}; + use async_bb8_diesel::{ + AsyncRunQueryDsl, AsyncSimpleConnection, ConnectionManager, + }; use chrono::Utc; use db_macros::Resource; use diesel::expression_methods::ExpressionMethods; @@ -517,7 +518,9 @@ mod test { } } - async fn setup_db(pool: &crate::db::Pool) { + async fn setup_db( + pool: &crate::db::Pool, + ) -> bb8::PooledConnection> { let connection = pool.pool().get().await.unwrap(); (*connection) .batch_execute_async( @@ -545,6 +548,7 @@ mod test { ) .await .unwrap(); + connection } /// Describes a resource within the database. @@ -581,7 +585,7 @@ mod test { async fn insert_collection( id: Uuid, name: &str, - pool: &db::Pool, + conn: &async_bb8_diesel::Connection, ) -> Collection { let create_params = IdentityMetadataCreateParams { name: Name::try_from(name.to_string()).unwrap(), @@ -592,18 +596,21 @@ mod test { diesel::insert_into(collection::table) .values(c) - .execute_async(pool.pool()) + .execute_async(conn) .await .unwrap(); - get_collection(id, &pool).await + get_collection(id, conn).await } - async fn get_collection(id: Uuid, pool: &db::Pool) -> Collection { + async fn get_collection( + id: Uuid, + conn: &async_bb8_diesel::Connection, + ) -> Collection { collection::table .find(id) .select(Collection::as_select()) - .first_async(pool.pool()) + .first_async(conn) .await .unwrap() } @@ -611,7 +618,7 @@ mod test { async fn insert_resource( id: Uuid, name: &str, - pool: &db::Pool, + conn: &async_bb8_diesel::Connection, ) -> Resource { let create_params = IdentityMetadataCreateParams { name: Name::try_from(name.to_string()).unwrap(), @@ -624,17 +631,17 @@ mod test { diesel::insert_into(resource::table) .values(r) - .execute_async(pool.pool()) + .execute_async(conn) .await .unwrap(); - get_resource(id, &pool).await + 
get_resource(id, conn).await } async fn attach_resource( collection_id: Uuid, resource_id: Uuid, - pool: &db::Pool, + conn: &async_bb8_diesel::Connection, ) { Collection::attach_resource( collection_id, @@ -645,16 +652,19 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(collection_id)), ) - .attach_and_get_result_async(pool.pool()) + .attach_and_get_result_async(conn) .await .unwrap(); } - async fn get_resource(id: Uuid, pool: &db::Pool) -> Resource { + async fn get_resource( + id: Uuid, + conn: &async_bb8_diesel::Connection, + ) -> Resource { resource::table .find(id) .select(Resource::as_select()) - .first_async(pool.pool()) + .first_async(conn) .await .unwrap() } @@ -777,7 +787,7 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); @@ -789,7 +799,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(Option::::None)), ) - .detach_and_get_result_async(pool.pool()) + .detach_and_get_result_async(&conn) .await; assert!(matches!(detach, Err(DetachError::CollectionNotFound))); @@ -805,14 +815,14 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); // Create the collection let collection = - insert_collection(collection_id, "collection", &pool).await; + insert_collection(collection_id, "collection", &conn).await; // Attempt to detach - even though the resource does not exist. 
let detach = Collection::detach_resource( @@ -823,12 +833,12 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(Option::::None)), ) - .detach_and_get_result_async(pool.pool()) + .detach_and_get_result_async(&conn) .await; assert!(matches!(detach, Err(DetachError::ResourceNotFound))); // The collection should remain unchanged. - assert_eq!(collection, get_collection(collection_id, &pool).await); + assert_eq!(collection, get_collection(collection_id, &conn).await); db.cleanup().await.unwrap(); logctx.cleanup_successful(); @@ -841,16 +851,16 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); // Create the collection and resource. Attach them. let _collection = - insert_collection(collection_id, "collection", &pool).await; - let _resource = insert_resource(resource_id, "resource", &pool).await; - attach_resource(collection_id, resource_id, &pool).await; + insert_collection(collection_id, "collection", &conn).await; + let _resource = insert_resource(resource_id, "resource", &conn).await; + attach_resource(collection_id, resource_id, &conn).await; // Detach the resource from the collection. let detach = Collection::detach_resource( @@ -861,14 +871,14 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(Option::::None)), ) - .detach_and_get_result_async(pool.pool()) + .detach_and_get_result_async(&conn) .await; // "detach_and_get_result_async" should return the "detached" resource. let returned_resource = detach.expect("Detach should have worked"); assert!(returned_resource.collection_id.is_none(),); // The returned value should be the latest value in the DB. 
- assert_eq!(returned_resource, get_resource(resource_id, &pool).await); + assert_eq!(returned_resource, get_resource(resource_id, &conn).await); db.cleanup().await.unwrap(); logctx.cleanup_successful(); @@ -881,15 +891,15 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); let _collection = - insert_collection(collection_id, "collection", &pool).await; + insert_collection(collection_id, "collection", &conn).await; let resource_id = uuid::Uuid::new_v4(); - let _resource = insert_resource(resource_id, "resource", &pool).await; - attach_resource(collection_id, resource_id, &pool).await; + let _resource = insert_resource(resource_id, "resource", &conn).await; + attach_resource(collection_id, resource_id, &conn).await; // Detach a resource from a collection, as usual. let detach = Collection::detach_resource( @@ -900,7 +910,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(Option::::None)), ) - .detach_and_get_result_async(pool.pool()) + .detach_and_get_result_async(&conn) .await; assert_eq!( detach.expect("Detach should have worked").id(), @@ -916,7 +926,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(Option::::None)), ) - .detach_and_get_result_async(pool.pool()) + .detach_and_get_result_async(&conn) .await; let err = detach.expect_err("Should have failed to detach"); @@ -925,10 +935,10 @@ mod test { match err { DetachError::NoUpdate { resource, collection } => { assert!(resource.collection_id.as_ref().is_none()); - assert_eq!(resource, get_resource(resource_id, &pool).await); + assert_eq!(resource, get_resource(resource_id, &conn).await); assert_eq!( collection, - get_collection(collection_id, &pool).await + get_collection(collection_id, &conn).await ); } _ => panic!("Unexpected error: {:?}", err), @@ -945,22 +955,22 @@ mod test { let 
cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); // Create the collection and resource. let _collection = - insert_collection(collection_id, "collection", &pool).await; - let _resource = insert_resource(resource_id, "resource", &pool).await; + insert_collection(collection_id, "collection", &conn).await; + let _resource = insert_resource(resource_id, "resource", &conn).await; // Immediately soft-delete the resource. diesel::update( resource::table.filter(resource::dsl::id.eq(resource_id)), ) .set(resource::dsl::time_deleted.eq(Utc::now())) - .execute_async(pool.pool()) + .execute_async(&*conn) .await .unwrap(); @@ -974,7 +984,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(collection_id)), ) - .detach_and_get_result_async(pool.pool()) + .detach_and_get_result_async(&conn) .await; assert!(matches!(detach, Err(DetachError::ResourceNotFound))); @@ -989,21 +999,21 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); // Create the collection and some resources. 
let _collection = - insert_collection(collection_id, "collection", &pool).await; + insert_collection(collection_id, "collection", &conn).await; let resource_id1 = uuid::Uuid::new_v4(); let resource_id2 = uuid::Uuid::new_v4(); let _resource1 = - insert_resource(resource_id1, "resource1", &pool).await; - attach_resource(collection_id, resource_id1, &pool).await; + insert_resource(resource_id1, "resource1", &conn).await; + attach_resource(collection_id, resource_id1, &conn).await; let _resource2 = - insert_resource(resource_id2, "resource2", &pool).await; - attach_resource(collection_id, resource_id2, &pool).await; + insert_resource(resource_id2, "resource2", &conn).await; + attach_resource(collection_id, resource_id2, &conn).await; // Detach the resource from the collection. // @@ -1017,7 +1027,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(Option::::None)), ) - .detach_and_get_result_async(pool.pool()) + .detach_and_get_result_async(&conn) .await; let returned_resource = detach.expect("Detach should have worked"); @@ -1026,11 +1036,11 @@ mod test { // Note that only "resource1" should be detached. // "resource2" should have automatically been filtered away from the // update statement, regardless of user input. 
- assert!(get_resource(resource_id1, &pool) + assert!(get_resource(resource_id1, &conn) .await .collection_id .is_none()); - assert!(get_resource(resource_id2, &pool) + assert!(get_resource(resource_id2, &conn) .await .collection_id .is_some()); diff --git a/nexus/db-queries/src/db/collection_detach_many.rs b/nexus/db-queries/src/db/collection_detach_many.rs index 3418296568..0b65c404c5 100644 --- a/nexus/db-queries/src/db/collection_detach_many.rs +++ b/nexus/db-queries/src/db/collection_detach_many.rs @@ -16,7 +16,7 @@ use super::cte_utils::{ QueryFromClause, QuerySqlType, }; use super::pool::DbConnection; -use async_bb8_diesel::{AsyncRunQueryDsl, PoolError}; +use async_bb8_diesel::AsyncRunQueryDsl; use diesel::associations::HasTable; use diesel::expression::{AsExpression, Expression}; use diesel::helper_types::*; @@ -241,7 +241,7 @@ where /// Result of [`DetachManyFromCollectionStatement`] when executed asynchronously pub type AsyncDetachManyFromCollectionResult = - Result>; + Result>; /// Errors returned by [`DetachManyFromCollectionStatement`]. #[derive(Debug)] @@ -273,21 +273,18 @@ where DetachManyFromCollectionStatement: Send, { /// Issues the CTE asynchronously and parses the result. - pub async fn detach_and_get_result_async( + pub async fn detach_and_get_result_async( self, - conn: &(impl async_bb8_diesel::AsyncConnection - + Sync), + conn: &async_bb8_diesel::Connection, ) -> AsyncDetachManyFromCollectionResult where // We require this bound to ensure that "Self" is runnable as query. Self: query_methods::LoadQuery<'static, DbConnection, RawOutput>, - ConnErr: From + Send + 'static, - PoolError: From, { self.get_result_async::>(conn) .await // If the database returns an error, propagate it right away. - .map_err(|e| DetachManyError::DatabaseError(PoolError::from(e))) + .map_err(|e| DetachManyError::DatabaseError(e)) // Otherwise, parse the output to determine if the CTE succeeded. 
.and_then(Self::parse_result) } @@ -486,6 +483,7 @@ mod test { }; use async_bb8_diesel::{ AsyncConnection, AsyncRunQueryDsl, AsyncSimpleConnection, + ConnectionManager, }; use chrono::Utc; use db_macros::Resource; @@ -521,7 +519,9 @@ mod test { } } - async fn setup_db(pool: &crate::db::Pool) { + async fn setup_db( + pool: &crate::db::Pool, + ) -> bb8::PooledConnection> { let connection = pool.pool().get().await.unwrap(); (*connection) .batch_execute_async( @@ -549,6 +549,7 @@ mod test { ) .await .unwrap(); + connection } /// Describes a resource within the database. @@ -585,7 +586,7 @@ mod test { async fn insert_collection( id: Uuid, name: &str, - pool: &db::Pool, + conn: &async_bb8_diesel::Connection, ) -> Collection { let create_params = IdentityMetadataCreateParams { name: Name::try_from(name.to_string()).unwrap(), @@ -596,18 +597,21 @@ mod test { diesel::insert_into(collection::table) .values(c) - .execute_async(pool.pool()) + .execute_async(conn) .await .unwrap(); - get_collection(id, &pool).await + get_collection(id, conn).await } - async fn get_collection(id: Uuid, pool: &db::Pool) -> Collection { + async fn get_collection( + id: Uuid, + conn: &async_bb8_diesel::Connection, + ) -> Collection { collection::table .find(id) .select(Collection::as_select()) - .first_async(pool.pool()) + .first_async(conn) .await .unwrap() } @@ -615,7 +619,7 @@ mod test { async fn insert_resource( id: Uuid, name: &str, - pool: &db::Pool, + conn: &async_bb8_diesel::Connection, ) -> Resource { let create_params = IdentityMetadataCreateParams { name: Name::try_from(name.to_string()).unwrap(), @@ -628,17 +632,17 @@ mod test { diesel::insert_into(resource::table) .values(r) - .execute_async(pool.pool()) + .execute_async(conn) .await .unwrap(); - get_resource(id, &pool).await + get_resource(id, conn).await } async fn attach_resource( collection_id: Uuid, resource_id: Uuid, - pool: &db::Pool, + conn: &async_bb8_diesel::Connection, ) { Collection::attach_resource( collection_id, @@ 
-649,16 +653,19 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(collection_id)), ) - .attach_and_get_result_async(pool.pool()) + .attach_and_get_result_async(conn) .await .unwrap(); } - async fn get_resource(id: Uuid, pool: &db::Pool) -> Resource { + async fn get_resource( + id: Uuid, + conn: &async_bb8_diesel::Connection, + ) -> Resource { resource::table .find(id) .select(Resource::as_select()) - .first_async(pool.pool()) + .first_async(conn) .await .unwrap() } @@ -775,7 +782,7 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); let _resource_id = uuid::Uuid::new_v4(); @@ -788,7 +795,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(Option::::None)), ) - .detach_and_get_result_async(pool.pool()) + .detach_and_get_result_async(&conn) .await; assert!(matches!(detach, Err(DetachManyError::CollectionNotFound))); @@ -805,14 +812,14 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); let _resource_id = uuid::Uuid::new_v4(); // Create the collection let _collection = - insert_collection(collection_id, "collection", &pool).await; + insert_collection(collection_id, "collection", &conn).await; // Attempt to detach - even though the resource does not exist. let detach = Collection::detach_resources( @@ -824,7 +831,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(Option::::None)), ) - .detach_and_get_result_async(pool.pool()) + .detach_and_get_result_async(&conn) .await; let returned_collection = detach.expect("Detach should have worked"); @@ -832,7 +839,7 @@ mod test { // The collection should still be updated. 
assert_eq!( returned_collection, - get_collection(collection_id, &pool).await + get_collection(collection_id, &conn).await ); db.cleanup().await.unwrap(); @@ -846,16 +853,16 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); // Create the collection and resource. Attach them. let _collection = - insert_collection(collection_id, "collection", &pool).await; - let _resource = insert_resource(resource_id, "resource", &pool).await; - attach_resource(collection_id, resource_id, &pool).await; + insert_collection(collection_id, "collection", &conn).await; + let _resource = insert_resource(resource_id, "resource", &conn).await; + attach_resource(collection_id, resource_id, &conn).await; // Detach the resource from the collection. let detach = Collection::detach_resources( @@ -867,7 +874,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(Option::::None)), ) - .detach_and_get_result_async(pool.pool()) + .detach_and_get_result_async(&conn) .await; // "detach_and_get_result_async" should return the updated collection. @@ -875,7 +882,7 @@ mod test { // The returned value should be the latest value in the DB. assert_eq!( returned_collection, - get_collection(collection_id, &pool).await + get_collection(collection_id, &conn).await ); db.cleanup().await.unwrap(); @@ -889,16 +896,16 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); // Create the collection and resource. 
let _collection = - insert_collection(collection_id, "collection", &pool).await; - let _resource = insert_resource(resource_id, "resource", &pool).await; - attach_resource(collection_id, resource_id, &pool).await; + insert_collection(collection_id, "collection", &conn).await; + let _resource = insert_resource(resource_id, "resource", &conn).await; + attach_resource(collection_id, resource_id, &conn).await; // Detach the resource from the collection. let detach_query = Collection::detach_resources( @@ -911,10 +918,10 @@ mod test { .set(resource::dsl::collection_id.eq(Option::::None)), ); - type TxnError = - TransactionError>; - let result = pool - .pool() + type TxnError = TransactionError< + DetachManyError, + >; + let result = conn .transaction_async(|conn| async move { detach_query.detach_and_get_result_async(&conn).await.map_err( |e| match e { @@ -930,7 +937,7 @@ mod test { // The returned values should be the latest value in the DB. assert_eq!( returned_collection, - get_collection(collection_id, &pool).await + get_collection(collection_id, &conn).await ); db.cleanup().await.unwrap(); @@ -944,15 +951,15 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); let _collection = - insert_collection(collection_id, "collection", &pool).await; + insert_collection(collection_id, "collection", &conn).await; let resource_id = uuid::Uuid::new_v4(); - let _resource = insert_resource(resource_id, "resource", &pool).await; - attach_resource(collection_id, resource_id, &pool).await; + let _resource = insert_resource(resource_id, "resource", &conn).await; + attach_resource(collection_id, resource_id, &conn).await; // Detach a resource from a collection, as usual. 
let detach = Collection::detach_resources( @@ -964,7 +971,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(Option::::None)), ) - .detach_and_get_result_async(pool.pool()) + .detach_and_get_result_async(&conn) .await; assert_eq!( detach.expect("Detach should have worked").description(), @@ -982,7 +989,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(Option::::None)), ) - .detach_and_get_result_async(pool.pool()) + .detach_and_get_result_async(&conn) .await; assert_eq!( detach.expect("Detach should have worked").description(), @@ -1000,15 +1007,15 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); let _collection = - insert_collection(collection_id, "collection", &pool).await; + insert_collection(collection_id, "collection", &conn).await; let resource_id = uuid::Uuid::new_v4(); - let _resource = insert_resource(resource_id, "resource", &pool).await; - attach_resource(collection_id, resource_id, &pool).await; + let _resource = insert_resource(resource_id, "resource", &conn).await; + attach_resource(collection_id, resource_id, &conn).await; // Detach a resource from a collection, but do so with a picky filter // on the collectipon. 
@@ -1023,7 +1030,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(Option::::None)), ) - .detach_and_get_result_async(pool.pool()) + .detach_and_get_result_async(&conn) .await; let err = detach.expect_err("Expected this detach to fail"); @@ -1034,7 +1041,7 @@ mod test { DetachManyError::NoUpdate { collection } => { assert_eq!( collection, - get_collection(collection_id, &pool).await + get_collection(collection_id, &conn).await ); } _ => panic!("Unexpected error: {:?}", err), @@ -1051,23 +1058,23 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); // Create the collection and resource. let _collection = - insert_collection(collection_id, "collection", &pool).await; - let _resource = insert_resource(resource_id, "resource", &pool).await; - attach_resource(collection_id, resource_id, &pool).await; + insert_collection(collection_id, "collection", &conn).await; + let _resource = insert_resource(resource_id, "resource", &conn).await; + attach_resource(collection_id, resource_id, &conn).await; // Immediately soft-delete the resource. 
diesel::update( resource::table.filter(resource::dsl::id.eq(resource_id)), ) .set(resource::dsl::time_deleted.eq(Utc::now())) - .execute_async(pool.pool()) + .execute_async(&*conn) .await .unwrap(); @@ -1082,7 +1089,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(collection_id)), ) - .detach_and_get_result_async(pool.pool()) + .detach_and_get_result_async(&conn) .await; assert_eq!( @@ -1090,7 +1097,7 @@ mod test { "Updated desc" ); assert_eq!( - get_resource(resource_id, &pool) + get_resource(resource_id, &conn) .await .collection_id .as_ref() @@ -1109,20 +1116,20 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; // Create the collection and some resources. let collection_id1 = uuid::Uuid::new_v4(); let _collection1 = - insert_collection(collection_id1, "collection", &pool).await; + insert_collection(collection_id1, "collection", &conn).await; let resource_id1 = uuid::Uuid::new_v4(); let resource_id2 = uuid::Uuid::new_v4(); let _resource1 = - insert_resource(resource_id1, "resource1", &pool).await; - attach_resource(collection_id1, resource_id1, &pool).await; + insert_resource(resource_id1, "resource1", &conn).await; + attach_resource(collection_id1, resource_id1, &conn).await; let _resource2 = - insert_resource(resource_id2, "resource2", &pool).await; - attach_resource(collection_id1, resource_id2, &pool).await; + insert_resource(resource_id2, "resource2", &conn).await; + attach_resource(collection_id1, resource_id2, &conn).await; // Create a separate collection with a resource. // @@ -1130,11 +1137,11 @@ mod test { // on "collection_id1". 
let collection_id2 = uuid::Uuid::new_v4(); let _collection2 = - insert_collection(collection_id2, "collection2", &pool).await; + insert_collection(collection_id2, "collection2", &conn).await; let resource_id3 = uuid::Uuid::new_v4(); let _resource3 = - insert_resource(resource_id3, "resource3", &pool).await; - attach_resource(collection_id2, resource_id3, &pool).await; + insert_resource(resource_id3, "resource3", &conn).await; + attach_resource(collection_id2, resource_id3, &conn).await; // Detach the resource from the collection. let detach = Collection::detach_resources( @@ -1146,7 +1153,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(Option::::None)), ) - .detach_and_get_result_async(pool.pool()) + .detach_and_get_result_async(&conn) .await; let returned_resource = detach.expect("Detach should have worked"); @@ -1154,18 +1161,18 @@ mod test { assert_eq!(returned_resource.description(), "Updated desc"); // Note that only "resource1" and "resource2" should be detached. - assert!(get_resource(resource_id1, &pool) + assert!(get_resource(resource_id1, &conn) .await .collection_id .is_none()); - assert!(get_resource(resource_id2, &pool) + assert!(get_resource(resource_id2, &conn) .await .collection_id .is_none()); // "resource3" should have been left alone. assert_eq!( - get_resource(resource_id3, &pool) + get_resource(resource_id3, &conn) .await .collection_id .as_ref() diff --git a/nexus/db-queries/src/db/collection_insert.rs b/nexus/db-queries/src/db/collection_insert.rs index cebb21a96d..993f16e048 100644 --- a/nexus/db-queries/src/db/collection_insert.rs +++ b/nexus/db-queries/src/db/collection_insert.rs @@ -10,7 +10,7 @@ //! 
3) inserts the child resource row use super::pool::DbConnection; -use async_bb8_diesel::{AsyncRunQueryDsl, ConnectionError, PoolError}; +use async_bb8_diesel::{AsyncRunQueryDsl, ConnectionError}; use diesel::associations::HasTable; use diesel::helper_types::*; use diesel::pg::Pg; @@ -170,7 +170,7 @@ pub enum AsyncInsertError { /// The collection that the query was inserting into does not exist CollectionNotFound, /// Other database error - DatabaseError(PoolError), + DatabaseError(ConnectionError), } impl InsertIntoCollectionStatement @@ -188,20 +188,17 @@ where /// - Ok(new row) /// - Error(collection not found) /// - Error(other diesel error) - pub async fn insert_and_get_result_async( + pub async fn insert_and_get_result_async( self, - conn: &(impl async_bb8_diesel::AsyncConnection - + Sync), + conn: &async_bb8_diesel::Connection, ) -> AsyncInsertIntoCollectionResult where // We require this bound to ensure that "Self" is runnable as query. Self: query_methods::LoadQuery<'static, DbConnection, ResourceType>, - ConnErr: From + Send + 'static, - PoolError: From, { self.get_result_async::(conn) .await - .map_err(|e| Self::translate_async_error(PoolError::from(e))) + .map_err(|e| Self::translate_async_error(e)) } /// Issues the CTE asynchronously and parses the result. @@ -210,20 +207,17 @@ where /// - Ok(Vec of new rows) /// - Error(collection not found) /// - Error(other diesel error) - pub async fn insert_and_get_results_async( + pub async fn insert_and_get_results_async( self, - conn: &(impl async_bb8_diesel::AsyncConnection - + Sync), + conn: &async_bb8_diesel::Connection, ) -> AsyncInsertIntoCollectionResult> where // We require this bound to ensure that "Self" is runnable as query. 
Self: query_methods::LoadQuery<'static, DbConnection, ResourceType>, - ConnErr: From + Send + 'static, - PoolError: From, { self.get_results_async::(conn) .await - .map_err(|e| Self::translate_async_error(PoolError::from(e))) + .map_err(|e| Self::translate_async_error(e)) } /// Check for the intentional division by zero error @@ -244,9 +238,9 @@ where /// Translate from diesel errors into AsyncInsertError, handling the /// intentional division-by-zero error in the CTE. - fn translate_async_error(err: PoolError) -> AsyncInsertError { + fn translate_async_error(err: ConnectionError) -> AsyncInsertError { match err { - PoolError::Connection(ConnectionError::Query(err)) + ConnectionError::Query(err) if Self::error_is_division_by_zero(&err) => { AsyncInsertError::CollectionNotFound @@ -393,7 +387,9 @@ where mod test { use super::*; use crate::db::{self, identity::Resource as IdentityResource}; - use async_bb8_diesel::{AsyncRunQueryDsl, AsyncSimpleConnection}; + use async_bb8_diesel::{ + AsyncRunQueryDsl, AsyncSimpleConnection, ConnectionManager, + }; use chrono::{NaiveDateTime, TimeZone, Utc}; use db_macros::Resource; use diesel::expression_methods::ExpressionMethods; @@ -426,7 +422,9 @@ mod test { } } - async fn setup_db(pool: &crate::db::Pool) { + async fn setup_db( + pool: &crate::db::Pool, + ) -> bb8::PooledConnection> { let connection = pool.pool().get().await.unwrap(); (*connection) .batch_execute_async( @@ -452,6 +450,7 @@ mod test { ) .await .unwrap(); + connection } /// Describes an organization within the database. 
@@ -548,7 +547,7 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); @@ -563,7 +562,7 @@ mod test { resource::dsl::collection_id.eq(collection_id), )), ) - .insert_and_get_result_async(pool.pool()) + .insert_and_get_result_async(&conn) .await; assert!(matches!(insert, Err(AsyncInsertError::CollectionNotFound))); @@ -578,7 +577,7 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); @@ -593,7 +592,7 @@ mod test { collection::dsl::time_modified.eq(Utc::now()), collection::dsl::rcgen.eq(1), )]) - .execute_async(pool.pool()) + .execute_async(&*conn) .await .unwrap(); @@ -614,7 +613,7 @@ mod test { resource::dsl::collection_id.eq(collection_id), )]), ) - .insert_and_get_result_async(pool.pool()) + .insert_and_get_result_async(&conn) .await .unwrap(); assert_eq!(resource.id(), resource_id); @@ -627,7 +626,7 @@ mod test { let collection_rcgen = collection::table .find(collection_id) .select(collection::dsl::rcgen) - .first_async::(pool.pool()) + .first_async::(&*conn) .await .unwrap(); diff --git a/nexus/db-queries/src/db/datastore/address_lot.rs b/nexus/db-queries/src/db/datastore/address_lot.rs index 35b45753e6..9d264dbf6b 100644 --- a/nexus/db-queries/src/db/datastore/address_lot.rs +++ b/nexus/db-queries/src/db/datastore/address_lot.rs @@ -7,14 +7,14 @@ use crate::authz; use crate::context::OpContext; use crate::db; use crate::db::datastore::PgConnection; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::error::TransactionError; use crate::db::model::Name; use 
crate::db::model::{AddressLot, AddressLotBlock, AddressLotReservedBlock}; use crate::db::pagination::paginated; use async_bb8_diesel::{ - AsyncConnection, AsyncRunQueryDsl, Connection, ConnectionError, PoolError, + AsyncConnection, AsyncRunQueryDsl, Connection, ConnectionError, }; use chrono::Utc; use diesel::result::Error as DieselError; @@ -47,7 +47,7 @@ impl DataStore { use db::schema::address_lot::dsl as lot_dsl; use db::schema::address_lot_block::dsl as block_dsl; - self.pool_authorized(opctx) + self.pool_connection_authorized(opctx) .await? // TODO https://github.com/oxidecomputer/omicron/issues/2811 // Audit external networking database transaction usage @@ -84,16 +84,16 @@ impl DataStore { }) .await .map_err(|e| match e { - PoolError::Connection(ConnectionError::Query( - DieselError::DatabaseError(_, _), - )) => public_error_from_diesel_pool( - e, - ErrorHandler::Conflict( - ResourceType::AddressLot, - ¶ms.identity.name.as_str(), - ), - ), - _ => public_error_from_diesel_pool(e, ErrorHandler::Server), + ConnectionError::Query(DieselError::DatabaseError(_, _)) => { + public_error_from_diesel( + e, + ErrorHandler::Conflict( + ResourceType::AddressLot, + ¶ms.identity.name.as_str(), + ), + ) + } + _ => public_error_from_diesel(e, ErrorHandler::Server), }) } @@ -110,7 +110,7 @@ impl DataStore { let id = authz_address_lot.id(); - let pool = self.pool_authorized(opctx).await?; + let conn = self.pool_connection_authorized(opctx).await?; #[derive(Debug)] enum AddressLotDeleteError { @@ -121,7 +121,7 @@ impl DataStore { // TODO https://github.com/oxidecomputer/omicron/issues/2811 // Audit external networking database transaction usage - pool.transaction_async(|conn| async move { + conn.transaction_async(|conn| async move { let rsvd: Vec = rsvd_block_dsl::address_lot_rsvd_block .filter(rsvd_block_dsl::address_lot_id.eq(id)) @@ -151,8 +151,8 @@ impl DataStore { }) .await .map_err(|e| match e { - TxnError::Pool(e) => { - public_error_from_diesel_pool(e, 
ErrorHandler::Server) + TxnError::Connection(e) => { + public_error_from_diesel(e, ErrorHandler::Server) } TxnError::CustomError(AddressLotDeleteError::LotInUse) => { Error::invalid_request("lot is in use") @@ -179,9 +179,9 @@ impl DataStore { } .filter(dsl::time_deleted.is_null()) .select(AddressLot::as_select()) - .load_async(self.pool_authorized(opctx).await?) + .load_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn address_lot_block_list( @@ -192,14 +192,14 @@ impl DataStore { ) -> ListResultVec { use db::schema::address_lot_block::dsl; - let pool = self.pool_authorized(opctx).await?; + let conn = self.pool_connection_authorized(opctx).await?; paginated(dsl::address_lot_block, dsl::id, &pagparams) .filter(dsl::address_lot_id.eq(authz_address_lot.id())) .select(AddressLotBlock::as_select()) - .load_async(pool) + .load_async(&*conn) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn address_lot_id_for_block_id( @@ -207,7 +207,7 @@ impl DataStore { opctx: &OpContext, address_lot_block_id: Uuid, ) -> LookupResult { - let pool = self.pool_authorized(opctx).await?; + let conn = self.pool_connection_authorized(opctx).await?; use db::schema::address_lot_block; use db::schema::address_lot_block::dsl as block_dsl; @@ -216,11 +216,9 @@ impl DataStore { .filter(address_lot_block::id.eq(address_lot_block_id)) .select(address_lot_block::address_lot_id) .limit(1) - .first_async::(pool) + .first_async::(&*conn) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; Ok(address_lot_id) } diff --git a/nexus/db-queries/src/db/datastore/certificate.rs b/nexus/db-queries/src/db/datastore/certificate.rs index 
c37d026251..4b043becd8 100644 --- a/nexus/db-queries/src/db/datastore/certificate.rs +++ b/nexus/db-queries/src/db/datastore/certificate.rs @@ -8,7 +8,7 @@ use super::DataStore; use crate::authz; use crate::context::OpContext; use crate::db; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::model::Certificate; use crate::db::model::Name; @@ -49,10 +49,10 @@ impl DataStore { .do_update() .set(dsl::time_modified.eq(dsl::time_modified)) .returning(Certificate::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::Conflict( ResourceType::Certificate, @@ -117,9 +117,11 @@ impl DataStore { query .select(Certificate::as_select()) - .load_async::(self.pool_authorized(opctx).await?) + .load_async::( + &*self.pool_connection_authorized(opctx).await?, + ) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn certificate_delete( @@ -136,10 +138,10 @@ impl DataStore { .filter(dsl::time_deleted.is_null()) .filter(dsl::id.eq(authz_cert.id())) .set(dsl::time_deleted.eq(now)) - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*self.pool_connection_authorized(opctx).await?) 
.await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_cert), ) diff --git a/nexus/db-queries/src/db/datastore/console_session.rs b/nexus/db-queries/src/db/datastore/console_session.rs index 1e02f9b61d..113a316ae4 100644 --- a/nexus/db-queries/src/db/datastore/console_session.rs +++ b/nexus/db-queries/src/db/datastore/console_session.rs @@ -46,7 +46,7 @@ impl DataStore { diesel::insert_into(dsl::console_session) .values(session) .returning(ConsoleSession::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { Error::internal_error(&format!( @@ -68,7 +68,7 @@ impl DataStore { .filter(dsl::token.eq(authz_session.id())) .set((dsl::time_last_used.eq(Utc::now()),)) .returning(ConsoleSession::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { Error::internal_error(&format!( @@ -130,7 +130,7 @@ impl DataStore { diesel::delete(dsl::console_session) .filter(dsl::silo_user_id.eq(silo_user_id)) .filter(dsl::token.eq(authz_session.id())) - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*self.pool_connection_authorized(opctx).await?) 
.await .map(|_rows_deleted| ()) .map_err(|e| { diff --git a/nexus/db-queries/src/db/datastore/dataset.rs b/nexus/db-queries/src/db/datastore/dataset.rs index 55259e922f..99972459c8 100644 --- a/nexus/db-queries/src/db/datastore/dataset.rs +++ b/nexus/db-queries/src/db/datastore/dataset.rs @@ -8,7 +8,7 @@ use super::DataStore; use crate::db; use crate::db::collection_insert::AsyncInsertError; use crate::db::collection_insert::DatastoreCollection; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::identity::Asset; use crate::db::model::Dataset; @@ -44,22 +44,22 @@ impl DataStore { dsl::kind.eq(excluded(dsl::kind)), )), ) - .insert_and_get_result_async(self.pool()) + .insert_and_get_result_async( + &*self.pool_connection_unauthorized().await?, + ) .await .map_err(|e| match e { AsyncInsertError::CollectionNotFound => Error::ObjectNotFound { type_name: ResourceType::Zpool, lookup_type: LookupType::ById(zpool_id), }, - AsyncInsertError::DatabaseError(e) => { - public_error_from_diesel_pool( - e, - ErrorHandler::Conflict( - ResourceType::Dataset, - &dataset.id().to_string(), - ), - ) - } + AsyncInsertError::DatabaseError(e) => public_error_from_diesel( + e, + ErrorHandler::Conflict( + ResourceType::Dataset, + &dataset.id().to_string(), + ), + ), }) } } diff --git a/nexus/db-queries/src/db/datastore/db_metadata.rs b/nexus/db-queries/src/db/datastore/db_metadata.rs index ac43081601..181b3c1798 100644 --- a/nexus/db-queries/src/db/datastore/db_metadata.rs +++ b/nexus/db-queries/src/db/datastore/db_metadata.rs @@ -6,7 +6,7 @@ use super::DataStore; use crate::db; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::TransactionError; use async_bb8_diesel::{ @@ -270,11 +270,9 @@ impl DataStore { let version: String = dsl::db_metadata .filter(dsl::singleton.eq(true)) 
.select(dsl::version) - .get_result_async(self.pool()) + .get_result_async(&*self.pool_connection_unauthorized().await?) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; SemverVersion::from_str(&version).map_err(|e| { Error::internal_error(&format!("Invalid schema version: {e}")) @@ -312,9 +310,9 @@ impl DataStore { dsl::time_modified.eq(Utc::now()), dsl::target_version.eq(Some(to_version.to_string())), )) - .execute_async(self.pool()) + .execute_async(&*self.pool_connection_unauthorized().await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; if rows_updated != 1 { return Err(Error::internal_error( @@ -332,7 +330,7 @@ impl DataStore { target: &SemverVersion, sql: &String, ) -> Result<(), Error> { - let result = self.pool().transaction_async(|conn| async move { + let result = self.pool_connection_unauthorized().await?.transaction_async(|conn| async move { if target.to_string() != EARLIEST_SUPPORTED_VERSION { let validate_version_query = format!("SELECT CAST(\ IF(\ @@ -353,8 +351,8 @@ impl DataStore { match result { Ok(()) => Ok(()), Err(TransactionError::CustomError(())) => panic!("No custom error"), - Err(TransactionError::Pool(e)) => { - Err(public_error_from_diesel_pool(e, ErrorHandler::Server)) + Err(TransactionError::Connection(e)) => { + Err(public_error_from_diesel(e, ErrorHandler::Server)) } } } @@ -378,9 +376,9 @@ impl DataStore { dsl::version.eq(to_version.to_string()), dsl::target_version.eq(None as Option), )) - .execute_async(self.pool()) + .execute_async(&*self.pool_connection_unauthorized().await?) 
.await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; if rows_updated != 1 { return Err(Error::internal_error( @@ -432,6 +430,7 @@ mod test { let cfg = db::Config { url: crdb.pg_config().clone() }; let pool = Arc::new(db::Pool::new(&logctx.log, &cfg)); + let conn = pool.pool().get().await.unwrap(); // Mimic the layout of "schema/crdb". let config_dir = tempfile::TempDir::new().unwrap(); @@ -457,7 +456,7 @@ mod test { use db::schema::db_metadata::dsl; diesel::update(dsl::db_metadata.filter(dsl::singleton.eq(true))) .set(dsl::version.eq(v0.to_string())) - .execute_async(pool.pool()) + .execute_async(&*conn) .await .expect("Failed to set version back to 0.0.0"); @@ -507,7 +506,7 @@ mod test { "EXISTS (SELECT * FROM pg_tables WHERE tablename = 'widget')" ) ) - .get_result_async::(datastore.pool()) + .get_result_async::(&*datastore.pool_connection_for_tests().await.unwrap()) .await .expect("Failed to query for table"); assert_eq!(result, false, "The 'widget' table should have been deleted, but it exists.\ diff --git a/nexus/db-queries/src/db/datastore/device_auth.rs b/nexus/db-queries/src/db/datastore/device_auth.rs index 62e54f2321..e084834833 100644 --- a/nexus/db-queries/src/db/datastore/device_auth.rs +++ b/nexus/db-queries/src/db/datastore/device_auth.rs @@ -8,7 +8,7 @@ use super::DataStore; use crate::authz; use crate::context::OpContext; use crate::db; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::error::TransactionError; use crate::db::model::DeviceAccessToken; @@ -42,9 +42,9 @@ impl DataStore { diesel::insert_into(dsl::device_auth_request) .values(auth_request) .returning(DeviceAuthRequest::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) 
.await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Remove the device authorization request and create a new device @@ -77,7 +77,7 @@ impl DataStore { } type TxnError = TransactionError; - self.pool_authorized(opctx) + self.pool_connection_authorized(opctx) .await? .transaction_async(|conn| async move { match delete_request.execute_async(&conn).await? { @@ -103,8 +103,8 @@ impl DataStore { TxnError::CustomError(TokenGrantError::TooManyRequests) => { Error::internal_error("unexpectedly found multiple device auth requests for the same user code") } - TxnError::Pool(e) => { - public_error_from_diesel_pool(e, ErrorHandler::Server) + TxnError::Connection(e) => { + public_error_from_diesel(e, ErrorHandler::Server) } }) } @@ -127,10 +127,10 @@ impl DataStore { .filter(dsl::client_id.eq(client_id)) .filter(dsl::device_code.eq(device_code)) .select(DeviceAccessToken::as_select()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) 
.await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByLookup( ResourceType::DeviceAccessToken, diff --git a/nexus/db-queries/src/db/datastore/disk.rs b/nexus/db-queries/src/db/datastore/disk.rs index 7ae9967285..80f72c1e18 100644 --- a/nexus/db-queries/src/db/datastore/disk.rs +++ b/nexus/db-queries/src/db/datastore/disk.rs @@ -15,7 +15,7 @@ use crate::db::collection_detach::DatastoreDetachTarget; use crate::db::collection_detach::DetachError; use crate::db::collection_insert::AsyncInsertError; use crate::db::collection_insert::DatastoreCollection; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::identity::Resource; use crate::db::lookup::LookupPath; @@ -71,9 +71,9 @@ impl DataStore { .filter(dsl::time_deleted.is_null()) .filter(dsl::attach_instance_id.eq(authz_instance.id())) .select(Disk::as_select()) - .load_async::(self.pool_authorized(opctx).await?) + .load_async::(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn project_create_disk( @@ -98,16 +98,16 @@ impl DataStore { .do_update() .set(dsl::time_modified.eq(dsl::time_modified)), ) - .insert_and_get_result_async(self.pool_authorized(opctx).await?) 
+ .insert_and_get_result_async( + &*self.pool_connection_authorized(opctx).await?, + ) .await .map_err(|e| match e { AsyncInsertError::CollectionNotFound => authz_project.not_found(), - AsyncInsertError::DatabaseError(e) => { - public_error_from_diesel_pool( - e, - ErrorHandler::Conflict(ResourceType::Disk, name.as_str()), - ) - } + AsyncInsertError::DatabaseError(e) => public_error_from_diesel( + e, + ErrorHandler::Conflict(ResourceType::Disk, name.as_str()), + ), })?; let runtime = disk.runtime(); @@ -146,9 +146,9 @@ impl DataStore { .filter(dsl::time_deleted.is_null()) .filter(dsl::project_id.eq(authz_project.id())) .select(Disk::as_select()) - .load_async::(self.pool_authorized(opctx).await?) + .load_async::(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Attaches a disk to an instance, if both objects: @@ -199,7 +199,7 @@ impl DataStore { diesel::update(disk::dsl::disk).set(attach_update), ); - let (instance, disk) = query.attach_and_get_result_async(self.pool_authorized(opctx).await?) + let (instance, disk) = query.attach_and_get_result_async(&*self.pool_connection_authorized(opctx).await?) .await .or_else(|e| { match e { @@ -278,7 +278,7 @@ impl DataStore { } }, AttachError::DatabaseError(e) => { - Err(public_error_from_diesel_pool(e, ErrorHandler::Server)) + Err(public_error_from_diesel(e, ErrorHandler::Server)) }, } })?; @@ -331,7 +331,7 @@ impl DataStore { disk::dsl::slot.eq(Option::::None) )) ) - .detach_and_get_result_async(self.pool_authorized(opctx).await?) + .detach_and_get_result_async(&*self.pool_connection_authorized(opctx).await?) 
.await .or_else(|e| { match e { @@ -405,7 +405,7 @@ impl DataStore { } }, DetachError::DatabaseError(e) => { - Err(public_error_from_diesel_pool(e, ErrorHandler::Server)) + Err(public_error_from_diesel(e, ErrorHandler::Server)) }, } })?; @@ -438,14 +438,14 @@ impl DataStore { .filter(dsl::state_generation.lt(new_runtime.gen)) .set(new_runtime.clone()) .check_if_exists::(disk_id) - .execute_and_check(self.pool()) + .execute_and_check(&*self.pool_connection_authorized(opctx).await?) .await .map(|r| match r.status { UpdateStatus::Updated => true, UpdateStatus::NotUpdatedButExists => false, }) .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_disk), ) @@ -469,14 +469,14 @@ impl DataStore { .filter(dsl::id.eq(disk_id)) .set(dsl::pantry_address.eq(pantry_address.to_string())) .check_if_exists::(disk_id) - .execute_and_check(self.pool_authorized(opctx).await?) + .execute_and_check(&*self.pool_connection_authorized(opctx).await?) .await .map(|r| match r.status { UpdateStatus::Updated => true, UpdateStatus::NotUpdatedButExists => false, }) .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_disk), ) @@ -499,14 +499,14 @@ impl DataStore { .filter(dsl::id.eq(disk_id)) .set(&DiskUpdate { pantry_address: None }) .check_if_exists::(disk_id) - .execute_and_check(self.pool_authorized(opctx).await?) + .execute_and_check(&*self.pool_connection_authorized(opctx).await?) 
.await .map(|r| match r.status { UpdateStatus::Updated => true, UpdateStatus::NotUpdatedButExists => false, }) .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_disk), ) @@ -571,7 +571,7 @@ impl DataStore { ok_to_delete_states: &[api::external::DiskState], ) -> Result { use db::schema::disk::dsl; - let pool = self.pool(); + let conn = self.pool_connection_unauthorized().await?; let now = Utc::now(); let ok_to_delete_state_labels: Vec<_> = @@ -585,10 +585,10 @@ impl DataStore { .filter(dsl::attach_instance_id.is_null()) .set((dsl::disk_state.eq(destroyed), dsl::time_deleted.eq(now))) .check_if_exists::(*disk_id) - .execute_and_check(pool) + .execute_and_check(&conn) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByLookup( ResourceType::Disk, diff --git a/nexus/db-queries/src/db/datastore/dns.rs b/nexus/db-queries/src/db/datastore/dns.rs index ddf2718930..d9704594b1 100644 --- a/nexus/db-queries/src/db/datastore/dns.rs +++ b/nexus/db-queries/src/db/datastore/dns.rs @@ -6,7 +6,7 @@ use super::DataStore; use crate::authz; use crate::context::OpContext; use crate::db; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::model::DnsGroup; use crate::db::model::DnsName; @@ -15,9 +15,10 @@ use crate::db::model::DnsZone; use crate::db::model::Generation; use crate::db::model::InitialDnsGroup; use crate::db::pagination::paginated; +use crate::db::pool::DbConnection; use crate::db::TransactionError; +use async_bb8_diesel::AsyncConnection; use async_bb8_diesel::AsyncRunQueryDsl; -use async_bb8_diesel::PoolError; use diesel::prelude::*; use nexus_types::internal_api::params::DnsConfigParams; use nexus_types::internal_api::params::DnsConfigZone; @@ -51,9 +52,9 @@ impl DataStore { paginated(dsl::dns_zone, dsl::zone_name, pagparams) 
.filter(dsl::dns_group.eq(dns_group)) .select(DnsZone::as_select()) - .load_async(self.pool_authorized(opctx).await?) + .load_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// List all DNS zones in a DNS group without pagination @@ -65,25 +66,18 @@ impl DataStore { opctx: &OpContext, dns_group: DnsGroup, ) -> ListResultVec { - let conn = self.pool_authorized(opctx).await?; - self.dns_zones_list_all_on_connection(opctx, conn, dns_group).await + let conn = self.pool_connection_authorized(opctx).await?; + self.dns_zones_list_all_on_connection(opctx, &conn, dns_group).await } /// Variant of [`Self::dns_zones_list_all`] which may be called from a /// transaction context. - pub(crate) async fn dns_zones_list_all_on_connection( + pub(crate) async fn dns_zones_list_all_on_connection( &self, opctx: &OpContext, - conn: &(impl async_bb8_diesel::AsyncConnection< - crate::db::pool::DbConnection, - ConnErr, - > + Sync), + conn: &async_bb8_diesel::Connection, dns_group: DnsGroup, - ) -> ListResultVec - where - ConnErr: From + Send + 'static, - ConnErr: Into, - { + ) -> ListResultVec { use db::schema::dns_zone::dsl; const LIMIT: usize = 5; @@ -95,9 +89,7 @@ impl DataStore { .select(DnsZone::as_select()) .load_async(conn) .await - .map_err(|e| { - public_error_from_diesel_pool(e.into(), ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; bail_unless!( list.len() < LIMIT, @@ -116,25 +108,18 @@ impl DataStore { ) -> LookupResult { self.dns_group_latest_version_conn( opctx, - self.pool_authorized(opctx).await?, + &*self.pool_connection_authorized(opctx).await?, dns_group, ) .await } - pub async fn dns_group_latest_version_conn( + pub async fn dns_group_latest_version_conn( &self, opctx: &OpContext, - conn: &(impl async_bb8_diesel::AsyncConnection< - crate::db::pool::DbConnection, - 
ConnErr, - > + Sync), + conn: &async_bb8_diesel::Connection, dns_group: DnsGroup, - ) -> LookupResult - where - ConnErr: From + Send + 'static, - ConnErr: Into, - { + ) -> LookupResult { opctx.authorize(authz::Action::Read, &authz::DNS_CONFIG).await?; use db::schema::dns_version::dsl; let versions = dsl::dns_version @@ -144,9 +129,7 @@ impl DataStore { .select(DnsVersion::as_select()) .load_async(conn) .await - .map_err(|e| { - public_error_from_diesel_pool(e.into(), ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; bail_unless!( versions.len() == 1, @@ -178,11 +161,9 @@ impl DataStore { .or(dsl::version_removed.gt(version)), ) .select(DnsName::as_select()) - .load_async(self.pool_authorized(opctx).await?) + .load_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })? + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))? .into_iter() .filter_map(|n: DnsName| match n.records() { Ok(records) => Some((n.name, records)), @@ -326,17 +307,10 @@ impl DataStore { } /// Load initial data for a DNS group into the database - pub async fn load_dns_data( - conn: &(impl async_bb8_diesel::AsyncConnection< - crate::db::pool::DbConnection, - ConnErr, - > + Sync), + pub async fn load_dns_data( + conn: &async_bb8_diesel::Connection, dns: InitialDnsGroup, - ) -> Result<(), Error> - where - ConnErr: From + Send + 'static, - ConnErr: Into, - { + ) -> Result<(), Error> { { use db::schema::dns_zone::dsl; diesel::insert_into(dsl::dns_zone) @@ -346,10 +320,7 @@ impl DataStore { .execute_async(conn) .await .map_err(|e| { - public_error_from_diesel_pool( - e.into(), - ErrorHandler::Server, - ) + public_error_from_diesel(e, ErrorHandler::Server) })?; } @@ -362,10 +333,7 @@ impl DataStore { .execute_async(conn) .await .map_err(|e| { - public_error_from_diesel_pool( - e.into(), - ErrorHandler::Server, - ) + public_error_from_diesel(e, 
ErrorHandler::Server) })?; } @@ -378,10 +346,7 @@ impl DataStore { .execute_async(conn) .await .map_err(|e| { - public_error_from_diesel_pool( - e.into(), - ErrorHandler::Server, - ) + public_error_from_diesel(e, ErrorHandler::Server) })?; } @@ -407,20 +372,12 @@ impl DataStore { /// **Callers almost certainly want to wake up the corresponding Nexus /// background task to cause these changes to be propagated to the /// corresponding DNS servers.** - pub async fn dns_update( + pub async fn dns_update( &self, opctx: &OpContext, - conn: &(impl async_bb8_diesel::AsyncConnection< - crate::db::pool::DbConnection, - ConnErr, - > + Sync), + conn: &async_bb8_diesel::Connection, update: DnsVersionUpdateBuilder, - ) -> Result<(), Error> - where - ConnErr: From + Send + 'static, - ConnErr: Into, - TransactionError: From, - { + ) -> Result<(), Error> { opctx.authorize(authz::Action::Modify, &authz::DNS_CONFIG).await?; let zones = self @@ -438,28 +395,21 @@ impl DataStore { match result { Ok(()) => Ok(()), Err(TransactionError::CustomError(e)) => Err(e), - Err(TransactionError::Pool(e)) => { - Err(public_error_from_diesel_pool(e, ErrorHandler::Server)) + Err(TransactionError::Connection(e)) => { + Err(public_error_from_diesel(e, ErrorHandler::Server)) } } } // This must only be used inside a transaction. Otherwise, it may make // invalid changes to the database state. Use `dns_update()` instead. - async fn dns_update_internal( + async fn dns_update_internal( &self, opctx: &OpContext, - conn: &(impl async_bb8_diesel::AsyncConnection< - crate::db::pool::DbConnection, - ConnErr, - > + Sync), + conn: &async_bb8_diesel::Connection, update: DnsVersionUpdateBuilder, zones: Vec, - ) -> Result<(), Error> - where - ConnErr: From + Send + 'static, - ConnErr: Into, - { + ) -> Result<(), Error> { // TODO-scalability TODO-performance This would be much better as a CTE // for all the usual reasons described in RFD 192. 
Using an interactive // transaction here means that either we wind up holding database locks @@ -507,10 +457,7 @@ impl DataStore { .execute_async(conn) .await .map_err(|e| { - public_error_from_diesel_pool( - e.into(), - ErrorHandler::Server, - ) + public_error_from_diesel(e, ErrorHandler::Server) })?; } @@ -534,9 +481,7 @@ impl DataStore { .set(dsl::version_removed.eq(new_version_num)) .execute_async(conn) .await - .map_err(|e| { - public_error_from_diesel_pool(e.into(), ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; bail_unless!( nremoved == ntoremove, @@ -552,10 +497,7 @@ impl DataStore { .execute_async(conn) .await .map_err(|e| { - public_error_from_diesel_pool( - e.into(), - ErrorHandler::Server, - ) + public_error_from_diesel(e, ErrorHandler::Server) })?; bail_unless!( @@ -749,8 +691,8 @@ mod test { comment: "test suite".to_string(), }) .execute_async( - datastore - .pool_for_tests() + &*datastore + .pool_connection_for_tests() .await .expect("failed to get datastore connection"), ) @@ -810,8 +752,8 @@ mod test { HashMap::new(), ); { - let conn = datastore.pool_for_tests().await.unwrap(); - DataStore::load_dns_data(conn, initial) + let conn = datastore.pool_connection_for_tests().await.unwrap(); + DataStore::load_dns_data(&conn, initial) .await .expect("failed to load initial DNS zone"); } @@ -850,8 +792,8 @@ mod test { ]), ); { - let conn = datastore.pool_for_tests().await.unwrap(); - DataStore::load_dns_data(conn, initial) + let conn = datastore.pool_connection_for_tests().await.unwrap(); + DataStore::load_dns_data(&conn, initial) .await .expect("failed to load initial DNS zone"); } @@ -1026,7 +968,9 @@ mod test { zone_name: "z1.foo".to_string(), }, ]) - .execute_async(datastore.pool_for_tests().await.unwrap()) + .execute_async( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) .await .unwrap(); } @@ -1042,7 +986,9 @@ mod test { vi1.clone(), vi2.clone(), ]) - 
.execute_async(datastore.pool_for_tests().await.unwrap()) + .execute_async( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) .await .unwrap(); } @@ -1142,7 +1088,9 @@ mod test { ) .unwrap(), ]) - .execute_async(datastore.pool_for_tests().await.unwrap()) + .execute_async( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) .await .unwrap(); } @@ -1288,7 +1236,9 @@ mod test { zone_name: "z1.foo".to_string(), }, ]) - .execute_async(datastore.pool_for_tests().await.unwrap()) + .execute_async( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) .await .unwrap_err(); assert!(error @@ -1317,7 +1267,9 @@ mod test { comment: "test suite 4".to_string(), }, ]) - .execute_async(datastore.pool_for_tests().await.unwrap()) + .execute_async( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) .await .unwrap_err(); assert!(error @@ -1349,7 +1301,9 @@ mod test { ) .unwrap(), ]) - .execute_async(datastore.pool_for_tests().await.unwrap()) + .execute_async( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) .await .unwrap_err(); assert!(error @@ -1470,7 +1424,9 @@ mod test { dns_zone2.clone(), dns_zone3.clone(), ]) - .execute_async(datastore.pool_for_tests().await.unwrap()) + .execute_async( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) .await .unwrap(); } @@ -1494,7 +1450,9 @@ mod test { comment: "test suite 8".to_string(), }, ]) - .execute_async(datastore.pool_for_tests().await.unwrap()) + .execute_async( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) .await .unwrap(); } @@ -1523,8 +1481,8 @@ mod test { update.add_name(String::from("n1"), records1.clone()).unwrap(); update.add_name(String::from("n2"), records2.clone()).unwrap(); - let conn = datastore.pool_for_tests().await.unwrap(); - datastore.dns_update(&opctx, conn, update).await.unwrap(); + let conn = datastore.pool_connection_for_tests().await.unwrap(); + datastore.dns_update(&opctx, &conn, update).await.unwrap(); } // Verify the new config. 
@@ -1556,8 +1514,8 @@ mod test { update.remove_name(String::from("n1")).unwrap(); update.add_name(String::from("n1"), records12.clone()).unwrap(); - let conn = datastore.pool_for_tests().await.unwrap(); - datastore.dns_update(&opctx, conn, update).await.unwrap(); + let conn = datastore.pool_connection_for_tests().await.unwrap(); + datastore.dns_update(&opctx, &conn, update).await.unwrap(); } let dns_config = datastore @@ -1586,8 +1544,8 @@ mod test { ); update.remove_name(String::from("n1")).unwrap(); - let conn = datastore.pool_for_tests().await.unwrap(); - datastore.dns_update(&opctx, conn, update).await.unwrap(); + let conn = datastore.pool_connection_for_tests().await.unwrap(); + datastore.dns_update(&opctx, &conn, update).await.unwrap(); } let dns_config = datastore @@ -1613,8 +1571,8 @@ mod test { ); update.add_name(String::from("n1"), records2.clone()).unwrap(); - let conn = datastore.pool_for_tests().await.unwrap(); - datastore.dns_update(&opctx, conn, update).await.unwrap(); + let conn = datastore.pool_connection_for_tests().await.unwrap(); + datastore.dns_update(&opctx, &conn, update).await.unwrap(); } let dns_config = datastore @@ -1644,8 +1602,8 @@ mod test { ); update1.remove_name(String::from("n1")).unwrap(); - let conn1 = datastore.pool_for_tests().await.unwrap(); - let conn2 = datastore.pool_for_tests().await.unwrap(); + let conn1 = datastore.pool_connection_for_tests().await.unwrap(); + let conn2 = datastore.pool_connection_for_tests().await.unwrap(); let (wait1_tx, wait1_rx) = tokio::sync::oneshot::channel(); let (wait2_tx, wait2_rx) = tokio::sync::oneshot::channel(); @@ -1680,7 +1638,7 @@ mod test { String::from("the test suite"), ); update2.add_name(String::from("n1"), records1.clone()).unwrap(); - datastore.dns_update(&opctx, conn2, update2).await.unwrap(); + datastore.dns_update(&opctx, &conn2, update2).await.unwrap(); // Now let the first one finish. 
wait2_tx.send(()).unwrap(); @@ -1723,9 +1681,9 @@ mod test { ); update.remove_name(String::from("n4")).unwrap(); - let conn = datastore.pool_for_tests().await.unwrap(); + let conn = datastore.pool_connection_for_tests().await.unwrap(); let error = - datastore.dns_update(&opctx, conn, update).await.unwrap_err(); + datastore.dns_update(&opctx, &conn, update).await.unwrap_err(); assert_eq!( error.to_string(), "Internal Error: updated wrong number of dns_name \ @@ -1748,9 +1706,9 @@ mod test { ); update.add_name(String::from("n2"), records1.clone()).unwrap(); - let conn = datastore.pool_for_tests().await.unwrap(); + let conn = datastore.pool_connection_for_tests().await.unwrap(); let error = - datastore.dns_update(&opctx, conn, update).await.unwrap_err(); + datastore.dns_update(&opctx, &conn, update).await.unwrap_err(); let msg = error.to_string(); assert!(msg.starts_with("Internal Error: ")); assert!(msg.contains("violates unique constraint")); diff --git a/nexus/db-queries/src/db/datastore/external_ip.rs b/nexus/db-queries/src/db/datastore/external_ip.rs index 8f5e9ba4c1..268b284a0a 100644 --- a/nexus/db-queries/src/db/datastore/external_ip.rs +++ b/nexus/db-queries/src/db/datastore/external_ip.rs @@ -9,7 +9,7 @@ use crate::authz; use crate::authz::ApiResource; use crate::context::OpContext; use crate::db; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::lookup::LookupPath; use crate::db::model::ExternalIp; @@ -20,7 +20,7 @@ use crate::db::pool::DbConnection; use crate::db::queries::external_ip::NextExternalIp; use crate::db::update_and_check::UpdateAndCheck; use crate::db::update_and_check::UpdateStatus; -use async_bb8_diesel::{AsyncRunQueryDsl, PoolError}; +use async_bb8_diesel::AsyncRunQueryDsl; use chrono::Utc; use diesel::prelude::*; use nexus_types::identity::Resource; @@ -131,29 +131,22 @@ impl DataStore { opctx: &OpContext, data: IncompleteExternalIp, ) -> 
CreateResult { - let conn = self.pool_authorized(opctx).await?; - Self::allocate_external_ip_on_connection(conn, data).await + let conn = self.pool_connection_authorized(opctx).await?; + Self::allocate_external_ip_on_connection(&conn, data).await } /// Variant of [Self::allocate_external_ip] which may be called from a /// transaction context. - pub(crate) async fn allocate_external_ip_on_connection( - conn: &(impl async_bb8_diesel::AsyncConnection - + Sync), + pub(crate) async fn allocate_external_ip_on_connection( + conn: &async_bb8_diesel::Connection, data: IncompleteExternalIp, - ) -> CreateResult - where - ConnErr: From + Send + 'static, - PoolError: From, - { + ) -> CreateResult { let explicit_ip = data.explicit_ip().is_some(); NextExternalIp::new(data).get_result_async(conn).await.map_err(|e| { use async_bb8_diesel::ConnectionError::Query; - use async_bb8_diesel::PoolError::Connection; use diesel::result::Error::NotFound; - let e = PoolError::from(e); match e { - Connection(Query(NotFound)) => { + Query(NotFound) => { if explicit_ip { Error::invalid_request( "Requested external IP address not available", @@ -164,7 +157,7 @@ impl DataStore { ) } } - _ => crate::db::queries::external_ip::from_pool(e), + _ => crate::db::queries::external_ip::from_diesel(e), } }) } @@ -238,13 +231,13 @@ impl DataStore { .filter(dsl::id.eq(ip_id)) .set(dsl::time_deleted.eq(now)) .check_if_exists::(ip_id) - .execute_and_check(self.pool_authorized(opctx).await?) + .execute_and_check(&*self.pool_connection_authorized(opctx).await?) 
.await .map(|r| match r.status { UpdateStatus::Updated => true, UpdateStatus::NotUpdatedButExists => false, }) - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Delete all external IP addresses associated with the provided instance @@ -268,9 +261,9 @@ impl DataStore { .filter(dsl::parent_id.eq(instance_id)) .filter(dsl::kind.ne(IpKind::Floating)) .set(dsl::time_deleted.eq(now)) - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Fetch all external IP addresses of any kind for the provided instance @@ -285,8 +278,8 @@ impl DataStore { .filter(dsl::parent_id.eq(instance_id)) .filter(dsl::time_deleted.is_null()) .select(ExternalIp::as_select()) - .get_results_async(self.pool_authorized(opctx).await?) + .get_results_async(&*self.pool_connection_authorized(opctx).await?) 
.await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } } diff --git a/nexus/db-queries/src/db/datastore/identity_provider.rs b/nexus/db-queries/src/db/datastore/identity_provider.rs index 4d725d1cf4..fdc9a020e7 100644 --- a/nexus/db-queries/src/db/datastore/identity_provider.rs +++ b/nexus/db-queries/src/db/datastore/identity_provider.rs @@ -8,7 +8,7 @@ use super::DataStore; use crate::authz; use crate::context::OpContext; use crate::db; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::identity::Resource; use crate::db::model::IdentityProvider; @@ -46,9 +46,11 @@ impl DataStore { .filter(dsl::silo_id.eq(authz_idp_list.silo().id())) .filter(dsl::time_deleted.is_null()) .select(IdentityProvider::as_select()) - .load_async::(self.pool_authorized(opctx).await?) + .load_async::( + &*self.pool_connection_authorized(opctx).await?, + ) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn saml_identity_provider_create( @@ -61,7 +63,7 @@ impl DataStore { assert_eq!(provider.silo_id, authz_idp_list.silo().id()); let name = provider.identity().name.to_string(); - self.pool_authorized(opctx) + self.pool_connection_authorized(opctx) .await? 
.transaction_async(|conn| async move { // insert silo identity provider record with type Saml @@ -94,7 +96,7 @@ impl DataStore { }) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::Conflict( ResourceType::SamlIdentityProvider, diff --git a/nexus/db-queries/src/db/datastore/image.rs b/nexus/db-queries/src/db/datastore/image.rs index 17bdb6fae0..e44da013cd 100644 --- a/nexus/db-queries/src/db/datastore/image.rs +++ b/nexus/db-queries/src/db/datastore/image.rs @@ -4,7 +4,7 @@ use crate::context::OpContext; use crate::db; use crate::db::collection_insert::AsyncInsertError; use crate::db::collection_insert::DatastoreCollection; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::model::Image; use crate::db::model::Project; @@ -52,9 +52,11 @@ impl DataStore { .filter(project_dsl::time_deleted.is_null()) .filter(project_dsl::project_id.eq(authz_project.id())) .select(ProjectImage::as_select()) - .load_async::(self.pool_authorized(opctx).await?) + .load_async::( + &*self.pool_connection_authorized(opctx).await?, + ) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) .map(|v| v.into_iter().map(|v| v.into()).collect()) } @@ -80,9 +82,11 @@ impl DataStore { .filter(dsl::time_deleted.is_null()) .filter(dsl::silo_id.eq(authz_silo.id())) .select(SiloImage::as_select()) - .load_async::(self.pool_authorized(opctx).await?) 
+ .load_async::( + &*self.pool_connection_authorized(opctx).await?, + ) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) .map(|v| v.into_iter().map(|v| v.into()).collect()) } @@ -107,19 +111,19 @@ impl DataStore { .do_update() .set(dsl::time_modified.eq(dsl::time_modified)), ) - .insert_and_get_result_async(self.pool_authorized(opctx).await?) + .insert_and_get_result_async( + &*self.pool_connection_authorized(opctx).await?, + ) .await .map_err(|e| match e { AsyncInsertError::CollectionNotFound => authz_silo.not_found(), - AsyncInsertError::DatabaseError(e) => { - public_error_from_diesel_pool( - e, - ErrorHandler::Conflict( - ResourceType::ProjectImage, - name.as_str(), - ), - ) - } + AsyncInsertError::DatabaseError(e) => public_error_from_diesel( + e, + ErrorHandler::Conflict( + ResourceType::ProjectImage, + name.as_str(), + ), + ), })?; Ok(image) } @@ -145,19 +149,19 @@ impl DataStore { .do_update() .set(dsl::time_modified.eq(dsl::time_modified)), ) - .insert_and_get_result_async(self.pool_authorized(opctx).await?) + .insert_and_get_result_async( + &*self.pool_connection_authorized(opctx).await?, + ) .await .map_err(|e| match e { AsyncInsertError::CollectionNotFound => authz_project.not_found(), - AsyncInsertError::DatabaseError(e) => { - public_error_from_diesel_pool( - e, - ErrorHandler::Conflict( - ResourceType::ProjectImage, - name.as_str(), - ), - ) - } + AsyncInsertError::DatabaseError(e) => public_error_from_diesel( + e, + ErrorHandler::Conflict( + ResourceType::ProjectImage, + name.as_str(), + ), + ), })?; Ok(image) } @@ -181,10 +185,10 @@ impl DataStore { dsl::time_modified.eq(Utc::now()), )) .returning(Image::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) 
.await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::Conflict( ResourceType::SiloImage, @@ -215,10 +219,10 @@ impl DataStore { dsl::time_modified.eq(Utc::now()), )) .returning(Image::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::Conflict( ResourceType::ProjectImage, diff --git a/nexus/db-queries/src/db/datastore/instance.rs b/nexus/db-queries/src/db/datastore/instance.rs index 1f347d2378..46ca07a74a 100644 --- a/nexus/db-queries/src/db/datastore/instance.rs +++ b/nexus/db-queries/src/db/datastore/instance.rs @@ -13,7 +13,7 @@ use crate::db::collection_detach_many::DatastoreDetachManyTarget; use crate::db::collection_detach_many::DetachManyError; use crate::db::collection_insert::AsyncInsertError; use crate::db::collection_insert::DatastoreCollection; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::identity::Resource; use crate::db::lookup::LookupPath; @@ -84,19 +84,16 @@ impl DataStore { .do_update() .set(dsl::time_modified.eq(dsl::time_modified)), ) - .insert_and_get_result_async(self.pool_authorized(opctx).await?) 
+ .insert_and_get_result_async( + &*self.pool_connection_authorized(opctx).await?, + ) .await .map_err(|e| match e { AsyncInsertError::CollectionNotFound => authz_project.not_found(), - AsyncInsertError::DatabaseError(e) => { - public_error_from_diesel_pool( - e, - ErrorHandler::Conflict( - ResourceType::Instance, - name.as_str(), - ), - ) - } + AsyncInsertError::DatabaseError(e) => public_error_from_diesel( + e, + ErrorHandler::Conflict(ResourceType::Instance, name.as_str()), + ), })?; bail_unless!( @@ -135,9 +132,9 @@ impl DataStore { .filter(dsl::project_id.eq(authz_project.id())) .filter(dsl::time_deleted.is_null()) .select(Instance::as_select()) - .load_async::(self.pool_authorized(opctx).await?) + .load_async::(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Fetches information about an Instance that the caller has previously @@ -178,10 +175,10 @@ impl DataStore { .filter(dsl::id.eq(authz_instance.id())) .filter(dsl::time_deleted.is_not_null()) .select(Instance::as_select()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByLookup( ResourceType::Instance, @@ -225,14 +222,14 @@ impl DataStore { ) .set(new_runtime.clone()) .check_if_exists::(*instance_id) - .execute_and_check(self.pool()) + .execute_and_check(&*self.pool_connection_unauthorized().await?) 
.await .map(|r| match r.status { UpdateStatus::Updated => true, UpdateStatus::NotUpdatedButExists => false, }) .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByLookup( ResourceType::Instance, @@ -288,7 +285,9 @@ impl DataStore { disk::dsl::slot.eq(Option::::None), )), ) - .detach_and_get_result_async(self.pool_authorized(opctx).await?) + .detach_and_get_result_async( + &*self.pool_connection_authorized(opctx).await?, + ) .await .map_err(|e| match e { DetachManyError::CollectionNotFound => Error::not_found_by_id( @@ -309,7 +308,7 @@ impl DataStore { } } DetachManyError::DatabaseError(e) => { - public_error_from_diesel_pool(e, ErrorHandler::Server) + public_error_from_diesel(e, ErrorHandler::Server) } })?; diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index 1248edf7a8..bd3148f2f7 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -10,8 +10,8 @@ use crate::context::OpContext; use crate::db; use crate::db::collection_insert::AsyncInsertError; use crate::db::collection_insert::DatastoreCollection; -use crate::db::error::diesel_pool_result_optional; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::diesel_result_optional; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::fixed_data::silo::INTERNAL_SILO_ID; use crate::db::identity::Resource; @@ -22,7 +22,7 @@ use crate::db::model::Name; use crate::db::pagination::paginated; use crate::db::pool::DbConnection; use crate::db::queries::ip_pool::FilterOverlappingIpRanges; -use async_bb8_diesel::{AsyncRunQueryDsl, PoolError}; +use async_bb8_diesel::AsyncRunQueryDsl; use chrono::Utc; use diesel::prelude::*; use ipnetwork::IpNetwork; @@ -65,9 +65,9 @@ impl DataStore { .filter(dsl::silo_id.ne(*INTERNAL_SILO_ID).or(dsl::silo_id.is_null())) .filter(dsl::time_deleted.is_null()) 
.select(db::model::IpPool::as_select()) - .get_results_async(self.pool_authorized(opctx).await?) + .get_results_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Look up the default IP pool for the current silo. If there is no default @@ -104,9 +104,11 @@ impl DataStore { // then by only taking the first result, we get the most specific one .order(dsl::silo_id.asc().nulls_last()) .select(IpPool::as_select()) - .first_async::(self.pool_authorized(opctx).await?) + .first_async::( + &*self.pool_connection_authorized(opctx).await?, + ) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Looks up an IP pool intended for internal services. @@ -127,9 +129,9 @@ impl DataStore { .filter(dsl::silo_id.eq(*INTERNAL_SILO_ID)) .filter(dsl::time_deleted.is_null()) .select(IpPool::as_select()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) .map(|ip_pool| { ( authz::IpPool::new( @@ -160,10 +162,10 @@ impl DataStore { diesel::insert_into(dsl::ip_pool) .values(pool) .returning(IpPool::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) 
.await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::Conflict(ResourceType::IpPool, &pool_name), ) @@ -181,16 +183,18 @@ impl DataStore { opctx.authorize(authz::Action::Delete, authz_pool).await?; // Verify there are no IP ranges still in this pool - let range = diesel_pool_result_optional( + let range = diesel_result_optional( ip_pool_range::dsl::ip_pool_range .filter(ip_pool_range::dsl::ip_pool_id.eq(authz_pool.id())) .filter(ip_pool_range::dsl::time_deleted.is_null()) .select(ip_pool_range::dsl::id) .limit(1) - .first_async::(self.pool_authorized(opctx).await?) + .first_async::( + &*self.pool_connection_authorized(opctx).await?, + ) .await, ) - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; if range.is_some() { return Err(Error::InvalidRequest { message: @@ -212,10 +216,10 @@ impl DataStore { .filter(dsl::id.eq(authz_pool.id())) .filter(dsl::rcgen.eq(db_pool.rcgen)) .set(dsl::time_deleted.eq(now)) - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_pool), ) @@ -247,10 +251,10 @@ impl DataStore { .filter(dsl::time_deleted.is_null()) .set(updates) .returning(IpPool::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_pool), ) @@ -269,10 +273,10 @@ impl DataStore { .filter(dsl::ip_pool_id.eq(authz_pool.id())) .filter(dsl::time_deleted.is_null()) .select(IpPoolRange::as_select()) - .get_results_async(self.pool_authorized(opctx).await?) + .get_results_async(&*self.pool_connection_authorized(opctx).await?) 
.await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_pool), ) @@ -285,24 +289,19 @@ impl DataStore { authz_pool: &authz::IpPool, range: &IpRange, ) -> CreateResult { - let conn = self.pool_authorized(opctx).await?; - Self::ip_pool_add_range_on_connection(conn, opctx, authz_pool, range) + let conn = self.pool_connection_authorized(opctx).await?; + Self::ip_pool_add_range_on_connection(&conn, opctx, authz_pool, range) .await } /// Variant of [Self::ip_pool_add_range] which may be called from a /// transaction context. - pub(crate) async fn ip_pool_add_range_on_connection( - conn: &(impl async_bb8_diesel::AsyncConnection - + Sync), + pub(crate) async fn ip_pool_add_range_on_connection( + conn: &async_bb8_diesel::Connection, opctx: &OpContext, authz_pool: &authz::IpPool, range: &IpRange, - ) -> CreateResult - where - ConnErr: From + Send + 'static, - PoolError: From, - { + ) -> CreateResult { use db::schema::ip_pool_range::dsl; opctx.authorize(authz::Action::CreateChild, authz_pool).await?; let pool_id = authz_pool.id(); @@ -315,13 +314,16 @@ impl DataStore { .await .map_err(|e| { use async_bb8_diesel::ConnectionError::Query; - use async_bb8_diesel::PoolError::Connection; use diesel::result::Error::NotFound; match e { - AsyncInsertError::DatabaseError(Connection(Query( - NotFound, - ))) => { + AsyncInsertError::CollectionNotFound => { + Error::ObjectNotFound { + type_name: ResourceType::IpPool, + lookup_type: LookupType::ById(pool_id), + } + } + AsyncInsertError::DatabaseError(Query(NotFound)) => { // We've filtered out the IP addresses the client provided, // i.e., there's some overlap with existing addresses. 
Error::invalid_request( @@ -334,14 +336,8 @@ impl DataStore { .as_str(), ) } - AsyncInsertError::CollectionNotFound => { - Error::ObjectNotFound { - type_name: ResourceType::IpPool, - lookup_type: LookupType::ById(pool_id), - } - } AsyncInsertError::DatabaseError(err) => { - public_error_from_diesel_pool(err, ErrorHandler::Server) + public_error_from_diesel(err, ErrorHandler::Server) } } }) @@ -366,19 +362,18 @@ impl DataStore { // Fetch the range itself, if it exists. We'll need to protect against // concurrent inserts of new external IPs from the target range by // comparing the rcgen. - let range = diesel_pool_result_optional( + let conn = self.pool_connection_authorized(opctx).await?; + let range = diesel_result_optional( dsl::ip_pool_range .filter(dsl::ip_pool_id.eq(pool_id)) .filter(dsl::first_address.eq(first_net)) .filter(dsl::last_address.eq(last_net)) .filter(dsl::time_deleted.is_null()) .select(IpPoolRange::as_select()) - .get_result_async::( - self.pool_authorized(opctx).await?, - ) + .get_result_async::(&*conn) .await, ) - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))? + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))? .ok_or_else(|| { Error::invalid_request( format!( @@ -397,9 +392,9 @@ impl DataStore { .filter(external_ip::dsl::ip_pool_range_id.eq(range_id)) .filter(external_ip::dsl::time_deleted.is_null()), )) - .get_result_async::(self.pool_authorized(opctx).await?) + .get_result_async::(&*conn) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; if has_children { return Err(Error::invalid_request( "IP pool ranges cannot be deleted while \ @@ -419,9 +414,9 @@ impl DataStore { .filter(dsl::rcgen.eq(rcgen)), ) .set(dsl::time_deleted.eq(now)) - .execute_async(self.pool_authorized(opctx).await?) 
+ .execute_async(&*conn) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; if updated_rows == 1 { Ok(()) } else { diff --git a/nexus/db-queries/src/db/datastore/mod.rs b/nexus/db-queries/src/db/datastore/mod.rs index f653675728..ff1df710bb 100644 --- a/nexus/db-queries/src/db/datastore/mod.rs +++ b/nexus/db-queries/src/db/datastore/mod.rs @@ -24,7 +24,7 @@ use crate::authz; use crate::context::OpContext; use crate::db::{ self, - error::{public_error_from_diesel_pool, ErrorHandler}, + error::{public_error_from_diesel, ErrorHandler}, }; use ::oximeter::types::ProducerRegistry; use async_bb8_diesel::{AsyncRunQueryDsl, ConnectionManager}; @@ -200,16 +200,7 @@ impl DataStore { .unwrap(); } - // TODO-security This should be deprecated in favor of pool_authorized(), - // which gives us the chance to do a minimal security check before hitting - // the database. Eventually, this function should only be used for doing - // authentication in the first place (since we can't do an authz check in - // that case). - fn pool(&self) -> &bb8::Pool> { - self.pool.pool() - } - - pub(super) async fn pool_authorized( + async fn pool_authorized( &self, opctx: &OpContext, ) -> Result<&bb8::Pool>, Error> { @@ -217,12 +208,41 @@ impl DataStore { Ok(self.pool.pool()) } + /// Returns a connection to a connection from the database connection pool. + pub(super) async fn pool_connection_authorized( + &self, + opctx: &OpContext, + ) -> Result>, Error> + { + let pool = self.pool_authorized(opctx).await?; + let connection = pool.get().await.map_err(|err| { + Error::unavail(&format!("Failed to access DB connection: {err}")) + })?; + Ok(connection) + } + + /// Returns an unauthorized connection to a connection from the database + /// connection pool. + /// + /// TODO-security: This should be deprecated in favor of + /// "pool_connection_authorized". 
+ pub(super) async fn pool_connection_unauthorized( + &self, + ) -> Result>, Error> + { + let connection = self.pool.pool().get().await.map_err(|err| { + Error::unavail(&format!("Failed to access DB connection: {err}")) + })?; + Ok(connection) + } + /// For testing only. This isn't cfg(test) because nexus needs access to it. #[doc(hidden)] - pub async fn pool_for_tests( + pub async fn pool_connection_for_tests( &self, - ) -> Result<&bb8::Pool>, Error> { - Ok(self.pool.pool()) + ) -> Result>, Error> + { + self.pool_connection_unauthorized().await } /// Return the next available IPv6 address for an Oxide service running on @@ -238,10 +258,10 @@ impl DataStore { ) .set(dsl::last_used_address.eq(dsl::last_used_address + 1)) .returning(dsl::last_used_address) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByLookup( ResourceType::Sled, @@ -266,19 +286,17 @@ impl DataStore { #[cfg(test)] async fn test_try_table_scan(&self, opctx: &OpContext) -> Error { use db::schema::project::dsl; - let conn = self.pool_authorized(opctx).await; + let conn = self.pool_connection_authorized(opctx).await; if let Err(error) = conn { return error; } let result = dsl::project .select(diesel::dsl::count_star()) - .first_async::(conn.unwrap()) + .first_async::(&*conn.unwrap()) .await; match result { Ok(_) => Error::internal_error("table scan unexpectedly succeeded"), - Err(error) => { - public_error_from_diesel_pool(error, ErrorHandler::Server) - } + Err(error) => public_error_from_diesel(error, ErrorHandler::Server), } } } @@ -1000,9 +1018,9 @@ mod test { let pool = db::Pool::new(&logctx.log, &cfg); let datastore = DataStore::new(&logctx.log, Arc::new(pool), None).await.unwrap(); - + let conn = datastore.pool_connection_for_tests().await.unwrap(); let explanation = 
DataStore::get_allocated_regions_query(Uuid::nil()) - .explain_async(datastore.pool()) + .explain_async(&conn) .await .unwrap(); assert!( @@ -1027,7 +1045,7 @@ mod test { .values(values) .returning(VpcSubnet::as_returning()); println!("{}", diesel::debug_query(&query)); - let explanation = query.explain_async(datastore.pool()).await.unwrap(); + let explanation = query.explain_async(&conn).await.unwrap(); assert!( !explanation.contains("FULL SCAN"), "Found an unexpected FULL SCAN: {}", @@ -1403,6 +1421,7 @@ mod test { ); let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; + let conn = datastore.pool_connection_for_tests().await.unwrap(); // Create a few records. let now = Utc::now(); @@ -1429,7 +1448,7 @@ mod test { .collect::>(); diesel::insert_into(dsl::external_ip) .values(ips.clone()) - .execute_async(datastore.pool()) + .execute_async(&*conn) .await .unwrap(); @@ -1464,6 +1483,7 @@ mod test { dev::test_setup_log("test_deallocate_external_ip_is_idempotent"); let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; + let conn = datastore.pool_connection_for_tests().await.unwrap(); // Create a record. 
let now = Utc::now(); @@ -1487,7 +1507,7 @@ mod test { }; diesel::insert_into(dsl::external_ip) .values(ip.clone()) - .execute_async(datastore.pool()) + .execute_async(&*conn) .await .unwrap(); @@ -1522,13 +1542,13 @@ mod test { use crate::db::model::IpKind; use crate::db::schema::external_ip::dsl; use async_bb8_diesel::ConnectionError::Query; - use async_bb8_diesel::PoolError::Connection; use diesel::result::DatabaseErrorKind::CheckViolation; use diesel::result::Error::DatabaseError; let logctx = dev::test_setup_log("test_external_ip_check_constraints"); let mut db = test_setup_database(&logctx.log).await; let (_opctx, datastore) = datastore_test(&logctx, &db).await; + let conn = datastore.pool_connection_for_tests().await.unwrap(); let now = Utc::now(); // Create a mostly-populated record, for a floating IP @@ -1582,7 +1602,7 @@ mod test { }; let res = diesel::insert_into(dsl::external_ip) .values(new_ip) - .execute_async(datastore.pool()) + .execute_async(&*conn) .await; if name.is_some() && description.is_some() { // Name/description must be non-NULL, instance ID can be @@ -1607,10 +1627,10 @@ mod test { assert!( matches!( err, - Connection(Query(DatabaseError( + Query(DatabaseError( CheckViolation, _ - ))) + )) ), "Expected a CHECK violation when inserting a \ Floating IP record with NULL name and/or description", @@ -1639,7 +1659,7 @@ mod test { }; let res = diesel::insert_into(dsl::external_ip) .values(new_ip.clone()) - .execute_async(datastore.pool()) + .execute_async(&*conn) .await; let ip_type = if is_service { "Service" } else { "Instance" }; @@ -1656,10 +1676,10 @@ mod test { assert!( matches!( err, - Connection(Query(DatabaseError( + Query(DatabaseError( CheckViolation, _ - ))) + )) ), "Expected a CHECK violation when inserting an \ Ephemeral Service IP", @@ -1687,10 +1707,10 @@ mod test { assert!( matches!( err, - Connection(Query(DatabaseError( + Query(DatabaseError( CheckViolation, _ - ))) + )) ), "Expected a CHECK violation when inserting a \ {:?} 
IP record with non-NULL name, description, \ diff --git a/nexus/db-queries/src/db/datastore/network_interface.rs b/nexus/db-queries/src/db/datastore/network_interface.rs index af1068d6bf..3d7b8afa71 100644 --- a/nexus/db-queries/src/db/datastore/network_interface.rs +++ b/nexus/db-queries/src/db/datastore/network_interface.rs @@ -11,7 +11,7 @@ use crate::db; use crate::db::collection_insert::AsyncInsertError; use crate::db::collection_insert::DatastoreCollection; use crate::db::cte_utils::BoxedQuery; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::error::TransactionError; use crate::db::model::IncompleteNetworkInterface; @@ -27,7 +27,6 @@ use crate::db::pool::DbConnection; use crate::db::queries::network_interface; use async_bb8_diesel::AsyncConnection; use async_bb8_diesel::AsyncRunQueryDsl; -use async_bb8_diesel::PoolError; use chrono::Utc; use diesel::prelude::*; use omicron_common::api::external; @@ -156,21 +155,17 @@ impl DataStore { interface: IncompleteNetworkInterface, ) -> Result { let conn = self - .pool_authorized(opctx) + .pool_connection_authorized(opctx) .await .map_err(network_interface::InsertError::External)?; - self.create_network_interface_raw_conn(conn, interface).await + self.create_network_interface_raw_conn(&conn, interface).await } - pub(crate) async fn create_network_interface_raw_conn( + pub(crate) async fn create_network_interface_raw_conn( &self, - conn: &(impl AsyncConnection + Sync), + conn: &async_bb8_diesel::Connection, interface: IncompleteNetworkInterface, - ) -> Result - where - ConnErr: From + Send + 'static, - PoolError: From, - { + ) -> Result { use db::schema::network_interface::dsl; let subnet_id = interface.subnet.identity.id; let query = network_interface::InsertQuery::new(interface.clone()); @@ -190,7 +185,7 @@ impl DataStore { ) } AsyncInsertError::DatabaseError(e) => { - network_interface::InsertError::from_pool(e, 
&interface) + network_interface::InsertError::from_diesel(e, &interface) } }) } @@ -210,10 +205,10 @@ impl DataStore { .filter(dsl::kind.eq(NetworkInterfaceKind::Instance)) .filter(dsl::time_deleted.is_null()) .set(dsl::time_deleted.eq(now)) - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_instance), ) @@ -243,13 +238,14 @@ impl DataStore { query .clone() .execute_async( - self.pool_authorized(opctx) + &*self + .pool_connection_authorized(opctx) .await .map_err(network_interface::DeleteError::External)?, ) .await .map_err(|e| { - network_interface::DeleteError::from_pool(e, &query) + network_interface::DeleteError::from_diesel(e, &query) })?; Ok(()) } @@ -291,11 +287,11 @@ impl DataStore { network_interface::is_primary, network_interface::slot, )) - .get_results_async::(self.pool_authorized(opctx).await?) + .get_results_async::( + &*self.pool_connection_authorized(opctx).await?, + ) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; Ok(rows .into_iter() .map(sled_client_types::NetworkInterface::from) @@ -386,10 +382,10 @@ impl DataStore { .filter(dsl::instance_id.eq(authz_instance.id())) .select(InstanceNetworkInterface::as_select()) .load_async::( - self.pool_authorized(opctx).await?, + &*self.pool_connection_authorized(opctx).await?, ) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Update a network interface associated with a given instance. 
@@ -471,9 +467,9 @@ impl DataStore { } type TxnError = TransactionError; - let pool = self.pool_authorized(opctx).await?; + let conn = self.pool_connection_authorized(opctx).await?; if primary { - pool.transaction_async(|conn| async move { + conn.transaction_async(|conn| async move { let instance_state = instance_query .get_result_async(&conn) .await? @@ -517,7 +513,7 @@ impl DataStore { // be done there. The other columns always need to be updated, and // we're only hitting a single row. Note that we still need to // verify the instance is stopped. - pool.transaction_async(|conn| async move { + conn.transaction_async(|conn| async move { let instance_state = instance_query .get_result_async(&conn) .await? diff --git a/nexus/db-queries/src/db/datastore/oximeter.rs b/nexus/db-queries/src/db/datastore/oximeter.rs index 178c2466a7..c9b3a59b05 100644 --- a/nexus/db-queries/src/db/datastore/oximeter.rs +++ b/nexus/db-queries/src/db/datastore/oximeter.rs @@ -6,7 +6,7 @@ use super::DataStore; use crate::db; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::model::OximeterInfo; use crate::db::model::ProducerEndpoint; @@ -41,10 +41,10 @@ impl DataStore { dsl::ip.eq(info.ip), dsl::port.eq(info.port), )) - .execute_async(self.pool()) + .execute_async(&*self.pool_connection_unauthorized().await?) 
.await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::Conflict( ResourceType::Oximeter, @@ -62,9 +62,11 @@ impl DataStore { ) -> ListResultVec { use db::schema::oximeter::dsl; paginated(dsl::oximeter, dsl::id, page_params) - .load_async::(self.pool()) + .load_async::( + &*self.pool_connection_unauthorized().await?, + ) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } // Create a record for a new producer endpoint @@ -86,10 +88,10 @@ impl DataStore { dsl::interval.eq(producer.interval), dsl::base_route.eq(producer.base_route.clone()), )) - .execute_async(self.pool()) + .execute_async(&*self.pool_connection_unauthorized().await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::Conflict( ResourceType::MetricProducer, @@ -111,10 +113,10 @@ impl DataStore { .filter(dsl::oximeter_id.eq(oximeter_id)) .order_by((dsl::oximeter_id, dsl::id)) .select(ProducerEndpoint::as_select()) - .load_async(self.pool()) + .load_async(&*self.pool_connection_unauthorized().await?) 
.await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::Conflict( ResourceType::MetricProducer, diff --git a/nexus/db-queries/src/db/datastore/physical_disk.rs b/nexus/db-queries/src/db/datastore/physical_disk.rs index ec9f29d27d..3c83b91d21 100644 --- a/nexus/db-queries/src/db/datastore/physical_disk.rs +++ b/nexus/db-queries/src/db/datastore/physical_disk.rs @@ -10,7 +10,7 @@ use crate::context::OpContext; use crate::db; use crate::db::collection_insert::AsyncInsertError; use crate::db::collection_insert::DatastoreCollection; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::model::PhysicalDisk; use crate::db::model::Sled; @@ -60,7 +60,9 @@ impl DataStore { dsl::time_modified.eq(now), )), ) - .insert_and_get_result_async(self.pool()) + .insert_and_get_result_async( + &*self.pool_connection_authorized(&opctx).await?, + ) .await .map_err(|e| match e { AsyncInsertError::CollectionNotFound => Error::ObjectNotFound { @@ -68,7 +70,7 @@ impl DataStore { lookup_type: LookupType::ById(sled_id), }, AsyncInsertError::DatabaseError(e) => { - public_error_from_diesel_pool(e, ErrorHandler::Server) + public_error_from_diesel(e, ErrorHandler::Server) } })?; @@ -85,9 +87,9 @@ impl DataStore { paginated(dsl::physical_disk, dsl::id, pagparams) .filter(dsl::time_deleted.is_null()) .select(PhysicalDisk::as_select()) - .load_async(self.pool_authorized(opctx).await?) + .load_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn sled_list_physical_disks( @@ -102,9 +104,9 @@ impl DataStore { .filter(dsl::time_deleted.is_null()) .filter(dsl::sled_id.eq(sled_id)) .select(PhysicalDisk::as_select()) - .load_async(self.pool_authorized(opctx).await?) 
+ .load_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Deletes a disk from the database. @@ -125,10 +127,10 @@ impl DataStore { .filter(dsl::model.eq(model)) .filter(dsl::sled_id.eq(sled_id)) .set(dsl::time_deleted.eq(now)) - .execute_async(self.pool()) + .execute_async(&*self.pool_connection_authorized(opctx).await?) .await .map(|_rows_modified| ()) - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } } diff --git a/nexus/db-queries/src/db/datastore/project.rs b/nexus/db-queries/src/db/datastore/project.rs index b3759f9cce..0285679cd5 100644 --- a/nexus/db-queries/src/db/datastore/project.rs +++ b/nexus/db-queries/src/db/datastore/project.rs @@ -11,8 +11,8 @@ use crate::context::OpContext; use crate::db; use crate::db::collection_insert::AsyncInsertError; use crate::db::collection_insert::DatastoreCollection; -use crate::db::error::diesel_pool_result_optional; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::diesel_result_optional; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::error::TransactionError; use crate::db::fixed_data::project::SERVICES_PROJECT; @@ -25,7 +25,7 @@ use crate::db::model::ProjectUpdate; use crate::db::model::Silo; use crate::db::model::VirtualProvisioningCollection; use crate::db::pagination::paginated; -use async_bb8_diesel::{AsyncConnection, AsyncRunQueryDsl, PoolError}; +use async_bb8_diesel::{AsyncConnection, AsyncRunQueryDsl}; use chrono::Utc; use diesel::prelude::*; use omicron_common::api::external::http_pagination::PaginatedBy; @@ -60,16 +60,16 @@ macro_rules! 
generate_fn_to_ensure_none_in_project { ) -> DeleteResult { use db::schema::$i; - let maybe_label = diesel_pool_result_optional( + let maybe_label = diesel_result_optional( $i::dsl::$i .filter($i::dsl::project_id.eq(authz_project.id())) .filter($i::dsl::time_deleted.is_null()) .select($i::dsl::$label) .limit(1) - .first_async::<$label_ty>(self.pool_authorized(opctx).await?) + .first_async::<$label_ty>(&*self.pool_connection_authorized(opctx).await?) .await, ) - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; if let Some(label) = maybe_label { let object = stringify!($i).replace('_', " "); @@ -155,7 +155,7 @@ impl DataStore { let name = project.name().as_str().to_string(); let db_project = self - .pool_authorized(opctx) + .pool_connection_authorized(opctx) .await? .transaction_async(|conn| async move { let project: Project = Silo::insert_resource( @@ -169,7 +169,7 @@ impl DataStore { authz_silo_inner.not_found() } AsyncInsertError::DatabaseError(e) => { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::Conflict( ResourceType::Project, @@ -193,8 +193,8 @@ impl DataStore { .await .map_err(|e| match e { TransactionError::CustomError(e) => e, - TransactionError::Pool(e) => { - public_error_from_diesel_pool(e, ErrorHandler::Server) + TransactionError::Connection(e) => { + public_error_from_diesel(e, ErrorHandler::Server) } })?; @@ -233,7 +233,7 @@ impl DataStore { use db::schema::project::dsl; type TxnError = TransactionError; - self.pool_authorized(opctx) + self.pool_connection_authorized(opctx) .await? 
.transaction_async(|conn| async move { let now = Utc::now(); @@ -246,8 +246,8 @@ impl DataStore { .execute_async(&conn) .await .map_err(|e| { - public_error_from_diesel_pool( - PoolError::from(e), + public_error_from_diesel( + e, ErrorHandler::NotFoundByResource(authz_project), ) })?; @@ -270,8 +270,8 @@ impl DataStore { .await .map_err(|e| match e { TxnError::CustomError(e) => e, - TxnError::Pool(e) => { - public_error_from_diesel_pool(e, ErrorHandler::Server) + TxnError::Connection(e) => { + public_error_from_diesel(e, ErrorHandler::Server) } })?; Ok(()) @@ -300,9 +300,9 @@ impl DataStore { .filter(dsl::silo_id.eq(authz_silo.id())) .filter(dsl::time_deleted.is_null()) .select(Project::as_select()) - .load_async(self.pool_authorized(opctx).await?) + .load_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Updates a project (clobbering update -- no etag) @@ -320,10 +320,10 @@ impl DataStore { .filter(dsl::id.eq(authz_project.id())) .set(updates) .returning(Project::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_project), ) @@ -355,8 +355,8 @@ impl DataStore { .filter(dsl::silo_id.ne(*INTERNAL_SILO_ID).or(dsl::silo_id.is_null())) .filter(dsl::time_deleted.is_null()) .select(db::model::IpPool::as_select()) - .get_results_async(self.pool_authorized(opctx).await?) + .get_results_async(&*self.pool_connection_authorized(opctx).await?) 
.await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } } diff --git a/nexus/db-queries/src/db/datastore/rack.rs b/nexus/db-queries/src/db/datastore/rack.rs index 54346b31c0..1be3e1ee4c 100644 --- a/nexus/db-queries/src/db/datastore/rack.rs +++ b/nexus/db-queries/src/db/datastore/rack.rs @@ -12,7 +12,7 @@ use crate::context::OpContext; use crate::db; use crate::db::collection_insert::AsyncInsertError; use crate::db::collection_insert::DatastoreCollection; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::error::TransactionError; use crate::db::fixed_data::silo::INTERNAL_SILO_ID; @@ -28,7 +28,6 @@ use crate::db::pagination::paginated; use crate::db::pool::DbConnection; use async_bb8_diesel::AsyncConnection; use async_bb8_diesel::AsyncRunQueryDsl; -use async_bb8_diesel::PoolError; use chrono::Utc; use diesel::prelude::*; use diesel::upsert::excluded; @@ -80,7 +79,7 @@ enum RackInitError { AddingNic(Error), ServiceInsert(Error), DatasetInsert { err: AsyncInsertError, zpool_id: Uuid }, - RackUpdate { err: PoolError, rack_id: Uuid }, + RackUpdate { err: async_bb8_diesel::ConnectionError, rack_id: Uuid }, DnsSerialization(Error), Silo(Error), RoleAssignment(Error), @@ -101,7 +100,7 @@ impl From for Error { lookup_type: LookupType::ById(zpool_id), }, AsyncInsertError::DatabaseError(e) => { - public_error_from_diesel_pool(e, ErrorHandler::Server) + public_error_from_diesel(e, ErrorHandler::Server) } }, TxnError::CustomError(RackInitError::ServiceInsert(err)) => { @@ -113,7 +112,7 @@ impl From for Error { TxnError::CustomError(RackInitError::RackUpdate { err, rack_id, - }) => public_error_from_diesel_pool( + }) => public_error_from_diesel( err, ErrorHandler::NotFoundByLookup( ResourceType::Rack, @@ -138,7 +137,7 @@ impl From for Error { err )) } - TxnError::Pool(e) => { + 
TxnError::Connection(e) => { Error::internal_error(&format!("Transaction error: {}", e)) } } @@ -155,9 +154,9 @@ impl DataStore { use db::schema::rack::dsl; paginated(dsl::rack, dsl::id, pagparams) .select(Rack::as_select()) - .load_async(self.pool_authorized(opctx).await?) + .load_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Stores a new rack in the database. @@ -177,10 +176,10 @@ impl DataStore { // This is a no-op, since we conflicted on the ID. .set(dsl::id.eq(excluded(dsl::id))) .returning(Rack::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::Conflict( ResourceType::Rack, @@ -194,25 +193,17 @@ impl DataStore { // which comes from the transaction created in `rack_set_initialized`. 
#[allow(clippy::too_many_arguments)] - async fn rack_create_recovery_silo( + async fn rack_create_recovery_silo( &self, opctx: &OpContext, - conn: &(impl AsyncConnection + Sync), + conn: &async_bb8_diesel::Connection, log: &slog::Logger, recovery_silo: external_params::SiloCreate, recovery_silo_fq_dns_name: String, recovery_user_id: external_params::UserId, recovery_user_password_hash: omicron_passwords::PasswordHashString, dns_update: DnsVersionUpdateBuilder, - ) -> Result<(), TxnError> - where - ConnError: From + Send + 'static, - PoolError: From, - TransactionError: From, - TxnError: From, - async_bb8_diesel::Connection: - AsyncConnection, - { + ) -> Result<(), TxnError> { let db_silo = self .silo_create_conn( conn, @@ -289,17 +280,13 @@ impl DataStore { Ok(()) } - async fn rack_populate_service_records( + async fn rack_populate_service_records( &self, - conn: &(impl AsyncConnection + Sync), + conn: &async_bb8_diesel::Connection, log: &slog::Logger, service_pool: &db::model::IpPool, service: internal_params::ServicePutRequest, - ) -> Result<(), TxnError> - where - ConnError: From + Send + 'static, - PoolError: From, - { + ) -> Result<(), TxnError> { use internal_params::ServiceKind; let service_db = db::model::Service::new( @@ -431,7 +418,7 @@ impl DataStore { // the low-frequency of calls, this optimization has been deferred. let log = opctx.log.clone(); let rack = self - .pool_authorized(opctx) + .pool_connection_authorized(opctx) .await? .transaction_async(|conn| async move { // Early exit if the rack has already been initialized. 
@@ -443,7 +430,7 @@ impl DataStore { .map_err(|e| { warn!(log, "Initializing Rack: Rack UUID not found"); TxnError::CustomError(RackInitError::RackUpdate { - err: PoolError::from(e), + err: e, rack_id, }) })?; @@ -548,9 +535,9 @@ impl DataStore { .returning(Rack::as_returning()) .get_result_async::(&conn) .await - .map_err(|e| { + .map_err(|err| { TxnError::CustomError(RackInitError::RackUpdate { - err: PoolError::from(e), + err, rack_id, }) })?; @@ -612,7 +599,7 @@ impl DataStore { use crate::db::schema::external_ip::dsl as extip_dsl; use crate::db::schema::service::dsl as service_dsl; type TxnError = TransactionError; - self.pool_authorized(opctx) + self.pool_connection_authorized(opctx) .await? .transaction_async(|conn| async move { let ips = extip_dsl::external_ip @@ -644,8 +631,8 @@ impl DataStore { .await .map_err(|error: TxnError| match error { TransactionError::CustomError(err) => err, - TransactionError::Pool(e) => { - public_error_from_diesel_pool(e, ErrorHandler::Server) + TransactionError::Connection(e) => { + public_error_from_diesel(e, ErrorHandler::Server) } }) } @@ -879,7 +866,7 @@ mod test { async fn [](db: &DataStore) -> Vec<$model> { use crate::db::schema::$table::dsl; use nexus_test_utils::db::ALLOW_FULL_TABLE_SCAN_SQL; - db.pool_for_tests() + db.pool_connection_for_tests() .await .unwrap() .transaction_async(|conn| async move { diff --git a/nexus/db-queries/src/db/datastore/region.rs b/nexus/db-queries/src/db/datastore/region.rs index 6bfea9085d..5bc79b9481 100644 --- a/nexus/db-queries/src/db/datastore/region.rs +++ b/nexus/db-queries/src/db/datastore/region.rs @@ -9,7 +9,7 @@ use super::RegionAllocationStrategy; use super::RunnableQuery; use crate::context::OpContext; use crate::db; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::error::TransactionError; use crate::db::lookup::LookupPath; @@ -51,9 +51,11 @@ impl DataStore { volume_id: 
Uuid, ) -> Result, Error> { Self::get_allocated_regions_query(volume_id) - .get_results_async::<(Dataset, Region)>(self.pool()) + .get_results_async::<(Dataset, Region)>( + &*self.pool_connection_unauthorized().await?, + ) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } async fn get_block_size_from_disk_source( @@ -136,9 +138,11 @@ impl DataStore { extent_count, allocation_strategy, ) - .get_results_async(self.pool()) + .get_results_async(&*self.pool_connection_authorized(&opctx).await?) .await - .map_err(|e| crate::db::queries::region_allocation::from_pool(e))?; + .map_err(|e| { + crate::db::queries::region_allocation::from_diesel(e) + })?; Ok(dataset_and_regions) } @@ -168,8 +172,9 @@ impl DataStore { // transaction" error. let transaction = { |region_ids: Vec| async { - self.pool() - .transaction(move |conn| { + self.pool_connection_unauthorized() + .await? + .transaction_async(|conn| async move { use db::schema::dataset::dsl as dataset_dsl; use db::schema::region::dsl as region_dsl; @@ -177,7 +182,7 @@ impl DataStore { let datasets = diesel::delete(region_dsl::region) .filter(region_dsl::id.eq_any(region_ids)) .returning(region_dsl::dataset_id) - .get_results::(conn)?; + .get_results_async::(&conn).await?; // Update datasets to which the regions belonged. for dataset in datasets { @@ -191,7 +196,7 @@ impl DataStore { * region_dsl::extent_count, )) .nullable() - .get_result(conn)?; + .get_result_async(&conn).await?; let dataset_total_occupied_size: i64 = if let Some( dataset_total_occupied_size, @@ -220,7 +225,7 @@ impl DataStore { dataset_dsl::size_used .eq(dataset_total_occupied_size), ) - .execute(conn)?; + .execute_async(&conn).await?; } Ok(()) @@ -269,10 +274,10 @@ impl DataStore { * region_dsl::extent_count, )) .nullable() - .get_result_async(self.pool()) + .get_result_async(&*self.pool_connection_unauthorized().await?) 
.await .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) + public_error_from_diesel(e, ErrorHandler::Server) })?; if let Some(total_occupied_size) = total_occupied_size { diff --git a/nexus/db-queries/src/db/datastore/region_snapshot.rs b/nexus/db-queries/src/db/datastore/region_snapshot.rs index dab3a90bcb..0a707e4504 100644 --- a/nexus/db-queries/src/db/datastore/region_snapshot.rs +++ b/nexus/db-queries/src/db/datastore/region_snapshot.rs @@ -6,7 +6,7 @@ use super::DataStore; use crate::db; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::model::RegionSnapshot; use async_bb8_diesel::AsyncRunQueryDsl; @@ -25,10 +25,10 @@ impl DataStore { diesel::insert_into(dsl::region_snapshot) .values(region_snapshot.clone()) .on_conflict_do_nothing() - .execute_async(self.pool()) + .execute_async(&*self.pool_connection_unauthorized().await?) .await .map(|_| ()) - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn region_snapshot_remove( @@ -43,9 +43,9 @@ impl DataStore { .filter(dsl::dataset_id.eq(dataset_id)) .filter(dsl::region_id.eq(region_id)) .filter(dsl::snapshot_id.eq(snapshot_id)) - .execute_async(self.pool()) + .execute_async(&*self.pool_connection_unauthorized().await?) 
.await .map(|_rows_deleted| ()) - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } } diff --git a/nexus/db-queries/src/db/datastore/role.rs b/nexus/db-queries/src/db/datastore/role.rs index ba217ff350..f1198c239b 100644 --- a/nexus/db-queries/src/db/datastore/role.rs +++ b/nexus/db-queries/src/db/datastore/role.rs @@ -11,7 +11,7 @@ use crate::context::OpContext; use crate::db; use crate::db::datastore::RunnableQuery; use crate::db::datastore::RunnableQueryNoReturn; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::error::TransactionError; use crate::db::fixed_data::role_assignment::BUILTIN_ROLE_ASSIGNMENTS; @@ -24,7 +24,6 @@ use crate::db::pagination::paginated_multicolumn; use crate::db::pool::DbConnection; use async_bb8_diesel::AsyncConnection; use async_bb8_diesel::AsyncRunQueryDsl; -use async_bb8_diesel::PoolError; use diesel::prelude::*; use nexus_types::external_api::shared; use omicron_common::api::external::DataPageParams; @@ -43,15 +42,17 @@ impl DataStore { ) -> ListResultVec { use db::schema::role_builtin::dsl; opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; + + let conn = self.pool_connection_authorized(opctx).await?; paginated_multicolumn( dsl::role_builtin, (dsl::resource_type, dsl::role_name), pagparams, ) .select(RoleBuiltin::as_select()) - .load_async::(self.pool_authorized(opctx).await?) 
+ .load_async::(&*conn) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Load built-in roles into the database @@ -75,15 +76,14 @@ impl DataStore { .collect::>(); debug!(opctx.log, "attempting to create built-in roles"); + let conn = self.pool_connection_authorized(opctx).await?; let count = diesel::insert_into(dsl::role_builtin) .values(builtin_roles) .on_conflict((dsl::resource_type, dsl::role_name)) .do_nothing() - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*conn) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; info!(opctx.log, "created {} built-in roles", count); Ok(()) } @@ -99,6 +99,7 @@ impl DataStore { opctx.authorize(authz::Action::Modify, &authz::DATABASE).await?; debug!(opctx.log, "attempting to create built-in role assignments"); + let conn = self.pool_connection_authorized(opctx).await?; let count = diesel::insert_into(dsl::role_assignment) .values(&*BUILTIN_ROLE_ASSIGNMENTS) .on_conflict(( @@ -109,11 +110,9 @@ impl DataStore { dsl::role_name, )) .do_nothing() - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*conn) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; info!(opctx.log, "created {} built-in role assignments", count); Ok(()) } @@ -141,7 +140,7 @@ impl DataStore { // into some hurt by assigning loads of roles to someone and having that // person attempt to access anything. - self.pool_authorized(opctx).await? + self.pool_connection_authorized(opctx).await? 
.transaction_async(|conn| async move { let mut role_assignments = dsl::role_assignment .filter(dsl::identity_type.eq(identity_type.clone())) @@ -175,7 +174,7 @@ impl DataStore { Ok(role_assignments) }) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Fetches all of the externally-visible role assignments for the specified @@ -196,28 +195,19 @@ impl DataStore { opctx: &OpContext, authz_resource: &T, ) -> ListResultVec { - self.role_assignment_fetch_visible_conn( - opctx, - authz_resource, - self.pool_authorized(opctx).await?, - ) - .await + let conn = self.pool_connection_authorized(opctx).await?; + self.role_assignment_fetch_visible_conn(opctx, authz_resource, &conn) + .await } pub async fn role_assignment_fetch_visible_conn< T: authz::ApiResourceWithRoles + AuthorizedResource + Clone, - ConnErr, >( &self, opctx: &OpContext, authz_resource: &T, - conn: &(impl async_bb8_diesel::AsyncConnection - + Sync), - ) -> ListResultVec - where - ConnErr: From + Send + 'static, - PoolError: From, - { + conn: &async_bb8_diesel::Connection, + ) -> ListResultVec { opctx.authorize(authz::Action::ReadPolicy, authz_resource).await?; let resource_type = authz_resource.resource_type(); let resource_id = authz_resource.resource_id(); @@ -231,9 +221,7 @@ impl DataStore { .select(RoleAssignment::as_select()) .load_async::(conn) .await - .map_err(|e| { - public_error_from_diesel_pool(e.into(), ErrorHandler::Server) - }) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Removes all existing externally-visble role assignments on @@ -283,7 +271,7 @@ impl DataStore { // We might instead want to first-class the idea of Policies in the // database so that we can build up a whole new Policy in batches and // then flip the resource over to using it. - self.pool_authorized(opctx) + self.pool_connection_authorized(opctx) .await? 
.transaction_async(|conn| async move { delete_old_query.execute_async(&conn).await?; @@ -292,8 +280,8 @@ impl DataStore { .await .map_err(|e| match e { TransactionError::CustomError(e) => e, - TransactionError::Pool(e) => { - public_error_from_diesel_pool(e, ErrorHandler::Server) + TransactionError::Connection(e) => { + public_error_from_diesel(e, ErrorHandler::Server) } }) } diff --git a/nexus/db-queries/src/db/datastore/saga.rs b/nexus/db-queries/src/db/datastore/saga.rs index 91e69e3fe5..2ec0c40799 100644 --- a/nexus/db-queries/src/db/datastore/saga.rs +++ b/nexus/db-queries/src/db/datastore/saga.rs @@ -6,7 +6,7 @@ use super::DataStore; use crate::db; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::model::Generation; use crate::db::pagination::paginated; @@ -30,11 +30,9 @@ impl DataStore { diesel::insert_into(dsl::saga) .values(saga.clone()) - .execute_async(self.pool()) + .execute_async(&*self.pool_connection_unauthorized().await?) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; Ok(()) } @@ -48,10 +46,10 @@ impl DataStore { // owning this saga. diesel::insert_into(dsl::saga_node_event) .values(event.clone()) - .execute_async(self.pool()) + .execute_async(&*self.pool_connection_unauthorized().await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::Conflict(ResourceType::SagaDbg, "Saga Event"), ) @@ -75,10 +73,10 @@ impl DataStore { .filter(dsl::adopt_generation.eq(current_adopt_generation)) .set(dsl::saga_state.eq(db::saga_types::SagaCachedState(new_state))) .check_if_exists::(saga_id) - .execute_and_check(self.pool()) + .execute_and_check(&*self.pool_connection_unauthorized().await?) 
.await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByLookup( ResourceType::SagaDbg, @@ -117,10 +115,10 @@ impl DataStore { steno::SagaCachedState::Done, ))) .filter(dsl::current_sec.eq(*sec_id)) - .load_async(self.pool()) + .load_async(&*self.pool_connection_unauthorized().await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByLookup( ResourceType::SagaDbg, @@ -138,10 +136,12 @@ impl DataStore { use db::schema::saga_node_event::dsl; paginated(dsl::saga_node_event, dsl::saga_id, &pagparams) .filter(dsl::saga_id.eq(id)) - .load_async::(self.pool()) + .load_async::( + &*self.pool_connection_unauthorized().await?, + ) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByLookup( ResourceType::SagaDbg, diff --git a/nexus/db-queries/src/db/datastore/service.rs b/nexus/db-queries/src/db/datastore/service.rs index b2c8505fea..40bf250abe 100644 --- a/nexus/db-queries/src/db/datastore/service.rs +++ b/nexus/db-queries/src/db/datastore/service.rs @@ -10,16 +10,14 @@ use crate::context::OpContext; use crate::db; use crate::db::collection_insert::AsyncInsertError; use crate::db::collection_insert::DatastoreCollection; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::identity::Asset; use crate::db::model::Service; use crate::db::model::Sled; use crate::db::pagination::paginated; use crate::db::pool::DbConnection; -use async_bb8_diesel::AsyncConnection; use async_bb8_diesel::AsyncRunQueryDsl; -use async_bb8_diesel::PoolError; use chrono::Utc; use diesel::prelude::*; use diesel::upsert::excluded; @@ -39,20 +37,16 @@ impl DataStore { opctx: &OpContext, service: Service, ) -> CreateResult { - let conn = self.pool_authorized(opctx).await?; - self.service_upsert_conn(conn, service).await + let conn = 
self.pool_connection_authorized(opctx).await?; + self.service_upsert_conn(&conn, service).await } /// Stores a new service in the database (using an existing db connection). - pub(crate) async fn service_upsert_conn( + pub(crate) async fn service_upsert_conn( &self, - conn: &(impl AsyncConnection + Sync), + conn: &async_bb8_diesel::Connection, service: Service, - ) -> CreateResult - where - ConnError: From + Send + 'static, - PoolError: From, - { + ) -> CreateResult { use db::schema::service::dsl; let service_id = service.id(); @@ -78,15 +72,13 @@ impl DataStore { type_name: ResourceType::Sled, lookup_type: LookupType::ById(sled_id), }, - AsyncInsertError::DatabaseError(e) => { - public_error_from_diesel_pool( - e, - ErrorHandler::Conflict( - ResourceType::Service, - &service_id.to_string(), - ), - ) - } + AsyncInsertError::DatabaseError(e) => public_error_from_diesel( + e, + ErrorHandler::Conflict( + ResourceType::Service, + &service_id.to_string(), + ), + ), }) } @@ -102,8 +94,8 @@ impl DataStore { paginated(dsl::service, dsl::id, pagparams) .filter(dsl::kind.eq(kind)) .select(Service::as_select()) - .load_async(self.pool_authorized(opctx).await?) + .load_async(&*self.pool_connection_authorized(opctx).await?) 
.await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } } diff --git a/nexus/db-queries/src/db/datastore/silo.rs b/nexus/db-queries/src/db/datastore/silo.rs index ed2b97257e..5e909b84c4 100644 --- a/nexus/db-queries/src/db/datastore/silo.rs +++ b/nexus/db-queries/src/db/datastore/silo.rs @@ -10,8 +10,8 @@ use crate::authz; use crate::context::OpContext; use crate::db; use crate::db::datastore::RunnableQuery; -use crate::db::error::diesel_pool_result_optional; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::diesel_result_optional; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::error::TransactionError; use crate::db::fixed_data::silo::{DEFAULT_SILO, INTERNAL_SILO}; @@ -24,7 +24,6 @@ use crate::db::pagination::paginated; use crate::db::pool::DbConnection; use async_bb8_diesel::AsyncConnection; use async_bb8_diesel::AsyncRunQueryDsl; -use async_bb8_diesel::PoolError; use chrono::Utc; use diesel::prelude::*; use nexus_db_model::Certificate; @@ -66,11 +65,9 @@ impl DataStore { .values([&*DEFAULT_SILO, &*INTERNAL_SILO]) .on_conflict(dsl::id) .do_nothing() - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*self.pool_connection_authorized(opctx).await?) 
.await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; info!(opctx.log, "created {} built-in silos", count); self.virtual_provisioning_collection_create( @@ -126,9 +123,9 @@ impl DataStore { new_silo_dns_names: &[String], dns_update: DnsVersionUpdateBuilder, ) -> CreateResult { - let conn = self.pool_authorized(opctx).await?; + let conn = self.pool_connection_authorized(opctx).await?; self.silo_create_conn( - conn, + &conn, opctx, nexus_opctx, new_silo_params, @@ -138,27 +135,15 @@ impl DataStore { .await } - pub async fn silo_create_conn( + pub async fn silo_create_conn( &self, - conn: &(impl async_bb8_diesel::AsyncConnection - + Sync), + conn: &async_bb8_diesel::Connection, opctx: &OpContext, nexus_opctx: &OpContext, new_silo_params: params::SiloCreate, new_silo_dns_names: &[String], dns_update: DnsVersionUpdateBuilder, - ) -> CreateResult - where - ConnErr: From + Send + 'static, - PoolError: From, - TransactionError: From, - - CalleeConnErr: From + Send + 'static, - PoolError: From, - TransactionError: From, - async_bb8_diesel::Connection: - AsyncConnection, - { + ) -> CreateResult { let silo_id = Uuid::new_v4(); let silo_group_id = Uuid::new_v4(); @@ -220,8 +205,8 @@ impl DataStore { .get_result_async(&conn) .await .map_err(|e| { - public_error_from_diesel_pool( - e.into(), + public_error_from_diesel( + e, ErrorHandler::Conflict( ResourceType::Silo, new_silo_params.identity.name.as_str(), @@ -276,8 +261,8 @@ impl DataStore { .await .map_err(|e| match e { TransactionError::CustomError(e) => e, - TransactionError::Pool(e) => { - public_error_from_diesel_pool(e, ErrorHandler::Server) + TransactionError::Connection(e) => { + public_error_from_diesel(e, ErrorHandler::Server) } }) } @@ -294,9 +279,9 @@ impl DataStore { .filter(dsl::time_deleted.is_null()) .filter(dsl::discoverable.eq(true)) .select(Silo::as_select()) - 
.load_async::(self.pool_authorized(opctx).await?) + .load_async::(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn silos_list( @@ -325,9 +310,9 @@ impl DataStore { query .select(Silo::as_select()) - .load_async::(self.pool_authorized(opctx).await?) + .load_async::(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn silo_delete( @@ -348,19 +333,21 @@ impl DataStore { use db::schema::silo_user; use db::schema::silo_user_password_hash; + let conn = self.pool_connection_authorized(opctx).await?; + // Make sure there are no projects present within this silo. let id = authz_silo.id(); let rcgen = db_silo.rcgen; - let project_found = diesel_pool_result_optional( + let project_found = diesel_result_optional( project::dsl::project .filter(project::dsl::silo_id.eq(id)) .filter(project::dsl::time_deleted.is_null()) .select(project::dsl::id) .limit(1) - .first_async::(self.pool_authorized(opctx).await?) + .first_async::(&*conn) .await, ) - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; if project_found.is_some() { return Err(Error::InvalidRequest { @@ -371,48 +358,47 @@ impl DataStore { let now = Utc::now(); type TxnError = TransactionError; - self.pool_authorized(opctx) - .await? 
- .transaction_async(|conn| async move { - let updated_rows = diesel::update(silo::dsl::silo) - .filter(silo::dsl::time_deleted.is_null()) - .filter(silo::dsl::id.eq(id)) - .filter(silo::dsl::rcgen.eq(rcgen)) - .set(silo::dsl::time_deleted.eq(now)) - .execute_async(&conn) - .await - .map_err(|e| { - public_error_from_diesel_pool( - PoolError::from(e), - ErrorHandler::NotFoundByResource(authz_silo), - ) - })?; - - if updated_rows == 0 { - return Err(TxnError::CustomError(Error::InvalidRequest { - message: "silo deletion failed due to concurrent modification" + conn.transaction_async(|conn| async move { + let updated_rows = diesel::update(silo::dsl::silo) + .filter(silo::dsl::time_deleted.is_null()) + .filter(silo::dsl::id.eq(id)) + .filter(silo::dsl::rcgen.eq(rcgen)) + .set(silo::dsl::time_deleted.eq(now)) + .execute_async(&conn) + .await + .map_err(|e| { + public_error_from_diesel( + e, + ErrorHandler::NotFoundByResource(authz_silo), + ) + })?; + + if updated_rows == 0 { + return Err(TxnError::CustomError(Error::InvalidRequest { + message: + "silo deletion failed due to concurrent modification" .to_string(), - })); - } + })); + } - self.virtual_provisioning_collection_delete_on_connection( - &conn, - id, - ).await?; + self.virtual_provisioning_collection_delete_on_connection( + &conn, id, + ) + .await?; - self.dns_update(dns_opctx, &conn, dns_update).await?; + self.dns_update(dns_opctx, &conn, dns_update).await?; - info!(opctx.log, "deleted silo {}", id); + info!(opctx.log, "deleted silo {}", id); - Ok(()) - }) - .await - .map_err(|e| match e { - TxnError::CustomError(e) => e, - TxnError::Pool(e) => { - public_error_from_diesel_pool(e, ErrorHandler::Server) - } - })?; + Ok(()) + }) + .await + .map_err(|e| match e { + TxnError::CustomError(e) => e, + TxnError::Connection(e) => { + public_error_from_diesel(e, ErrorHandler::Server) + } + })?; // TODO-correctness This needs to happen in a saga or some other // mechanism that ensures it happens even if we crash at this 
point. @@ -429,9 +415,9 @@ impl DataStore { .select(silo_user::dsl::id), ), ) - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*conn) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; debug!( opctx.log, @@ -442,11 +428,9 @@ impl DataStore { .filter(silo_user::dsl::silo_id.eq(id)) .filter(silo_user::dsl::time_deleted.is_null()) .set(silo_user::dsl::time_deleted.eq(now)) - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*conn) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; debug!( opctx.log, @@ -464,10 +448,10 @@ impl DataStore { .select(silo_group::dsl::id), ), ) - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*conn) .await .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) + public_error_from_diesel(e, ErrorHandler::Server) })?; debug!( @@ -480,11 +464,9 @@ impl DataStore { .filter(silo_group::dsl::silo_id.eq(id)) .filter(silo_group::dsl::time_deleted.is_null()) .set(silo_group::dsl::time_deleted.eq(now)) - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*conn) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; debug!( opctx.log, @@ -498,11 +480,9 @@ impl DataStore { .filter(idp_dsl::silo_id.eq(id)) .filter(idp_dsl::time_deleted.is_null()) .set(idp_dsl::time_deleted.eq(Utc::now())) - .execute_async(self.pool_authorized(opctx).await?) 
+ .execute_async(&*conn) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; debug!(opctx.log, "deleted {} silo IdPs for silo {}", updated_rows, id); @@ -512,11 +492,9 @@ impl DataStore { .filter(saml_idp_dsl::silo_id.eq(id)) .filter(saml_idp_dsl::time_deleted.is_null()) .set(saml_idp_dsl::time_deleted.eq(Utc::now())) - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*conn) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; debug!( opctx.log, @@ -530,11 +508,9 @@ impl DataStore { .filter(cert_dsl::silo_id.eq(id)) .filter(cert_dsl::time_deleted.is_null()) .set(cert_dsl::time_deleted.eq(Utc::now())) - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*conn) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; debug!(opctx.log, "deleted {} silo IdPs for silo {}", updated_rows, id); diff --git a/nexus/db-queries/src/db/datastore/silo_group.rs b/nexus/db-queries/src/db/datastore/silo_group.rs index 0261dc5542..d13986bb2d 100644 --- a/nexus/db-queries/src/db/datastore/silo_group.rs +++ b/nexus/db-queries/src/db/datastore/silo_group.rs @@ -9,7 +9,7 @@ use crate::authz; use crate::context::OpContext; use crate::db; use crate::db::datastore::RunnableQuery; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::error::TransactionError; use crate::db::model::SiloGroup; @@ -56,11 +56,9 @@ impl DataStore { DataStore::silo_group_ensure_query(opctx, authz_silo, silo_group) .await? - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*self.pool_connection_authorized(opctx).await?) 
.await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; Ok(self .silo_group_optional_lookup(opctx, authz_silo, external_id) @@ -83,10 +81,10 @@ impl DataStore { .filter(dsl::external_id.eq(external_id)) .filter(dsl::time_deleted.is_null()) .select(db::model::SiloGroup::as_select()) - .first_async(self.pool_authorized(opctx).await?) + .first_async(&*self.pool_connection_authorized(opctx).await?) .await .optional() - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn silo_group_membership_for_user( @@ -101,9 +99,9 @@ impl DataStore { dsl::silo_group_membership .filter(dsl::silo_user_id.eq(silo_user_id)) .select(SiloGroupMembership::as_returning()) - .get_results_async(self.pool_authorized(opctx).await?) + .get_results_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn silo_groups_for_self( @@ -125,9 +123,9 @@ impl DataStore { .filter(sgm::silo_user_id.eq(actor.actor_id())) .filter(sg::time_deleted.is_null()) .select(SiloGroup::as_returning()) - .get_results_async(self.pool_authorized(opctx).await?) + .get_results_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Update a silo user's group membership: @@ -147,7 +145,7 @@ impl DataStore { ) -> UpdateResult<()> { opctx.authorize(authz::Action::Modify, authz_silo_user).await?; - self.pool_authorized(opctx) + self.pool_connection_authorized(opctx) .await? 
.transaction_async(|conn| async move { use db::schema::silo_group_membership::dsl; @@ -166,7 +164,7 @@ impl DataStore { .iter() .map(|group_id| db::model::SiloGroupMembership { silo_group_id: *group_id, - silo_user_id: silo_user_id, + silo_user_id, }) .collect(); @@ -178,7 +176,7 @@ impl DataStore { Ok(()) }) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn silo_group_delete( @@ -197,7 +195,7 @@ impl DataStore { let group_id = authz_silo_group.id(); - self.pool_authorized(opctx) + self.pool_connection_authorized(opctx) .await? .transaction_async(|conn| async move { use db::schema::silo_group_membership; @@ -240,10 +238,9 @@ impl DataStore { id )), - TxnError::Pool(pool_error) => public_error_from_diesel_pool( - pool_error, - ErrorHandler::Server, - ), + TxnError::Connection(error) => { + public_error_from_diesel(error, ErrorHandler::Server) + } }) } @@ -260,8 +257,10 @@ impl DataStore { .filter(dsl::silo_id.eq(authz_silo.id())) .filter(dsl::time_deleted.is_null()) .select(SiloGroup::as_select()) - .load_async::(self.pool_authorized(opctx).await?) 
+ .load_async::( + &*self.pool_connection_authorized(opctx).await?, + ) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } } diff --git a/nexus/db-queries/src/db/datastore/silo_user.rs b/nexus/db-queries/src/db/datastore/silo_user.rs index e0fcf6c469..6084f8c2ab 100644 --- a/nexus/db-queries/src/db/datastore/silo_user.rs +++ b/nexus/db-queries/src/db/datastore/silo_user.rs @@ -10,7 +10,7 @@ use crate::authz; use crate::context::OpContext; use crate::db; use crate::db::datastore::IdentityMetadataCreateParams; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::model::Name; use crate::db::model::Silo; @@ -53,13 +53,14 @@ impl DataStore { use db::schema::silo_user::dsl; let silo_user_external_id = silo_user.external_id.clone(); + let conn = self.pool_connection_unauthorized().await?; diesel::insert_into(dsl::silo_user) .values(silo_user) .returning(SiloUser::as_returning()) - .get_result_async(self.pool()) + .get_result_async(&*conn) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::Conflict( ResourceType::SiloUser, @@ -91,7 +92,7 @@ impl DataStore { // TODO-robustness We might consider the RFD 192 "rcgen" pattern as well // so that people can't, say, login while we do this. let authz_silo_user_id = authz_silo_user.id(); - self.pool_authorized(opctx) + self.pool_connection_authorized(opctx) .await? .transaction_async(|mut conn| async move { // Delete the user record. 
@@ -148,7 +149,7 @@ impl DataStore { }) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_silo_user), ) @@ -176,11 +177,11 @@ impl DataStore { .filter(dsl::external_id.eq(external_id.to_string())) .filter(dsl::time_deleted.is_null()) .select(SiloUser::as_select()) - .load_async::(self.pool_authorized(opctx).await?) + .load_async::( + &*self.pool_connection_authorized(opctx).await?, + ) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })? + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))? .pop() .map(|db_silo_user| { let authz_silo_user = authz::SiloUser::new( @@ -208,9 +209,11 @@ impl DataStore { .filter(silo_id.eq(authz_silo_user_list.silo().id())) .filter(time_deleted.is_null()) .select(SiloUser::as_select()) - .load_async::(self.pool_authorized(opctx).await?) + .load_async::( + &*self.pool_connection_authorized(opctx).await?, + ) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn silo_group_users_list( @@ -237,9 +240,11 @@ impl DataStore { ), )) .select(SiloUser::as_select()) - .load_async::(self.pool_authorized(opctx).await?) + .load_async::( + &*self.pool_connection_authorized(opctx).await?, + ) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Updates or deletes the password hash for a given Silo user @@ -280,18 +285,18 @@ impl DataStore { .on_conflict(dsl::silo_user_id) .do_update() .set(SiloUserPasswordUpdate::new(hash_for_update)) - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*self.pool_connection_authorized(opctx).await?) 
.await .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) + public_error_from_diesel(e, ErrorHandler::Server) })?; } else { diesel::delete(dsl::silo_user_password_hash) .filter(dsl::silo_user_id.eq(authz_silo_user.id())) - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) + public_error_from_diesel(e, ErrorHandler::Server) })?; } @@ -323,12 +328,10 @@ impl DataStore { .filter(dsl::silo_user_id.eq(authz_silo_user.id())) .select(SiloUserPasswordHash::as_select()) .load_async::( - self.pool_authorized(opctx).await?, + &*self.pool_connection_authorized(opctx).await?, ) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })? + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))? .pop()) } @@ -341,9 +344,11 @@ impl DataStore { opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; paginated(dsl::user_builtin, dsl::name, pagparams) .select(UserBuiltin::as_select()) - .load_async::(self.pool_authorized(opctx).await?) + .load_async::( + &*self.pool_connection_authorized(opctx).await?, + ) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Load built-in users into the database @@ -383,11 +388,9 @@ impl DataStore { .values(builtin_users) .on_conflict(dsl::id) .do_nothing() - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; info!(opctx.log, "created {} built-in users", count); Ok(()) @@ -410,11 +413,9 @@ impl DataStore { .values(users) .on_conflict(dsl::id) .do_nothing() - .execute_async(self.pool_authorized(opctx).await?) 
+ .execute_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; info!(opctx.log, "created {} silo users", count); Ok(()) @@ -437,11 +438,9 @@ impl DataStore { dsl::role_name, )) .do_nothing() - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; info!(opctx.log, "created {} silo user role assignments", count); Ok(()) diff --git a/nexus/db-queries/src/db/datastore/sled.rs b/nexus/db-queries/src/db/datastore/sled.rs index a70ec26d8c..ec6cca0071 100644 --- a/nexus/db-queries/src/db/datastore/sled.rs +++ b/nexus/db-queries/src/db/datastore/sled.rs @@ -8,7 +8,7 @@ use super::DataStore; use crate::authz; use crate::context::OpContext; use crate::db; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::error::TransactionError; use crate::db::identity::Asset; @@ -46,10 +46,10 @@ impl DataStore { dsl::reservoir_size.eq(sled.reservoir_size), )) .returning(Sled::as_returning()) - .get_result_async(self.pool()) + .get_result_async(&*self.pool_connection_unauthorized().await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::Conflict( ResourceType::Sled, @@ -68,9 +68,9 @@ impl DataStore { use db::schema::sled::dsl; paginated(dsl::sled, dsl::id, pagparams) .select(Sled::as_select()) - .load_async(self.pool_authorized(opctx).await?) + .load_async(&*self.pool_connection_authorized(opctx).await?) 
.await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn sled_reservation_create( @@ -87,7 +87,7 @@ impl DataStore { } type TxnError = TransactionError; - self.pool_authorized(opctx) + self.pool_connection_authorized(opctx) .await? .transaction_async(|conn| async move { use db::schema::sled_resource::dsl as resource_dsl; @@ -183,8 +183,8 @@ impl DataStore { "No sleds can fit the requested instance", ) } - TxnError::Pool(e) => { - public_error_from_diesel_pool(e, ErrorHandler::Server) + TxnError::Connection(e) => { + public_error_from_diesel(e, ErrorHandler::Server) } }) } @@ -197,11 +197,9 @@ impl DataStore { use db::schema::sled_resource::dsl as resource_dsl; diesel::delete(resource_dsl::sled_resource) .filter(resource_dsl::id.eq(resource_id)) - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; Ok(()) } } diff --git a/nexus/db-queries/src/db/datastore/sled_instance.rs b/nexus/db-queries/src/db/datastore/sled_instance.rs index 9ba6861cec..dbdd696d70 100644 --- a/nexus/db-queries/src/db/datastore/sled_instance.rs +++ b/nexus/db-queries/src/db/datastore/sled_instance.rs @@ -3,7 +3,7 @@ use super::DataStore; use crate::authz; use crate::context::OpContext; use crate::db; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::pagination::paginated; use async_bb8_diesel::AsyncRunQueryDsl; @@ -25,8 +25,10 @@ impl DataStore { paginated(dsl::sled_instance, dsl::id, &pagparams) .filter(dsl::active_sled_id.eq(authz_sled.id())) .select(SledInstance::as_select()) - .load_async::(self.pool_authorized(opctx).await?) 
+ .load_async::( + &*self.pool_connection_authorized(opctx).await?, + ) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } } diff --git a/nexus/db-queries/src/db/datastore/snapshot.rs b/nexus/db-queries/src/db/datastore/snapshot.rs index d8db6d72a4..29fbb38e88 100644 --- a/nexus/db-queries/src/db/datastore/snapshot.rs +++ b/nexus/db-queries/src/db/datastore/snapshot.rs @@ -10,7 +10,7 @@ use crate::context::OpContext; use crate::db; use crate::db::collection_insert::AsyncInsertError; use crate::db::collection_insert::DatastoreCollection; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::model::Generation; use crate::db::model::Name; @@ -63,7 +63,7 @@ impl DataStore { let project_id = snapshot.project_id; let snapshot: Snapshot = self - .pool_authorized(opctx) + .pool_connection_authorized(opctx) .await? .transaction_async(|conn| async move { use db::schema::snapshot::dsl; @@ -157,16 +157,13 @@ impl DataStore { } } AsyncInsertError::DatabaseError(e) => { - public_error_from_diesel_pool( - e, - ErrorHandler::Server, - ) + public_error_from_diesel(e, ErrorHandler::Server) } }, }, - TxnError::Pool(e) => { - public_error_from_diesel_pool(e, ErrorHandler::Server) + TxnError::Connection(e) => { + public_error_from_diesel(e, ErrorHandler::Server) } })?; @@ -203,10 +200,10 @@ impl DataStore { .filter(dsl::gen.eq(old_gen)) .set((dsl::state.eq(new_state), dsl::gen.eq(next_gen))) .returning(Snapshot::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) 
.await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_snapshot), ) @@ -235,9 +232,9 @@ impl DataStore { .filter(dsl::time_deleted.is_null()) .filter(dsl::project_id.eq(authz_project.id())) .select(Snapshot::as_select()) - .load_async::(self.pool_authorized(opctx).await?) + .load_async::(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn project_delete_snapshot( @@ -273,11 +270,9 @@ impl DataStore { dsl::state.eq(SnapshotState::Destroyed), )) .check_if_exists::(snapshot_id) - .execute_async(self.pool_authorized(&opctx).await?) + .execute_async(&*self.pool_connection_authorized(&opctx).await?) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; if updated_rows == 0 { // Either: diff --git a/nexus/db-queries/src/db/datastore/ssh_key.rs b/nexus/db-queries/src/db/datastore/ssh_key.rs index 622a54d740..c925903e12 100644 --- a/nexus/db-queries/src/db/datastore/ssh_key.rs +++ b/nexus/db-queries/src/db/datastore/ssh_key.rs @@ -8,7 +8,7 @@ use super::DataStore; use crate::authz; use crate::context::OpContext; use crate::db; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::identity::Resource; use crate::db::model::Name; @@ -48,9 +48,9 @@ impl DataStore { .filter(dsl::silo_user_id.eq(authz_user.id())) .filter(dsl::time_deleted.is_null()) .select(SshKey::as_select()) - .load_async(self.pool_authorized(opctx).await?) + .load_async(&*self.pool_connection_authorized(opctx).await?) 
.await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Create a new SSH public key for a user. @@ -68,10 +68,10 @@ impl DataStore { diesel::insert_into(dsl::ssh_key) .values(ssh_key) .returning(SshKey::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::Conflict(ResourceType::SshKey, &name), ) @@ -92,10 +92,10 @@ impl DataStore { .filter(dsl::time_deleted.is_null()) .set(dsl::time_deleted.eq(Utc::now())) .check_if_exists::(authz_ssh_key.id()) - .execute_and_check(self.pool_authorized(opctx).await?) + .execute_and_check(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_ssh_key), ) diff --git a/nexus/db-queries/src/db/datastore/switch.rs b/nexus/db-queries/src/db/datastore/switch.rs index 56cfb9a96a..148f4577de 100644 --- a/nexus/db-queries/src/db/datastore/switch.rs +++ b/nexus/db-queries/src/db/datastore/switch.rs @@ -2,7 +2,7 @@ use super::DataStore; use crate::authz; use crate::context::OpContext; use crate::db; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::identity::Asset; use crate::db::model::Switch; @@ -20,6 +20,8 @@ impl DataStore { /// Stores a new switch in the database. 
pub async fn switch_upsert(&self, switch: Switch) -> CreateResult { use db::schema::switch::dsl; + + let conn = self.pool_connection_unauthorized().await?; diesel::insert_into(dsl::switch) .values(switch.clone()) .on_conflict(dsl::id) @@ -29,10 +31,10 @@ impl DataStore { dsl::rack_id.eq(switch.rack_id), )) .returning(Switch::as_returning()) - .get_result_async(self.pool()) + .get_result_async(&*conn) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::Conflict( ResourceType::Switch, @@ -51,8 +53,8 @@ impl DataStore { use db::schema::switch::dsl; paginated(dsl::switch, dsl::id, pagparams) .select(Switch::as_select()) - .load_async(self.pool_authorized(opctx).await?) + .load_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } } diff --git a/nexus/db-queries/src/db/datastore/switch_interface.rs b/nexus/db-queries/src/db/datastore/switch_interface.rs index 5c26dc5431..498064ce37 100644 --- a/nexus/db-queries/src/db/datastore/switch_interface.rs +++ b/nexus/db-queries/src/db/datastore/switch_interface.rs @@ -9,14 +9,12 @@ use crate::db; use crate::db::datastore::address_lot::{ ReserveBlockError, ReserveBlockTxnError, }; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::error::TransactionError; use crate::db::model::LoopbackAddress; use crate::db::pagination::paginated; -use async_bb8_diesel::{ - AsyncConnection, AsyncRunQueryDsl, ConnectionError, PoolError, -}; +use async_bb8_diesel::{AsyncConnection, AsyncRunQueryDsl, ConnectionError}; use diesel::result::Error as DieselError; use diesel::{ExpressionMethods, QueryDsl, SelectableHelper}; use ipnetwork::IpNetwork; @@ -44,14 +42,14 @@ impl DataStore { type TxnError = TransactionError; - let pool = 
self.pool_authorized(opctx).await?; + let conn = self.pool_connection_authorized(opctx).await?; let inet = IpNetwork::new(params.address, params.mask) .map_err(|_| Error::invalid_request("invalid address"))?; // TODO https://github.com/oxidecomputer/omicron/issues/2811 // Audit external networking database transaction usage - pool.transaction_async(|conn| async move { + conn.transaction_async(|conn| async move { let lot_id = authz_address_lot.id(); let (block, rsvd_block) = crate::db::datastore::address_lot::try_reserve_block( @@ -67,7 +65,9 @@ impl DataStore { LoopbackAddressCreateError::ReserveBlock(err), ) } - ReserveBlockTxnError::Pool(err) => TxnError::Pool(err), + ReserveBlockTxnError::Connection(err) => { + TxnError::Connection(err) + } })?; // Address block reserved, now create the loopback address. @@ -103,17 +103,17 @@ impl DataStore { ReserveBlockError::AddressNotInLot, ), ) => Error::invalid_request("address not in lot"), - TxnError::Pool(e) => match e { - PoolError::Connection(ConnectionError::Query( - DieselError::DatabaseError(_, _), - )) => public_error_from_diesel_pool( - e, - ErrorHandler::Conflict( - ResourceType::LoopbackAddress, - &format!("lo {}", inet), - ), - ), - _ => public_error_from_diesel_pool(e, ErrorHandler::Server), + TxnError::Connection(e) => match e { + ConnectionError::Query(DieselError::DatabaseError(_, _)) => { + public_error_from_diesel( + e, + ErrorHandler::Conflict( + ResourceType::LoopbackAddress, + &format!("lo {}", inet), + ), + ) + } + _ => public_error_from_diesel(e, ErrorHandler::Server), }, }) } @@ -128,11 +128,11 @@ impl DataStore { let id = authz_loopback_address.id(); - let pool = self.pool_authorized(opctx).await?; + let conn = self.pool_connection_authorized(opctx).await?; // TODO https://github.com/oxidecomputer/omicron/issues/2811 // Audit external networking database transaction usage - pool.transaction_async(|conn| async move { + conn.transaction_async(|conn| async move { let la = 
diesel::delete(dsl::loopback_address) .filter(dsl::id.eq(id)) .returning(LoopbackAddress::as_returning()) @@ -147,7 +147,7 @@ impl DataStore { Ok(()) }) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn loopback_address_get( @@ -160,15 +160,15 @@ impl DataStore { let id = authz_loopback_address.id(); - let pool = self.pool_authorized(opctx).await?; + let conn = self.pool_connection_authorized(opctx).await?; loopback_dsl::loopback_address .filter(loopback_address::id.eq(id)) .select(LoopbackAddress::as_select()) .limit(1) - .first_async::(pool) + .first_async::(&*conn) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn loopback_address_list( @@ -180,8 +180,8 @@ impl DataStore { paginated(dsl::loopback_address, dsl::id, &pagparams) .select(LoopbackAddress::as_select()) - .load_async(self.pool_authorized(opctx).await?) + .load_async(&*self.pool_connection_authorized(opctx).await?) 
.await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } } diff --git a/nexus/db-queries/src/db/datastore/switch_port.rs b/nexus/db-queries/src/db/datastore/switch_port.rs index 33dfd56359..940fedb473 100644 --- a/nexus/db-queries/src/db/datastore/switch_port.rs +++ b/nexus/db-queries/src/db/datastore/switch_port.rs @@ -9,7 +9,7 @@ use crate::db::datastore::address_lot::{ ReserveBlockError, ReserveBlockTxnError, }; use crate::db::datastore::UpdatePrecondition; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::error::TransactionError; use crate::db::model::{ @@ -20,9 +20,7 @@ use crate::db::model::{ SwitchVlanInterfaceConfig, }; use crate::db::pagination::paginated; -use async_bb8_diesel::{ - AsyncConnection, AsyncRunQueryDsl, ConnectionError, PoolError, -}; +use async_bb8_diesel::{AsyncConnection, AsyncRunQueryDsl, ConnectionError}; use diesel::result::Error as DieselError; use diesel::{ ExpressionMethods, JoinOnDsl, NullableExpressionMethods, QueryDsl, @@ -128,11 +126,11 @@ impl DataStore { } type TxnError = TransactionError; - let pool = self.pool_authorized(opctx).await?; + let conn = self.pool_connection_authorized(opctx).await?; // TODO https://github.com/oxidecomputer/omicron/issues/2811 // Audit external networking database transaction usage - pool.transaction_async(|conn| async move { + conn.transaction_async(|conn| async move { // create the top level port settings object let port_settings = SwitchPortSettings::new(¶ms.identity); @@ -371,7 +369,7 @@ impl DataStore { SwitchPortSettingsCreateError::ReserveBlock(err) ) } - ReserveBlockTxnError::Pool(err) => TxnError::Pool(err), + ReserveBlockTxnError::Connection(err) => TxnError::Connection(err), })?; address_config.push(SwitchPortAddressConfig::new( @@ -418,17 +416,17 @@ impl DataStore { 
ReserveBlockError::AddressNotInLot ) ) => Error::invalid_request("address not in lot"), - TxnError::Pool(e) => match e { - PoolError::Connection(ConnectionError::Query( + TxnError::Connection(e) => match e { + ConnectionError::Query( DieselError::DatabaseError(_, _), - )) => public_error_from_diesel_pool( + ) => public_error_from_diesel( e, ErrorHandler::Conflict( ResourceType::SwitchPortSettings, params.identity.name.as_str(), ), ), - _ => public_error_from_diesel_pool(e, ErrorHandler::Server), + _ => public_error_from_diesel(e, ErrorHandler::Server), }, }) } @@ -446,7 +444,7 @@ impl DataStore { } type TxnError = TransactionError; - let pool = self.pool_authorized(opctx).await?; + let conn = self.pool_connection_authorized(opctx).await?; let selector = match ¶ms.port_settings { None => return Err(Error::invalid_request("name or id required")), @@ -455,7 +453,7 @@ impl DataStore { // TODO https://github.com/oxidecomputer/omicron/issues/2811 // Audit external networking database transaction usage - pool.transaction_async(|conn| async move { + conn.transaction_async(|conn| async move { use db::schema::switch_port_settings; let id = match selector { @@ -601,15 +599,15 @@ impl DataStore { SwitchPortSettingsDeleteError::SwitchPortSettingsNotFound) => { Error::invalid_request("port settings not found") } - TxnError::Pool(e) => match e { - PoolError::Connection(ConnectionError::Query( + TxnError::Connection(e) => match e { + ConnectionError::Query( DieselError::DatabaseError(_, _), - )) => { + ) => { let name = match ¶ms.port_settings { Some(name_or_id) => name_or_id.to_string(), None => String::new(), }; - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::Conflict( ResourceType::SwitchPortSettings, @@ -617,7 +615,7 @@ impl DataStore { ), ) }, - _ => public_error_from_diesel_pool(e, ErrorHandler::Server), + _ => public_error_from_diesel(e, ErrorHandler::Server), }, }) } @@ -641,9 +639,9 @@ impl DataStore { } .filter(dsl::time_deleted.is_null()) 
.select(SwitchPortSettings::as_select()) - .load_async(self.pool_authorized(opctx).await?) + .load_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn switch_port_settings_get( @@ -657,11 +655,11 @@ impl DataStore { } type TxnError = TransactionError; - let pool = self.pool_authorized(opctx).await?; + let conn = self.pool_connection_authorized(opctx).await?; // TODO https://github.com/oxidecomputer/omicron/issues/2811 // Audit external networking database transaction usage - pool.transaction_async(|conn| async move { + conn.transaction_async(|conn| async move { // get the top level port settings object use db::schema::switch_port_settings::dsl as port_settings_dsl; @@ -806,12 +804,12 @@ impl DataStore { SwitchPortSettingsGetError::NotFound(name)) => { Error::not_found_by_name(ResourceType::SwitchPortSettings, &name) } - TxnError::Pool(e) => match e { - PoolError::Connection(ConnectionError::Query( + TxnError::Connection(e) => match e { + ConnectionError::Query( DieselError::DatabaseError(_, _), - )) => { + ) => { let name = name_or_id.to_string(); - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::Conflict( ResourceType::SwitchPortSettings, @@ -819,7 +817,7 @@ impl DataStore { ), ) }, - _ => public_error_from_diesel_pool(e, ErrorHandler::Server), + _ => public_error_from_diesel(e, ErrorHandler::Server), }, }) } @@ -839,7 +837,7 @@ impl DataStore { } type TxnError = TransactionError; - let pool = self.pool_authorized(opctx).await?; + let conn = self.pool_connection_authorized(opctx).await?; let switch_port = SwitchPort::new( rack_id, switch_location.to_string(), @@ -848,7 +846,7 @@ impl DataStore { // TODO https://github.com/oxidecomputer/omicron/issues/2811 // Audit external networking database transaction usage - pool.transaction_async(|conn| async move { + 
conn.transaction_async(|conn| async move { use db::schema::rack; use db::schema::rack::dsl as rack_dsl; rack_dsl::rack @@ -880,17 +878,20 @@ impl DataStore { TxnError::CustomError(SwitchPortCreateError::RackNotFound) => { Error::invalid_request("rack not found") } - TxnError::Pool(e) => match e { - PoolError::Connection(ConnectionError::Query( - DieselError::DatabaseError(_, _), - )) => public_error_from_diesel_pool( - e, - ErrorHandler::Conflict( - ResourceType::SwitchPort, - &format!("{}/{}/{}", rack_id, &switch_location, &port,), - ), - ), - _ => public_error_from_diesel_pool(e, ErrorHandler::Server), + TxnError::Connection(e) => match e { + ConnectionError::Query(DieselError::DatabaseError(_, _)) => { + public_error_from_diesel( + e, + ErrorHandler::Conflict( + ResourceType::SwitchPort, + &format!( + "{}/{}/{}", + rack_id, &switch_location, &port, + ), + ), + ) + } + _ => public_error_from_diesel(e, ErrorHandler::Server), }, }) } @@ -908,11 +909,11 @@ impl DataStore { } type TxnError = TransactionError; - let pool = self.pool_authorized(opctx).await?; + let conn = self.pool_connection_authorized(opctx).await?; // TODO https://github.com/oxidecomputer/omicron/issues/2811 // Audit external networking database transaction usage - pool.transaction_async(|conn| async move { + conn.transaction_async(|conn| async move { use db::schema::switch_port; use db::schema::switch_port::dsl as switch_port_dsl; @@ -957,8 +958,8 @@ impl DataStore { TxnError::CustomError(SwitchPortDeleteError::ActiveSettings) => { Error::invalid_request("must clear port settings first") } - TxnError::Pool(e) => { - public_error_from_diesel_pool(e, ErrorHandler::Server) + TxnError::Connection(e) => { + public_error_from_diesel(e, ErrorHandler::Server) } }) } @@ -972,9 +973,9 @@ impl DataStore { paginated(dsl::switch_port, dsl::id, pagparams) .select(SwitchPort::as_select()) - .load_async(self.pool_authorized(opctx).await?) + .load_async(&*self.pool_connection_authorized(opctx).await?) 
.await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn switch_port_get( @@ -985,15 +986,15 @@ impl DataStore { use db::schema::switch_port; use db::schema::switch_port::dsl as switch_port_dsl; - let pool = self.pool_authorized(opctx).await?; + let conn = self.pool_connection_authorized(opctx).await?; switch_port_dsl::switch_port .filter(switch_port::id.eq(id)) .select(SwitchPort::as_select()) .limit(1) - .first_async::(pool) + .first_async::(&*conn) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn switch_port_set_settings_id( @@ -1006,17 +1007,17 @@ impl DataStore { use db::schema::switch_port; use db::schema::switch_port::dsl as switch_port_dsl; - let pool = self.pool_authorized(opctx).await?; + let conn = self.pool_connection_authorized(opctx).await?; match current { UpdatePrecondition::DontCare => { diesel::update(switch_port_dsl::switch_port) .filter(switch_port::id.eq(switch_port_id)) .set(switch_port::port_settings_id.eq(port_settings_id)) - .execute_async(pool) + .execute_async(&*conn) .await .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) + public_error_from_diesel(e, ErrorHandler::Server) })?; } UpdatePrecondition::Null => { @@ -1024,10 +1025,10 @@ impl DataStore { .filter(switch_port::id.eq(switch_port_id)) .filter(switch_port::port_settings_id.is_null()) .set(switch_port::port_settings_id.eq(port_settings_id)) - .execute_async(pool) + .execute_async(&*conn) .await .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) + public_error_from_diesel(e, ErrorHandler::Server) })?; } UpdatePrecondition::Value(current_id) => { @@ -1035,10 +1036,10 @@ impl DataStore { .filter(switch_port::id.eq(switch_port_id)) .filter(switch_port::port_settings_id.eq(current_id)) 
.set(switch_port::port_settings_id.eq(port_settings_id)) - .execute_async(pool) + .execute_async(&*conn) .await .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) + public_error_from_diesel(e, ErrorHandler::Server) })?; } } @@ -1056,7 +1057,7 @@ impl DataStore { use db::schema::switch_port; use db::schema::switch_port::dsl as switch_port_dsl; - let pool = self.pool_authorized(opctx).await?; + let conn = self.pool_connection_authorized(opctx).await?; let id: Uuid = switch_port_dsl::switch_port .filter(switch_port::rack_id.eq(rack_id)) .filter( @@ -1065,7 +1066,7 @@ impl DataStore { .filter(switch_port::port_name.eq(port_name.to_string())) .select(switch_port::id) .limit(1) - .first_async::(pool) + .first_async::(&*conn) .await .map_err(|_| { Error::not_found_by_name(ResourceType::SwitchPort, &port_name) @@ -1082,7 +1083,7 @@ impl DataStore { use db::schema::switch_port_settings; use db::schema::switch_port_settings::dsl as port_settings_dsl; - let pool = self.pool_authorized(opctx).await?; + let conn = self.pool_connection_authorized(opctx).await?; let db_name = name.to_string(); let id = port_settings_dsl::switch_port_settings @@ -1090,7 +1091,7 @@ impl DataStore { .filter(switch_port_settings::name.eq(db_name)) .select(switch_port_settings::id) .limit(1) - .first_async::(pool) + .first_async::(&*conn) .await .map_err(|_| { Error::not_found_by_name( @@ -1122,8 +1123,10 @@ impl DataStore { // pagination in the future, or maybe a way to constrain the query to // a rack? .limit(64) - .load_async::(self.pool_authorized(opctx).await?) 
+ .load_async::( + &*self.pool_connection_authorized(opctx).await?, + ) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } } diff --git a/nexus/db-queries/src/db/datastore/update.rs b/nexus/db-queries/src/db/datastore/update.rs index 851ee66bd9..5a3e3b27e4 100644 --- a/nexus/db-queries/src/db/datastore/update.rs +++ b/nexus/db-queries/src/db/datastore/update.rs @@ -9,7 +9,7 @@ use crate::authz; use crate::context::OpContext; use crate::db; use crate::db::error::{ - public_error_from_diesel_pool, ErrorHandler, TransactionError, + public_error_from_diesel, ErrorHandler, TransactionError, }; use crate::db::model::{ ComponentUpdate, SemverVersion, SystemUpdate, UpdateArtifact, @@ -42,9 +42,9 @@ impl DataStore { .do_update() .set(artifact.clone()) .returning(UpdateArtifact::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn update_artifact_hard_delete_outdated( @@ -60,10 +60,10 @@ impl DataStore { use db::schema::update_artifact::dsl; diesel::delete(dsl::update_artifact) .filter(dsl::targets_role_version.lt(current_targets_role_version)) - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*self.pool_connection_authorized(opctx).await?) .await .map(|_rows_deleted| ()) - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) .internal_context("deleting outdated available artifacts") } @@ -84,10 +84,10 @@ impl DataStore { // to add more metadata to this model .set(time_modified.eq(Utc::now())) .returning(SystemUpdate::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) 
+ .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::Conflict( ResourceType::SystemUpdate, @@ -112,10 +112,10 @@ impl DataStore { system_update .filter(version.eq(target)) .select(SystemUpdate::as_select()) - .first_async(self.pool_authorized(opctx).await?) + .first_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByLookup( ResourceType::SystemUpdate, @@ -141,7 +141,7 @@ impl DataStore { let version_string = update.version.to_string(); - self.pool_authorized(opctx) + self.pool_connection_authorized(opctx) .await? .transaction_async(|conn| async move { let db_update = diesel::insert_into(component_update::table) @@ -164,7 +164,7 @@ impl DataStore { .await .map_err(|e| match e { TransactionError::CustomError(e) => e, - TransactionError::Pool(e) => public_error_from_diesel_pool( + TransactionError::Connection(e) => public_error_from_diesel( e, ErrorHandler::Conflict( ResourceType::ComponentUpdate, @@ -186,9 +186,9 @@ impl DataStore { paginated(system_update, id, pagparams) .select(SystemUpdate::as_select()) .order(version.desc()) - .load_async(self.pool_authorized(opctx).await?) + .load_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn system_update_components_list( @@ -205,9 +205,9 @@ impl DataStore { .inner_join(join_table::table) .filter(join_table::columns::system_update_id.eq(system_update_id)) .select(ComponentUpdate::as_select()) - .get_results_async(self.pool_authorized(opctx).await?) + .get_results_async(&*self.pool_connection_authorized(opctx).await?) 
.await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn create_updateable_component( @@ -226,10 +226,10 @@ impl DataStore { diesel::insert_into(updateable_component) .values(component.clone()) .returning(UpdateableComponent::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::Conflict( ResourceType::UpdateableComponent, @@ -250,9 +250,9 @@ impl DataStore { paginated(updateable_component, id, pagparams) .select(UpdateableComponent::as_select()) - .load_async(self.pool_authorized(opctx).await?) + .load_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn lowest_component_system_version( @@ -266,9 +266,9 @@ impl DataStore { updateable_component .select(system_version) .order(system_version.asc()) - .first_async(self.pool_authorized(opctx).await?) + .first_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn highest_component_system_version( @@ -282,9 +282,9 @@ impl DataStore { updateable_component .select(system_version) .order(system_version.desc()) - .first_async(self.pool_authorized(opctx).await?) + .first_async(&*self.pool_connection_authorized(opctx).await?) 
.await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn create_update_deployment( @@ -299,10 +299,10 @@ impl DataStore { diesel::insert_into(update_deployment) .values(deployment.clone()) .returning(UpdateDeployment::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::Conflict( ResourceType::UpdateDeployment, @@ -330,10 +330,10 @@ impl DataStore { time_modified.eq(diesel::dsl::now), )) .returning(UpdateDeployment::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByLookup( ResourceType::UpdateDeployment, @@ -354,9 +354,9 @@ impl DataStore { paginated(update_deployment, id, pagparams) .select(UpdateDeployment::as_select()) - .load_async(self.pool_authorized(opctx).await?) + .load_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn latest_update_deployment( @@ -370,8 +370,8 @@ impl DataStore { update_deployment .select(UpdateDeployment::as_returning()) .order(time_created.desc()) - .first_async(self.pool_authorized(opctx).await?) + .first_async(&*self.pool_connection_authorized(opctx).await?) 
.await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } } diff --git a/nexus/db-queries/src/db/datastore/virtual_provisioning_collection.rs b/nexus/db-queries/src/db/datastore/virtual_provisioning_collection.rs index 404b071ad9..18ff58735e 100644 --- a/nexus/db-queries/src/db/datastore/virtual_provisioning_collection.rs +++ b/nexus/db-queries/src/db/datastore/virtual_provisioning_collection.rs @@ -7,13 +7,13 @@ use super::DataStore; use crate::context::OpContext; use crate::db; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::model::ByteCount; use crate::db::model::VirtualProvisioningCollection; use crate::db::pool::DbConnection; use crate::db::queries::virtual_provisioning_collection_update::VirtualProvisioningCollectionUpdate; -use async_bb8_diesel::{AsyncRunQueryDsl, PoolError}; +use async_bb8_diesel::AsyncRunQueryDsl; use diesel::prelude::*; use omicron_common::api::external::{DeleteResult, Error}; use uuid::Uuid; @@ -46,26 +46,19 @@ impl DataStore { opctx: &OpContext, virtual_provisioning_collection: VirtualProvisioningCollection, ) -> Result, Error> { - let pool = self.pool_authorized(opctx).await?; + let conn = self.pool_connection_authorized(opctx).await?; self.virtual_provisioning_collection_create_on_connection( - pool, + &conn, virtual_provisioning_collection, ) .await } - pub(crate) async fn virtual_provisioning_collection_create_on_connection< - ConnErr, - >( + pub(crate) async fn virtual_provisioning_collection_create_on_connection( &self, - conn: &(impl async_bb8_diesel::AsyncConnection - + Sync), + conn: &async_bb8_diesel::Connection, virtual_provisioning_collection: VirtualProvisioningCollection, - ) -> Result, Error> - where - ConnErr: From + Send + 'static, - PoolError: From, - { + ) -> Result, Error> { use 
db::schema::virtual_provisioning_collection::dsl; let provisions: Vec = @@ -75,10 +68,7 @@ impl DataStore { .get_results_async(conn) .await .map_err(|e| { - public_error_from_diesel_pool( - PoolError::from(e), - ErrorHandler::Server, - ) + public_error_from_diesel(e, ErrorHandler::Server) })?; self.virtual_provisioning_collection_producer .append_all_metrics(&provisions)?; @@ -96,10 +86,12 @@ impl DataStore { dsl::virtual_provisioning_collection .find(id) .select(VirtualProvisioningCollection::as_select()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async( + &*self.pool_connection_authorized(opctx).await?, + ) .await .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) + public_error_from_diesel(e, ErrorHandler::Server) })?; Ok(virtual_provisioning_collection) } @@ -110,24 +102,17 @@ impl DataStore { opctx: &OpContext, id: Uuid, ) -> DeleteResult { - let pool = self.pool_authorized(opctx).await?; - self.virtual_provisioning_collection_delete_on_connection(pool, id) + let conn = self.pool_connection_authorized(opctx).await?; + self.virtual_provisioning_collection_delete_on_connection(&conn, id) .await } /// Delete a [`VirtualProvisioningCollection`] object. 
- pub(crate) async fn virtual_provisioning_collection_delete_on_connection< - ConnErr, - >( + pub(crate) async fn virtual_provisioning_collection_delete_on_connection( &self, - conn: &(impl async_bb8_diesel::AsyncConnection - + Sync), + conn: &async_bb8_diesel::Connection, id: Uuid, - ) -> DeleteResult - where - ConnErr: From + Send + 'static, - PoolError: From, - { + ) -> DeleteResult { use db::schema::virtual_provisioning_collection::dsl; // NOTE: We don't really need to extract the value we're deleting from @@ -138,12 +123,7 @@ impl DataStore { .returning(VirtualProvisioningCollection::as_select()) .get_result_async(conn) .await - .map_err(|e| { - public_error_from_diesel_pool( - PoolError::from(e), - ErrorHandler::Server, - ) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; assert!( collection.is_empty(), "Collection deleted while non-empty: {collection:?}" @@ -209,11 +189,9 @@ impl DataStore { project_id, storage_type, ) - .get_results_async(self.pool_authorized(opctx).await?) + .get_results_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; self.virtual_provisioning_collection_producer .append_disk_metrics(&provisions)?; Ok(provisions) @@ -265,11 +243,9 @@ impl DataStore { disk_byte_diff, project_id, ) - .get_results_async(self.pool_authorized(opctx).await?) + .get_results_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; self.virtual_provisioning_collection_producer .append_disk_metrics(&provisions)?; Ok(provisions) @@ -288,11 +264,9 @@ impl DataStore { VirtualProvisioningCollectionUpdate::new_insert_instance( id, cpus_diff, ram_diff, project_id, ) - .get_results_async(self.pool_authorized(opctx).await?) 
+ .get_results_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; self.virtual_provisioning_collection_producer .append_cpu_metrics(&provisions)?; Ok(provisions) @@ -311,11 +285,9 @@ impl DataStore { VirtualProvisioningCollectionUpdate::new_delete_instance( id, cpus_diff, ram_diff, project_id, ) - .get_results_async(self.pool_authorized(opctx).await?) + .get_results_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; self.virtual_provisioning_collection_producer .append_cpu_metrics(&provisions)?; Ok(provisions) diff --git a/nexus/db-queries/src/db/datastore/volume.rs b/nexus/db-queries/src/db/datastore/volume.rs index 901cf16f63..b3e82886de 100644 --- a/nexus/db-queries/src/db/datastore/volume.rs +++ b/nexus/db-queries/src/db/datastore/volume.rs @@ -6,7 +6,7 @@ use super::DataStore; use crate::db; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::error::TransactionError; use crate::db::identity::Asset; @@ -19,7 +19,6 @@ use async_bb8_diesel::AsyncRunQueryDsl; use async_bb8_diesel::OptionalExtension; use chrono::Utc; use diesel::prelude::*; -use diesel::OptionalExtension as DieselOptionalExtension; use omicron_common::api::external::CreateResult; use omicron_common::api::external::DeleteResult; use omicron_common::api::external::Error; @@ -65,19 +64,18 @@ impl DataStore { crucible_targets }; - self.pool() - .transaction(move |conn| { + self.pool_connection_unauthorized() + .await? 
+ .transaction_async(|conn| async move { let maybe_volume: Option = dsl::volume .filter(dsl::id.eq(volume.id())) .select(Volume::as_select()) - .first(conn) + .first_async(&conn) + .await .optional() .map_err(|e| { TxnError::CustomError(VolumeCreationError::Public( - public_error_from_diesel_pool( - e.into(), - ErrorHandler::Server, - ), + public_error_from_diesel(e, ErrorHandler::Server), )) })?; @@ -97,11 +95,12 @@ impl DataStore { .on_conflict(dsl::id) .do_nothing() .returning(Volume::as_returning()) - .get_result(conn) + .get_result_async(&conn) + .await .map_err(|e| { TxnError::CustomError(VolumeCreationError::Public( - public_error_from_diesel_pool( - e.into(), + public_error_from_diesel( + e, ErrorHandler::Conflict( ResourceType::Volume, volume.id().to_string().as_str(), @@ -124,11 +123,12 @@ impl DataStore { rs_dsl::volume_references .eq(rs_dsl::volume_references + 1), ) - .execute(conn) + .execute_async(&conn) + .await .map_err(|e| { TxnError::CustomError(VolumeCreationError::Public( - public_error_from_diesel_pool( - e.into(), + public_error_from_diesel( + e, ErrorHandler::Server, ), )) @@ -156,10 +156,10 @@ impl DataStore { dsl::volume .filter(dsl::id.eq(volume_id)) .select(Volume::as_select()) - .first_async::(self.pool()) + .first_async::(&*self.pool_connection_unauthorized().await?) .await .optional() - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Delete the volume if it exists. If it was already deleted, this is a @@ -169,10 +169,10 @@ impl DataStore { diesel::delete(dsl::volume) .filter(dsl::id.eq(volume_id)) - .execute_async(self.pool()) + .execute_async(&*self.pool_connection_unauthorized().await?) .await .map(|_| ()) - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Checkout a copy of the Volume from the database. 
@@ -206,13 +206,15 @@ impl DataStore { // types that require it). The generation number (along with the // rest of the volume data) that was in the database is what is // returned to the caller. - self.pool() - .transaction(move |conn| { + self.pool_connection_unauthorized() + .await? + .transaction_async(|conn| async move { // Grab the volume in question. let volume = dsl::volume .filter(dsl::id.eq(volume_id)) .select(Volume::as_select()) - .get_result(conn)?; + .get_result_async(&conn) + .await?; // Turn the volume.data into the VolumeConstructionRequest let vcr: VolumeConstructionRequest = @@ -289,7 +291,8 @@ impl DataStore { diesel::update(volume_dsl::volume) .filter(volume_dsl::id.eq(volume_id)) .set(volume_dsl::data.eq(new_volume_data)) - .execute(conn)?; + .execute_async(&conn) + .await?; // This should update just one row. If it does // not, then something is terribly wrong in the @@ -332,10 +335,7 @@ impl DataStore { .await .map_err(|e| match e { TxnError::CustomError(VolumeGetError::DieselError(e)) => { - public_error_from_diesel_pool( - e.into(), - ErrorHandler::Server, - ) + public_error_from_diesel(e.into(), ErrorHandler::Server) } _ => { @@ -478,9 +478,9 @@ impl DataStore { Region::as_select(), Volume::as_select(), )) - .load_async(self.pool()) + .load_async(&*self.pool_connection_unauthorized().await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn read_only_resources_associated_with_volume( @@ -576,8 +576,9 @@ impl DataStore { // // TODO it would be nice to make this transaction_async, but I couldn't // get the async optional extension to work. - self.pool() - .transaction(move |conn| { + self.pool_connection_unauthorized() + .await? + .transaction_async(|conn| async move { // Grab the volume in question. If the volume record was already // hard-deleted, assume clean-up has occurred and return an empty // CrucibleResources. 
If the volume record was soft-deleted, then @@ -588,7 +589,8 @@ impl DataStore { let volume = volume_dsl::volume .filter(volume_dsl::id.eq(volume_id)) .select(Volume::as_select()) - .get_result(conn) + .get_result_async(&conn) + .await .optional()?; let volume = if let Some(v) = volume { @@ -643,10 +645,11 @@ impl DataStore { diesel::update(dsl::region_snapshot) .filter( dsl::snapshot_addr - .eq_any(&crucible_targets.read_only_targets), + .eq_any(crucible_targets.read_only_targets.clone()), ) .set(dsl::volume_references.eq(dsl::volume_references - 1)) - .execute(conn)?; + .execute_async(&conn) + .await?; // Return what results can be cleaned up let result = CrucibleResources::V1(CrucibleResourcesV1 { @@ -681,7 +684,8 @@ impl DataStore { .or(dsl::volume_references.is_null()), ) .select((Dataset::as_select(), Region::as_select())) - .get_results::<(Dataset, Region)>(conn)? + .get_results_async::<(Dataset, Region)>(&conn) + .await? }, // A volume (for a disk or snapshot) may reference another nested @@ -707,11 +711,9 @@ impl DataStore { // delete a read-only downstairs running for a // snapshot that doesn't exist will return a 404, // causing the saga to error and unwind. - .filter( - dsl::snapshot_addr.eq_any( - &crucible_targets.read_only_targets, - ), - ) + .filter(dsl::snapshot_addr.eq_any( + crucible_targets.read_only_targets.clone(), + )) .filter(dsl::volume_references.eq(0)) .inner_join( dataset_dsl::dataset @@ -721,7 +723,10 @@ impl DataStore { Dataset::as_select(), RegionSnapshot::as_select(), )) - .get_results::<(Dataset, RegionSnapshot)>(conn)? + .get_results_async::<(Dataset, RegionSnapshot)>( + &conn, + ) + .await? 
}, }); @@ -742,7 +747,8 @@ impl DataStore { })?, ), )) - .execute(conn)?; + .execute_async(&conn) + .await?; Ok(result) }) @@ -750,10 +756,7 @@ impl DataStore { .map_err(|e| match e { TxnError::CustomError( DecreaseCrucibleResourcesError::DieselError(e), - ) => public_error_from_diesel_pool( - e.into(), - ErrorHandler::Server, - ), + ) => public_error_from_diesel(e.into(), ErrorHandler::Server), _ => { Error::internal_error(&format!("Transaction error: {}", e)) @@ -799,8 +802,9 @@ impl DataStore { // data from original volume_id. // - Put the new temp VCR into the temp volume.data, update the // temp_volume in the database. - self.pool() - .transaction(move |conn| { + self.pool_connection_unauthorized() + .await? + .transaction_async(|conn| async move { // Grab the volume in question. If the volume record was already // deleted then we can just return. let volume = { @@ -809,7 +813,8 @@ impl DataStore { let volume = dsl::volume .filter(dsl::id.eq(volume_id)) .select(Volume::as_select()) - .get_result(conn) + .get_result_async(&conn) + .await .optional()?; let volume = if let Some(v) = volume { @@ -882,7 +887,8 @@ impl DataStore { let num_updated = diesel::update(volume_dsl::volume) .filter(volume_dsl::id.eq(volume_id)) .set(volume_dsl::data.eq(new_volume_data)) - .execute(conn)?; + .execute_async(&conn) + .await?; // This should update just one row. 
If it does // not, then something is terribly wrong in the @@ -920,7 +926,8 @@ impl DataStore { .filter(volume_dsl::id.eq(temp_volume_id)) .filter(volume_dsl::time_deleted.is_null()) .set(volume_dsl::data.eq(rop_volume_data)) - .execute(conn)?; + .execute_async(&conn) + .await?; if num_updated != 1 { return Err(TxnError::CustomError( RemoveReadOnlyParentError::UnexpectedDatabaseUpdate(num_updated, 1), @@ -946,7 +953,7 @@ impl DataStore { .map_err(|e| match e { TxnError::CustomError( RemoveReadOnlyParentError::DieselError(e), - ) => public_error_from_diesel_pool( + ) => public_error_from_diesel( e.into(), ErrorHandler::Server, ), diff --git a/nexus/db-queries/src/db/datastore/vpc.rs b/nexus/db-queries/src/db/datastore/vpc.rs index f82270a27f..af7ea93456 100644 --- a/nexus/db-queries/src/db/datastore/vpc.rs +++ b/nexus/db-queries/src/db/datastore/vpc.rs @@ -10,8 +10,8 @@ use crate::context::OpContext; use crate::db; use crate::db::collection_insert::AsyncInsertError; use crate::db::collection_insert::DatastoreCollection; -use crate::db::error::diesel_pool_result_optional; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::diesel_result_optional; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::error::TransactionError; use crate::db::fixed_data::vpc::SERVICES_VPC_ID; @@ -279,9 +279,9 @@ impl DataStore { .filter(dsl::time_deleted.is_null()) .filter(dsl::project_id.eq(authz_project.id())) .select(Vpc::as_select()) - .load_async(self.pool_authorized(opctx).await?) + .load_async(&*self.pool_connection_authorized(opctx).await?) 
.await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn project_create_vpc( @@ -312,23 +312,22 @@ impl DataStore { let name = vpc_query.vpc.identity.name.clone(); let project_id = vpc_query.vpc.project_id; + let conn = self.pool_connection_authorized(opctx).await?; let vpc: Vpc = Project::insert_resource( project_id, diesel::insert_into(dsl::vpc).values(vpc_query), ) - .insert_and_get_result_async(self.pool()) + .insert_and_get_result_async(&conn) .await .map_err(|e| match e { AsyncInsertError::CollectionNotFound => Error::ObjectNotFound { type_name: ResourceType::Project, lookup_type: LookupType::ById(project_id), }, - AsyncInsertError::DatabaseError(e) => { - public_error_from_diesel_pool( - e, - ErrorHandler::Conflict(ResourceType::Vpc, name.as_str()), - ) - } + AsyncInsertError::DatabaseError(e) => public_error_from_diesel( + e, + ErrorHandler::Conflict(ResourceType::Vpc, name.as_str()), + ), })?; Ok(( authz::Vpc::new( @@ -354,10 +353,10 @@ impl DataStore { .filter(dsl::id.eq(authz_vpc.id())) .set(updates) .returning(Vpc::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_vpc), ) @@ -390,16 +389,18 @@ impl DataStore { // but we can't have NICs be a child of both tables at this point, and // we need to prevent VPC Subnets from being deleted while they have // NICs in them as well. - if diesel_pool_result_optional( + if diesel_result_optional( vpc_subnet::dsl::vpc_subnet .filter(vpc_subnet::dsl::vpc_id.eq(authz_vpc.id())) .filter(vpc_subnet::dsl::time_deleted.is_null()) .select(vpc_subnet::dsl::id) .limit(1) - .first_async::(self.pool_authorized(opctx).await?) 
+ .first_async::( + &*self.pool_connection_authorized(opctx).await?, + ) .await, ) - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))? + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))? .is_some() { return Err(Error::InvalidRequest { @@ -416,10 +417,10 @@ impl DataStore { .filter(dsl::id.eq(authz_vpc.id())) .filter(dsl::subnet_gen.eq(db_vpc.subnet_gen)) .set(dsl::time_deleted.eq(now)) - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_vpc), ) @@ -448,14 +449,15 @@ impl DataStore { opctx.authorize(authz::Action::Read, authz_vpc).await?; use db::schema::vpc_firewall_rule::dsl; + let conn = self.pool_connection_authorized(opctx).await?; dsl::vpc_firewall_rule .filter(dsl::time_deleted.is_null()) .filter(dsl::vpc_id.eq(authz_vpc.id())) .order(dsl::name.asc()) .select(VpcFirewallRule::as_select()) - .load_async(self.pool_authorized(opctx).await?) + .load_async(&*conn) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn vpc_delete_all_firewall_rules( @@ -466,16 +468,17 @@ impl DataStore { opctx.authorize(authz::Action::Modify, authz_vpc).await?; use db::schema::vpc_firewall_rule::dsl; + let conn = self.pool_connection_authorized(opctx).await?; let now = Utc::now(); // TODO-performance: Paginate this update to avoid long queries diesel::update(dsl::vpc_firewall_rule) .filter(dsl::time_deleted.is_null()) .filter(dsl::vpc_id.eq(authz_vpc.id())) .set(dsl::time_deleted.eq(now)) - .execute_async(self.pool_authorized(opctx).await?) 
+ .execute_async(&*conn) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_vpc), ) @@ -525,7 +528,7 @@ impl DataStore { // hold a transaction open across multiple roundtrips from the database, // but for now we're using a transaction due to the severely decreased // legibility of CTEs via diesel right now. - self.pool_authorized(opctx) + self.pool_connection_authorized(opctx) .await? .transaction_async(|conn| async move { delete_old_query.execute_async(&conn).await?; @@ -553,7 +556,7 @@ impl DataStore { TxnError::CustomError( FirewallUpdateError::CollectionNotFound, ) => Error::not_found_by_id(ResourceType::Vpc, &authz_vpc.id()), - TxnError::Pool(e) => public_error_from_diesel_pool( + TxnError::Connection(e) => public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_vpc), ), @@ -604,11 +607,12 @@ impl DataStore { sleds = sleds.filter(sled::id.eq_any(sleds_filter.to_vec())); } + let conn = self.pool_connection_unauthorized().await?; sleds .intersect(instance_query.union(service_query)) - .get_results_async(self.pool()) + .get_results_async(&*conn) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn vpc_subnet_list( @@ -620,6 +624,7 @@ impl DataStore { opctx.authorize(authz::Action::ListChildren, authz_vpc).await?; use db::schema::vpc_subnet::dsl; + let conn = self.pool_connection_authorized(opctx).await?; match pagparams { PaginatedBy::Id(pagparams) => { paginated(dsl::vpc_subnet, dsl::id, &pagparams) @@ -633,9 +638,9 @@ impl DataStore { .filter(dsl::time_deleted.is_null()) .filter(dsl::vpc_id.eq(authz_vpc.id())) .select(VpcSubnet::as_select()) - .load_async(self.pool_authorized(opctx).await?) 
+ .load_async(&*conn) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Insert a VPC Subnet, checking for unique IP address ranges. @@ -668,12 +673,17 @@ impl DataStore { ) -> Result { use db::schema::vpc_subnet::dsl; let values = FilterConflictingVpcSubnetRangesQuery::new(subnet.clone()); + let conn = self + .pool_connection_unauthorized() + .await + .map_err(SubnetError::External)?; + diesel::insert_into(dsl::vpc_subnet) .values(values) .returning(VpcSubnet::as_returning()) - .get_result_async(self.pool()) + .get_result_async(&*conn) .await - .map_err(|e| SubnetError::from_pool(e, &subnet)) + .map_err(|e| SubnetError::from_diesel(e, &subnet)) } pub async fn vpc_delete_subnet( @@ -687,17 +697,19 @@ impl DataStore { use db::schema::network_interface; use db::schema::vpc_subnet::dsl; + let conn = self.pool_connection_authorized(opctx).await?; + // Verify there are no child network interfaces in this VPC Subnet - if diesel_pool_result_optional( + if diesel_result_optional( network_interface::dsl::network_interface .filter(network_interface::dsl::subnet_id.eq(authz_subnet.id())) .filter(network_interface::dsl::time_deleted.is_null()) .select(network_interface::dsl::id) .limit(1) - .first_async::(self.pool_authorized(opctx).await?) + .first_async::(&*conn) .await, ) - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))? + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))? .is_some() { return Err(Error::InvalidRequest { @@ -715,10 +727,10 @@ impl DataStore { .filter(dsl::id.eq(authz_subnet.id())) .filter(dsl::rcgen.eq(db_subnet.rcgen)) .set(dsl::time_deleted.eq(now)) - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*self.pool_connection_authorized(opctx).await?) 
.await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_subnet), ) @@ -748,10 +760,10 @@ impl DataStore { .filter(dsl::id.eq(authz_subnet.id())) .set(updates) .returning(VpcSubnet::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_subnet), ) @@ -782,10 +794,10 @@ impl DataStore { .filter(dsl::subnet_id.eq(authz_subnet.id())) .select(InstanceNetworkInterface::as_select()) .load_async::( - self.pool_authorized(opctx).await?, + &*self.pool_connection_authorized(opctx).await?, ) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn vpc_router_list( @@ -810,9 +822,11 @@ impl DataStore { .filter(dsl::time_deleted.is_null()) .filter(dsl::vpc_id.eq(authz_vpc.id())) .select(VpcRouter::as_select()) - .load_async::(self.pool_authorized(opctx).await?) + .load_async::( + &*self.pool_connection_authorized(opctx).await?, + ) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn vpc_create_router( @@ -830,10 +844,10 @@ impl DataStore { .on_conflict(dsl::id) .do_nothing() .returning(VpcRouter::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) 
.await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::Conflict( ResourceType::VpcRouter, @@ -864,10 +878,10 @@ impl DataStore { .filter(dsl::time_deleted.is_null()) .filter(dsl::id.eq(authz_router.id())) .set(dsl::time_deleted.eq(now)) - .execute_async(self.pool()) + .execute_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_router), ) @@ -889,10 +903,10 @@ impl DataStore { .filter(dsl::id.eq(authz_router.id())) .set(updates) .returning(VpcRouter::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_router), ) @@ -922,10 +936,10 @@ impl DataStore { .filter(dsl::vpc_router_id.eq(authz_router.id())) .select(RouterRoute::as_select()) .load_async::( - self.pool_authorized(opctx).await?, + &*self.pool_connection_authorized(opctx).await?, ) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn router_create_route( @@ -945,22 +959,22 @@ impl DataStore { router_id, diesel::insert_into(dsl::router_route).values(route), ) - .insert_and_get_result_async(self.pool_authorized(opctx).await?) 
+ .insert_and_get_result_async( + &*self.pool_connection_authorized(opctx).await?, + ) .await .map_err(|e| match e { AsyncInsertError::CollectionNotFound => Error::ObjectNotFound { type_name: ResourceType::VpcRouter, lookup_type: LookupType::ById(router_id), }, - AsyncInsertError::DatabaseError(e) => { - public_error_from_diesel_pool( - e, - ErrorHandler::Conflict( - ResourceType::RouterRoute, - name.as_str(), - ), - ) - } + AsyncInsertError::DatabaseError(e) => public_error_from_diesel( + e, + ErrorHandler::Conflict( + ResourceType::RouterRoute, + name.as_str(), + ), + ), }) } @@ -977,10 +991,10 @@ impl DataStore { .filter(dsl::time_deleted.is_null()) .filter(dsl::id.eq(authz_route.id())) .set(dsl::time_deleted.eq(now)) - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_route), ) @@ -1002,10 +1016,10 @@ impl DataStore { .filter(dsl::id.eq(authz_route.id())) .set(route_update) .returning(RouterRoute::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_route), ) @@ -1037,11 +1051,11 @@ impl DataStore { vpc_subnet::ipv4_block, vpc_subnet::ipv6_block, )) - .get_results_async::(self.pool()) + .get_results_async::( + &*self.pool_connection_unauthorized().await?, + ) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; let mut result = BTreeMap::new(); for subnet in subnets { @@ -1063,10 +1077,10 @@ impl DataStore { .filter(dsl::vni.eq(vni)) .filter(dsl::time_deleted.is_null()) .select(Vpc::as_select()) - .get_result_async(self.pool_authorized(opctx).await?) 
+ .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByLookup( ResourceType::Vpc, diff --git a/nexus/db-queries/src/db/datastore/zpool.rs b/nexus/db-queries/src/db/datastore/zpool.rs index b2fb6cdf7a..5d6c0844ef 100644 --- a/nexus/db-queries/src/db/datastore/zpool.rs +++ b/nexus/db-queries/src/db/datastore/zpool.rs @@ -8,7 +8,7 @@ use super::DataStore; use crate::db; use crate::db::collection_insert::AsyncInsertError; use crate::db::collection_insert::DatastoreCollection; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::identity::Asset; use crate::db::model::Sled; @@ -39,22 +39,22 @@ impl DataStore { dsl::total_size.eq(excluded(dsl::total_size)), )), ) - .insert_and_get_result_async(self.pool()) + .insert_and_get_result_async( + &*self.pool_connection_unauthorized().await?, + ) .await .map_err(|e| match e { AsyncInsertError::CollectionNotFound => Error::ObjectNotFound { type_name: ResourceType::Sled, lookup_type: LookupType::ById(sled_id), }, - AsyncInsertError::DatabaseError(e) => { - public_error_from_diesel_pool( - e, - ErrorHandler::Conflict( - ResourceType::Zpool, - &zpool.id().to_string(), - ), - ) - } + AsyncInsertError::DatabaseError(e) => public_error_from_diesel( + e, + ErrorHandler::Conflict( + ResourceType::Zpool, + &zpool.id().to_string(), + ), + ), }) } } diff --git a/nexus/db-queries/src/db/error.rs b/nexus/db-queries/src/db/error.rs index 59094d2e0b..f7402bb8c7 100644 --- a/nexus/db-queries/src/db/error.rs +++ b/nexus/db-queries/src/db/error.rs @@ -4,7 +4,7 @@ //! Error handling and conversions. 
-use async_bb8_diesel::{ConnectionError, PoolError, PoolResult}; +use async_bb8_diesel::ConnectionError; use diesel::result::DatabaseErrorInformation; use diesel::result::DatabaseErrorKind as DieselErrorKind; use diesel::result::Error as DieselError; @@ -25,23 +25,15 @@ pub enum TransactionError { /// /// This error covers failure due to accessing the DB pool or errors /// propagated from the DB itself. - #[error("Pool error: {0}")] - Pool(#[from] async_bb8_diesel::PoolError), + #[error("Connection error: {0}")] + Connection(#[from] async_bb8_diesel::ConnectionError), } // Maps a "diesel error" into a "pool error", which // is already contained within the error type. impl From for TransactionError { fn from(err: DieselError) -> Self { - Self::Pool(PoolError::Connection(ConnectionError::Query(err))) - } -} - -// Maps a "connection error" into a "pool error", which -// is already contained within the error type. -impl From for TransactionError { - fn from(err: async_bb8_diesel::ConnectionError) -> Self { - Self::Pool(PoolError::Connection(err)) + Self::Connection(ConnectionError::Query(err)) } } @@ -58,22 +50,16 @@ impl TransactionError { /// [1]: https://www.cockroachlabs.com/docs/v23.1/transaction-retry-error-reference#client-side-retry-handling pub fn retry_transaction(&self) -> bool { match &self { - TransactionError::Pool(e) => match e { - PoolError::Connection(ConnectionError::Query( - DieselError::DatabaseError(kind, boxed_error_information), - )) => match kind { - DieselErrorKind::SerializationFailure => { - return boxed_error_information - .message() - .starts_with("restart transaction"); - } - - _ => false, - }, - + TransactionError::Connection(ConnectionError::Query( + DieselError::DatabaseError(kind, boxed_error_information), + )) => match kind { + DieselErrorKind::SerializationFailure => { + return boxed_error_information + .message() + .starts_with("restart transaction"); + } _ => false, }, - _ => false, } } @@ -110,14 +96,12 @@ fn 
format_database_error( /// Like [`diesel::result::OptionalExtension::optional`]. This turns Ok(v) /// into Ok(Some(v)), Err("NotFound") into Ok(None), and leave all other values /// unchanged. -pub fn diesel_pool_result_optional( - result: PoolResult, -) -> PoolResult> { +pub fn diesel_result_optional( + result: Result, +) -> Result, ConnectionError> { match result { Ok(v) => Ok(Some(v)), - Err(PoolError::Connection(ConnectionError::Query( - DieselError::NotFound, - ))) => Ok(None), + Err(ConnectionError::Query(DieselError::NotFound)) => Ok(None), Err(e) => Err(e), } } @@ -153,57 +137,46 @@ pub enum ErrorHandler<'a> { Server, } -/// Converts a Diesel pool error to a public-facing error. +/// Converts a Diesel connection error to a public-facing error. /// /// [`ErrorHandler`] may be used to add additional handlers for the error /// being returned. -pub fn public_error_from_diesel_pool( - error: PoolError, +pub fn public_error_from_diesel( + error: ConnectionError, handler: ErrorHandler<'_>, ) -> PublicError { - public_error_from_diesel_pool_helper(error, |error| match handler { - ErrorHandler::NotFoundByResource(resource) => { - public_error_from_diesel_lookup( - error, - resource.resource_type(), - resource.lookup_type(), - ) - } - ErrorHandler::NotFoundByLookup(resource_type, lookup_type) => { - public_error_from_diesel_lookup(error, resource_type, &lookup_type) - } - ErrorHandler::Conflict(resource_type, object_name) => { - public_error_from_diesel_create(error, resource_type, object_name) - } - ErrorHandler::Server => PublicError::internal_error(&format!( - "unexpected database error: {:#}", + match error { + ConnectionError::Connection(error) => PublicError::unavail(&format!( + "Failed to access connection pool: {}", error )), - }) -} - -/// Handles the common cases for all pool errors (particularly around transient -/// errors while delegating the special case of -/// `PoolError::Connection(ConnectionError::Query(diesel_error))` to -/// 
`make_query_error(diesel_error)`, allowing the caller to decide how to -/// format a message for that case. -fn public_error_from_diesel_pool_helper( - error: PoolError, - make_query_error: F, -) -> PublicError -where - F: FnOnce(DieselError) -> PublicError, -{ - match error { - PoolError::Connection(error) => match error { - ConnectionError::Connection(error) => PublicError::unavail( - &format!("Failed to access connection pool: {}", error), - ), - ConnectionError::Query(error) => make_query_error(error), + ConnectionError::Query(error) => match handler { + ErrorHandler::NotFoundByResource(resource) => { + public_error_from_diesel_lookup( + error, + resource.resource_type(), + resource.lookup_type(), + ) + } + ErrorHandler::NotFoundByLookup(resource_type, lookup_type) => { + public_error_from_diesel_lookup( + error, + resource_type, + &lookup_type, + ) + } + ErrorHandler::Conflict(resource_type, object_name) => { + public_error_from_diesel_create( + error, + resource_type, + object_name, + ) + } + ErrorHandler::Server => PublicError::internal_error(&format!( + "unexpected database error: {:#}", + error + )), }, - PoolError::Timeout => { - PublicError::unavail("Timeout accessing connection pool") - } } } diff --git a/nexus/db-queries/src/db/explain.rs b/nexus/db-queries/src/db/explain.rs index de834eb301..fc8098b876 100644 --- a/nexus/db-queries/src/db/explain.rs +++ b/nexus/db-queries/src/db/explain.rs @@ -5,7 +5,7 @@ //! Utility allowing Diesel to EXPLAIN queries. use super::pool::DbConnection; -use async_bb8_diesel::{AsyncRunQueryDsl, ConnectionManager, PoolError}; +use async_bb8_diesel::{AsyncRunQueryDsl, ConnectionError}; use async_trait::async_trait; use diesel::pg::Pg; use diesel::prelude::*; @@ -48,8 +48,8 @@ pub trait ExplainableAsync { /// Asynchronously issues an explain statement. 
async fn explain_async( self, - pool: &bb8::Pool>, - ) -> Result; + conn: &async_bb8_diesel::Connection, + ) -> Result; } #[async_trait] @@ -64,10 +64,10 @@ where { async fn explain_async( self, - pool: &bb8::Pool>, - ) -> Result { + conn: &async_bb8_diesel::Connection, + ) -> Result { Ok(ExplainStatement { query: self } - .get_results_async::(pool) + .get_results_async::(conn) .await? .join("\n")) } @@ -167,6 +167,7 @@ mod test { let mut db = test_setup_database(&logctx.log).await; let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); + let conn = pool.pool().get().await.unwrap(); create_schema(&pool).await; @@ -174,7 +175,7 @@ mod test { let explanation = dsl::test_users .filter(dsl::id.eq(Uuid::nil())) .select(User::as_select()) - .explain_async(pool.pool()) + .explain_async(&conn) .await .unwrap(); @@ -190,6 +191,7 @@ mod test { let mut db = test_setup_database(&logctx.log).await; let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); + let conn = pool.pool().get().await.unwrap(); create_schema(&pool).await; @@ -197,7 +199,7 @@ mod test { let explanation = dsl::test_users .filter(dsl::age.eq(2)) .select(User::as_select()) - .explain_async(pool.pool()) + .explain_async(&conn) .await .unwrap(); diff --git a/nexus/db-queries/src/db/lookup.rs b/nexus/db-queries/src/db/lookup.rs index e7e7bb47fc..72a32f562c 100644 --- a/nexus/db-queries/src/db/lookup.rs +++ b/nexus/db-queries/src/db/lookup.rs @@ -11,7 +11,7 @@ use crate::{ authz, context::OpContext, db, - db::error::{public_error_from_diesel_pool, ErrorHandler}, + db::error::{public_error_from_diesel, ErrorHandler}, }; use async_bb8_diesel::AsyncRunQueryDsl; use db_macros::lookup_resource; diff --git a/nexus/db-queries/src/db/pagination.rs b/nexus/db-queries/src/db/pagination.rs index 50da36c156..dd7daab14f 100644 --- a/nexus/db-queries/src/db/pagination.rs +++ b/nexus/db-queries/src/db/pagination.rs @@ -214,14 +214,12 @@ mod 
test { async fn populate_users(pool: &db::Pool, values: &Vec<(i64, i64)>) { use schema::test_users::dsl; + let conn = pool.pool().get().await.unwrap(); + // The indexes here work around the check that prevents full table // scans. - pool.pool() - .get() - .await - .unwrap() - .batch_execute_async( - "CREATE TABLE test_users ( + conn.batch_execute_async( + "CREATE TABLE test_users ( id UUID PRIMARY KEY, age INT NOT NULL, height INT NOT NULL @@ -229,9 +227,9 @@ mod test { CREATE INDEX ON test_users (age, height); CREATE INDEX ON test_users (height, age);", - ) - .await - .unwrap(); + ) + .await + .unwrap(); let users: Vec = values .iter() @@ -244,7 +242,7 @@ mod test { diesel::insert_into(dsl::test_users) .values(users) - .execute_async(pool.pool()) + .execute_async(&*conn) .await .unwrap(); } @@ -254,7 +252,8 @@ mod test { pool: &db::Pool, query: BoxedQuery, ) -> Vec { - query.select(User::as_select()).load_async(pool.pool()).await.unwrap() + let conn = pool.pool().get().await.unwrap(); + query.select(User::as_select()).load_async(&*conn).await.unwrap() } #[tokio::test] diff --git a/nexus/db-queries/src/db/queries/external_ip.rs b/nexus/db-queries/src/db/queries/external_ip.rs index e5f57181fa..18360e1045 100644 --- a/nexus/db-queries/src/db/queries/external_ip.rs +++ b/nexus/db-queries/src/db/queries/external_ip.rs @@ -42,7 +42,7 @@ const REALLOCATION_WITH_DIFFERENT_IP_SENTINEL: &'static str = "Reallocation of IP with different value"; /// Translates a generic pool error to an external error. 
-pub fn from_pool(e: async_bb8_diesel::PoolError) -> external::Error { +pub fn from_diesel(e: async_bb8_diesel::ConnectionError) -> external::Error { use crate::db::error; let sentinels = [REALLOCATION_WITH_DIFFERENT_IP_SENTINEL]; @@ -58,7 +58,7 @@ pub fn from_pool(e: async_bb8_diesel::PoolError) -> external::Error { } } - error::public_error_from_diesel_pool(e, error::ErrorHandler::Server) + error::public_error_from_diesel(e, error::ErrorHandler::Server) } const MAX_PORT: u16 = u16::MAX; @@ -877,15 +877,16 @@ mod tests { is_default, ); + let conn = self + .db_datastore + .pool_connection_authorized(&self.opctx) + .await + .unwrap(); + use crate::db::schema::ip_pool::dsl as ip_pool_dsl; diesel::insert_into(ip_pool_dsl::ip_pool) .values(pool.clone()) - .execute_async( - self.db_datastore - .pool_authorized(&self.opctx) - .await - .unwrap(), - ) + .execute_async(&*conn) .await .expect("Failed to create IP Pool"); @@ -895,16 +896,16 @@ mod tests { async fn initialize_ip_pool(&self, name: &str, range: IpRange) { // Find the target IP pool use crate::db::schema::ip_pool::dsl as ip_pool_dsl; + let conn = self + .db_datastore + .pool_connection_authorized(&self.opctx) + .await + .unwrap(); let pool = ip_pool_dsl::ip_pool .filter(ip_pool_dsl::name.eq(name.to_string())) .filter(ip_pool_dsl::time_deleted.is_null()) .select(IpPool::as_select()) - .get_result_async( - self.db_datastore - .pool_authorized(&self.opctx) - .await - .unwrap(), - ) + .get_result_async(&*conn) .await .expect("Failed to 'SELECT' IP Pool"); @@ -915,7 +916,11 @@ mod tests { ) .values(pool_range) .execute_async( - self.db_datastore.pool_authorized(&self.opctx).await.unwrap(), + &*self + .db_datastore + .pool_connection_authorized(&self.opctx) + .await + .unwrap(), ) .await .expect("Failed to create IP Pool range"); diff --git a/nexus/db-queries/src/db/queries/network_interface.rs b/nexus/db-queries/src/db/queries/network_interface.rs index 5bb9da928e..877daad9e3 100644 --- 
a/nexus/db-queries/src/db/queries/network_interface.rs +++ b/nexus/db-queries/src/db/queries/network_interface.rs @@ -125,22 +125,21 @@ impl InsertError { /// can generate, especially the intentional errors that indicate either IP /// address exhaustion or an attempt to attach an interface to an instance /// that is already associated with another VPC. - pub fn from_pool( - e: async_bb8_diesel::PoolError, + pub fn from_diesel( + e: async_bb8_diesel::ConnectionError, interface: &IncompleteNetworkInterface, ) -> Self { use crate::db::error; use async_bb8_diesel::ConnectionError; - use async_bb8_diesel::PoolError; use diesel::result::Error; match e { // Catch the specific errors designed to communicate the failures we // want to distinguish - PoolError::Connection(ConnectionError::Query( - Error::DatabaseError(_, _), - )) => decode_database_error(e, interface), + ConnectionError::Query(Error::DatabaseError(_, _)) => { + decode_database_error(e, interface) + } // Any other error at all is a bug - _ => InsertError::External(error::public_error_from_diesel_pool( + _ => InsertError::External(error::public_error_from_diesel( e, error::ErrorHandler::Server, )), @@ -224,12 +223,11 @@ impl InsertError { /// As such, it naturally is extremely tightly coupled to the database itself, /// including the software version and our schema. fn decode_database_error( - err: async_bb8_diesel::PoolError, + err: async_bb8_diesel::ConnectionError, interface: &IncompleteNetworkInterface, ) -> InsertError { use crate::db::error; use async_bb8_diesel::ConnectionError; - use async_bb8_diesel::PoolError; use diesel::result::DatabaseErrorKind; use diesel::result::Error; @@ -294,8 +292,9 @@ fn decode_database_error( // If the address allocation subquery fails, we'll attempt to insert // NULL for the `ip` column. This checks that the non-NULL constraint on // that colum has been violated. 
- PoolError::Connection(ConnectionError::Query( - Error::DatabaseError(DatabaseErrorKind::NotNullViolation, ref info), + ConnectionError::Query(Error::DatabaseError( + DatabaseErrorKind::NotNullViolation, + ref info, )) if info.message() == IP_EXHAUSTION_ERROR_MESSAGE => { InsertError::NoAvailableIpAddresses } @@ -304,16 +303,18 @@ fn decode_database_error( // `push_ensure_unique_vpc_expression` subquery, which generates a // UUID parsing error if the resource (e.g. instance) we want to attach // to is already associated with another VPC. - PoolError::Connection(ConnectionError::Query( - Error::DatabaseError(DatabaseErrorKind::Unknown, ref info), + ConnectionError::Query(Error::DatabaseError( + DatabaseErrorKind::Unknown, + ref info, )) if info.message() == MULTIPLE_VPC_ERROR_MESSAGE => { InsertError::ResourceSpansMultipleVpcs(interface.parent_id) } // This checks the constraint on the interface slot numbers, used to // limit total number of interfaces per resource to a maximum number. - PoolError::Connection(ConnectionError::Query( - Error::DatabaseError(DatabaseErrorKind::CheckViolation, ref info), + ConnectionError::Query(Error::DatabaseError( + DatabaseErrorKind::CheckViolation, + ref info, )) if info.message() == NO_SLOTS_AVAILABLE_ERROR_MESSAGE => { InsertError::NoSlotsAvailable } @@ -321,8 +322,9 @@ fn decode_database_error( // If the MAC allocation subquery fails, we'll attempt to insert NULL // for the `mac` column. This checks that the non-NULL constraint on // that column has been violated. 
- PoolError::Connection(ConnectionError::Query( - Error::DatabaseError(DatabaseErrorKind::NotNullViolation, ref info), + ConnectionError::Query(Error::DatabaseError( + DatabaseErrorKind::NotNullViolation, + ref info, )) if info.message() == MAC_EXHAUSTION_ERROR_MESSAGE => { InsertError::NoMacAddrressesAvailable } @@ -331,8 +333,9 @@ fn decode_database_error( // `push_ensure_unique_vpc_subnet_expression` subquery, which generates // a UUID parsing error if the resource has another interface in the VPC // Subnet of the one we're trying to insert. - PoolError::Connection(ConnectionError::Query( - Error::DatabaseError(DatabaseErrorKind::Unknown, ref info), + ConnectionError::Query(Error::DatabaseError( + DatabaseErrorKind::Unknown, + ref info, )) if info.message() == NON_UNIQUE_VPC_SUBNET_ERROR_MESSAGE => { InsertError::NonUniqueVpcSubnets } @@ -340,8 +343,9 @@ fn decode_database_error( // This catches the UUID-cast failure intentionally introduced by // `push_instance_state_verification_subquery`, which verifies that // the instance is actually stopped when running this query. - PoolError::Connection(ConnectionError::Query( - Error::DatabaseError(DatabaseErrorKind::Unknown, ref info), + ConnectionError::Query(Error::DatabaseError( + DatabaseErrorKind::Unknown, + ref info, )) if info.message() == INSTANCE_BAD_STATE_ERROR_MESSAGE => { assert_eq!(interface.kind, NetworkInterfaceKind::Instance); InsertError::InstanceMustBeStopped(interface.parent_id) @@ -349,16 +353,18 @@ fn decode_database_error( // This catches the UUID-cast failure intentionally introduced by // `push_instance_state_verification_subquery`, which verifies that // the instance doesn't even exist when running this query. 
- PoolError::Connection(ConnectionError::Query( - Error::DatabaseError(DatabaseErrorKind::Unknown, ref info), + ConnectionError::Query(Error::DatabaseError( + DatabaseErrorKind::Unknown, + ref info, )) if info.message() == NO_INSTANCE_ERROR_MESSAGE => { assert_eq!(interface.kind, NetworkInterfaceKind::Instance); InsertError::InstanceNotFound(interface.parent_id) } // This path looks specifically at constraint names. - PoolError::Connection(ConnectionError::Query( - Error::DatabaseError(DatabaseErrorKind::UniqueViolation, ref info), + ConnectionError::Query(Error::DatabaseError( + DatabaseErrorKind::UniqueViolation, + ref info, )) => match info.constraint_name() { // Constraint violated if a user-requested IP address has // already been assigned within the same VPC Subnet. @@ -385,7 +391,7 @@ fn decode_database_error( external::ResourceType::ServiceNetworkInterface } }; - InsertError::External(error::public_error_from_diesel_pool( + InsertError::External(error::public_error_from_diesel( err, error::ErrorHandler::Conflict( resource_type, @@ -402,14 +408,14 @@ fn decode_database_error( ) } // Any other constraint violation is a bug - _ => InsertError::External(error::public_error_from_diesel_pool( + _ => InsertError::External(error::public_error_from_diesel( err, error::ErrorHandler::Server, )), }, // Any other error at all is a bug - _ => InsertError::External(error::public_error_from_diesel_pool( + _ => InsertError::External(error::public_error_from_diesel( err, error::ErrorHandler::Server, )), @@ -1544,25 +1550,24 @@ impl DeleteError { /// can generate, specifically the intentional errors that indicate that /// either the instance is still running, or that the instance has one or /// more secondary interfaces. 
- pub fn from_pool( - e: async_bb8_diesel::PoolError, + pub fn from_diesel( + e: async_bb8_diesel::ConnectionError, query: &DeleteQuery, ) -> Self { use crate::db::error; use async_bb8_diesel::ConnectionError; - use async_bb8_diesel::PoolError; use diesel::result::Error; match e { // Catch the specific errors designed to communicate the failures we // want to distinguish - PoolError::Connection(ConnectionError::Query( - Error::DatabaseError(_, _), - )) => decode_delete_network_interface_database_error( - e, - query.parent_id, - ), + ConnectionError::Query(Error::DatabaseError(_, _)) => { + decode_delete_network_interface_database_error( + e, + query.parent_id, + ) + } // Any other error at all is a bug - _ => DeleteError::External(error::public_error_from_diesel_pool( + _ => DeleteError::External(error::public_error_from_diesel( e, error::ErrorHandler::Server, )), @@ -1603,12 +1608,11 @@ impl DeleteError { /// As such, it naturally is extremely tightly coupled to the database itself, /// including the software version and our schema. fn decode_delete_network_interface_database_error( - err: async_bb8_diesel::PoolError, + err: async_bb8_diesel::ConnectionError, parent_id: Uuid, ) -> DeleteError { use crate::db::error; use async_bb8_diesel::ConnectionError; - use async_bb8_diesel::PoolError; use diesel::result::DatabaseErrorKind; use diesel::result::Error; @@ -1623,8 +1627,9 @@ fn decode_delete_network_interface_database_error( // first CTE, which generates a UUID parsing error if we're trying to // delete the primary interface, and the instance also has one or more // secondaries. 
- PoolError::Connection(ConnectionError::Query( - Error::DatabaseError(DatabaseErrorKind::Unknown, ref info), + ConnectionError::Query(Error::DatabaseError( + DatabaseErrorKind::Unknown, + ref info, )) if info.message() == HAS_SECONDARIES_ERROR_MESSAGE => { DeleteError::SecondariesExist(parent_id) } @@ -1632,22 +1637,24 @@ fn decode_delete_network_interface_database_error( // This catches the UUID-cast failure intentionally introduced by // `push_instance_state_verification_subquery`, which verifies that // the instance can be worked on when running this query. - PoolError::Connection(ConnectionError::Query( - Error::DatabaseError(DatabaseErrorKind::Unknown, ref info), + ConnectionError::Query(Error::DatabaseError( + DatabaseErrorKind::Unknown, + ref info, )) if info.message() == INSTANCE_BAD_STATE_ERROR_MESSAGE => { DeleteError::InstanceBadState(parent_id) } // This catches the UUID-cast failure intentionally introduced by // `push_instance_state_verification_subquery`, which verifies that // the instance doesn't even exist when running this query. 
- PoolError::Connection(ConnectionError::Query( - Error::DatabaseError(DatabaseErrorKind::Unknown, ref info), + ConnectionError::Query(Error::DatabaseError( + DatabaseErrorKind::Unknown, + ref info, )) if info.message() == NO_INSTANCE_ERROR_MESSAGE => { DeleteError::InstanceNotFound(parent_id) } // Any other error at all is a bug - _ => DeleteError::External(error::public_error_from_diesel_pool( + _ => DeleteError::External(error::public_error_from_diesel( err, error::ErrorHandler::Server, )), @@ -1883,16 +1890,18 @@ mod tests { db_datastore.project_create(&opctx, project).await.unwrap(); use crate::db::schema::vpc_subnet::dsl::vpc_subnet; - let p = db_datastore.pool_authorized(&opctx).await.unwrap(); + let conn = + db_datastore.pool_connection_authorized(&opctx).await.unwrap(); let net1 = Network::new(n_subnets); let net2 = Network::new(n_subnets); for subnet in net1.subnets.iter().chain(net2.subnets.iter()) { diesel::insert_into(vpc_subnet) .values(subnet.clone()) - .execute_async(p) + .execute_async(&*conn) .await .unwrap(); } + drop(conn); Self { logctx, opctx, diff --git a/nexus/db-queries/src/db/queries/next_item.rs b/nexus/db-queries/src/db/queries/next_item.rs index 3ba09788a0..007aec943d 100644 --- a/nexus/db-queries/src/db/queries/next_item.rs +++ b/nexus/db-queries/src/db/queries/next_item.rs @@ -593,6 +593,7 @@ mod tests { let mut db = test_setup_database(&log).await; let cfg = crate::db::Config { url: db.pg_config().clone() }; let pool = Arc::new(crate::db::Pool::new(&logctx.log, &cfg)); + let conn = pool.pool().get().await.unwrap(); // We're going to operate on a separate table, for simplicity. 
setup_test_schema(&pool).await; @@ -607,7 +608,7 @@ mod tests { let it = diesel::insert_into(item::dsl::item) .values(query) .returning(Item::as_returning()) - .get_result_async(pool.pool()) + .get_result_async(&*conn) .await .unwrap(); assert_eq!(it.value, 0); @@ -616,7 +617,7 @@ mod tests { let it = diesel::insert_into(item::dsl::item) .values(query) .returning(Item::as_returning()) - .get_result_async(pool.pool()) + .get_result_async(&*conn) .await .unwrap(); assert_eq!(it.value, 1); @@ -628,7 +629,7 @@ mod tests { let it = diesel::insert_into(item::dsl::item) .values(query) .returning(Item::as_returning()) - .get_result_async(pool.pool()) + .get_result_async(&*conn) .await .unwrap(); assert_eq!(it.value, 10); @@ -638,7 +639,7 @@ mod tests { let it = diesel::insert_into(item::dsl::item) .values(query) .returning(Item::as_returning()) - .get_result_async(pool.pool()) + .get_result_async(&*conn) .await .unwrap(); assert_eq!(it.value, 2); diff --git a/nexus/db-queries/src/db/queries/region_allocation.rs b/nexus/db-queries/src/db/queries/region_allocation.rs index 674a525c5c..b071ee3f44 100644 --- a/nexus/db-queries/src/db/queries/region_allocation.rs +++ b/nexus/db-queries/src/db/queries/region_allocation.rs @@ -36,7 +36,7 @@ const NOT_ENOUGH_UNIQUE_ZPOOLS_SENTINEL: &'static str = /// Translates a generic pool error to an external error based /// on messages which may be emitted during region provisioning. -pub fn from_pool(e: async_bb8_diesel::PoolError) -> external::Error { +pub fn from_diesel(e: async_bb8_diesel::ConnectionError) -> external::Error { use crate::db::error; let sentinels = [ @@ -66,7 +66,7 @@ pub fn from_pool(e: async_bb8_diesel::PoolError) -> external::Error { } } - error::public_error_from_diesel_pool(e, error::ErrorHandler::Server) + error::public_error_from_diesel(e, error::ErrorHandler::Server) } /// A subquery to find all old regions associated with a particular volume. 
diff --git a/nexus/db-queries/src/db/queries/vpc_subnet.rs b/nexus/db-queries/src/db/queries/vpc_subnet.rs index 78da549620..bbb229da1e 100644 --- a/nexus/db-queries/src/db/queries/vpc_subnet.rs +++ b/nexus/db-queries/src/db/queries/vpc_subnet.rs @@ -28,13 +28,12 @@ pub enum SubnetError { impl SubnetError { /// Construct a `SubnetError` from a Diesel error, catching the desired /// cases and building useful errors. - pub fn from_pool( - e: async_bb8_diesel::PoolError, + pub fn from_diesel( + e: async_bb8_diesel::ConnectionError, subnet: &VpcSubnet, ) -> Self { use crate::db::error; use async_bb8_diesel::ConnectionError; - use async_bb8_diesel::PoolError; use diesel::result::DatabaseErrorKind; use diesel::result::Error; const IPV4_OVERLAP_ERROR_MESSAGE: &str = @@ -44,33 +43,27 @@ impl SubnetError { const NAME_CONFLICT_CONSTRAINT: &str = "vpc_subnet_vpc_id_name_key"; match e { // Attempt to insert overlapping IPv4 subnet - PoolError::Connection(ConnectionError::Query( - Error::DatabaseError( - DatabaseErrorKind::NotNullViolation, - ref info, - ), + ConnectionError::Query(Error::DatabaseError( + DatabaseErrorKind::NotNullViolation, + ref info, )) if info.message() == IPV4_OVERLAP_ERROR_MESSAGE => { SubnetError::OverlappingIpRange(subnet.ipv4_block.0 .0.into()) } // Attempt to insert overlapping IPv6 subnet - PoolError::Connection(ConnectionError::Query( - Error::DatabaseError( - DatabaseErrorKind::NotNullViolation, - ref info, - ), + ConnectionError::Query(Error::DatabaseError( + DatabaseErrorKind::NotNullViolation, + ref info, )) if info.message() == IPV6_OVERLAP_ERROR_MESSAGE => { SubnetError::OverlappingIpRange(subnet.ipv6_block.0 .0.into()) } // Conflicting name for the subnet within a VPC - PoolError::Connection(ConnectionError::Query( - Error::DatabaseError( - DatabaseErrorKind::UniqueViolation, - ref info, - ), + ConnectionError::Query(Error::DatabaseError( + DatabaseErrorKind::UniqueViolation, + ref info, )) if info.constraint_name() == 
Some(NAME_CONFLICT_CONSTRAINT) => { - SubnetError::External(error::public_error_from_diesel_pool( + SubnetError::External(error::public_error_from_diesel( e, error::ErrorHandler::Conflict( external::ResourceType::VpcSubnet, @@ -80,7 +73,7 @@ impl SubnetError { } // Any other error at all is a bug - _ => SubnetError::External(error::public_error_from_diesel_pool( + _ => SubnetError::External(error::public_error_from_diesel( e, error::ErrorHandler::Server, )), diff --git a/nexus/db-queries/src/db/true_or_cast_error.rs b/nexus/db-queries/src/db/true_or_cast_error.rs index 6f14cd4642..e04d865182 100644 --- a/nexus/db-queries/src/db/true_or_cast_error.rs +++ b/nexus/db-queries/src/db/true_or_cast_error.rs @@ -77,11 +77,10 @@ where /// Returns one of the sentinels if it matches the expected value from /// a [`TrueOrCastError`]. pub fn matches_sentinel( - e: &async_bb8_diesel::PoolError, + e: &async_bb8_diesel::ConnectionError, sentinels: &[&'static str], ) -> Option<&'static str> { use async_bb8_diesel::ConnectionError; - use async_bb8_diesel::PoolError; use diesel::result::DatabaseErrorKind; use diesel::result::Error; @@ -94,8 +93,9 @@ pub fn matches_sentinel( match e { // Catch the specific errors designed to communicate the failures we // want to distinguish. - PoolError::Connection(ConnectionError::Query( - Error::DatabaseError(DatabaseErrorKind::Unknown, ref info), + ConnectionError::Query(Error::DatabaseError( + DatabaseErrorKind::Unknown, + ref info, )) => { for sentinel in sentinels { if info.message() == bool_parse_error(sentinel) { diff --git a/nexus/db-queries/src/db/update_and_check.rs b/nexus/db-queries/src/db/update_and_check.rs index 8c7845b61b..96cb3e4c79 100644 --- a/nexus/db-queries/src/db/update_and_check.rs +++ b/nexus/db-queries/src/db/update_and_check.rs @@ -5,7 +5,7 @@ //! CTE implementation for "UPDATE with extended return status". 
use super::pool::DbConnection; -use async_bb8_diesel::{AsyncRunQueryDsl, PoolError}; +use async_bb8_diesel::AsyncRunQueryDsl; use diesel::associations::HasTable; use diesel::pg::Pg; use diesel::prelude::*; @@ -153,16 +153,13 @@ where /// - Ok(Row exists and was updated) /// - Ok(Row exists, but was not updated) /// - Error (row doesn't exist, or other diesel error) - pub async fn execute_and_check( + pub async fn execute_and_check( self, - conn: &(impl async_bb8_diesel::AsyncConnection - + Sync), - ) -> Result, PoolError> + conn: &async_bb8_diesel::Connection, + ) -> Result, async_bb8_diesel::ConnectionError> where // We require this bound to ensure that "Self" is runnable as query. Self: LoadQuery<'static, DbConnection, (Option, Option, Q)>, - ConnErr: From + Send + 'static, - PoolError: From, { let (id0, id1, found) = self.get_result_async::<(Option, Option, Q)>(conn).await?; diff --git a/nexus/src/app/background/dns_config.rs b/nexus/src/app/background/dns_config.rs index c0aaa267a2..654e9c0bf1 100644 --- a/nexus/src/app/background/dns_config.rs +++ b/nexus/src/app/background/dns_config.rs @@ -220,7 +220,9 @@ mod test { { use nexus_db_queries::db::schema::dns_version::dsl; diesel::delete(dsl::dns_version.filter(dsl::version.eq(2))) - .execute_async(datastore.pool_for_tests().await.unwrap()) + .execute_async( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) .await .unwrap(); } @@ -236,7 +238,7 @@ mod test { // Similarly, wipe all of the state and verify that we handle that okay. 
datastore - .pool_for_tests() + .pool_connection_for_tests() .await .unwrap() .transaction_async(|conn| async move { diff --git a/nexus/src/app/background/dns_servers.rs b/nexus/src/app/background/dns_servers.rs index 419b94d360..3a75c09302 100644 --- a/nexus/src/app/background/dns_servers.rs +++ b/nexus/src/app/background/dns_servers.rs @@ -237,7 +237,9 @@ mod test { SocketAddrV6::new(Ipv6Addr::LOCALHOST, 1, 0, 0), ServiceKind::InternalDns, )) - .execute_async(datastore.pool_for_tests().await.unwrap()) + .execute_async( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) .await .unwrap(); } @@ -265,7 +267,9 @@ mod test { diesel::insert_into(dsl::service) .values(new_services) - .execute_async(datastore.pool_for_tests().await.unwrap()) + .execute_async( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) .await .unwrap(); } @@ -281,7 +285,9 @@ mod test { diesel::delete( dsl::service.filter(dsl::kind.eq(ServiceKind::InternalDns)), ) - .execute_async(datastore.pool_for_tests().await.unwrap()) + .execute_async( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) .await .unwrap(); } diff --git a/nexus/src/app/background/init.rs b/nexus/src/app/background/init.rs index 5d1568bcb5..aa949bbc9f 100644 --- a/nexus/src/app/background/init.rs +++ b/nexus/src/app/background/init.rs @@ -370,7 +370,7 @@ pub mod test { ) { type TxnError = TransactionError<()>; { - let conn = datastore.pool_for_tests().await.unwrap(); + let conn = datastore.pool_connection_for_tests().await.unwrap(); let _: Result<(), TxnError> = conn .transaction_async(|conn| async move { { diff --git a/nexus/src/app/sagas/disk_create.rs b/nexus/src/app/sagas/disk_create.rs index 8e2e1d0a04..cca36cefa7 100644 --- a/nexus/src/app/sagas/disk_create.rs +++ b/nexus/src/app/sagas/disk_create.rs @@ -921,7 +921,9 @@ pub(crate) mod test { dsl::disk .filter(dsl::time_deleted.is_null()) .select(Disk::as_select()) - .first_async::(datastore.pool_for_tests().await.unwrap()) + .first_async::( + 
&*datastore.pool_connection_for_tests().await.unwrap(), + ) .await .optional() .unwrap() @@ -935,7 +937,9 @@ pub(crate) mod test { dsl::volume .filter(dsl::time_deleted.is_null()) .select(Volume::as_select()) - .first_async::(datastore.pool_for_tests().await.unwrap()) + .first_async::( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) .await .optional() .unwrap() @@ -951,7 +955,7 @@ pub(crate) mod test { dsl::virtual_provisioning_resource .select(VirtualProvisioningResource::as_select()) .first_async::( - datastore.pool_for_tests().await.unwrap(), + &*datastore.pool_connection_for_tests().await.unwrap(), ) .await .optional() @@ -966,7 +970,7 @@ pub(crate) mod test { use nexus_db_queries::db::schema::virtual_provisioning_collection::dsl; datastore - .pool_for_tests() + .pool_connection_for_tests() .await .unwrap() .transaction_async(|conn| async move { diff --git a/nexus/src/app/sagas/instance_create.rs b/nexus/src/app/sagas/instance_create.rs index d5af080381..6fc93ce8db 100644 --- a/nexus/src/app/sagas/instance_create.rs +++ b/nexus/src/app/sagas/instance_create.rs @@ -1467,7 +1467,9 @@ pub mod test { dsl::instance .filter(dsl::time_deleted.is_null()) .select(Instance::as_select()) - .first_async::(datastore.pool_for_tests().await.unwrap()) + .first_async::( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) .await .optional() .unwrap() @@ -1484,7 +1486,7 @@ pub mod test { .filter(dsl::kind.eq(NetworkInterfaceKind::Instance)) .select(NetworkInterface::as_select()) .first_async::( - datastore.pool_for_tests().await.unwrap(), + &*datastore.pool_connection_for_tests().await.unwrap(), ) .await .optional() @@ -1501,7 +1503,7 @@ pub mod test { .filter(dsl::is_service.eq(false)) .select(ExternalIp::as_select()) .first_async::( - datastore.pool_for_tests().await.unwrap(), + &*datastore.pool_connection_for_tests().await.unwrap(), ) .await .optional() @@ -1516,7 +1518,7 @@ pub mod test { use nexus_db_queries::db::schema::sled_resource::dsl; datastore 
- .pool_for_tests() + .pool_connection_for_tests() .await .unwrap() .transaction_async(|conn| async move { @@ -1550,7 +1552,7 @@ pub mod test { use nexus_db_queries::db::model::VirtualProvisioningResource; use nexus_db_queries::db::schema::virtual_provisioning_resource::dsl; - datastore.pool_for_tests() + datastore.pool_connection_for_tests() .await .unwrap() .transaction_async(|conn| async move { @@ -1578,7 +1580,7 @@ pub mod test { use nexus_db_queries::db::schema::virtual_provisioning_collection::dsl; datastore - .pool_for_tests() + .pool_connection_for_tests() .await .unwrap() .transaction_async(|conn| async move { @@ -1615,7 +1617,9 @@ pub mod test { .filter(dsl::time_deleted.is_null()) .filter(dsl::name.eq(DISK_NAME)) .select(Disk::as_select()) - .first_async::(datastore.pool_for_tests().await.unwrap()) + .first_async::( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) .await .unwrap() .runtime_state diff --git a/nexus/src/app/sagas/project_create.rs b/nexus/src/app/sagas/project_create.rs index 65efabd8e9..1cbf9070ee 100644 --- a/nexus/src/app/sagas/project_create.rs +++ b/nexus/src/app/sagas/project_create.rs @@ -213,7 +213,9 @@ mod test { // ignore built-in services project .filter(dsl::id.ne(*SERVICES_PROJECT_ID)) .select(Project::as_select()) - .first_async::(datastore.pool_for_tests().await.unwrap()) + .first_async::( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) .await .optional() .unwrap() @@ -230,7 +232,7 @@ mod test { use nexus_db_queries::db::model::VirtualProvisioningCollection; use nexus_db_queries::db::schema::virtual_provisioning_collection::dsl; - datastore.pool_for_tests() + datastore.pool_connection_for_tests() .await .unwrap() .transaction_async(|conn| async move { diff --git a/nexus/src/app/sagas/snapshot_create.rs b/nexus/src/app/sagas/snapshot_create.rs index bcebd17021..b27f4a3a9b 100644 --- a/nexus/src/app/sagas/snapshot_create.rs +++ b/nexus/src/app/sagas/snapshot_create.rs @@ -1885,7 +1885,9 @@ mod test { 
dsl::snapshot .filter(dsl::time_deleted.is_null()) .select(Snapshot::as_select()) - .first_async::(datastore.pool_for_tests().await.unwrap()) + .first_async::( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) .await .optional() .unwrap() @@ -1899,7 +1901,7 @@ mod test { dsl::region_snapshot .select(RegionSnapshot::as_select()) .first_async::( - datastore.pool_for_tests().await.unwrap(), + &*datastore.pool_connection_for_tests().await.unwrap(), ) .await .optional() diff --git a/nexus/src/app/sagas/test_helpers.rs b/nexus/src/app/sagas/test_helpers.rs index 29f743a350..aa9334b682 100644 --- a/nexus/src/app/sagas/test_helpers.rs +++ b/nexus/src/app/sagas/test_helpers.rs @@ -398,7 +398,7 @@ pub(crate) async fn assert_no_failed_undo_steps( use nexus_db_queries::db::model::saga_types::SagaNodeEvent; let saga_node_events: Vec = datastore - .pool_for_tests() + .pool_connection_for_tests() .await .unwrap() .transaction_async(|conn| async move { diff --git a/nexus/src/app/sagas/vpc_create.rs b/nexus/src/app/sagas/vpc_create.rs index 97961a6fa1..85eed6616d 100644 --- a/nexus/src/app/sagas/vpc_create.rs +++ b/nexus/src/app/sagas/vpc_create.rs @@ -599,7 +599,9 @@ pub(crate) mod test { // ignore built-in services VPC .filter(dsl::id.ne(*SERVICES_VPC_ID)) .select(Vpc::as_select()) - .first_async::(datastore.pool_for_tests().await.unwrap()) + .first_async::( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) .await .optional() .unwrap() @@ -618,7 +620,9 @@ pub(crate) mod test { // ignore built-in services VPC .filter(dsl::vpc_id.ne(*SERVICES_VPC_ID)) .select(VpcRouter::as_select()) - .first_async::(datastore.pool_for_tests().await.unwrap()) + .first_async::( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) .await .optional() .unwrap() @@ -646,7 +650,7 @@ pub(crate) mod test { ) .select(RouterRoute::as_select()) .first_async::( - datastore.pool_for_tests().await.unwrap(), + &*datastore.pool_connection_for_tests().await.unwrap(), ) .await 
.optional() @@ -666,7 +670,9 @@ pub(crate) mod test { // ignore built-in services VPC .filter(dsl::vpc_id.ne(*SERVICES_VPC_ID)) .select(VpcSubnet::as_select()) - .first_async::(datastore.pool_for_tests().await.unwrap()) + .first_async::( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) .await .optional() .unwrap() @@ -686,7 +692,7 @@ pub(crate) mod test { .filter(dsl::vpc_id.ne(*SERVICES_VPC_ID)) .select(VpcFirewallRule::as_select()) .first_async::( - datastore.pool_for_tests().await.unwrap(), + &*datastore.pool_connection_for_tests().await.unwrap(), ) .await .optional() From 8a11c709b49eed5bbe980d2f3e69ddf115278914 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 29 Sep 2023 14:28:35 -0700 Subject: [PATCH 04/35] [schema] Add more strict CRDB index comparison (#4152) Adds more schema comparison for DB schemas, and adds a couple regression tests for the specific index mismatch. Fixes #4143 --- nexus/tests/integration_tests/schema.rs | 174 +++++++++++++++++++++++- 1 file changed, 172 insertions(+), 2 deletions(-) diff --git a/nexus/tests/integration_tests/schema.rs b/nexus/tests/integration_tests/schema.rs index 67dfa6c255..49abf67cc8 100644 --- a/nexus/tests/integration_tests/schema.rs +++ b/nexus/tests/integration_tests/schema.rs @@ -15,7 +15,7 @@ use omicron_common::api::internal::shared::SwitchLocation; use omicron_common::nexus_config::Config; use omicron_common::nexus_config::SchemaConfig; use omicron_test_utils::dev::db::CockroachInstance; -use pretty_assertions::assert_eq; +use pretty_assertions::{assert_eq, assert_ne}; use similar_asserts; use slog::Logger; use std::collections::{BTreeMap, BTreeSet}; @@ -132,6 +132,7 @@ enum AnySqlType { String(String), Bool(bool), Uuid(Uuid), + Int8(i64), // TODO: This isn't exhaustive, feel free to add more. 
// // These should only be necessary for rows where the database schema changes also choose to @@ -167,6 +168,9 @@ impl<'a> tokio_postgres::types::FromSql<'a> for AnySqlType { if Uuid::accepts(ty) { return Ok(AnySqlType::Uuid(Uuid::from_sql(ty, raw)?)); } + if i64::accepts(ty) { + return Ok(AnySqlType::Int8(i64::from_sql(ty, raw)?)); + } Err(anyhow::anyhow!( "Cannot parse type {ty}. If you're trying to use this type in a table which is populated \ during a schema migration, consider adding it to `AnySqlType`." @@ -432,6 +436,16 @@ const CHECK_CONSTRAINTS: [&'static str; 4] = [ "check_clause", ]; +const CONSTRAINT_COLUMN_USAGE: [&'static str; 7] = [ + "table_catalog", + "table_schema", + "table_name", + "column_name", + "constraint_catalog", + "constraint_schema", + "constraint_name", +]; + const KEY_COLUMN_USAGE: [&'static str; 7] = [ "constraint_catalog", "constraint_schema", @@ -456,29 +470,50 @@ const REFERENTIAL_CONSTRAINTS: [&'static str; 8] = [ const VIEWS: [&'static str; 4] = ["table_catalog", "table_schema", "table_name", "view_definition"]; -const STATISTICS: [&'static str; 8] = [ +const STATISTICS: [&'static str; 11] = [ "table_catalog", "table_schema", "table_name", "non_unique", "index_schema", "index_name", + "seq_in_index", "column_name", "direction", + "storing", + "implicit", ]; +const PG_INDEXES: [&'static str; 5] = + ["schemaname", "tablename", "indexname", "tablespace", "indexdef"]; + const TABLES: [&'static str; 4] = ["table_catalog", "table_schema", "table_name", "table_type"]; +const TABLE_CONSTRAINTS: [&'static str; 9] = [ + "constraint_catalog", + "constraint_schema", + "constraint_name", + "table_catalog", + "table_schema", + "table_name", + "constraint_type", + "is_deferrable", + "initially_deferred", +]; + #[derive(Eq, PartialEq, Debug)] struct InformationSchema { columns: Vec, check_constraints: Vec, + constraint_column_usage: Vec, key_column_usage: Vec, referential_constraints: Vec, views: Vec, statistics: Vec, + pg_indexes: Vec, 
tables: Vec, + table_constraints: Vec, } impl InformationSchema { @@ -490,6 +525,10 @@ impl InformationSchema { self.check_constraints, other.check_constraints ); + similar_asserts::assert_eq!( + self.constraint_column_usage, + other.constraint_column_usage + ); similar_asserts::assert_eq!( self.key_column_usage, other.key_column_usage @@ -500,7 +539,12 @@ impl InformationSchema { ); similar_asserts::assert_eq!(self.views, other.views); similar_asserts::assert_eq!(self.statistics, other.statistics); + similar_asserts::assert_eq!(self.pg_indexes, other.pg_indexes); similar_asserts::assert_eq!(self.tables, other.tables); + similar_asserts::assert_eq!( + self.table_constraints, + other.table_constraints + ); } async fn new(crdb: &CockroachInstance) -> Self { @@ -524,6 +568,14 @@ impl InformationSchema { ) .await; + let constraint_column_usage = query_crdb_for_rows_of_strings( + crdb, + CONSTRAINT_COLUMN_USAGE.as_slice().into(), + "information_schema.constraint_column_usage", + None, + ) + .await; + let key_column_usage = query_crdb_for_rows_of_strings( crdb, KEY_COLUMN_USAGE.as_slice().into(), @@ -556,6 +608,14 @@ impl InformationSchema { ) .await; + let pg_indexes = query_crdb_for_rows_of_strings( + crdb, + PG_INDEXES.as_slice().into(), + "pg_indexes", + Some("schemaname = 'public'"), + ) + .await; + let tables = query_crdb_for_rows_of_strings( crdb, TABLES.as_slice().into(), @@ -564,14 +624,25 @@ impl InformationSchema { ) .await; + let table_constraints = query_crdb_for_rows_of_strings( + crdb, + TABLE_CONSTRAINTS.as_slice().into(), + "information_schema.table_constraints", + Some("table_schema = 'public'"), + ) + .await; + Self { columns, check_constraints, + constraint_column_usage, key_column_usage, referential_constraints, views, statistics, + pg_indexes, tables, + table_constraints, } } @@ -659,3 +730,102 @@ async fn dbinit_equals_sum_of_all_up() { crdb.cleanup().await.unwrap(); logctx.cleanup_successful(); } + +// Returns the InformationSchema object for a 
database populated via `sql`. +async fn get_information_schema(log: &Logger, sql: &str) -> InformationSchema { + let populate = false; + let mut crdb = test_setup_just_crdb(&log, populate).await; + + let client = crdb.connect().await.expect("failed to connect"); + client.batch_execute(sql).await.expect("failed to apply SQL"); + + let observed_schema = InformationSchema::new(&crdb).await; + crdb.cleanup().await.unwrap(); + observed_schema +} + +// Reproduction case for https://github.com/oxidecomputer/omicron/issues/4143 +#[tokio::test] +async fn compare_index_creation_differing_where_clause() { + let config = load_test_config(); + let logctx = LogContext::new( + "compare_index_creation_differing_where_clause", + &config.pkg.log, + ); + let log = &logctx.log; + + let schema1 = get_information_schema(log, " + CREATE DATABASE omicron; + CREATE TABLE omicron.public.animal ( + id UUID PRIMARY KEY, + name TEXT, + time_deleted TIMESTAMPTZ + ); + + CREATE INDEX IF NOT EXISTS lookup_animal_by_name ON omicron.public.animal ( + name, id + ) WHERE name IS NOT NULL AND time_deleted IS NULL; + ").await; + + let schema2 = get_information_schema(log, " + CREATE DATABASE omicron; + CREATE TABLE omicron.public.animal ( + id UUID PRIMARY KEY, + name TEXT, + time_deleted TIMESTAMPTZ + ); + + CREATE INDEX IF NOT EXISTS lookup_animal_by_name ON omicron.public.animal ( + name, id + ) WHERE time_deleted IS NULL; + ").await; + + // pg_indexes includes a column "indexdef" that compares partial indexes. + // This should catch the differing "WHERE" clause. 
+ assert_ne!(schema1.pg_indexes, schema2.pg_indexes); + + logctx.cleanup_successful(); +} + +// Reproduction case for https://github.com/oxidecomputer/omicron/issues/4143 +#[tokio::test] +async fn compare_index_creation_differing_columns() { + let config = load_test_config(); + let logctx = LogContext::new( + "compare_index_creation_differing_columns", + &config.pkg.log, + ); + let log = &logctx.log; + + let schema1 = get_information_schema(log, " + CREATE DATABASE omicron; + CREATE TABLE omicron.public.animal ( + id UUID PRIMARY KEY, + name TEXT, + time_deleted TIMESTAMPTZ + ); + + CREATE INDEX IF NOT EXISTS lookup_animal_by_name ON omicron.public.animal ( + name + ) WHERE name IS NOT NULL AND time_deleted IS NULL; + ").await; + + let schema2 = get_information_schema(log, " + CREATE DATABASE omicron; + CREATE TABLE omicron.public.animal ( + id UUID PRIMARY KEY, + name TEXT, + time_deleted TIMESTAMPTZ + ); + + CREATE INDEX IF NOT EXISTS lookup_animal_by_name ON omicron.public.animal ( + name, id + ) WHERE name IS NOT NULL AND time_deleted IS NULL; + ").await; + + // "statistics" identifies table indices. + // These tables should differ in the "implicit" column. + assert_ne!(schema1.statistics, schema2.statistics); + + logctx.cleanup_successful(); +} From d18ad53df61334dfe83017278c5326b0c810ebda Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Mon, 2 Oct 2023 06:25:05 -0700 Subject: [PATCH 05/35] Update for RoT staging/dev 1.0.2 (#4167) --- .github/buildomat/jobs/tuf-repo.sh | 2 +- tools/dvt_dock_version | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/buildomat/jobs/tuf-repo.sh b/.github/buildomat/jobs/tuf-repo.sh index a06468c6b2..fab6770564 100644 --- a/.github/buildomat/jobs/tuf-repo.sh +++ b/.github/buildomat/jobs/tuf-repo.sh @@ -218,7 +218,7 @@ EOF done } # usage: SERIES ROT_DIR ROT_VERSION BOARDS... 
-add_hubris_artifacts rot-staging-dev staging/dev cert-staging-dev-v1.0.0 "${ALL_BOARDS[@]}" +add_hubris_artifacts rot-staging-dev staging/dev cert-staging-dev-v1.0.2 "${ALL_BOARDS[@]}" add_hubris_artifacts rot-prod-rel prod/rel cert-prod-rel-v1.0.0 "${ALL_BOARDS[@]}" for series in "${SERIES_LIST[@]}"; do diff --git a/tools/dvt_dock_version b/tools/dvt_dock_version index d7c2d31948..e2151b846f 100644 --- a/tools/dvt_dock_version +++ b/tools/dvt_dock_version @@ -1 +1 @@ -COMMIT=3cc151e62af190062780389eeae78937c3041021 +COMMIT=65f1979c1d3f4d0874a64144941cc41b46a70c80 From 76464a18f12a321ad2f0f40d597057e168d3ce50 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Mon, 2 Oct 2023 09:17:51 -0700 Subject: [PATCH 06/35] [schema] More schema comparisons (sequences, views, constraints) (#4153) Builds on https://github.com/oxidecomputer/omicron/pull/4152 , and compares more of the CRDB schema. This PR: - Compares sequences (even though we don't have any in Omicron yet) and adds a test for them - Adds a test to compare views - Adds a test to compare constraints --- nexus/tests/integration_tests/schema.rs | 153 ++++++++++++++++++++++++ 1 file changed, 153 insertions(+) diff --git a/nexus/tests/integration_tests/schema.rs b/nexus/tests/integration_tests/schema.rs index 49abf67cc8..2c62f156e1 100644 --- a/nexus/tests/integration_tests/schema.rs +++ b/nexus/tests/integration_tests/schema.rs @@ -484,6 +484,21 @@ const STATISTICS: [&'static str; 11] = [ "implicit", ]; +const SEQUENCES: [&'static str; 12] = [ + "sequence_catalog", + "sequence_schema", + "sequence_name", + "data_type", + "numeric_precision", + "numeric_precision_radix", + "numeric_scale", + "start_value", + "minimum_value", + "maximum_value", + "increment", + "cycle_option", +]; + const PG_INDEXES: [&'static str; 5] = ["schemaname", "tablename", "indexname", "tablespace", "indexdef"]; @@ -511,6 +526,7 @@ struct InformationSchema { referential_constraints: Vec, views: Vec, statistics: Vec, + sequences: Vec, pg_indexes: 
Vec, tables: Vec, table_constraints: Vec, @@ -539,6 +555,7 @@ impl InformationSchema { ); similar_asserts::assert_eq!(self.views, other.views); similar_asserts::assert_eq!(self.statistics, other.statistics); + similar_asserts::assert_eq!(self.sequences, other.sequences); similar_asserts::assert_eq!(self.pg_indexes, other.pg_indexes); similar_asserts::assert_eq!(self.tables, other.tables); similar_asserts::assert_eq!( @@ -608,6 +625,14 @@ impl InformationSchema { ) .await; + let sequences = query_crdb_for_rows_of_strings( + crdb, + SEQUENCES.as_slice().into(), + "information_schema.sequences", + None, + ) + .await; + let pg_indexes = query_crdb_for_rows_of_strings( crdb, PG_INDEXES.as_slice().into(), @@ -640,6 +665,7 @@ impl InformationSchema { referential_constraints, views, statistics, + sequences, pg_indexes, tables, table_constraints, @@ -829,3 +855,130 @@ async fn compare_index_creation_differing_columns() { logctx.cleanup_successful(); } + +#[tokio::test] +async fn compare_view_differing_where_clause() { + let config = load_test_config(); + let logctx = + LogContext::new("compare_view_differing_where_clause", &config.pkg.log); + let log = &logctx.log; + + let schema1 = get_information_schema( + log, + " + CREATE DATABASE omicron; + CREATE TABLE omicron.public.animal ( + id UUID PRIMARY KEY, + name TEXT, + time_deleted TIMESTAMPTZ + ); + + CREATE VIEW live_view AS + SELECT animal.id, animal.name + FROM omicron.public.animal + WHERE animal.time_deleted IS NOT NULL; + ", + ) + .await; + + let schema2 = get_information_schema( + log, + " + CREATE DATABASE omicron; + CREATE TABLE omicron.public.animal ( + id UUID PRIMARY KEY, + name TEXT, + time_deleted TIMESTAMPTZ + ); + + CREATE VIEW live_view AS + SELECT animal.id, animal.name + FROM omicron.public.animal + WHERE animal.time_deleted IS NOT NULL AND animal.name = 'Thomas'; + ", + ) + .await; + + assert_ne!(schema1.views, schema2.views); + + logctx.cleanup_successful(); +} + +#[tokio::test] +async fn 
compare_sequence_differing_increment() { + let config = load_test_config(); + let logctx = LogContext::new( + "compare_sequence_differing_increment", + &config.pkg.log, + ); + let log = &logctx.log; + + let schema1 = get_information_schema( + log, + " + CREATE DATABASE omicron; + CREATE SEQUENCE omicron.public.myseq START 1 INCREMENT 1; + ", + ) + .await; + + let schema2 = get_information_schema( + log, + " + CREATE DATABASE omicron; + CREATE SEQUENCE omicron.public.myseq START 1 INCREMENT 2; + ", + ) + .await; + + assert_ne!(schema1.sequences, schema2.sequences); + + logctx.cleanup_successful(); +} + +#[tokio::test] +async fn compare_table_differing_constraint() { + let config = load_test_config(); + let logctx = + LogContext::new("compare_table_differing_constraint", &config.pkg.log); + let log = &logctx.log; + + let schema1 = get_information_schema( + log, + " + CREATE DATABASE omicron; + CREATE TABLE omicron.public.animal ( + id UUID PRIMARY KEY, + name TEXT, + time_deleted TIMESTAMPTZ, + + CONSTRAINT dead_animals_have_names CHECK ( + (time_deleted IS NULL) OR + (name IS NOT NULL) + ) + ); + ", + ) + .await; + + let schema2 = get_information_schema( + log, + " + CREATE DATABASE omicron; + CREATE TABLE omicron.public.animal ( + id UUID PRIMARY KEY, + name TEXT, + time_deleted TIMESTAMPTZ, + + CONSTRAINT dead_animals_have_names CHECK ( + (time_deleted IS NULL) OR + (name IS NULL) + ) + ); + ", + ) + .await; + + assert_ne!(schema1.check_constraints, schema2.check_constraints); + logctx.cleanup_successful(); +} From 777277d073d92333f94deee3da68149705c18a78 Mon Sep 17 00:00:00 2001 From: Adam Leventhal Date: Mon, 2 Oct 2023 10:26:11 -0700 Subject: [PATCH 07/35] we don't need workspace deps for dev-tools (and omdb is stale) (#4170) We don't seem to need the `omicron-dev` dependency in `Cargo.toml` and the `omdb` one refers to its old location. 
--- Cargo.toml | 2 -- 1 file changed, 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 0e194394f9..63d8e0b2d6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -236,10 +236,8 @@ nexus-types = { path = "nexus/types" } num-integer = "0.1.45" num = { version = "0.4.1", default-features = false, features = [ "libm" ] } omicron-common = { path = "common" } -omicron-dev = { path = "dev-tools/omicron-dev" } omicron-gateway = { path = "gateway" } omicron-nexus = { path = "nexus" } -omicron-omdb = { path = "omdb" } omicron-package = { path = "package" } omicron-rpaths = { path = "rpaths" } omicron-sled-agent = { path = "sled-agent" } From 1c0553a682f21448755c700548107da4367d2443 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Mon, 2 Oct 2023 11:01:14 -0700 Subject: [PATCH 08/35] [omicron-package] Retry failed downloads automatically (#4168) - Automatically retries downloads, with a small amount of constant backoff - Provides config options for users to customize these retry attempts / backoffs - Extends progress bars to show when download failures occur Tested manually, by running: ```bash cargo run --bin=omicron-package -- --retry-count 5 package ``` Then cycling my machine's network connectivity. I observed the "Failed to download prebuilt messages", saw the attempts ticking down, and then reconnected. Once connectivity was restored, the download succeeded. 
Fixes https://github.com/oxidecomputer/omicron/issues/4165 --- package/src/bin/omicron-package.rs | 163 ++++++++++++++++++++++------- 1 file changed, 125 insertions(+), 38 deletions(-) diff --git a/package/src/bin/omicron-package.rs b/package/src/bin/omicron-package.rs index a0146eee50..ea490e54cf 100644 --- a/package/src/bin/omicron-package.rs +++ b/package/src/bin/omicron-package.rs @@ -41,6 +41,11 @@ enum SubCommand { Deploy(DeployCommand), } +fn parse_duration_ms(arg: &str) -> Result { + let ms = arg.parse()?; + Ok(std::time::Duration::from_millis(ms)) +} + #[derive(Debug, Parser)] #[clap(name = "packaging tool")] struct Args { @@ -77,6 +82,23 @@ struct Args { )] force: bool, + #[clap( + long, + help = "Number of retries to use when re-attempting failed package downloads", + action, + default_value_t = 10 + )] + retry_count: usize, + + #[clap( + long, + help = "Duration, in ms, to wait before re-attempting failed package downloads", + action, + value_parser = parse_duration_ms, + default_value = "1000", + )] + retry_duration: std::time::Duration, + #[clap(subcommand)] subcommand: SubCommand, } @@ -303,8 +325,63 @@ async fn get_sha256_digest(path: &PathBuf) -> Result { Ok(context.finish()) } +async fn download_prebuilt( + progress: &PackageProgress, + package_name: &str, + repo: &str, + commit: &str, + expected_digest: &Vec, + path: &Path, +) -> Result<()> { + progress.set_message("downloading prebuilt".into()); + let url = format!( + "https://buildomat.eng.oxide.computer/public/file/oxidecomputer/{}/image/{}/{}", + repo, + commit, + path.file_name().unwrap().to_string_lossy(), + ); + let response = reqwest::Client::new() + .get(&url) + .send() + .await + .with_context(|| format!("failed to get {url}"))?; + progress.set_length( + response + .content_length() + .ok_or_else(|| anyhow!("Missing Content Length"))?, + ); + let mut file = tokio::fs::File::create(&path) + .await + .with_context(|| format!("failed to create {path:?}"))?; + let mut stream = 
response.bytes_stream(); + let mut context = DigestContext::new(&SHA256); + while let Some(chunk) = stream.next().await { + let chunk = chunk + .with_context(|| format!("failed reading response from {url}"))?; + // Update the running SHA digest + context.update(&chunk); + // Update the downloaded file + file.write_all(&chunk) + .await + .with_context(|| format!("failed writing {path:?}"))?; + // Record progress in the UI + progress.increment(chunk.len().try_into().unwrap()); + } + + let digest = context.finish(); + if digest.as_ref() != expected_digest { + bail!( + "Digest mismatch downloading {package_name}: Saw {}, expected {}", + hex::encode(digest.as_ref()), + hex::encode(expected_digest) + ); + } + Ok(()) +} + // Ensures a package exists, either by creating it or downloading it. async fn get_package( + config: &Config, target: &Target, ui: &Arc, package_name: &String, @@ -328,45 +405,30 @@ async fn get_package( }; if should_download { - progress.set_message("downloading prebuilt".into()); - let url = format!( - "https://buildomat.eng.oxide.computer/public/file/oxidecomputer/{}/image/{}/{}", - repo, - commit, - path.as_path().file_name().unwrap().to_string_lossy(), - ); - let response = reqwest::Client::new() - .get(&url) - .send() - .await - .with_context(|| format!("failed to get {url}"))?; - progress.set_length( - response - .content_length() - .ok_or_else(|| anyhow!("Missing Content Length"))?, - ); - let mut file = tokio::fs::File::create(&path) + let mut attempts_left = config.retry_count + 1; + loop { + match download_prebuilt( + &progress, + package_name, + repo, + commit, + &expected_digest, + path.as_path(), + ) .await - .with_context(|| format!("failed to create {path:?}"))?; - let mut stream = response.bytes_stream(); - let mut context = DigestContext::new(&SHA256); - while let Some(chunk) = stream.next().await { - let chunk = chunk.with_context(|| { - format!("failed reading response from {url}") - })?; - // Update the running SHA digest - 
context.update(&chunk); - // Update the downloaded file - file.write_all(&chunk) - .await - .with_context(|| format!("failed writing {path:?}"))?; - // Record progress in the UI - progress.increment(chunk.len().try_into().unwrap()); - } - - let digest = context.finish(); - if digest.as_ref() != expected_digest { - bail!("Digest mismatch downloading {package_name}: Saw {}, expected {}", hex::encode(digest.as_ref()), hex::encode(expected_digest)); + { + Ok(()) => break, + Err(err) => { + attempts_left -= 1; + let msg = format!("Failed to download prebuilt ({attempts_left} attempts remaining)"); + progress.set_error_message(msg.into()); + if attempts_left == 0 { + bail!("Failed to download package: {err}"); + } + tokio::time::sleep(config.retry_duration).await; + progress.reset(); + } + } } } } @@ -463,6 +525,7 @@ async fn do_package(config: &Config, output_directory: &Path) -> Result<()> { None, |((package_name, package), ui)| async move { get_package( + &config, &config.target, &ui, package_name, @@ -761,6 +824,13 @@ fn completed_progress_style() -> ProgressStyle { .progress_chars("#>.") } +fn error_progress_style() -> ProgressStyle { + ProgressStyle::default_bar() + .template("[{elapsed_precise}] {bar:40.cyan/blue} {pos:>7}/{len:7} {msg:.red}") + .expect("Invalid template") + .progress_chars("#>.") +} + // Struct managing display of progress to UI. 
struct ProgressUI { multi: MultiProgress, @@ -782,10 +852,21 @@ impl PackageProgress { fn set_length(&self, total: u64) { self.pb.set_length(total); } + + fn set_error_message(&self, message: std::borrow::Cow<'static, str>) { + self.pb.set_style(error_progress_style()); + self.pb.set_message(format!("{}: {}", self.service_name, message)); + self.pb.tick(); + } + + fn reset(&self) { + self.pb.reset(); + } } impl Progress for PackageProgress { fn set_message(&self, message: std::borrow::Cow<'static, str>) { + self.pb.set_style(in_progress_style()); self.pb.set_message(format!("{}: {}", self.service_name, message)); self.pb.tick(); } @@ -820,6 +901,10 @@ struct Config { target: Target, // True if we should skip confirmations for destructive operations. force: bool, + // Number of times to retry failed downloads. + retry_count: usize, + // Duration to wait before retrying failed downloads. + retry_duration: std::time::Duration, } impl Config { @@ -886,6 +971,8 @@ async fn main() -> Result<()> { package_config, target, force: args.force, + retry_count: args.retry_count, + retry_duration: args.retry_duration, }) }; From e9210fc2d9987fb6a880356a10a8308bc0f81678 Mon Sep 17 00:00:00 2001 From: Rain Date: Mon, 2 Oct 2023 11:13:16 -0700 Subject: [PATCH 09/35] [workspace-hack] record exact versions (#4171) Exact versions work better with dependabot. This can be reverted with renovate if we so choose. --- .config/hakari.toml | 5 +- workspace-hack/Cargo.toml | 446 +++++++++++++++++++------------------- 2 files changed, 226 insertions(+), 225 deletions(-) diff --git a/.config/hakari.toml b/.config/hakari.toml index 9562f92300..62f15df276 100644 --- a/.config/hakari.toml +++ b/.config/hakari.toml @@ -22,8 +22,9 @@ platforms = [ # "x86_64-pc-windows-msvc", ] +# Write out exact versions rather than a semver range. (Defaults to false.) +exact-versions = true + [traversal-excludes] workspace-members = ["xtask"] -# Write out exact versions rather than a semver range. 
(Defaults to false.) -# exact-versions = true diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index d3e00b1831..820b2d2336 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -14,248 +14,248 @@ publish = false ### BEGIN HAKARI SECTION [dependencies] -anyhow = { version = "1", features = ["backtrace"] } -bit-set = { version = "0.5" } -bit-vec = { version = "0.6" } -bitflags-dff4ba8e3ae991db = { package = "bitflags", version = "1" } -bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2", default-features = false, features = ["serde"] } -bitvec = { version = "1" } -bstr-6f8ce4dd05d13bba = { package = "bstr", version = "0.2" } -bstr-dff4ba8e3ae991db = { package = "bstr", version = "1" } -bytes = { version = "1", features = ["serde"] } -chrono = { version = "0.4", features = ["alloc", "serde"] } -cipher = { version = "0.4", default-features = false, features = ["block-padding", "zeroize"] } -clap = { version = "4", features = ["derive", "env", "wrap_help"] } -clap_builder = { version = "4", default-features = false, features = ["color", "env", "std", "suggestions", "usage", "wrap_help"] } -console = { version = "0.15" } -const-oid = { version = "0.9", default-features = false, features = ["db", "std"] } -crossbeam-epoch = { version = "0.9" } -crossbeam-utils = { version = "0.8" } -crypto-common = { version = "0.1", default-features = false, features = ["getrandom", "std"] } -diesel = { version = "2", features = ["chrono", "i-implement-a-third-party-backend-and-opt-into-breaking-changes", "network-address", "postgres", "r2d2", "serde_json", "uuid"] } -digest = { version = "0.10", features = ["mac", "oid", "std"] } -either = { version = "1" } -flate2 = { version = "1" } -futures = { version = "0.3" } -futures-channel = { version = "0.3", features = ["sink"] } -futures-core = { version = "0.3" } -futures-io = { version = "0.3", default-features = false, features = ["std"] } -futures-sink = { version = "0.3" } 
-futures-task = { version = "0.3", default-features = false, features = ["std"] } -futures-util = { version = "0.3", features = ["channel", "io", "sink"] } +anyhow = { version = "1.0.75", features = ["backtrace"] } +bit-set = { version = "0.5.3" } +bit-vec = { version = "0.6.3" } +bitflags-dff4ba8e3ae991db = { package = "bitflags", version = "1.3.2" } +bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["serde"] } +bitvec = { version = "1.0.1" } +bstr-6f8ce4dd05d13bba = { package = "bstr", version = "0.2.17" } +bstr-dff4ba8e3ae991db = { package = "bstr", version = "1.6.0" } +bytes = { version = "1.5.0", features = ["serde"] } +chrono = { version = "0.4.31", features = ["alloc", "serde"] } +cipher = { version = "0.4.4", default-features = false, features = ["block-padding", "zeroize"] } +clap = { version = "4.4.3", features = ["derive", "env", "wrap_help"] } +clap_builder = { version = "4.4.2", default-features = false, features = ["color", "env", "std", "suggestions", "usage", "wrap_help"] } +console = { version = "0.15.7" } +const-oid = { version = "0.9.5", default-features = false, features = ["db", "std"] } +crossbeam-epoch = { version = "0.9.15" } +crossbeam-utils = { version = "0.8.16" } +crypto-common = { version = "0.1.6", default-features = false, features = ["getrandom", "std"] } +diesel = { version = "2.1.1", features = ["chrono", "i-implement-a-third-party-backend-and-opt-into-breaking-changes", "network-address", "postgres", "r2d2", "serde_json", "uuid"] } +digest = { version = "0.10.7", features = ["mac", "oid", "std"] } +either = { version = "1.9.0" } +flate2 = { version = "1.0.27" } +futures = { version = "0.3.28" } +futures-channel = { version = "0.3.28", features = ["sink"] } +futures-core = { version = "0.3.28" } +futures-io = { version = "0.3.28", default-features = false, features = ["std"] } +futures-sink = { version = "0.3.28" } +futures-task = { version = "0.3.28", default-features = 
false, features = ["std"] } +futures-util = { version = "0.3.28", features = ["channel", "io", "sink"] } gateway-messages = { git = "https://github.com/oxidecomputer/management-gateway-service", rev = "1e180ae55e56bd17af35cb868ffbd18ce487351d", features = ["std"] } -generic-array = { version = "0.14", default-features = false, features = ["more_lengths", "zeroize"] } -getrandom = { version = "0.2", default-features = false, features = ["js", "rdrand", "std"] } -hashbrown-582f2526e08bb6a0 = { package = "hashbrown", version = "0.14", features = ["raw"] } -hashbrown-594e8ee84c453af0 = { package = "hashbrown", version = "0.13" } -hex = { version = "0.4", features = ["serde"] } -hyper = { version = "0.14", features = ["full"] } -indexmap = { version = "2", features = ["serde"] } -inout = { version = "0.1", default-features = false, features = ["std"] } -ipnetwork = { version = "0.20", features = ["schemars"] } -itertools = { version = "0.10" } -lalrpop-util = { version = "0.19" } -lazy_static = { version = "1", default-features = false, features = ["spin_no_std"] } -libc = { version = "0.2", features = ["extra_traits"] } -log = { version = "0.4", default-features = false, features = ["std"] } -managed = { version = "0.8", default-features = false, features = ["alloc", "map"] } -memchr = { version = "2" } -num-bigint = { version = "0.4", features = ["rand"] } -num-integer = { version = "0.1", features = ["i128"] } -num-iter = { version = "0.1", default-features = false, features = ["i128"] } -num-traits = { version = "0.2", features = ["i128", "libm"] } -openapiv3 = { version = "1", default-features = false, features = ["skip_serializing_defaults"] } -petgraph = { version = "0.6", features = ["serde-1"] } -postgres-types = { version = "0.2", default-features = false, features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } -ppv-lite86 = { version = "0.2", default-features = false, features = ["simd", "std"] } -predicates = { version = "3" } -rand = { version 
= "0.8", features = ["min_const_gen"] } -rand_chacha = { version = "0.3" } -regex = { version = "1" } -regex-automata = { version = "0.3", default-features = false, features = ["dfa-onepass", "dfa-search", "hybrid", "meta", "nfa-backtrack", "perf-inline", "perf-literal", "unicode"] } -regex-syntax = { version = "0.7" } -reqwest = { version = "0.11", features = ["blocking", "json", "rustls-tls", "stream"] } -ring = { version = "0.16", features = ["std"] } -schemars = { version = "0.8", features = ["bytes", "chrono", "uuid1"] } -semver = { version = "1", features = ["serde"] } -serde = { version = "1", features = ["alloc", "derive", "rc"] } -sha2 = { version = "0.10", features = ["oid"] } -signature = { version = "2", default-features = false, features = ["digest", "rand_core", "std"] } -similar = { version = "2", features = ["inline", "unicode"] } -slog = { version = "2", features = ["dynamic-keys", "max_level_trace", "release_max_level_debug", "release_max_level_trace"] } -spin = { version = "0.9" } -string_cache = { version = "0.8" } -subtle = { version = "2" } -syn-dff4ba8e3ae991db = { package = "syn", version = "1", features = ["extra-traits", "fold", "full", "visit"] } -syn-f595c2ba2a3f28df = { package = "syn", version = "2", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } -textwrap = { version = "0.16" } -time = { version = "0.3", features = ["formatting", "local-offset", "macros", "parsing"] } -tokio = { version = "1", features = ["full", "test-util"] } -tokio-postgres = { version = "0.7", features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } -tokio-stream = { version = "0.1", features = ["net"] } -toml = { version = "0.7" } -toml_datetime = { version = "0.6", default-features = false, features = ["serde"] } -toml_edit = { version = "0.19", features = ["serde"] } -tracing = { version = "0.1", features = ["log"] } -trust-dns-proto = { version = "0.22" } -unicode-bidi = { version = "0.3" } -unicode-normalization = { version = 
"0.1" } -usdt = { version = "0.3" } -uuid = { version = "1", features = ["serde", "v4"] } -yasna = { version = "0.5", features = ["bit-vec", "num-bigint", "std", "time"] } -zeroize = { version = "1", features = ["std", "zeroize_derive"] } -zip = { version = "0.6", default-features = false, features = ["bzip2", "deflate"] } +generic-array = { version = "0.14.7", default-features = false, features = ["more_lengths", "zeroize"] } +getrandom = { version = "0.2.10", default-features = false, features = ["js", "rdrand", "std"] } +hashbrown-582f2526e08bb6a0 = { package = "hashbrown", version = "0.14.0", features = ["raw"] } +hashbrown-594e8ee84c453af0 = { package = "hashbrown", version = "0.13.2" } +hex = { version = "0.4.3", features = ["serde"] } +hyper = { version = "0.14.27", features = ["full"] } +indexmap = { version = "2.0.0", features = ["serde"] } +inout = { version = "0.1.3", default-features = false, features = ["std"] } +ipnetwork = { version = "0.20.0", features = ["schemars"] } +itertools = { version = "0.10.5" } +lalrpop-util = { version = "0.19.12" } +lazy_static = { version = "1.4.0", default-features = false, features = ["spin_no_std"] } +libc = { version = "0.2.148", features = ["extra_traits"] } +log = { version = "0.4.20", default-features = false, features = ["std"] } +managed = { version = "0.8.0", default-features = false, features = ["alloc", "map"] } +memchr = { version = "2.6.3" } +num-bigint = { version = "0.4.4", features = ["rand"] } +num-integer = { version = "0.1.45", features = ["i128"] } +num-iter = { version = "0.1.43", default-features = false, features = ["i128"] } +num-traits = { version = "0.2.16", features = ["i128", "libm"] } +openapiv3 = { version = "1.0.3", default-features = false, features = ["skip_serializing_defaults"] } +petgraph = { version = "0.6.4", features = ["serde-1"] } +postgres-types = { version = "0.2.6", default-features = false, features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } +ppv-lite86 = { 
version = "0.2.17", default-features = false, features = ["simd", "std"] } +predicates = { version = "3.0.3" } +rand = { version = "0.8.5", features = ["min_const_gen"] } +rand_chacha = { version = "0.3.1" } +regex = { version = "1.9.5" } +regex-automata = { version = "0.3.8", default-features = false, features = ["dfa-onepass", "dfa-search", "hybrid", "meta", "nfa-backtrack", "perf-inline", "perf-literal", "unicode"] } +regex-syntax = { version = "0.7.5" } +reqwest = { version = "0.11.20", features = ["blocking", "json", "rustls-tls", "stream"] } +ring = { version = "0.16.20", features = ["std"] } +schemars = { version = "0.8.13", features = ["bytes", "chrono", "uuid1"] } +semver = { version = "1.0.18", features = ["serde"] } +serde = { version = "1.0.188", features = ["alloc", "derive", "rc"] } +sha2 = { version = "0.10.7", features = ["oid"] } +signature = { version = "2.1.0", default-features = false, features = ["digest", "rand_core", "std"] } +similar = { version = "2.2.1", features = ["inline", "unicode"] } +slog = { version = "2.7.0", features = ["dynamic-keys", "max_level_trace", "release_max_level_debug", "release_max_level_trace"] } +spin = { version = "0.9.8" } +string_cache = { version = "0.8.7" } +subtle = { version = "2.5.0" } +syn-dff4ba8e3ae991db = { package = "syn", version = "1.0.109", features = ["extra-traits", "fold", "full", "visit"] } +syn-f595c2ba2a3f28df = { package = "syn", version = "2.0.32", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } +textwrap = { version = "0.16.0" } +time = { version = "0.3.27", features = ["formatting", "local-offset", "macros", "parsing"] } +tokio = { version = "1.32.0", features = ["full", "test-util"] } +tokio-postgres = { version = "0.7.10", features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } +tokio-stream = { version = "0.1.14", features = ["net"] } +toml = { version = "0.7.8" } +toml_datetime = { version = "0.6.3", default-features = false, features = ["serde"] } 
+toml_edit = { version = "0.19.15", features = ["serde"] } +tracing = { version = "0.1.37", features = ["log"] } +trust-dns-proto = { version = "0.22.0" } +unicode-bidi = { version = "0.3.13" } +unicode-normalization = { version = "0.1.22" } +usdt = { version = "0.3.5" } +uuid = { version = "1.4.1", features = ["serde", "v4"] } +yasna = { version = "0.5.2", features = ["bit-vec", "num-bigint", "std", "time"] } +zeroize = { version = "1.6.0", features = ["std", "zeroize_derive"] } +zip = { version = "0.6.6", default-features = false, features = ["bzip2", "deflate"] } [build-dependencies] -anyhow = { version = "1", features = ["backtrace"] } -bit-set = { version = "0.5" } -bit-vec = { version = "0.6" } -bitflags-dff4ba8e3ae991db = { package = "bitflags", version = "1" } -bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2", default-features = false, features = ["serde"] } -bitvec = { version = "1" } -bstr-6f8ce4dd05d13bba = { package = "bstr", version = "0.2" } -bstr-dff4ba8e3ae991db = { package = "bstr", version = "1" } -bytes = { version = "1", features = ["serde"] } -cc = { version = "1", default-features = false, features = ["parallel"] } -chrono = { version = "0.4", features = ["alloc", "serde"] } -cipher = { version = "0.4", default-features = false, features = ["block-padding", "zeroize"] } -clap = { version = "4", features = ["derive", "env", "wrap_help"] } -clap_builder = { version = "4", default-features = false, features = ["color", "env", "std", "suggestions", "usage", "wrap_help"] } -console = { version = "0.15" } -const-oid = { version = "0.9", default-features = false, features = ["db", "std"] } -crossbeam-epoch = { version = "0.9" } -crossbeam-utils = { version = "0.8" } -crypto-common = { version = "0.1", default-features = false, features = ["getrandom", "std"] } -diesel = { version = "2", features = ["chrono", "i-implement-a-third-party-backend-and-opt-into-breaking-changes", "network-address", "postgres", "r2d2", "serde_json", "uuid"] 
} -digest = { version = "0.10", features = ["mac", "oid", "std"] } -either = { version = "1" } -flate2 = { version = "1" } -futures = { version = "0.3" } -futures-channel = { version = "0.3", features = ["sink"] } -futures-core = { version = "0.3" } -futures-io = { version = "0.3", default-features = false, features = ["std"] } -futures-sink = { version = "0.3" } -futures-task = { version = "0.3", default-features = false, features = ["std"] } -futures-util = { version = "0.3", features = ["channel", "io", "sink"] } +anyhow = { version = "1.0.75", features = ["backtrace"] } +bit-set = { version = "0.5.3" } +bit-vec = { version = "0.6.3" } +bitflags-dff4ba8e3ae991db = { package = "bitflags", version = "1.3.2" } +bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["serde"] } +bitvec = { version = "1.0.1" } +bstr-6f8ce4dd05d13bba = { package = "bstr", version = "0.2.17" } +bstr-dff4ba8e3ae991db = { package = "bstr", version = "1.6.0" } +bytes = { version = "1.5.0", features = ["serde"] } +cc = { version = "1.0.83", default-features = false, features = ["parallel"] } +chrono = { version = "0.4.31", features = ["alloc", "serde"] } +cipher = { version = "0.4.4", default-features = false, features = ["block-padding", "zeroize"] } +clap = { version = "4.4.3", features = ["derive", "env", "wrap_help"] } +clap_builder = { version = "4.4.2", default-features = false, features = ["color", "env", "std", "suggestions", "usage", "wrap_help"] } +console = { version = "0.15.7" } +const-oid = { version = "0.9.5", default-features = false, features = ["db", "std"] } +crossbeam-epoch = { version = "0.9.15" } +crossbeam-utils = { version = "0.8.16" } +crypto-common = { version = "0.1.6", default-features = false, features = ["getrandom", "std"] } +diesel = { version = "2.1.1", features = ["chrono", "i-implement-a-third-party-backend-and-opt-into-breaking-changes", "network-address", "postgres", "r2d2", "serde_json", "uuid"] } 
+digest = { version = "0.10.7", features = ["mac", "oid", "std"] } +either = { version = "1.9.0" } +flate2 = { version = "1.0.27" } +futures = { version = "0.3.28" } +futures-channel = { version = "0.3.28", features = ["sink"] } +futures-core = { version = "0.3.28" } +futures-io = { version = "0.3.28", default-features = false, features = ["std"] } +futures-sink = { version = "0.3.28" } +futures-task = { version = "0.3.28", default-features = false, features = ["std"] } +futures-util = { version = "0.3.28", features = ["channel", "io", "sink"] } gateway-messages = { git = "https://github.com/oxidecomputer/management-gateway-service", rev = "1e180ae55e56bd17af35cb868ffbd18ce487351d", features = ["std"] } -generic-array = { version = "0.14", default-features = false, features = ["more_lengths", "zeroize"] } -getrandom = { version = "0.2", default-features = false, features = ["js", "rdrand", "std"] } -hashbrown-582f2526e08bb6a0 = { package = "hashbrown", version = "0.14", features = ["raw"] } -hashbrown-594e8ee84c453af0 = { package = "hashbrown", version = "0.13" } -hex = { version = "0.4", features = ["serde"] } -hyper = { version = "0.14", features = ["full"] } -indexmap = { version = "2", features = ["serde"] } -inout = { version = "0.1", default-features = false, features = ["std"] } -ipnetwork = { version = "0.20", features = ["schemars"] } -itertools = { version = "0.10" } -lalrpop-util = { version = "0.19" } -lazy_static = { version = "1", default-features = false, features = ["spin_no_std"] } -libc = { version = "0.2", features = ["extra_traits"] } -log = { version = "0.4", default-features = false, features = ["std"] } -managed = { version = "0.8", default-features = false, features = ["alloc", "map"] } -memchr = { version = "2" } -num-bigint = { version = "0.4", features = ["rand"] } -num-integer = { version = "0.1", features = ["i128"] } -num-iter = { version = "0.1", default-features = false, features = ["i128"] } -num-traits = { version = "0.2", features 
= ["i128", "libm"] } -openapiv3 = { version = "1", default-features = false, features = ["skip_serializing_defaults"] } -petgraph = { version = "0.6", features = ["serde-1"] } -postgres-types = { version = "0.2", default-features = false, features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } -ppv-lite86 = { version = "0.2", default-features = false, features = ["simd", "std"] } -predicates = { version = "3" } -rand = { version = "0.8", features = ["min_const_gen"] } -rand_chacha = { version = "0.3" } -regex = { version = "1" } -regex-automata = { version = "0.3", default-features = false, features = ["dfa-onepass", "dfa-search", "hybrid", "meta", "nfa-backtrack", "perf-inline", "perf-literal", "unicode"] } -regex-syntax = { version = "0.7" } -reqwest = { version = "0.11", features = ["blocking", "json", "rustls-tls", "stream"] } -ring = { version = "0.16", features = ["std"] } -schemars = { version = "0.8", features = ["bytes", "chrono", "uuid1"] } -semver = { version = "1", features = ["serde"] } -serde = { version = "1", features = ["alloc", "derive", "rc"] } -sha2 = { version = "0.10", features = ["oid"] } -signature = { version = "2", default-features = false, features = ["digest", "rand_core", "std"] } -similar = { version = "2", features = ["inline", "unicode"] } -slog = { version = "2", features = ["dynamic-keys", "max_level_trace", "release_max_level_debug", "release_max_level_trace"] } -spin = { version = "0.9" } -string_cache = { version = "0.8" } -subtle = { version = "2" } -syn-dff4ba8e3ae991db = { package = "syn", version = "1", features = ["extra-traits", "fold", "full", "visit"] } -syn-f595c2ba2a3f28df = { package = "syn", version = "2", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } -textwrap = { version = "0.16" } -time = { version = "0.3", features = ["formatting", "local-offset", "macros", "parsing"] } -time-macros = { version = "0.2", default-features = false, features = ["formatting", "parsing"] } -tokio = { 
version = "1", features = ["full", "test-util"] } -tokio-postgres = { version = "0.7", features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } -tokio-stream = { version = "0.1", features = ["net"] } -toml = { version = "0.7" } -toml_datetime = { version = "0.6", default-features = false, features = ["serde"] } -toml_edit = { version = "0.19", features = ["serde"] } -tracing = { version = "0.1", features = ["log"] } -trust-dns-proto = { version = "0.22" } -unicode-bidi = { version = "0.3" } -unicode-normalization = { version = "0.1" } -unicode-xid = { version = "0.2" } -usdt = { version = "0.3" } -uuid = { version = "1", features = ["serde", "v4"] } -yasna = { version = "0.5", features = ["bit-vec", "num-bigint", "std", "time"] } -zeroize = { version = "1", features = ["std", "zeroize_derive"] } -zip = { version = "0.6", default-features = false, features = ["bzip2", "deflate"] } +generic-array = { version = "0.14.7", default-features = false, features = ["more_lengths", "zeroize"] } +getrandom = { version = "0.2.10", default-features = false, features = ["js", "rdrand", "std"] } +hashbrown-582f2526e08bb6a0 = { package = "hashbrown", version = "0.14.0", features = ["raw"] } +hashbrown-594e8ee84c453af0 = { package = "hashbrown", version = "0.13.2" } +hex = { version = "0.4.3", features = ["serde"] } +hyper = { version = "0.14.27", features = ["full"] } +indexmap = { version = "2.0.0", features = ["serde"] } +inout = { version = "0.1.3", default-features = false, features = ["std"] } +ipnetwork = { version = "0.20.0", features = ["schemars"] } +itertools = { version = "0.10.5" } +lalrpop-util = { version = "0.19.12" } +lazy_static = { version = "1.4.0", default-features = false, features = ["spin_no_std"] } +libc = { version = "0.2.148", features = ["extra_traits"] } +log = { version = "0.4.20", default-features = false, features = ["std"] } +managed = { version = "0.8.0", default-features = false, features = ["alloc", "map"] } +memchr = { version = 
"2.6.3" } +num-bigint = { version = "0.4.4", features = ["rand"] } +num-integer = { version = "0.1.45", features = ["i128"] } +num-iter = { version = "0.1.43", default-features = false, features = ["i128"] } +num-traits = { version = "0.2.16", features = ["i128", "libm"] } +openapiv3 = { version = "1.0.3", default-features = false, features = ["skip_serializing_defaults"] } +petgraph = { version = "0.6.4", features = ["serde-1"] } +postgres-types = { version = "0.2.6", default-features = false, features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } +ppv-lite86 = { version = "0.2.17", default-features = false, features = ["simd", "std"] } +predicates = { version = "3.0.3" } +rand = { version = "0.8.5", features = ["min_const_gen"] } +rand_chacha = { version = "0.3.1" } +regex = { version = "1.9.5" } +regex-automata = { version = "0.3.8", default-features = false, features = ["dfa-onepass", "dfa-search", "hybrid", "meta", "nfa-backtrack", "perf-inline", "perf-literal", "unicode"] } +regex-syntax = { version = "0.7.5" } +reqwest = { version = "0.11.20", features = ["blocking", "json", "rustls-tls", "stream"] } +ring = { version = "0.16.20", features = ["std"] } +schemars = { version = "0.8.13", features = ["bytes", "chrono", "uuid1"] } +semver = { version = "1.0.18", features = ["serde"] } +serde = { version = "1.0.188", features = ["alloc", "derive", "rc"] } +sha2 = { version = "0.10.7", features = ["oid"] } +signature = { version = "2.1.0", default-features = false, features = ["digest", "rand_core", "std"] } +similar = { version = "2.2.1", features = ["inline", "unicode"] } +slog = { version = "2.7.0", features = ["dynamic-keys", "max_level_trace", "release_max_level_debug", "release_max_level_trace"] } +spin = { version = "0.9.8" } +string_cache = { version = "0.8.7" } +subtle = { version = "2.5.0" } +syn-dff4ba8e3ae991db = { package = "syn", version = "1.0.109", features = ["extra-traits", "fold", "full", "visit"] } +syn-f595c2ba2a3f28df = { package 
= "syn", version = "2.0.32", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } +textwrap = { version = "0.16.0" } +time = { version = "0.3.27", features = ["formatting", "local-offset", "macros", "parsing"] } +time-macros = { version = "0.2.13", default-features = false, features = ["formatting", "parsing"] } +tokio = { version = "1.32.0", features = ["full", "test-util"] } +tokio-postgres = { version = "0.7.10", features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } +tokio-stream = { version = "0.1.14", features = ["net"] } +toml = { version = "0.7.8" } +toml_datetime = { version = "0.6.3", default-features = false, features = ["serde"] } +toml_edit = { version = "0.19.15", features = ["serde"] } +tracing = { version = "0.1.37", features = ["log"] } +trust-dns-proto = { version = "0.22.0" } +unicode-bidi = { version = "0.3.13" } +unicode-normalization = { version = "0.1.22" } +unicode-xid = { version = "0.2.4" } +usdt = { version = "0.3.5" } +uuid = { version = "1.4.1", features = ["serde", "v4"] } +yasna = { version = "0.5.2", features = ["bit-vec", "num-bigint", "std", "time"] } +zeroize = { version = "1.6.0", features = ["std", "zeroize_derive"] } +zip = { version = "0.6.6", default-features = false, features = ["bzip2", "deflate"] } [target.x86_64-unknown-linux-gnu.dependencies] -bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2", default-features = false, features = ["std"] } -hyper-rustls = { version = "0.24" } -mio = { version = "0.8", features = ["net", "os-ext"] } -once_cell = { version = "1", features = ["unstable"] } -rustix = { version = "0.38", features = ["fs", "termios"] } +bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } +hyper-rustls = { version = "0.24.1" } +mio = { version = "0.8.8", features = ["net", "os-ext"] } +once_cell = { version = "1.18.0", features = ["unstable"] } +rustix = { version = "0.38.9", features = ["fs", 
"termios"] } [target.x86_64-unknown-linux-gnu.build-dependencies] -bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2", default-features = false, features = ["std"] } -hyper-rustls = { version = "0.24" } -mio = { version = "0.8", features = ["net", "os-ext"] } -once_cell = { version = "1", features = ["unstable"] } -rustix = { version = "0.38", features = ["fs", "termios"] } +bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } +hyper-rustls = { version = "0.24.1" } +mio = { version = "0.8.8", features = ["net", "os-ext"] } +once_cell = { version = "1.18.0", features = ["unstable"] } +rustix = { version = "0.38.9", features = ["fs", "termios"] } [target.x86_64-apple-darwin.dependencies] -bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2", default-features = false, features = ["std"] } -hyper-rustls = { version = "0.24" } -mio = { version = "0.8", features = ["net", "os-ext"] } -once_cell = { version = "1", features = ["unstable"] } -rustix = { version = "0.38", features = ["fs", "termios"] } +bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } +hyper-rustls = { version = "0.24.1" } +mio = { version = "0.8.8", features = ["net", "os-ext"] } +once_cell = { version = "1.18.0", features = ["unstable"] } +rustix = { version = "0.38.9", features = ["fs", "termios"] } [target.x86_64-apple-darwin.build-dependencies] -bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2", default-features = false, features = ["std"] } -hyper-rustls = { version = "0.24" } -mio = { version = "0.8", features = ["net", "os-ext"] } -once_cell = { version = "1", features = ["unstable"] } -rustix = { version = "0.38", features = ["fs", "termios"] } +bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } +hyper-rustls = { version = "0.24.1" } +mio = { version = 
"0.8.8", features = ["net", "os-ext"] } +once_cell = { version = "1.18.0", features = ["unstable"] } +rustix = { version = "0.38.9", features = ["fs", "termios"] } [target.aarch64-apple-darwin.dependencies] -bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2", default-features = false, features = ["std"] } -hyper-rustls = { version = "0.24" } -mio = { version = "0.8", features = ["net", "os-ext"] } -once_cell = { version = "1", features = ["unstable"] } -rustix = { version = "0.38", features = ["fs", "termios"] } +bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } +hyper-rustls = { version = "0.24.1" } +mio = { version = "0.8.8", features = ["net", "os-ext"] } +once_cell = { version = "1.18.0", features = ["unstable"] } +rustix = { version = "0.38.9", features = ["fs", "termios"] } [target.aarch64-apple-darwin.build-dependencies] -bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2", default-features = false, features = ["std"] } -hyper-rustls = { version = "0.24" } -mio = { version = "0.8", features = ["net", "os-ext"] } -once_cell = { version = "1", features = ["unstable"] } -rustix = { version = "0.38", features = ["fs", "termios"] } +bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } +hyper-rustls = { version = "0.24.1" } +mio = { version = "0.8.8", features = ["net", "os-ext"] } +once_cell = { version = "1.18.0", features = ["unstable"] } +rustix = { version = "0.38.9", features = ["fs", "termios"] } [target.x86_64-unknown-illumos.dependencies] -bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2", default-features = false, features = ["std"] } -hyper-rustls = { version = "0.24" } -mio = { version = "0.8", features = ["net", "os-ext"] } -once_cell = { version = "1", features = ["unstable"] } -rustix = { version = "0.38", features = ["fs", "termios"] } +bitflags-f595c2ba2a3f28df = { package 
= "bitflags", version = "2.4.0", default-features = false, features = ["std"] } +hyper-rustls = { version = "0.24.1" } +mio = { version = "0.8.8", features = ["net", "os-ext"] } +once_cell = { version = "1.18.0", features = ["unstable"] } +rustix = { version = "0.38.9", features = ["fs", "termios"] } [target.x86_64-unknown-illumos.build-dependencies] -bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2", default-features = false, features = ["std"] } -hyper-rustls = { version = "0.24" } -mio = { version = "0.8", features = ["net", "os-ext"] } -once_cell = { version = "1", features = ["unstable"] } -rustix = { version = "0.38", features = ["fs", "termios"] } +bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } +hyper-rustls = { version = "0.24.1" } +mio = { version = "0.8.8", features = ["net", "os-ext"] } +once_cell = { version = "1.18.0", features = ["unstable"] } +rustix = { version = "0.38.9", features = ["fs", "termios"] } ### END HAKARI SECTION From e86579ce0f16021d2bbb84be4e97a8d24b61acb3 Mon Sep 17 00:00:00 2001 From: bnaecker Date: Mon, 2 Oct 2023 14:06:05 -0700 Subject: [PATCH 10/35] Fix path checks for archived log files (#4161) - Fixes #4160. - Checks for archived log files were wrong. This used the SMF FMRI, rather than the derived log filename, which translates slashes into dashes. This separates checks for Oxide-managed FMRIs and the log files for those. - Use the _log file_ check when looking for archived files. - Add tests for both checks and the method for finding archived log files for a service. 
--- Cargo.lock | 1 + illumos-utils/src/running_zone.rs | 55 +++++++++++++++++++++---- sled-agent/Cargo.toml | 1 + sled-agent/src/zone_bundle.rs | 67 +++++++++++++++++++++++++++++-- 4 files changed, 113 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b7296ea184..3a45dcb381 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5299,6 +5299,7 @@ dependencies = [ "static_assertions", "subprocess", "tar", + "tempfile", "thiserror", "tofino", "tokio", diff --git a/illumos-utils/src/running_zone.rs b/illumos-utils/src/running_zone.rs index 4d3481b6c3..734f22bd30 100644 --- a/illumos-utils/src/running_zone.rs +++ b/illumos-utils/src/running_zone.rs @@ -987,7 +987,7 @@ impl RunningZone { let output = self.run_cmd(&["svcs", "-H", "-o", "fmri"])?; Ok(output .lines() - .filter(|line| is_oxide_smf_log_file(line)) + .filter(|line| is_oxide_smf_service(line)) .map(|line| line.trim().to_string()) .collect()) } @@ -1267,10 +1267,51 @@ impl InstalledZone { } } -/// Return true if the named file appears to be a log file for an Oxide SMF -/// service. -pub fn is_oxide_smf_log_file(name: impl AsRef) -> bool { - const SMF_SERVICE_PREFIXES: [&str; 2] = ["/oxide", "/system/illumos"]; - let name = name.as_ref(); - SMF_SERVICE_PREFIXES.iter().any(|needle| name.contains(needle)) +/// Return true if the service with the given FMRI appears to be an +/// Oxide-managed service. +pub fn is_oxide_smf_service(fmri: impl AsRef) -> bool { + const SMF_SERVICE_PREFIXES: [&str; 2] = + ["svc:/oxide/", "svc:/system/illumos/"]; + let fmri = fmri.as_ref(); + SMF_SERVICE_PREFIXES.iter().any(|prefix| fmri.starts_with(prefix)) +} + +/// Return true if the provided file name appears to be a valid log file for an +/// Oxide-managed SMF service. +/// +/// Note that this operates on the _file name_. Any leading path components will +/// cause this check to return `false`. 
+pub fn is_oxide_smf_log_file(filename: impl AsRef) -> bool { + // Log files are named by the SMF services, with the `/` in the FMRI + // translated to a `-`. + const PREFIXES: [&str; 2] = ["oxide-", "system-illumos-"]; + let filename = filename.as_ref(); + PREFIXES + .iter() + .any(|prefix| filename.starts_with(prefix) && filename.contains(".log")) +} + +#[cfg(test)] +mod tests { + use super::is_oxide_smf_log_file; + use super::is_oxide_smf_service; + + #[test] + fn test_is_oxide_smf_service() { + assert!(is_oxide_smf_service("svc:/oxide/blah:default")); + assert!(is_oxide_smf_service("svc:/system/illumos/blah:default")); + assert!(!is_oxide_smf_service("svc:/system/blah:default")); + assert!(!is_oxide_smf_service("svc:/not/oxide/blah:default")); + } + + #[test] + fn test_is_oxide_smf_log_file() { + assert!(is_oxide_smf_log_file("oxide-blah:default.log")); + assert!(is_oxide_smf_log_file("oxide-blah:default.log.0")); + assert!(is_oxide_smf_log_file("oxide-blah:default.log.1111")); + assert!(is_oxide_smf_log_file("system-illumos-blah:default.log")); + assert!(is_oxide_smf_log_file("system-illumos-blah:default.log.0")); + assert!(!is_oxide_smf_log_file("not-oxide-blah:default.log")); + assert!(!is_oxide_smf_log_file("not-system-illumos-blah:default.log")); + } } diff --git a/sled-agent/Cargo.toml b/sled-agent/Cargo.toml index b131698395..88e51a3bc3 100644 --- a/sled-agent/Cargo.toml +++ b/sled-agent/Cargo.toml @@ -95,6 +95,7 @@ serial_test.workspace = true subprocess.workspace = true slog-async.workspace = true slog-term.workspace = true +tempfile.workspace = true illumos-utils = { workspace = true, features = ["testing"] } diff --git a/sled-agent/src/zone_bundle.rs b/sled-agent/src/zone_bundle.rs index 2eeb8ebe7d..4c2d6a4113 100644 --- a/sled-agent/src/zone_bundle.rs +++ b/sled-agent/src/zone_bundle.rs @@ -899,9 +899,9 @@ async fn find_archived_log_files( continue; }; let fname = path.file_name().unwrap(); - if is_oxide_smf_log_file(fname) - && 
fname.contains(svc_name) - { + let is_oxide = is_oxide_smf_log_file(fname); + let contains = fname.contains(svc_name); + if is_oxide && contains { debug!( log, "found archived log file"; @@ -910,6 +910,14 @@ async fn find_archived_log_files( "path" => ?path, ); files.push(path); + } else { + debug!( + log, + "skipping non-matching log file"; + "filename" => fname, + "is_oxide_smf_log_file" => is_oxide, + "contains_svc_name" => contains, + ); } } Err(e) => { @@ -1764,6 +1772,7 @@ mod tests { #[cfg(all(target_os = "illumos", test))] mod illumos_tests { + use super::find_archived_log_files; use super::zfs_quota; use super::CleanupContext; use super::CleanupPeriod; @@ -1852,12 +1861,17 @@ mod illumos_tests { } } - async fn setup_fake_cleanup_task() -> anyhow::Result { + fn test_logger() -> Logger { let dec = slog_term::PlainSyncDecorator::new(slog_term::TestStdoutWriter); let drain = slog_term::FullFormat::new(dec).build().fuse(); let log = Logger::root(drain, slog::o!("component" => "fake-cleanup-task")); + log + } + + async fn setup_fake_cleanup_task() -> anyhow::Result { + let log = test_logger(); let context = CleanupContext::default(); let resource_wrapper = ResourceWrapper::new().await; let bundler = @@ -2279,4 +2293,49 @@ mod illumos_tests { let bytes = tokio::fs::metadata(&path).await?.len(); Ok(ZoneBundleInfo { metadata, path, bytes }) } + + #[tokio::test] + async fn test_find_archived_log_files() { + let log = test_logger(); + let tmpdir = tempfile::tempdir().expect("Failed to make tempdir"); + + let mut should_match = [ + "oxide-foo:default.log", + "oxide-foo:default.log.1000", + "system-illumos-foo:default.log", + "system-illumos-foo:default.log.100", + ]; + let should_not_match = [ + "oxide-foo:default", + "not-oxide-foo:default.log.1000", + "system-illumos-foo", + "not-system-illumos-foo:default.log.100", + ]; + for name in should_match.iter().chain(should_not_match.iter()) { + let path = tmpdir.path().join(name); + tokio::fs::File::create(path) + .await 
+ .expect("failed to create dummy file"); + } + + let path = + Utf8PathBuf::try_from(tmpdir.path().as_os_str().to_str().unwrap()) + .unwrap(); + let mut files = find_archived_log_files( + &log, + "zone-name", // unused here, for logging only + "foo", + &[path], + ) + .await; + + // Sort everything to compare correctly. + should_match.sort(); + files.sort(); + assert_eq!(files.len(), should_match.len()); + assert!(files + .iter() + .zip(should_match.iter()) + .all(|(file, name)| { file.file_name().unwrap() == *name })); + } } From 6bc5e6062df7bffbe0eca1465b9d116ff7849e9f Mon Sep 17 00:00:00 2001 From: artemis everfree Date: Mon, 2 Oct 2023 19:29:36 -0700 Subject: [PATCH 11/35] RandomnWithDistinctSleds region allocation strategy (#3858) PR #3650 introduced the Random region allocation strategy to allocate regions randomly across the rack. This expands on that with the addition of the RandomWithDistinctSleds region allocation strategy. This strategy is the same, but requires the 3 crucible regions be allocated on 3 different sleds to improve resiliency against a whole-sled failure. The Random strategy still exists, and does not require 3 distinct sleds. This is useful in one-sled environments such as the integration tests, and lab setups. This also fixes a shortcoming of #3650 whereby multiple datasets on a single zpool could be selected. That fix applies to both the old Random strategy and the new RandomWithDistinctSleds strategy. In the present, I have unit tests that verify the allocation behavior works correctly with cockroachdb, and we can try it out on dogfood. Adds the `-r` / `--rack-topology` command line argument to omicron-package target create. Use this to specify whether you are packaging for a single-sled or multi-sled environment. Under single-sled environments, the requirement for 3 distinct sleds is removed. 
Fixes #3702 --------- Co-authored-by: iliana etaoin --- .github/buildomat/jobs/deploy.sh | 1 + .github/buildomat/jobs/package.sh | 9 +- .github/buildomat/jobs/tuf-repo.sh | 3 +- .github/workflows/rust.yml | 2 +- common/src/nexus_config.rs | 74 ++++- docs/how-to-run.adoc | 34 ++- installinator/Cargo.toml | 1 + .../db-model/src/queries/region_allocation.rs | 22 ++ nexus/db-queries/src/db/datastore/mod.rs | 277 +++++++++++++----- nexus/db-queries/src/db/datastore/region.rs | 2 +- .../src/db/queries/region_allocation.rs | 242 +++++++++------ nexus/examples/config.toml | 11 + nexus/src/app/mod.rs | 8 + nexus/src/app/sagas/disk_create.rs | 8 +- nexus/src/app/sagas/snapshot_create.rs | 7 +- nexus/tests/config.test.toml | 5 + package-manifest.toml | 3 +- package/src/bin/omicron-package.rs | 3 +- package/src/lib.rs | 23 ++ package/src/target.rs | 31 +- sled-agent/Cargo.toml | 2 + sled-agent/src/services.rs | 3 + smf/nexus/multi-sled/config-partial.toml | 45 +++ .../{ => single-sled}/config-partial.toml | 5 + 24 files changed, 617 insertions(+), 204 deletions(-) create mode 100644 smf/nexus/multi-sled/config-partial.toml rename smf/nexus/{ => single-sled}/config-partial.toml (86%) diff --git a/.github/buildomat/jobs/deploy.sh b/.github/buildomat/jobs/deploy.sh index 5d3dd8ec39..c2579d98ea 100755 --- a/.github/buildomat/jobs/deploy.sh +++ b/.github/buildomat/jobs/deploy.sh @@ -143,6 +143,7 @@ cd /opt/oxide/work ptime -m tar xvzf /input/package/work/package.tar.gz cp /input/package/work/zones/* out/ +mv out/omicron-nexus-single-sled.tar.gz out/omicron-nexus.tar.gz mkdir tests for p in /input/ci-tools/work/end-to-end-tests/*.gz; do ptime -m gunzip < "$p" > "tests/$(basename "${p%.gz}")" diff --git a/.github/buildomat/jobs/package.sh b/.github/buildomat/jobs/package.sh index fe5d6a9b7f..64c087524e 100755 --- a/.github/buildomat/jobs/package.sh +++ b/.github/buildomat/jobs/package.sh @@ -45,7 +45,7 @@ ptime -m ./tools/ci_download_softnpu_machinery # Build the test target ptime -m 
cargo run --locked --release --bin omicron-package -- \ - -t test target create -i standard -m non-gimlet -s softnpu + -t test target create -i standard -m non-gimlet -s softnpu -r single-sled ptime -m cargo run --locked --release --bin omicron-package -- \ -t test package @@ -81,9 +81,13 @@ stamp_packages() { done } +# Keep the single-sled Nexus zone around for the deploy job. (The global zone +# build below overwrites the file.) +mv out/omicron-nexus.tar.gz out/omicron-nexus-single-sled.tar.gz + # Build necessary for the global zone ptime -m cargo run --locked --release --bin omicron-package -- \ - -t host target create -i standard -m gimlet -s asic + -t host target create -i standard -m gimlet -s asic -r multi-sled ptime -m cargo run --locked --release --bin omicron-package -- \ -t host package stamp_packages omicron-sled-agent maghemite propolis-server overlay @@ -111,6 +115,7 @@ zones=( out/external-dns.tar.gz out/internal-dns.tar.gz out/omicron-nexus.tar.gz + out/omicron-nexus-single-sled.tar.gz out/oximeter-collector.tar.gz out/propolis-server.tar.gz out/switch-*.tar.gz diff --git a/.github/buildomat/jobs/tuf-repo.sh b/.github/buildomat/jobs/tuf-repo.sh index fab6770564..e169bebff6 100644 --- a/.github/buildomat/jobs/tuf-repo.sh +++ b/.github/buildomat/jobs/tuf-repo.sh @@ -77,10 +77,11 @@ done mkdir /work/package pushd /work/package tar xf /input/package/work/package.tar.gz out package-manifest.toml target/release/omicron-package -target/release/omicron-package -t default target create -i standard -m gimlet -s asic +target/release/omicron-package -t default target create -i standard -m gimlet -s asic -r multi-sled ln -s /input/package/work/zones/* out/ rm out/switch-softnpu.tar.gz # not used when target switch=asic rm out/omicron-gateway-softnpu.tar.gz # not used when target switch=asic +rm out/omicron-nexus-single-sled.tar.gz # only used for deploy tests for zone in out/*.tar.gz; do target/release/omicron-package stamp "$(basename "${zone%.tar.gz}")" 
"$VERSION" done diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 722aacbe0f..f5cf1dc885 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -41,7 +41,7 @@ jobs: - name: Install Pre-Requisites run: ./tools/install_builder_prerequisites.sh -y - name: Set default target - run: cargo run --bin omicron-package -- -t default target create + run: cargo run --bin omicron-package -- -t default target create -r single-sled - name: Check build of deployed Omicron packages run: cargo run --bin omicron-package -- -t default check diff --git a/common/src/nexus_config.rs b/common/src/nexus_config.rs index 73ccec996c..ad62c34f92 100644 --- a/common/src/nexus_config.rs +++ b/common/src/nexus_config.rs @@ -372,6 +372,8 @@ pub struct PackageConfig { pub dendrite: HashMap, /// Background task configuration pub background_tasks: BackgroundTaskConfig, + /// Default Crucible region allocation strategy + pub default_region_allocation_strategy: RegionAllocationStrategy, } #[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] @@ -594,6 +596,9 @@ mod test { dns_external.period_secs_propagation = 7 dns_external.max_concurrent_server_updates = 8 external_endpoints.period_secs = 9 + [default_region_allocation_strategy] + type = "random" + seed = 0 "##, ) .unwrap(); @@ -677,6 +682,10 @@ mod test { period_secs: Duration::from_secs(9), } }, + default_region_allocation_strategy: + crate::nexus_config::RegionAllocationStrategy::Random { + seed: Some(0) + } }, } ); @@ -724,6 +733,8 @@ mod test { dns_external.period_secs_propagation = 7 dns_external.max_concurrent_server_updates = 8 external_endpoints.period_secs = 9 + [default_region_allocation_strategy] + type = "random" "##, ) .unwrap(); @@ -864,25 +875,31 @@ mod test { struct DummyConfig { deployment: DeploymentConfig, } - let config_path = "../smf/nexus/config-partial.toml"; - println!( - "checking {:?} with example deployment section added", - config_path - ); - let mut contents = 
std::fs::read_to_string(config_path) - .expect("failed to read Nexus SMF config file"); - contents.push_str( - "\n\n\n \ - # !! content below added by test_repo_configs_are_valid()\n\ - \n\n\n", - ); let example_deployment = toml::to_string_pretty(&DummyConfig { deployment: example_config.deployment, }) .unwrap(); - contents.push_str(&example_deployment); - let _: Config = toml::from_str(&contents) - .expect("Nexus SMF config file is not valid"); + + let nexus_config_paths = [ + "../smf/nexus/single-sled/config-partial.toml", + "../smf/nexus/multi-sled/config-partial.toml", + ]; + for config_path in nexus_config_paths { + println!( + "checking {:?} with example deployment section added", + config_path + ); + let mut contents = std::fs::read_to_string(config_path) + .expect("failed to read Nexus SMF config file"); + contents.push_str( + "\n\n\n \ + # !! content below added by test_repo_configs_are_valid()\n\ + \n\n\n", + ); + contents.push_str(&example_deployment); + let _: Config = toml::from_str(&contents) + .expect("Nexus SMF config file is not valid"); + } } #[test] @@ -894,3 +911,30 @@ mod test { ); } } + +/// Defines a strategy for choosing what physical disks to use when allocating +/// new crucible regions. +/// +/// NOTE: More strategies can - and should! - be added. +/// +/// See for a more +/// complete discussion. +/// +/// Longer-term, we should consider: +/// - Storage size + remaining free space +/// - Sled placement of datasets +/// - What sort of loads we'd like to create (even split across all disks +/// may not be preferable, especially if maintenance is expected) +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +#[serde(tag = "type", rename_all = "snake_case")] +pub enum RegionAllocationStrategy { + /// Choose disks pseudo-randomly. An optional seed may be provided to make + /// the ordering deterministic, otherwise the current time in nanoseconds + /// will be used. 
Ordering is based on sorting the output of `md5(UUID of + /// candidate dataset + seed)`. The seed does not need to come from a + /// cryptographically secure source. + Random { seed: Option }, + + /// Like Random, but ensures that each region is allocated on its own sled. + RandomWithDistinctSleds { seed: Option }, +} diff --git a/docs/how-to-run.adoc b/docs/how-to-run.adoc index 1988a42669..7539c5183f 100644 --- a/docs/how-to-run.adoc +++ b/docs/how-to-run.adoc @@ -321,10 +321,32 @@ Error: Creates a new build target, and sets it as "active" Usage: omicron-package target create [OPTIONS] Options: - -i, --image [default: standard] [possible values: standard, trampoline] - -m, --machine [possible values: gimlet, gimlet-standalone, non-gimlet] - -s, --switch [possible values: asic, stub, softnpu] - -h, --help Print help (see more with '--help') + -i, --image + [default: standard] + + Possible values: + - standard: A typical host OS image + - trampoline: A recovery host OS image, intended to bootstrap a Standard image + + -m, --machine + Possible values: + - gimlet: Use sled agent configuration for a Gimlet + - gimlet-standalone: Use sled agent configuration for a Gimlet running in isolation + - non-gimlet: Use sled agent configuration for a device emulating a Gimlet + + -s, --switch + Possible values: + - asic: Use the "real" Dendrite, that attempts to interact with the Tofino + - stub: Use a "stub" Dendrite that does not require any real hardware + - softnpu: Use a "softnpu" Dendrite that uses the SoftNPU asic emulator + + -r, --rack-topology + Possible values: + - multi-sled: Use configurations suitable for a multi-sled deployment, such as dogfood and production racks + - single-sled: Use configurations suitable for a single-sled deployment, such as CI and dev machines + + -h, --help + Print help (see a summary with '-h') ---- @@ -332,9 +354,9 @@ To set up a build target for a non-Gimlet machine with simulated (but fully func [source,console] ---- -$ cargo run 
--release --bin omicron-package -- -t default target create -i standard -m non-gimlet -s softnpu +$ cargo run --release --bin omicron-package -- -t default target create -i standard -m non-gimlet -s softnpu -r single-sled Finished release [optimized] target(s) in 0.66s - Running `target/release/omicron-package -t default target create -i standard -m non-gimlet -s softnpu` + Running `target/release/omicron-package -t default target create -i standard -m non-gimlet -s softnpu -r single-sled` Created new build target 'default' and set it as active ---- diff --git a/installinator/Cargo.toml b/installinator/Cargo.toml index 3b2f04c38f..428ea0d08e 100644 --- a/installinator/Cargo.toml +++ b/installinator/Cargo.toml @@ -57,3 +57,4 @@ tokio-stream.workspace = true [features] image-standard = [] image-trampoline = [] +rack-topology-single-sled = [] \ No newline at end of file diff --git a/nexus/db-model/src/queries/region_allocation.rs b/nexus/db-model/src/queries/region_allocation.rs index 43fac3c9a6..2025e79fb8 100644 --- a/nexus/db-model/src/queries/region_allocation.rs +++ b/nexus/db-model/src/queries/region_allocation.rs @@ -47,6 +47,13 @@ table! { } } +table! { + shuffled_candidate_datasets { + id -> Uuid, + pool_id -> Uuid, + } +} + table! { candidate_regions { id -> Uuid, @@ -89,6 +96,19 @@ table! { } } +table! { + one_zpool_per_sled (pool_id) { + pool_id -> Uuid + } +} + +table! { + one_dataset_per_zpool { + id -> Uuid, + pool_id -> Uuid + } +} + table! 
{ inserted_regions { id -> Uuid, @@ -141,6 +161,7 @@ diesel::allow_tables_to_appear_in_same_query!( ); diesel::allow_tables_to_appear_in_same_query!(old_regions, dataset,); +diesel::allow_tables_to_appear_in_same_query!(old_regions, zpool,); diesel::allow_tables_to_appear_in_same_query!( inserted_regions, @@ -149,6 +170,7 @@ diesel::allow_tables_to_appear_in_same_query!( diesel::allow_tables_to_appear_in_same_query!(candidate_zpools, dataset,); diesel::allow_tables_to_appear_in_same_query!(candidate_zpools, zpool,); +diesel::allow_tables_to_appear_in_same_query!(candidate_datasets, dataset); // == Needed for random region allocation == diff --git a/nexus/db-queries/src/db/datastore/mod.rs b/nexus/db-queries/src/db/datastore/mod.rs index ff1df710bb..b1f3203c60 100644 --- a/nexus/db-queries/src/db/datastore/mod.rs +++ b/nexus/db-queries/src/db/datastore/mod.rs @@ -307,43 +307,6 @@ pub enum UpdatePrecondition { Value(T), } -/// Defines a strategy for choosing what physical disks to use when allocating -/// new crucible regions. -/// -/// NOTE: More strategies can - and should! - be added. -/// -/// See for a more -/// complete discussion. -/// -/// Longer-term, we should consider: -/// - Storage size + remaining free space -/// - Sled placement of datasets -/// - What sort of loads we'd like to create (even split across all disks -/// may not be preferable, especially if maintenance is expected) -#[derive(Debug, Clone)] -pub enum RegionAllocationStrategy { - /// Choose disks that have the least data usage in the rack. This strategy - /// can lead to bad failure states wherein the disks with the least usage - /// have the least usage because regions on them are actually failing in - /// some way. Further retried allocations will then continue to try to - /// allocate onto the disk, perpetuating the problem. 
Currently this - /// strategy only exists so we can test that using different allocation - /// strategies actually results in different allocation patterns, hence the - /// `#[cfg(test)]`. - /// - /// See https://github.com/oxidecomputer/omicron/issues/3416 for more on the - /// failure-states associated with this strategy - #[cfg(test)] - LeastUsedDisk, - - /// Choose disks pseudo-randomly. An optional seed may be provided to make - /// the ordering deterministic, otherwise the current time in nanoseconds - /// will be used. Ordering is based on sorting the output of `md5(UUID of - /// candidate dataset + seed)`. The seed does not need to come from a - /// cryptographically secure source. - Random(Option), -} - /// Constructs a DataStore for use in test suites that has preloaded the /// built-in users, roles, and role assignments that are needed for basic /// operation @@ -421,7 +384,9 @@ mod test { use omicron_common::api::external::{ self, ByteCount, Error, IdentityMetadataCreateParams, LookupType, Name, }; + use omicron_common::nexus_config::RegionAllocationStrategy; use omicron_test_utils::dev; + use std::collections::HashMap; use std::collections::HashSet; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddrV6}; use std::num::NonZeroU32; @@ -704,12 +669,18 @@ mod test { } } + struct TestDataset { + sled_id: Uuid, + dataset_id: Uuid, + } + async fn create_test_datasets_for_region_allocation( opctx: &OpContext, datastore: Arc, - ) -> Vec { + number_of_sleds: usize, + ) -> Vec { // Create sleds... 
- let sled_ids: Vec = stream::iter(0..REGION_REDUNDANCY_THRESHOLD) + let sled_ids: Vec = stream::iter(0..number_of_sleds) .then(|_| create_test_sled(&datastore)) .collect() .await; @@ -740,48 +711,69 @@ mod test { .collect() .await; + #[derive(Copy, Clone)] + struct Zpool { + sled_id: Uuid, + pool_id: Uuid, + } + // 1 pool per disk - let zpool_ids: Vec = stream::iter(physical_disks) + let zpools: Vec = stream::iter(physical_disks) .then(|disk| { - create_test_zpool(&datastore, disk.sled_id, disk.disk_id) + let pool_id_future = + create_test_zpool(&datastore, disk.sled_id, disk.disk_id); + async move { + let pool_id = pool_id_future.await; + Zpool { sled_id: disk.sled_id, pool_id } + } }) .collect() .await; let bogus_addr = SocketAddrV6::new(Ipv6Addr::LOCALHOST, 8080, 0, 0); - // 1 dataset per zpool - let dataset_ids: Vec = stream::iter(zpool_ids) - .then(|zpool_id| { - let id = Uuid::new_v4(); - let dataset = Dataset::new( - id, - zpool_id, - bogus_addr, - DatasetKind::Crucible, - ); - let datastore = datastore.clone(); - async move { - datastore.dataset_upsert(dataset).await.unwrap(); - id - } + let datasets: Vec = stream::iter(zpools) + .map(|zpool| { + // 3 datasets per zpool, to test that pools are distinct + let zpool_iter: Vec = (0..3).map(|_| zpool).collect(); + stream::iter(zpool_iter).then(|zpool| { + let id = Uuid::new_v4(); + let dataset = Dataset::new( + id, + zpool.pool_id, + bogus_addr, + DatasetKind::Crucible, + ); + + let datastore = datastore.clone(); + async move { + datastore.dataset_upsert(dataset).await.unwrap(); + + TestDataset { sled_id: zpool.sled_id, dataset_id: id } + } + }) }) + .flatten() .collect() .await; - dataset_ids + datasets } #[tokio::test] /// Note that this test is currently non-deterministic. It can be made /// deterministic by generating deterministic *dataset* Uuids. The sled and /// pool IDs should not matter. 
- async fn test_region_allocation() { - let logctx = dev::test_setup_log("test_region_allocation"); + async fn test_region_allocation_strat_random() { + let logctx = dev::test_setup_log("test_region_allocation_strat_random"); let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; - create_test_datasets_for_region_allocation(&opctx, datastore.clone()) - .await; + create_test_datasets_for_region_allocation( + &opctx, + datastore.clone(), + REGION_REDUNDANCY_THRESHOLD, + ) + .await; // Allocate regions from the datasets for this disk. Do it a few times // for good measure. @@ -799,7 +791,9 @@ mod test { volume_id, ¶ms.disk_source, params.size, - &RegionAllocationStrategy::Random(Some(alloc_seed as u128)), + &RegionAllocationStrategy::Random { + seed: Some(alloc_seed), + }, ) .await .unwrap(); @@ -809,8 +803,81 @@ mod test { let mut disk_datasets = HashSet::new(); let mut disk_zpools = HashSet::new(); - // TODO: When allocation chooses 3 distinct sleds, uncomment this. - // let mut disk1_sleds = HashSet::new(); + for (dataset, region) in dataset_and_regions { + // Must be 3 unique datasets + assert!(disk_datasets.insert(dataset.id())); + + // Must be 3 unique zpools + assert!(disk_zpools.insert(dataset.pool_id)); + + assert_eq!(volume_id, region.volume_id()); + assert_eq!(ByteCount::from(4096), region.block_size()); + let (_, extent_count) = DataStore::get_crucible_allocation( + &BlockSize::AdvancedFormat, + params.size, + ); + assert_eq!(extent_count, region.extent_count()); + } + } + + let _ = db.cleanup().await; + logctx.cleanup_successful(); + } + + #[tokio::test] + /// Test the [`RegionAllocationStrategy::RandomWithDistinctSleds`] strategy. + /// It should always pick datasets where no two datasets are on the same + /// zpool and no two zpools are on the same sled. 
+ async fn test_region_allocation_strat_random_with_distinct_sleds() { + let logctx = dev::test_setup_log( + "test_region_allocation_strat_random_with_distinct_sleds", + ); + let mut db = test_setup_database(&logctx.log).await; + let (opctx, datastore) = datastore_test(&logctx, &db).await; + + // Create a rack with enough sleds for a successful allocation when + // we require 3 distinct sleds. + let test_datasets = create_test_datasets_for_region_allocation( + &opctx, + datastore.clone(), + REGION_REDUNDANCY_THRESHOLD, + ) + .await; + + // We need to check that our datasets end up on 3 distinct sleds, but the query doesn't return the sled ID, so we need to reverse map from dataset ID to sled ID + let sled_id_map: HashMap<Uuid, Uuid> = test_datasets + .into_iter() + .map(|test_dataset| (test_dataset.dataset_id, test_dataset.sled_id)) + .collect(); + + // Allocate regions from the datasets for this disk. Do it a few times + // for good measure. + for alloc_seed in 0..10 { + let params = create_test_disk_create_params( + &format!("disk{}", alloc_seed), + ByteCount::from_mebibytes_u32(1), + ); + let volume_id = Uuid::new_v4(); + + let expected_region_count = REGION_REDUNDANCY_THRESHOLD; + let dataset_and_regions = datastore + .region_allocate( + &opctx, + volume_id, + &params.disk_source, + params.size, + &&RegionAllocationStrategy::RandomWithDistinctSleds { + seed: Some(alloc_seed), + }, + ) + .await + .unwrap(); + + // Verify the allocation. + assert_eq!(expected_region_count, dataset_and_regions.len()); + let mut disk_datasets = HashSet::new(); + let mut disk_zpools = HashSet::new(); + let mut disk_sleds = HashSet::new(); for (dataset, region) in dataset_and_regions { // Must be 3 unique datasets assert!(disk_datasets.insert(dataset.id())); @@ -819,8 +886,8 @@ mod test { assert!(disk_zpools.insert(dataset.pool_id)); // Must be 3 unique sleds - // TODO: When allocation chooses 3 distinct sleds, uncomment this.
- // assert!(disk1_sleds.insert(Err(dataset))); + let sled_id = sled_id_map.get(&dataset.id()).unwrap(); + assert!(disk_sleds.insert(*sled_id)); assert_eq!(volume_id, region.volume_id()); assert_eq!(ByteCount::from(4096), region.block_size()); @@ -836,14 +903,72 @@ mod test { logctx.cleanup_successful(); } + #[tokio::test] + /// Ensure the [`RegionAllocationStrategy::RandomWithDistinctSleds`] + /// strategy fails when there aren't enough distinct sleds. + async fn test_region_allocation_strat_random_with_distinct_sleds_fails() { + let logctx = dev::test_setup_log( + "test_region_allocation_strat_random_with_distinct_sleds_fails", + ); + let mut db = test_setup_database(&logctx.log).await; + let (opctx, datastore) = datastore_test(&logctx, &db).await; + + // Create a rack without enough sleds for a successful allocation when + // we require 3 distinct sleds. + create_test_datasets_for_region_allocation( + &opctx, + datastore.clone(), + REGION_REDUNDANCY_THRESHOLD - 1, + ) + .await; + + // Allocate regions from the datasets for this disk. Do it a few times + // for good measure. + for alloc_seed in 0..10 { + let params = create_test_disk_create_params( + &format!("disk{}", alloc_seed), + ByteCount::from_mebibytes_u32(1), + ); + let volume_id = Uuid::new_v4(); + + let err = datastore + .region_allocate( + &opctx, + volume_id, + ¶ms.disk_source, + params.size, + &&RegionAllocationStrategy::RandomWithDistinctSleds { + seed: Some(alloc_seed), + }, + ) + .await + .unwrap_err(); + + let expected = "Not enough zpool space to allocate disks"; + assert!( + err.to_string().contains(expected), + "Saw error: \'{err}\', but expected \'{expected}\'" + ); + + assert!(matches!(err, Error::ServiceUnavailable { .. 
})); + } + + let _ = db.cleanup().await; + logctx.cleanup_successful(); + } + #[tokio::test] async fn test_region_allocation_is_idempotent() { let logctx = dev::test_setup_log("test_region_allocation_is_idempotent"); let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; - create_test_datasets_for_region_allocation(&opctx, datastore.clone()) - .await; + create_test_datasets_for_region_allocation( + &opctx, + datastore.clone(), + REGION_REDUNDANCY_THRESHOLD, + ) + .await; // Allocate regions from the datasets for this volume. let params = create_test_disk_create_params( @@ -857,7 +982,7 @@ mod test { volume_id, ¶ms.disk_source, params.size, - &RegionAllocationStrategy::Random(Some(0)), + &RegionAllocationStrategy::Random { seed: Some(0) }, ) .await .unwrap(); @@ -870,7 +995,7 @@ mod test { volume_id, ¶ms.disk_source, params.size, - &RegionAllocationStrategy::Random(Some(1)), + &RegionAllocationStrategy::Random { seed: Some(1) }, ) .await .unwrap(); @@ -959,7 +1084,7 @@ mod test { volume1_id, ¶ms.disk_source, params.size, - &RegionAllocationStrategy::Random(Some(0)), + &RegionAllocationStrategy::Random { seed: Some(0) }, ) .await .unwrap_err(); @@ -983,8 +1108,12 @@ mod test { let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; - create_test_datasets_for_region_allocation(&opctx, datastore.clone()) - .await; + create_test_datasets_for_region_allocation( + &opctx, + datastore.clone(), + REGION_REDUNDANCY_THRESHOLD, + ) + .await; let disk_size = test_zpool_size(); let alloc_size = ByteCount::try_from(disk_size.to_bytes() * 2).unwrap(); @@ -997,7 +1126,7 @@ mod test { volume1_id, ¶ms.disk_source, params.size, - &RegionAllocationStrategy::Random(Some(0)), + &RegionAllocationStrategy::Random { seed: Some(0) }, ) .await .is_err()); diff --git a/nexus/db-queries/src/db/datastore/region.rs b/nexus/db-queries/src/db/datastore/region.rs index 
5bc79b9481..9465fe2792 100644 --- a/nexus/db-queries/src/db/datastore/region.rs +++ b/nexus/db-queries/src/db/datastore/region.rs @@ -5,7 +5,6 @@ //! [`DataStore`] methods on [`Region`]s. use super::DataStore; -use super::RegionAllocationStrategy; use super::RunnableQuery; use crate::context::OpContext; use crate::db; @@ -23,6 +22,7 @@ use omicron_common::api::external; use omicron_common::api::external::DeleteResult; use omicron_common::api::external::Error; use omicron_common::backoff::{self, BackoffError}; +use omicron_common::nexus_config::RegionAllocationStrategy; use slog::Logger; use uuid::Uuid; diff --git a/nexus/db-queries/src/db/queries/region_allocation.rs b/nexus/db-queries/src/db/queries/region_allocation.rs index b071ee3f44..7f7b2ea9bf 100644 --- a/nexus/db-queries/src/db/queries/region_allocation.rs +++ b/nexus/db-queries/src/db/queries/region_allocation.rs @@ -6,7 +6,6 @@ use crate::db::alias::ExpressionAlias; use crate::db::cast_uuid_as_bytea::CastUuidToBytea; -use crate::db::datastore::RegionAllocationStrategy; use crate::db::datastore::REGION_REDUNDANCY_THRESHOLD; use crate::db::model::{Dataset, DatasetKind, Region}; use crate::db::pool::DbConnection; @@ -24,10 +23,11 @@ use diesel::{ use nexus_db_model::queries::region_allocation::{ candidate_datasets, candidate_regions, candidate_zpools, cockroach_md5, do_insert, inserted_regions, old_regions, old_zpool_usage, - proposed_dataset_changes, updated_datasets, + proposed_dataset_changes, shuffled_candidate_datasets, updated_datasets, }; use nexus_db_model::schema; use omicron_common::api::external; +use omicron_common::nexus_config::RegionAllocationStrategy; const NOT_ENOUGH_DATASETS_SENTINEL: &'static str = "Not enough datasets"; const NOT_ENOUGH_ZPOOL_SPACE_SENTINEL: &'static str = "Not enough space"; @@ -53,7 +53,7 @@ pub fn from_diesel(e: async_bb8_diesel::ConnectionError) -> external::Error { } NOT_ENOUGH_ZPOOL_SPACE_SENTINEL => { return external::Error::unavail( - "Not enough zpool space to 
allocate disks", + "Not enough zpool space to allocate disks. There may not be enough disks with space for the requested region. You may also see this if your rack is in a degraded state, or you're running the default multi-rack topology configuration in a 1-sled development environment.", ); } NOT_ENOUGH_UNIQUE_ZPOOLS_SENTINEL => { @@ -91,6 +91,8 @@ impl OldRegions { /// This implicitly distinguishes between "M.2s" and "U.2s" -- Nexus needs to /// determine during dataset provisioning which devices should be considered for /// usage as Crucible storage. +/// +/// We select only one dataset from each zpool. #[derive(Subquery, QueryId)] #[subquery(name = candidate_datasets)] struct CandidateDatasets { @@ -98,71 +100,65 @@ struct CandidateDatasets { } impl CandidateDatasets { - fn new( - allocation_strategy: &RegionAllocationStrategy, - candidate_zpools: &CandidateZpools, - ) -> Self { + fn new(candidate_zpools: &CandidateZpools, seed: u128) -> Self { use crate::db::schema::dataset::dsl as dataset_dsl; use candidate_zpools::dsl as candidate_zpool_dsl; - let query = match allocation_strategy { - #[cfg(test)] - RegionAllocationStrategy::LeastUsedDisk => { - let query: Box< - dyn CteQuery, - > = Box::new( - dataset_dsl::dataset - .inner_join( - candidate_zpools - .query_source() - .on(dataset_dsl::pool_id - .eq(candidate_zpool_dsl::pool_id)), - ) - .filter(dataset_dsl::time_deleted.is_null()) - .filter(dataset_dsl::size_used.is_not_null()) - .filter(dataset_dsl::kind.eq(DatasetKind::Crucible)) - .order(dataset_dsl::size_used.asc()) - .limit(REGION_REDUNDANCY_THRESHOLD.try_into().unwrap()) - .select((dataset_dsl::id, dataset_dsl::pool_id)), - ); - query - } - RegionAllocationStrategy::Random(seed) => { - let seed = seed.unwrap_or_else(|| { - std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .unwrap() - .as_nanos() - }); - - let seed_bytes = seed.to_le_bytes(); - - let query: Box< - dyn CteQuery, - > = Box::new( - dataset_dsl::dataset - .inner_join( 
- candidate_zpools - .query_source() - .on(dataset_dsl::pool_id - .eq(candidate_zpool_dsl::pool_id)), - ) - .filter(dataset_dsl::time_deleted.is_null()) - .filter(dataset_dsl::size_used.is_not_null()) - .filter(dataset_dsl::kind.eq(DatasetKind::Crucible)) - // We order by md5 to shuffle the ordering of the datasets. - // md5 has a uniform output distribution so it does the job. - .order(cockroach_md5::dsl::md5( + let seed_bytes = seed.to_le_bytes(); + + let query: Box> = + Box::new( + dataset_dsl::dataset + .inner_join(candidate_zpools.query_source().on( + dataset_dsl::pool_id.eq(candidate_zpool_dsl::pool_id), + )) + .filter(dataset_dsl::time_deleted.is_null()) + .filter(dataset_dsl::size_used.is_not_null()) + .filter(dataset_dsl::kind.eq(DatasetKind::Crucible)) + .distinct_on(dataset_dsl::pool_id) + .order_by(( + dataset_dsl::pool_id, + cockroach_md5::dsl::md5( CastUuidToBytea::new(dataset_dsl::id) .concat(seed_bytes.to_vec()), - )) - .select((dataset_dsl::id, dataset_dsl::pool_id)) - .limit(REGION_REDUNDANCY_THRESHOLD.try_into().unwrap()), - ); - query - } - }; + ), + )) + .select((dataset_dsl::id, dataset_dsl::pool_id)), + ); + Self { query } + } +} + +/// Shuffle the candidate datasets, and select REGION_REDUNDANCY_THRESHOLD +/// regions from it. +#[derive(Subquery, QueryId)] +#[subquery(name = shuffled_candidate_datasets)] +struct ShuffledCandidateDatasets { + query: Box>, +} +impl ShuffledCandidateDatasets { + fn new(candidate_datasets: &CandidateDatasets, seed: u128) -> Self { + use candidate_datasets::dsl as candidate_datasets_dsl; + + let seed_bytes = seed.to_le_bytes(); + + let query: Box> = + Box::new( + candidate_datasets + .query_source() + // We order by md5 to shuffle the ordering of the datasets. + // md5 has a uniform output distribution so it does the job. 
+ .order(cockroach_md5::dsl::md5( + CastUuidToBytea::new(candidate_datasets_dsl::id) + .concat(seed_bytes.to_vec()), + )) + .select(( + candidate_datasets_dsl::id, + candidate_datasets_dsl::pool_id, + )) + .limit(REGION_REDUNDANCY_THRESHOLD.try_into().unwrap()), + ); Self { query } } } @@ -179,14 +175,14 @@ diesel::sql_function!(fn now() -> Timestamptz); impl CandidateRegions { fn new( - candidate_datasets: &CandidateDatasets, + shuffled_candidate_datasets: &ShuffledCandidateDatasets, volume_id: uuid::Uuid, block_size: u64, blocks_per_extent: u64, extent_count: u64, ) -> Self { - use candidate_datasets::dsl as candidate_datasets_dsl; use schema::region; + use shuffled_candidate_datasets::dsl as shuffled_candidate_datasets_dsl; let volume_id = volume_id.into_sql::(); let block_size = (block_size as i64).into_sql::(); @@ -195,20 +191,22 @@ impl CandidateRegions { let extent_count = (extent_count as i64).into_sql::(); Self { - query: Box::new(candidate_datasets.query_source().select(( - ExpressionAlias::new::(gen_random_uuid()), - ExpressionAlias::new::(now()), - ExpressionAlias::new::(now()), - ExpressionAlias::new::( - candidate_datasets_dsl::id, + query: Box::new(shuffled_candidate_datasets.query_source().select( + ( + ExpressionAlias::new::(gen_random_uuid()), + ExpressionAlias::new::(now()), + ExpressionAlias::new::(now()), + ExpressionAlias::new::( + shuffled_candidate_datasets_dsl::id, + ), + ExpressionAlias::new::(volume_id), + ExpressionAlias::new::(block_size), + ExpressionAlias::new::( + blocks_per_extent, + ), + ExpressionAlias::new::(extent_count), ), - ExpressionAlias::new::(volume_id), - ExpressionAlias::new::(block_size), - ExpressionAlias::new::( - blocks_per_extent, - ), - ExpressionAlias::new::(extent_count), - ))), + )), } } } @@ -285,12 +283,14 @@ struct CandidateZpools { } impl CandidateZpools { - fn new(old_zpool_usage: &OldPoolUsage, zpool_size_delta: u64) -> Self { + fn new( + old_zpool_usage: &OldPoolUsage, + zpool_size_delta: u64, + seed: 
u128, + distinct_sleds: bool, + ) -> Self { use schema::zpool::dsl as zpool_dsl; - let with_zpool = zpool_dsl::zpool - .on(zpool_dsl::id.eq(old_zpool_usage::dsl::pool_id)); - // Why are we using raw `diesel::dsl::sql` here? // // When SQL performs the "SUM" operation on "bigint" type, the result @@ -309,15 +309,40 @@ impl CandidateZpools { + diesel::dsl::sql(&zpool_size_delta.to_string())) .le(diesel::dsl::sql(zpool_dsl::total_size::NAME)); - Self { - query: Box::new( - old_zpool_usage - .query_source() - .inner_join(with_zpool) - .filter(it_will_fit) - .select((old_zpool_usage::dsl::pool_id,)), - ), - } + let with_zpool = zpool_dsl::zpool + .on(zpool_dsl::id.eq(old_zpool_usage::dsl::pool_id)); + + let base_query = old_zpool_usage + .query_source() + .inner_join(with_zpool) + .filter(it_will_fit) + .select((old_zpool_usage::dsl::pool_id,)); + + let query = if distinct_sleds { + let seed_bytes = seed.to_le_bytes(); + + let query: Box> = + Box::new( + base_query + .order_by(( + zpool_dsl::sled_id, + cockroach_md5::dsl::md5( + CastUuidToBytea::new(zpool_dsl::id) + .concat(seed_bytes.to_vec()), + ), + )) + .distinct_on(zpool_dsl::sled_id), + ); + + query + } else { + let query: Box> = + Box::new(base_query); + + query + }; + + Self { query } } } @@ -508,19 +533,47 @@ impl RegionAllocate { extent_count: u64, allocation_strategy: &RegionAllocationStrategy, ) -> Self { + let (seed, distinct_sleds) = { + let (input_seed, distinct_sleds) = match allocation_strategy { + RegionAllocationStrategy::Random { seed } => (seed, false), + RegionAllocationStrategy::RandomWithDistinctSleds { seed } => { + (seed, true) + } + }; + ( + input_seed.map_or_else( + || { + std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_nanos() + }, + |seed| seed as u128, + ), + distinct_sleds, + ) + }; + let size_delta = block_size * blocks_per_extent * extent_count; let old_regions = OldRegions::new(volume_id); let old_pool_usage = OldPoolUsage::new(); - let 
candidate_zpools = - CandidateZpools::new(&old_pool_usage, size_delta); + let candidate_zpools = CandidateZpools::new( + &old_pool_usage, + size_delta, + seed, + distinct_sleds, + ); let candidate_datasets = - CandidateDatasets::new(&allocation_strategy, &candidate_zpools); + CandidateDatasets::new(&candidate_zpools, seed); + + let shuffled_candidate_datasets = + ShuffledCandidateDatasets::new(&candidate_datasets, seed); let candidate_regions = CandidateRegions::new( - &candidate_datasets, + &shuffled_candidate_datasets, volume_id, block_size, blocks_per_extent, @@ -577,6 +630,7 @@ impl RegionAllocate { .add_subquery(old_pool_usage) .add_subquery(candidate_zpools) .add_subquery(candidate_datasets) + .add_subquery(shuffled_candidate_datasets) .add_subquery(candidate_regions) .add_subquery(proposed_changes) .add_subquery(do_insert) diff --git a/nexus/examples/config.toml b/nexus/examples/config.toml index f1b20c32a1..1a9afbc6bd 100644 --- a/nexus/examples/config.toml +++ b/nexus/examples/config.toml @@ -92,3 +92,14 @@ dns_external.max_concurrent_server_updates = 5 # certificates it will take _other_ Nexus instances to notice and stop serving # them (on a sunny day). external_endpoints.period_secs = 60 + +[default_region_allocation_strategy] +# allocate region on 3 random distinct zpools, on 3 random distinct sleds. +type = "random_with_distinct_sleds" + +# the same as random_with_distinct_sleds, but without requiring distinct sleds +# type = "random" + +# setting `seed` to a fixed value will make dataset selection ordering use the +# same shuffling order for every region allocation. 
+# seed = 0 diff --git a/nexus/src/app/mod.rs b/nexus/src/app/mod.rs index 5bab5e2820..354df0ead3 100644 --- a/nexus/src/app/mod.rs +++ b/nexus/src/app/mod.rs @@ -23,6 +23,7 @@ use omicron_common::address::DENDRITE_PORT; use omicron_common::address::MGS_PORT; use omicron_common::api::external::Error; use omicron_common::api::internal::shared::SwitchLocation; +use omicron_common::nexus_config::RegionAllocationStrategy; use slog::Logger; use std::collections::HashMap; use std::net::Ipv6Addr; @@ -153,6 +154,9 @@ pub struct Nexus { /// Background tasks background_tasks: background::BackgroundTasks, + + /// Default Crucible region allocation strategy + default_region_allocation_strategy: RegionAllocationStrategy, } impl Nexus { @@ -325,6 +329,10 @@ impl Nexus { external_resolver, dpd_clients, background_tasks, + default_region_allocation_strategy: config + .pkg + .default_region_allocation_strategy + .clone(), }; // TODO-cleanup all the extra Arcs here seems wrong diff --git a/nexus/src/app/sagas/disk_create.rs b/nexus/src/app/sagas/disk_create.rs index cca36cefa7..275c8738cc 100644 --- a/nexus/src/app/sagas/disk_create.rs +++ b/nexus/src/app/sagas/disk_create.rs @@ -12,11 +12,10 @@ use super::{ ACTION_GENERATE_ID, }; use crate::app::sagas::declare_saga_actions; +use crate::app::{authn, authz, db}; use crate::external_api::params; -use nexus_db_queries::db::datastore::RegionAllocationStrategy; use nexus_db_queries::db::identity::{Asset, Resource}; use nexus_db_queries::db::lookup::LookupPath; -use nexus_db_queries::{authn, authz, db}; use omicron_common::api::external::DiskState; use omicron_common::api::external::Error; use rand::{rngs::StdRng, RngCore, SeedableRng}; @@ -255,6 +254,9 @@ async fn sdc_alloc_regions( &sagactx, ¶ms.serialized_authn, ); + + let strategy = &osagactx.nexus().default_region_allocation_strategy; + let datasets_and_regions = osagactx .datastore() .region_allocate( @@ -262,7 +264,7 @@ async fn sdc_alloc_regions( volume_id, 
¶ms.create_params.disk_source, params.create_params.size, - &RegionAllocationStrategy::Random(None), + &strategy, ) .await .map_err(ActionError::action_failed)?; diff --git a/nexus/src/app/sagas/snapshot_create.rs b/nexus/src/app/sagas/snapshot_create.rs index b27f4a3a9b..eeabf64894 100644 --- a/nexus/src/app/sagas/snapshot_create.rs +++ b/nexus/src/app/sagas/snapshot_create.rs @@ -100,14 +100,13 @@ use super::{ }; use crate::app::sagas::declare_saga_actions; use crate::app::sagas::retry_until_known_result; +use crate::app::{authn, authz, db}; use crate::external_api::params; use anyhow::anyhow; use crucible_agent_client::{types::RegionId, Client as CrucibleAgentClient}; use nexus_db_model::Generation; -use nexus_db_queries::db::datastore::RegionAllocationStrategy; use nexus_db_queries::db::identity::{Asset, Resource}; use nexus_db_queries::db::lookup::LookupPath; -use nexus_db_queries::{authn, authz, db}; use omicron_common::api::external; use omicron_common::api::external::Error; use rand::{rngs::StdRng, RngCore, SeedableRng}; @@ -332,6 +331,8 @@ async fn ssc_alloc_regions( .await .map_err(ActionError::action_failed)?; + let strategy = &osagactx.nexus().default_region_allocation_strategy; + let datasets_and_regions = osagactx .datastore() .region_allocate( @@ -344,7 +345,7 @@ async fn ssc_alloc_regions( .map_err(|e| ActionError::action_failed(e.to_string()))?, }, external::ByteCount::from(disk.size), - &RegionAllocationStrategy::Random(None), + &strategy, ) .await .map_err(ActionError::action_failed)?; diff --git a/nexus/tests/config.test.toml b/nexus/tests/config.test.toml index 6eeacceaed..1b1ae2c912 100644 --- a/nexus/tests/config.test.toml +++ b/nexus/tests/config.test.toml @@ -89,3 +89,8 @@ dns_external.max_concurrent_server_updates = 5 # certificates it will take _other_ Nexus instances to notice and stop serving # them (on a sunny day). 
external_endpoints.period_secs = 60 + +[default_region_allocation_strategy] +# we only have one sled in the test environment, so we need to use the +# `Random` strategy, instead of `RandomWithDistinctSleds` +type = "random" \ No newline at end of file diff --git a/package-manifest.toml b/package-manifest.toml index 4dc0f6b616..c776f6d96d 100644 --- a/package-manifest.toml +++ b/package-manifest.toml @@ -92,7 +92,8 @@ source.rust.binary_names = ["nexus", "schema-updater"] source.rust.release = true source.paths = [ { from = "/opt/ooce/pgsql-13/lib/amd64", to = "/opt/ooce/pgsql-13/lib/amd64" }, - { from = "smf/nexus", to = "/var/svc/manifest/site/nexus" }, + { from = "smf/nexus/manifest.xml", to = "/var/svc/manifest/site/nexus/manifest.xml" }, + { from = "smf/nexus/{{rack-topology}}", to = "/var/svc/manifest/site/nexus" }, { from = "out/console-assets", to = "/var/nexus/static" }, { from = "schema/crdb", to = "/var/nexus/schema/crdb" }, ] diff --git a/package/src/bin/omicron-package.rs b/package/src/bin/omicron-package.rs index ea490e54cf..bc07b61234 100644 --- a/package/src/bin/omicron-package.rs +++ b/package/src/bin/omicron-package.rs @@ -211,11 +211,12 @@ async fn do_target( format!("failed to create directory {}", target_dir.display()) })?; match subcommand { - TargetCommand::Create { image, machine, switch } => { + TargetCommand::Create { image, machine, switch, rack_topology } => { let target = KnownTarget::new( image.clone(), machine.clone(), switch.clone(), + rack_topology.clone(), )?; let path = get_single_target(&target_dir, name).await?; diff --git a/package/src/lib.rs b/package/src/lib.rs index b0cc04970a..395f3ed472 100644 --- a/package/src/lib.rs +++ b/package/src/lib.rs @@ -46,6 +46,29 @@ pub enum TargetCommand { #[clap(short, long, default_value_if("image", "standard", "stub"))] switch: Option, + + #[clap( + short, + long, + default_value_if("image", "trampoline", Some("single-sled")), + + // This opt is required, and clap will enforce that even with 
+ // `required = false`, since it's not an Option. But the + // default_value_if only works if we set `required` to false. It's + // jank, but it is what it is. + // https://github.com/clap-rs/clap/issues/4086 + required = false + )] + /// Specify whether nexus will run in a single-sled or multi-sled + /// environment. + /// + /// Set single-sled for dev purposes when you're running a single + /// sled-agent. Set multi-sled if you're running with multiple sleds. + /// Currently this only affects the crucible disk allocation strategy: + /// VM disks will require 3 distinct sleds with `multi-sled`, which will + /// fail in a single-sled environment. `single-sled` relaxes this + /// requirement. + rack_topology: crate::target::RackTopology, }, /// List all existing targets List, diff --git a/package/src/target.rs b/package/src/target.rs index a7b2dd4539..d5d5e92c46 100644 --- a/package/src/target.rs +++ b/package/src/target.rs @@ -48,12 +48,27 @@ pub enum Switch { SoftNpu, } +/// Topology of the sleds within the rack. +#[derive(Clone, Debug, strum::EnumString, strum::Display, ValueEnum)] +#[strum(serialize_all = "kebab-case")] +#[clap(rename_all = "kebab-case")] +pub enum RackTopology { + /// Use configurations suitable for a multi-sled deployment, such as dogfood + /// and production racks. + MultiSled, + + /// Use configurations suitable for a single-sled deployment, such as CI and + /// dev machines. + SingleSled, +} + /// A strongly-typed variant of [Target].
#[derive(Clone, Debug)] pub struct KnownTarget { image: Image, machine: Option, switch: Option, + rack_topology: RackTopology, } impl KnownTarget { @@ -61,6 +76,7 @@ impl KnownTarget { image: Image, machine: Option, switch: Option, + rack_topology: RackTopology, ) -> Result { if matches!(image, Image::Trampoline) { if machine.is_some() { @@ -77,7 +93,7 @@ impl KnownTarget { bail!("'switch=asic' is only valid with 'machine=gimlet'"); } - Ok(Self { image, machine, switch }) + Ok(Self { image, machine, switch, rack_topology }) } } @@ -87,6 +103,7 @@ impl Default for KnownTarget { image: Image::Standard, machine: Some(Machine::NonGimlet), switch: Some(Switch::Stub), + rack_topology: RackTopology::MultiSled, } } } @@ -101,6 +118,7 @@ impl From for Target { if let Some(switch) = kt.switch { map.insert("switch".to_string(), switch.to_string()); } + map.insert("rack-topology".to_string(), kt.rack_topology.to_string()); Target(map) } } @@ -121,6 +139,7 @@ impl std::str::FromStr for KnownTarget { let mut image = Self::default().image; let mut machine = None; let mut switch = None; + let mut rack_topology = None; for (k, v) in target.0.into_iter() { match k.as_str() { @@ -133,6 +152,9 @@ impl std::str::FromStr for KnownTarget { "switch" => { switch = Some(v.parse()?); } + "rack-topology" => { + rack_topology = Some(v.parse()?); + } _ => { bail!( "Unknown target key {k}\nValid keys include: [{}]", @@ -146,6 +168,11 @@ impl std::str::FromStr for KnownTarget { } } } - KnownTarget::new(image, machine, switch) + KnownTarget::new( + image, + machine, + switch, + rack_topology.unwrap_or(RackTopology::MultiSled), + ) } } diff --git a/sled-agent/Cargo.toml b/sled-agent/Cargo.toml index 88e51a3bc3..d4ccfc97c8 100644 --- a/sled-agent/Cargo.toml +++ b/sled-agent/Cargo.toml @@ -120,3 +120,5 @@ machine-non-gimlet = [] switch-asic = [] switch-stub = [] switch-softnpu = [] +rack-topology-single-sled = [] +rack-topology-multi-sled = [] diff --git a/sled-agent/src/services.rs 
b/sled-agent/src/services.rs index 96cdf8222b..60f0965612 100644 --- a/sled-agent/src/services.rs +++ b/sled-agent/src/services.rs @@ -1513,6 +1513,9 @@ impl ServiceManager { .open(&config_path) .await .map_err(|err| Error::io_path(&config_path, err))?; + file.write_all(b"\n\n") + .await + .map_err(|err| Error::io_path(&config_path, err))?; file.write_all(config_str.as_bytes()) .await .map_err(|err| Error::io_path(&config_path, err))?; diff --git a/smf/nexus/multi-sled/config-partial.toml b/smf/nexus/multi-sled/config-partial.toml new file mode 100644 index 0000000000..2dfee81d02 --- /dev/null +++ b/smf/nexus/multi-sled/config-partial.toml @@ -0,0 +1,45 @@ +# +# Oxide API: partial configuration file +# + +[console] +# Directory for static assets. Absolute path or relative to CWD. +static_dir = "/var/nexus/static" +session_idle_timeout_minutes = 60 +session_absolute_timeout_minutes = 480 + +[authn] +schemes_external = ["session_cookie", "access_token"] + +[log] +# Show log messages of this level and more severe +level = "debug" +mode = "file" +path = "/dev/stdout" +if_exists = "append" + +# TODO: Uncomment the following lines to enable automatic schema +# migration on boot. +# +# [schema] +# schema_dir = "/var/nexus/schema/crdb" + +[background_tasks] +dns_internal.period_secs_config = 60 +dns_internal.period_secs_servers = 60 +dns_internal.period_secs_propagation = 60 +dns_internal.max_concurrent_server_updates = 5 +dns_external.period_secs_config = 60 +dns_external.period_secs_servers = 60 +dns_external.period_secs_propagation = 60 +dns_external.max_concurrent_server_updates = 5 +# How frequently we check the list of stored TLS certificates. This is +# approximately an upper bound on how soon after updating the list of +# certificates it will take _other_ Nexus instances to notice and stop serving +# them (on a sunny day). 
+external_endpoints.period_secs = 60 + +[default_region_allocation_strategy] +# by default, allocate across 3 distinct sleds +# seed is omitted so a new seed will be chosen with every allocation. +type = "random_with_distinct_sleds" \ No newline at end of file diff --git a/smf/nexus/config-partial.toml b/smf/nexus/single-sled/config-partial.toml similarity index 86% rename from smf/nexus/config-partial.toml rename to smf/nexus/single-sled/config-partial.toml index b29727c4aa..aff0a8a25f 100644 --- a/smf/nexus/config-partial.toml +++ b/smf/nexus/single-sled/config-partial.toml @@ -38,3 +38,8 @@ dns_external.max_concurrent_server_updates = 5 # certificates it will take _other_ Nexus instances to notice and stop serving # them (on a sunny day). external_endpoints.period_secs = 60 + +[default_region_allocation_strategy] +# by default, allocate without requirement for distinct sleds. +# seed is omitted so a new seed will be chosen with every allocation. +type = "random" \ No newline at end of file From d300fb89fb798d4b9cc6b785829ddceffa66ecd4 Mon Sep 17 00:00:00 2001 From: Ryan Goodfellow Date: Tue, 3 Oct 2023 12:45:17 -0700 Subject: [PATCH 12/35] Omdb networking (#4147) --- Cargo.lock | 1 + dev-tools/omdb/Cargo.toml | 1 + dev-tools/omdb/src/bin/omdb/db.rs | 180 ++++++++++++++++++++++++ dev-tools/omdb/tests/test_all_output.rs | 1 + dev-tools/omdb/tests/usage_errors.out | 20 +++ 5 files changed, 203 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 3a45dcb381..c4385bf694 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5127,6 +5127,7 @@ dependencies = [ "expectorate", "humantime", "internal-dns 0.1.0", + "ipnetwork", "nexus-client 0.1.0", "nexus-db-model", "nexus-db-queries", diff --git a/dev-tools/omdb/Cargo.toml b/dev-tools/omdb/Cargo.toml index 5b2adde1b2..5a05e93db9 100644 --- a/dev-tools/omdb/Cargo.toml +++ b/dev-tools/omdb/Cargo.toml @@ -33,6 +33,7 @@ textwrap.workspace = true tokio = { workspace = true, features = [ "full" ] } uuid.workspace = true 
omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } +ipnetwork.workspace = true [dev-dependencies] expectorate.workspace = true diff --git a/dev-tools/omdb/src/bin/omdb/db.rs b/dev-tools/omdb/src/bin/omdb/db.rs index 93e5ef4301..10e5546b6d 100644 --- a/dev-tools/omdb/src/bin/omdb/db.rs +++ b/dev-tools/omdb/src/bin/omdb/db.rs @@ -12,6 +12,9 @@ //! would be the only consumer -- and in that case it's okay to query the //! database directly. +// NOTE: emanates from Tabled macros +#![allow(clippy::useless_vec)] + use crate::Omdb; use anyhow::anyhow; use anyhow::bail; @@ -30,7 +33,9 @@ use nexus_db_model::DnsGroup; use nexus_db_model::DnsName; use nexus_db_model::DnsVersion; use nexus_db_model::DnsZone; +use nexus_db_model::ExternalIp; use nexus_db_model::Instance; +use nexus_db_model::Project; use nexus_db_model::Region; use nexus_db_model::Sled; use nexus_db_model::Zpool; @@ -86,6 +91,8 @@ enum DbCommands { Sleds, /// Print information about customer instances Instances, + /// Print information about the network + Network(NetworkArgs), } #[derive(Debug, Args)] @@ -170,6 +177,22 @@ enum ServicesCommands { ListBySled, } +#[derive(Debug, Args)] +struct NetworkArgs { + #[command(subcommand)] + command: NetworkCommands, + + /// Print out raw data structures from the data store. + #[clap(long)] + verbose: bool, +} + +#[derive(Debug, Subcommand)] +enum NetworkCommands { + /// List external IPs + ListEips, +} + impl DbArgs { /// Run a `omdb db` subcommand.
pub(crate) async fn run_cmd( @@ -269,6 +292,13 @@ impl DbArgs { DbCommands::Instances => { cmd_db_instances(&datastore, self.fetch_limit).await } + DbCommands::Network(NetworkArgs { + command: NetworkCommands::ListEips, + verbose, + }) => { + cmd_db_eips(&opctx, &datastore, self.fetch_limit, *verbose) + .await + } } } } @@ -1098,6 +1128,156 @@ async fn cmd_db_dns_names( Ok(()) } +async fn cmd_db_eips( + opctx: &OpContext, + datastore: &DataStore, + limit: NonZeroU32, + verbose: bool, +) -> Result<(), anyhow::Error> { + use db::schema::external_ip::dsl; + let ips: Vec = dsl::external_ip + .filter(dsl::time_deleted.is_null()) + .select(ExternalIp::as_select()) + .get_results_async(&*datastore.pool_connection_for_tests().await?) + .await?; + + check_limit(&ips, limit, || String::from("listing external ips")); + + struct PortRange { + first: u16, + last: u16, + } + + impl Display for PortRange { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}/{}", self.first, self.last) + } + } + + #[derive(Tabled)] + enum Owner { + Instance { project: String, name: String }, + Service { kind: String }, + None, + } + + impl Display for Owner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Instance { project, name } => { + write!(f, "Instance {project}/{name}") + } + Self::Service { kind } => write!(f, "Service {kind}"), + Self::None => write!(f, "None"), + } + } + } + + #[derive(Tabled)] + struct IpRow { + ip: ipnetwork::IpNetwork, + ports: PortRange, + kind: String, + owner: Owner, + } + + if verbose { + for ip in &ips { + if verbose { + println!("{ip:#?}"); + } + } + return Ok(()); + } + + let mut rows = Vec::new(); + + for ip in &ips { + let owner = if let Some(owner_id) = ip.parent_id { + if ip.is_service { + let service = match LookupPath::new(opctx, datastore) + .service_id(owner_id) + .fetch() + .await + { + Ok(instance) => instance, + Err(e) => { + eprintln!( + "error looking up service with 
id {owner_id}: {e}" + ); + continue; + } + }; + Owner::Service { kind: format!("{:?}", service.1.kind) } + } else { + use db::schema::instance::dsl as instance_dsl; + let instance = match instance_dsl::instance + .filter(instance_dsl::id.eq(owner_id)) + .limit(1) + .select(Instance::as_select()) + .load_async(&*datastore.pool_connection_for_tests().await?) + .await + .context("loading requested instance")? + .pop() + { + Some(instance) => instance, + None => { + eprintln!("instance with id {owner_id} not found"); + continue; + } + }; + + use db::schema::project::dsl as project_dsl; + let project = match project_dsl::project + .filter(project_dsl::id.eq(instance.project_id)) + .limit(1) + .select(Project::as_select()) + .load_async(&*datastore.pool_connection_for_tests().await?) + .await + .context("loading requested project")? + .pop() + { + Some(instance) => instance, + None => { + eprintln!( + "project with id {} not found", + instance.project_id + ); + continue; + } + }; + + Owner::Instance { + project: project.name().to_string(), + name: instance.name().to_string(), + } + } + } else { + Owner::None + }; + + let row = IpRow { + ip: ip.ip, + ports: PortRange { + first: ip.first_port.into(), + last: ip.last_port.into(), + }, + kind: format!("{:?}", ip.kind), + owner, + }; + rows.push(row); + } + + rows.sort_by(|a, b| a.ip.cmp(&b.ip)); + let table = tabled::Table::new(rows) + .with(tabled::settings::Style::empty()) + .to_string(); + + println!("{}", table); + + Ok(()) +} + fn print_name( prefix: &str, name: &str, diff --git a/dev-tools/omdb/tests/test_all_output.rs b/dev-tools/omdb/tests/test_all_output.rs index 0eddcb492c..d757369ead 100644 --- a/dev-tools/omdb/tests/test_all_output.rs +++ b/dev-tools/omdb/tests/test_all_output.rs @@ -41,6 +41,7 @@ async fn test_omdb_usage_errors() { &["db", "dns", "diff"], &["db", "dns", "names"], &["db", "services"], + &["db", "network"], &["nexus"], &["nexus", "background-tasks"], &["sled-agent"], diff --git 
a/dev-tools/omdb/tests/usage_errors.out b/dev-tools/omdb/tests/usage_errors.out index 136a631e80..b5421b76af 100644 --- a/dev-tools/omdb/tests/usage_errors.out +++ b/dev-tools/omdb/tests/usage_errors.out @@ -91,6 +91,7 @@ Commands: services Print information about control plane services sleds Print information about sleds instances Print information about customer instances + network Print information about the network help Print this message or the help of the given subcommand(s) Options: @@ -112,6 +113,7 @@ Commands: services Print information about control plane services sleds Print information about sleds instances Print information about customer instances + network Print information about the network help Print this message or the help of the given subcommand(s) Options: @@ -186,6 +188,24 @@ Commands: Options: -h, --help Print help ============================================= +EXECUTING COMMAND: omdb ["db", "network"] +termination: Exited(2) +--------------------------------------------- +stdout: +--------------------------------------------- +stderr: +Print information about the network + +Usage: omdb db network [OPTIONS] + +Commands: + list-eips List external IPs + help Print this message or the help of the given subcommand(s) + +Options: + --verbose Print out raw data structures from the data store + -h, --help Print help +============================================= EXECUTING COMMAND: omdb ["nexus"] termination: Exited(2) --------------------------------------------- From 3a5a7cd9d57bd2ed5f2e71d9967eb0b40a236d98 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Oct 2023 22:43:16 -0700 Subject: [PATCH 13/35] Bump thiserror from 1.0.48 to 1.0.49 (#4173) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [thiserror](https://github.com/dtolnay/thiserror) from 1.0.48 to 1.0.49.
Release notes

Sourced from thiserror's releases.

1.0.49

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=thiserror&package-manager=cargo&previous-version=1.0.48&new-version=1.0.49)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore major version` will close this group update PR and stop Dependabot creating any more for the specific dependency's major version (unless you unignore this specific dependency's major version or upgrade to it yourself) - `@dependabot ignore minor version` will close this group update PR and stop Dependabot creating any more for the specific dependency's minor version (unless you unignore this specific dependency's minor version or upgrade to it yourself) - `@dependabot ignore ` will close this group update PR and stop Dependabot creating any more for the specific dependency (unless you unignore this specific dependency or upgrade to it yourself) - `@dependabot unignore ` will remove all of the ignore conditions of the specified dependency - `@dependabot unignore ` will remove the ignore condition of the specified dependency and ignore conditions
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c4385bf694..6133dabc0a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8755,18 +8755,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.48" +version = "1.0.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d6d7a740b8a666a7e828dd00da9c0dc290dff53154ea77ac109281de90589b7" +checksum = "1177e8c6d7ede7afde3585fd2513e611227efd6481bd78d2e82ba1ce16557ed4" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.48" +version = "1.0.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" +checksum = "10712f02019e9288794769fba95cd6847df9874d49d871d062172f9dd41bc4cc" dependencies = [ "proc-macro2", "quote", From af14d1abb5d1fc684157a560d7e0a3cd6a2f860b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Oct 2023 22:43:28 -0700 Subject: [PATCH 14/35] Bump expectorate from 1.0.7 to 1.1.0 (#4176) Bumps [expectorate](https://github.com/oxidecomputer/expectorate) from 1.0.7 to 1.1.0.
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=expectorate&package-manager=cargo&previous-version=1.0.7&new-version=1.1.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore major version` will close this group update PR and stop Dependabot creating any more for the specific dependency's major version (unless you unignore this specific dependency's major version or upgrade to it yourself) - `@dependabot ignore minor version` will close this group update PR and stop Dependabot creating any more for the specific dependency's minor version (unless you unignore this specific dependency's minor version or upgrade to it yourself) - `@dependabot ignore ` will close this group update PR and stop Dependabot creating any more for the specific dependency (unless you unignore this specific dependency or upgrade to it yourself) - `@dependabot unignore ` will remove all of the ignore conditions of the specified dependency - `@dependabot unignore ` will remove the ignore condition of the specified dependency and ignore conditions
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6133dabc0a..54e74ce07a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2404,9 +2404,9 @@ dependencies = [ [[package]] name = "expectorate" -version = "1.0.7" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "710ab6a2d57038a835d66f78d5af3fa5d27c1ec4682f823b9203c48826cb0591" +checksum = "de6f19b25bdfa2747ae775f37cd109c31f1272d4e4c83095be0727840aa1d75f" dependencies = [ "console", "newline-converter", diff --git a/Cargo.toml b/Cargo.toml index 63d8e0b2d6..e453c47244 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -180,7 +180,7 @@ dns-service-client = { path = "dns-service-client" } dpd-client = { path = "dpd-client" } dropshot = { git = "https://github.com/oxidecomputer/dropshot", branch = "main", features = [ "usdt-probes" ] } either = "1.9.0" -expectorate = "1.0.7" +expectorate = "1.1.0" fatfs = "0.3.6" flate2 = "1.0.27" flume = "0.11.0" From 901d005ebf913be82b50ce6e66de9775c06a954e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Oct 2023 22:44:48 -0700 Subject: [PATCH 15/35] Bump hubtools from `0c642f6` to `2481445` (#4178) Bumps [hubtools](https://github.com/oxidecomputer/hubtools) from `0c642f6` to `2481445`.
Commits

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore major version` will close this group update PR and stop Dependabot creating any more for the specific dependency's major version (unless you unignore this specific dependency's major version or upgrade to it yourself) - `@dependabot ignore minor version` will close this group update PR and stop Dependabot creating any more for the specific dependency's minor version (unless you unignore this specific dependency's minor version or upgrade to it yourself) - `@dependabot ignore ` will close this group update PR and stop Dependabot creating any more for the specific dependency (unless you unignore this specific dependency or upgrade to it yourself) - `@dependabot unignore ` will remove all of the ignore conditions of the specified dependency - `@dependabot unignore ` will remove the ignore condition of the specified dependency and ignore conditions
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 54e74ce07a..c6335cb32e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3219,7 +3219,7 @@ dependencies = [ [[package]] name = "hubtools" version = "0.4.1" -source = "git+https://github.com/oxidecomputer/hubtools.git?branch=main#0c642f6e1f83b74725c7119a546bc26ac7452a48" +source = "git+https://github.com/oxidecomputer/hubtools.git?branch=main#2481445b80f8476041f62a1c8b6301e4918c63ed" dependencies = [ "lpc55_areas", "lpc55_sign", @@ -4008,7 +4008,7 @@ checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" [[package]] name = "lpc55_areas" version = "0.2.4" -source = "git+https://github.com/oxidecomputer/lpc55_support#4051a3b9421573dc36ed6098b292a7609a3cf98b" +source = "git+https://github.com/oxidecomputer/lpc55_support#96f064eaae5e95930efaab6c29fd1b2e22225dac" dependencies = [ "bitfield", "clap 4.4.3", @@ -4018,8 +4018,8 @@ dependencies = [ [[package]] name = "lpc55_sign" -version = "0.3.2" -source = "git+https://github.com/oxidecomputer/lpc55_support#4051a3b9421573dc36ed6098b292a7609a3cf98b" +version = "0.3.3" +source = "git+https://github.com/oxidecomputer/lpc55_support#96f064eaae5e95930efaab6c29fd1b2e22225dac" dependencies = [ "byteorder", "const-oid", @@ -9429,8 +9429,8 @@ version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ - "cfg-if 1.0.0", - "rand 0.8.5", + "cfg-if 0.1.10", + "rand 0.4.6", "static_assertions", ] From a9104a0786f437e38d4945dd77470bc8fd4f974c Mon Sep 17 00:00:00 2001 From: Rain Date: Wed, 4 Oct 2023 00:12:30 -0700 Subject: [PATCH 16/35] [workspace-hack] use workspace-dotted format and a patch directive (#4197) Two changes: 1. 
Switch to the workspace-dotted format (`.workspace = true`) for uniformity with the rest of omicron. This is new in cargo-hakari 0.9.28. 2. Use a patch directive, which means that the workspace-hack only applies while building within this workspace. If another workspace imports a crate from here via a git dependency, it will not have the workspace-hack applied to it (instead, it will use [this empty crate](https://crates.io/crates/omicron-workspace-hack) on crates.io). Thanks so much to @pfmooney for this suggestion! Also remove one of the exceptions made in the xtask (workspace-hack lines in *other* `Cargo.toml`s are now output as `.workspace = true`, but hakari cannot yet generate workspace lines in its own `Cargo.toml`). I verified by creating an empty project that the workspace-hack isn't applied to downstream projects that import e.g. `omicron-common` as a git (or path) dependency. Folks will have to update to cargo-hakari 0.9.28 to use this, but hopefully that won't be too much of a bother. 
--- .config/hakari.toml | 5 ++++- Cargo.toml | 8 ++++++++ api_identity/Cargo.toml | 2 +- bootstore/Cargo.toml | 2 +- bootstrap-agent-client/Cargo.toml | 2 +- caboose-util/Cargo.toml | 2 +- certificates/Cargo.toml | 2 +- common/Cargo.toml | 2 +- crdb-seed/Cargo.toml | 2 +- ddm-admin-client/Cargo.toml | 2 +- deploy/Cargo.toml | 2 +- dev-tools/omdb/Cargo.toml | 2 +- dev-tools/omicron-dev/Cargo.toml | 2 +- dev-tools/xtask/src/main.rs | 6 ------ dns-server/Cargo.toml | 2 +- dns-service-client/Cargo.toml | 2 +- dpd-client/Cargo.toml | 2 +- end-to-end-tests/Cargo.toml | 2 +- gateway-cli/Cargo.toml | 2 +- gateway-client/Cargo.toml | 2 +- gateway-test-utils/Cargo.toml | 2 +- gateway/Cargo.toml | 2 +- illumos-utils/Cargo.toml | 2 +- installinator-artifact-client/Cargo.toml | 2 +- installinator-artifactd/Cargo.toml | 2 +- installinator-common/Cargo.toml | 2 +- installinator/Cargo.toml | 4 ++-- internal-dns-cli/Cargo.toml | 2 +- internal-dns/Cargo.toml | 2 +- ipcc-key-value/Cargo.toml | 2 +- key-manager/Cargo.toml | 2 +- nexus-client/Cargo.toml | 2 +- nexus/Cargo.toml | 2 +- nexus/authz-macros/Cargo.toml | 2 +- nexus/db-macros/Cargo.toml | 2 +- nexus/db-model/Cargo.toml | 2 +- nexus/db-queries/Cargo.toml | 2 +- nexus/defaults/Cargo.toml | 2 +- nexus/test-interface/Cargo.toml | 2 +- nexus/test-utils-macros/Cargo.toml | 2 +- nexus/test-utils/Cargo.toml | 2 +- nexus/types/Cargo.toml | 2 +- oxide-client/Cargo.toml | 2 +- oximeter-client/Cargo.toml | 2 +- oximeter/collector/Cargo.toml | 2 +- oximeter/db/Cargo.toml | 2 +- oximeter/instruments/Cargo.toml | 2 +- oximeter/oximeter-macro-impl/Cargo.toml | 2 +- oximeter/oximeter/Cargo.toml | 2 +- oximeter/producer/Cargo.toml | 2 +- package/Cargo.toml | 2 +- passwords/Cargo.toml | 2 +- rpaths/Cargo.toml | 2 +- sled-agent-client/Cargo.toml | 2 +- sled-agent/Cargo.toml | 2 +- sled-hardware/Cargo.toml | 2 +- sp-sim/Cargo.toml | 2 +- test-utils/Cargo.toml | 2 +- tufaceous-lib/Cargo.toml | 2 +- tufaceous/Cargo.toml | 2 +- 
update-engine/Cargo.toml | 2 +- wicket-common/Cargo.toml | 2 +- wicket-dbg/Cargo.toml | 2 +- wicket/Cargo.toml | 2 +- wicketd-client/Cargo.toml | 2 +- wicketd/Cargo.toml | 2 +- 66 files changed, 76 insertions(+), 71 deletions(-) diff --git a/.config/hakari.toml b/.config/hakari.toml index 62f15df276..0d883dc6f6 100644 --- a/.config/hakari.toml +++ b/.config/hakari.toml @@ -6,6 +6,10 @@ hakari-package = "omicron-workspace-hack" # Format for `workspace-hack = ...` lines in other Cargo.tomls. Requires cargo-hakari 0.9.8 or above. dep-format-version = "4" +# Output lines as `omicron-workspace-hack.workspace = true`. Requires +# cargo-hakari 0.9.28 or above. +workspace-hack-line-style = "workspace-dotted" + # Setting workspace.resolver = "2" in the root Cargo.toml is HIGHLY recommended. # Hakari works much better with the new feature resolver. # For more about the new feature resolver, see: @@ -27,4 +31,3 @@ exact-versions = true [traversal-excludes] workspace-members = ["xtask"] - diff --git a/Cargo.toml b/Cargo.toml index e453c47244..fb610128ed 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -229,6 +229,7 @@ nexus-db-queries = { path = "nexus/db-queries" } nexus-defaults = { path = "nexus/defaults" } omicron-certificates = { path = "certificates" } omicron-passwords = { path = "passwords" } +omicron-workspace-hack = "0.1.0" nexus-test-interface = { path = "nexus/test-interface" } nexus-test-utils-macros = { path = "nexus/test-utils-macros" } nexus-test-utils = { path = "nexus/test-utils" } @@ -554,3 +555,10 @@ opt-level = 3 [patch.crates-io.pq-sys] git = 'https://github.com/oxidecomputer/pq-sys' branch = "oxide/omicron" + +# Using the workspace-hack via this patch directive means that it only applies +# while building within this workspace. If another workspace imports a crate +# from here via a git dependency, it will not have the workspace-hack applied +# to it. 
+[patch.crates-io.omicron-workspace-hack] +path = "workspace-hack" diff --git a/api_identity/Cargo.toml b/api_identity/Cargo.toml index 9faf2a1878..547defa7c5 100644 --- a/api_identity/Cargo.toml +++ b/api_identity/Cargo.toml @@ -14,4 +14,4 @@ proc-macro = true proc-macro2.workspace = true quote.workspace = true syn.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/bootstore/Cargo.toml b/bootstore/Cargo.toml index eefe05c8d6..18e3e3876b 100644 --- a/bootstore/Cargo.toml +++ b/bootstore/Cargo.toml @@ -36,7 +36,7 @@ zeroize.workspace = true # utils`. Unfortunately, it doesn't appear possible to put the `pq-sys` dep # only in `[dev-dependencies]`. pq-sys = "*" -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] assert_matches.workspace = true diff --git a/bootstrap-agent-client/Cargo.toml b/bootstrap-agent-client/Cargo.toml index 17989a5c5f..42ae59b7aa 100644 --- a/bootstrap-agent-client/Cargo.toml +++ b/bootstrap-agent-client/Cargo.toml @@ -17,4 +17,4 @@ serde.workspace = true sled-hardware.workspace = true slog.workspace = true uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/caboose-util/Cargo.toml b/caboose-util/Cargo.toml index 253d54643d..91bf00741e 100644 --- a/caboose-util/Cargo.toml +++ b/caboose-util/Cargo.toml @@ -7,4 +7,4 @@ license = "MPL-2.0" [dependencies] anyhow.workspace = true hubtools.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/certificates/Cargo.toml b/certificates/Cargo.toml index d20d257e4c..87b12fd167 100644 --- a/certificates/Cargo.toml +++ b/certificates/Cargo.toml @@ -12,7 +12,7 @@ openssl-sys.workspace = true thiserror.workspace = true omicron-common.workspace = true 
-omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] omicron-test-utils.workspace = true diff --git a/common/Cargo.toml b/common/Cargo.toml index bda88d0d43..75c1efab55 100644 --- a/common/Cargo.toml +++ b/common/Cargo.toml @@ -40,7 +40,7 @@ toml.workspace = true uuid.workspace = true parse-display.workspace = true progenitor.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] camino-tempfile.workspace = true diff --git a/crdb-seed/Cargo.toml b/crdb-seed/Cargo.toml index fa71fe7e8a..8d6d570d08 100644 --- a/crdb-seed/Cargo.toml +++ b/crdb-seed/Cargo.toml @@ -13,4 +13,4 @@ omicron-test-utils.workspace = true ring.workspace = true slog.workspace = true tokio.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/ddm-admin-client/Cargo.toml b/ddm-admin-client/Cargo.toml index 3814446b3e..4d00f329e7 100644 --- a/ddm-admin-client/Cargo.toml +++ b/ddm-admin-client/Cargo.toml @@ -15,7 +15,7 @@ tokio.workspace = true omicron-common.workspace = true sled-hardware.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [build-dependencies] anyhow.workspace = true diff --git a/deploy/Cargo.toml b/deploy/Cargo.toml index 17bacd6354..1a6c05a546 100644 --- a/deploy/Cargo.toml +++ b/deploy/Cargo.toml @@ -14,7 +14,7 @@ serde.workspace = true serde_derive.workspace = true thiserror.workspace = true toml.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [[bin]] name = "thing-flinger" diff --git a/dev-tools/omdb/Cargo.toml b/dev-tools/omdb/Cargo.toml index 5a05e93db9..f865acff2b 100644 --- a/dev-tools/omdb/Cargo.toml +++ b/dev-tools/omdb/Cargo.toml @@ -32,8 +32,8 
@@ tabled.workspace = true textwrap.workspace = true tokio = { workspace = true, features = [ "full" ] } uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } ipnetwork.workspace = true +omicron-workspace-hack.workspace = true [dev-dependencies] expectorate.workspace = true diff --git a/dev-tools/omicron-dev/Cargo.toml b/dev-tools/omicron-dev/Cargo.toml index 95da4d42ef..5439b69c76 100644 --- a/dev-tools/omicron-dev/Cargo.toml +++ b/dev-tools/omicron-dev/Cargo.toml @@ -28,7 +28,7 @@ signal-hook-tokio.workspace = true tokio = { workspace = true, features = [ "full" ] } tokio-postgres.workspace = true toml.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] camino-tempfile.workspace = true diff --git a/dev-tools/xtask/src/main.rs b/dev-tools/xtask/src/main.rs index 3e52d742f5..93d91799bc 100644 --- a/dev-tools/xtask/src/main.rs +++ b/dev-tools/xtask/src/main.rs @@ -133,12 +133,6 @@ fn cmd_check_workspace_deps() -> Result<()> { } } - if name == WORKSPACE_HACK_PACKAGE_NAME { - // Skip over workspace-hack because hakari doesn't yet support - // workspace deps: https://github.com/guppy-rs/guppy/issues/7 - continue; - } - non_workspace_dependencies .entry(name.to_owned()) .or_insert_with(Vec::new) diff --git a/dns-server/Cargo.toml b/dns-server/Cargo.toml index d7606dcff5..f91cbfafdb 100644 --- a/dns-server/Cargo.toml +++ b/dns-server/Cargo.toml @@ -30,7 +30,7 @@ trust-dns-proto.workspace = true trust-dns-resolver.workspace = true trust-dns-server.workspace = true uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] expectorate.workspace = true diff --git a/dns-service-client/Cargo.toml b/dns-service-client/Cargo.toml index e351d90da2..681c06672f 100644 --- a/dns-service-client/Cargo.toml +++ b/dns-service-client/Cargo.toml @@ 
-14,4 +14,4 @@ serde.workspace = true serde_json.workspace = true slog.workspace = true uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/dpd-client/Cargo.toml b/dpd-client/Cargo.toml index 26807f7d79..0239c6d9b0 100644 --- a/dpd-client/Cargo.toml +++ b/dpd-client/Cargo.toml @@ -17,7 +17,7 @@ ipnetwork.workspace = true http.workspace = true schemars.workspace = true rand.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [build-dependencies] anyhow.workspace = true diff --git a/end-to-end-tests/Cargo.toml b/end-to-end-tests/Cargo.toml index 5ff0f9b377..732a4a2091 100644 --- a/end-to-end-tests/Cargo.toml +++ b/end-to-end-tests/Cargo.toml @@ -24,4 +24,4 @@ tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } toml.workspace = true trust-dns-resolver.workspace = true uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/gateway-cli/Cargo.toml b/gateway-cli/Cargo.toml index 0d179750ea..ba66fa4c4f 100644 --- a/gateway-cli/Cargo.toml +++ b/gateway-cli/Cargo.toml @@ -24,4 +24,4 @@ uuid.workspace = true gateway-client.workspace = true gateway-messages.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/gateway-client/Cargo.toml b/gateway-client/Cargo.toml index 96a1eb221f..fc33174107 100644 --- a/gateway-client/Cargo.toml +++ b/gateway-client/Cargo.toml @@ -15,4 +15,4 @@ serde_json.workspace = true schemars.workspace = true slog.workspace = true uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/gateway-test-utils/Cargo.toml b/gateway-test-utils/Cargo.toml index 9d80e63f05..81b7686eb2 100644 --- 
a/gateway-test-utils/Cargo.toml +++ b/gateway-test-utils/Cargo.toml @@ -14,4 +14,4 @@ slog.workspace = true sp-sim.workspace = true tokio.workspace = true uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/gateway/Cargo.toml b/gateway/Cargo.toml index f5abce88e9..07934a6ad3 100644 --- a/gateway/Cargo.toml +++ b/gateway/Cargo.toml @@ -34,7 +34,7 @@ tokio-tungstenite.workspace = true tokio-util.workspace = true toml.workspace = true uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] expectorate.workspace = true diff --git a/illumos-utils/Cargo.toml b/illumos-utils/Cargo.toml index e292097bc5..e521b54d02 100644 --- a/illumos-utils/Cargo.toml +++ b/illumos-utils/Cargo.toml @@ -29,7 +29,7 @@ zone.workspace = true # only enabled via the `testing` feature mockall = { workspace = true, optional = true } -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [target.'cfg(target_os = "illumos")'.dependencies] opte-ioctl.workspace = true diff --git a/installinator-artifact-client/Cargo.toml b/installinator-artifact-client/Cargo.toml index 18447b8e83..c3ddc529d9 100644 --- a/installinator-artifact-client/Cargo.toml +++ b/installinator-artifact-client/Cargo.toml @@ -15,4 +15,4 @@ serde_json.workspace = true slog.workspace = true update-engine.workspace = true uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/installinator-artifactd/Cargo.toml b/installinator-artifactd/Cargo.toml index 9318b725db..b14ca4002f 100644 --- a/installinator-artifactd/Cargo.toml +++ b/installinator-artifactd/Cargo.toml @@ -20,7 +20,7 @@ uuid.workspace = true installinator-common.workspace = true omicron-common.workspace = true 
-omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] expectorate.workspace = true diff --git a/installinator-common/Cargo.toml b/installinator-common/Cargo.toml index 0f1bf86901..8fea234e20 100644 --- a/installinator-common/Cargo.toml +++ b/installinator-common/Cargo.toml @@ -15,4 +15,4 @@ serde_json.workspace = true serde_with.workspace = true thiserror.workspace = true update-engine.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/installinator/Cargo.toml b/installinator/Cargo.toml index 428ea0d08e..a4f170ddba 100644 --- a/installinator/Cargo.toml +++ b/installinator/Cargo.toml @@ -42,7 +42,7 @@ toml.workspace = true tufaceous-lib.workspace = true update-engine.workspace = true uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] omicron-test-utils.workspace = true @@ -57,4 +57,4 @@ tokio-stream.workspace = true [features] image-standard = [] image-trampoline = [] -rack-topology-single-sled = [] \ No newline at end of file +rack-topology-single-sled = [] diff --git a/internal-dns-cli/Cargo.toml b/internal-dns-cli/Cargo.toml index fb5780d22a..dab92c6d7c 100644 --- a/internal-dns-cli/Cargo.toml +++ b/internal-dns-cli/Cargo.toml @@ -13,4 +13,4 @@ omicron-common.workspace = true slog.workspace = true tokio.workspace = true trust-dns-resolver.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/internal-dns/Cargo.toml b/internal-dns/Cargo.toml index d680ab3ce1..ecb2d48bda 100644 --- a/internal-dns/Cargo.toml +++ b/internal-dns/Cargo.toml @@ -17,7 +17,7 @@ thiserror.workspace = true trust-dns-proto.workspace = true trust-dns-resolver.workspace = true uuid.workspace = true -omicron-workspace-hack = { 
version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] assert_matches.workspace = true diff --git a/ipcc-key-value/Cargo.toml b/ipcc-key-value/Cargo.toml index 128fde9a01..04aea9f939 100644 --- a/ipcc-key-value/Cargo.toml +++ b/ipcc-key-value/Cargo.toml @@ -11,7 +11,7 @@ omicron-common.workspace = true serde.workspace = true thiserror.workspace = true uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] omicron-common = { workspace = true, features = ["testing"] } diff --git a/key-manager/Cargo.toml b/key-manager/Cargo.toml index 69ae3b25bd..c44ec61ea4 100644 --- a/key-manager/Cargo.toml +++ b/key-manager/Cargo.toml @@ -14,5 +14,5 @@ slog.workspace = true thiserror.workspace = true tokio.workspace = true zeroize.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/nexus-client/Cargo.toml b/nexus-client/Cargo.toml index d59c013992..2734142f9f 100644 --- a/nexus-client/Cargo.toml +++ b/nexus-client/Cargo.toml @@ -18,4 +18,4 @@ serde.workspace = true serde_json.workspace = true slog.workspace = true uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/nexus/Cargo.toml b/nexus/Cargo.toml index 91872e2c32..3de6dac7c0 100644 --- a/nexus/Cargo.toml +++ b/nexus/Cargo.toml @@ -90,7 +90,7 @@ oximeter.workspace = true oximeter-instruments = { workspace = true, features = ["http-instruments"] } oximeter-producer.workspace = true rustls = { workspace = true } -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] async-bb8-diesel.workspace = true diff --git a/nexus/authz-macros/Cargo.toml b/nexus/authz-macros/Cargo.toml index 3d55afa477..15f18cb9c8 100644 --- 
a/nexus/authz-macros/Cargo.toml +++ b/nexus/authz-macros/Cargo.toml @@ -14,4 +14,4 @@ quote.workspace = true serde.workspace = true serde_tokenstream.workspace = true syn.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/nexus/db-macros/Cargo.toml b/nexus/db-macros/Cargo.toml index ce206bb56e..053c381ac9 100644 --- a/nexus/db-macros/Cargo.toml +++ b/nexus/db-macros/Cargo.toml @@ -15,7 +15,7 @@ quote.workspace = true serde.workspace = true serde_tokenstream.workspace = true syn = { workspace = true, features = ["extra-traits"] } -omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] rustfmt-wrapper.workspace = true diff --git a/nexus/db-model/Cargo.toml b/nexus/db-model/Cargo.toml index aedbb9168b..a5cb9a06be 100644 --- a/nexus/db-model/Cargo.toml +++ b/nexus/db-model/Cargo.toml @@ -36,7 +36,7 @@ nexus-defaults.workspace = true nexus-types.workspace = true omicron-passwords.workspace = true sled-agent-client.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] expectorate.workspace = true diff --git a/nexus/db-queries/Cargo.toml b/nexus/db-queries/Cargo.toml index af01c1732b..eaf3dc1295 100644 --- a/nexus/db-queries/Cargo.toml +++ b/nexus/db-queries/Cargo.toml @@ -63,7 +63,7 @@ nexus-types.workspace = true omicron-common.workspace = true omicron-passwords.workspace = true oximeter.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] assert_matches.workspace = true diff --git a/nexus/defaults/Cargo.toml b/nexus/defaults/Cargo.toml index 09a95fa839..0724b5bf4d 100644 --- a/nexus/defaults/Cargo.toml +++ b/nexus/defaults/Cargo.toml @@ -11,4 +11,4 @@ rand.workspace = true serde_json.workspace = 
true omicron-common.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/nexus/test-interface/Cargo.toml b/nexus/test-interface/Cargo.toml index e0743e84bc..0071ffaa28 100644 --- a/nexus/test-interface/Cargo.toml +++ b/nexus/test-interface/Cargo.toml @@ -12,4 +12,4 @@ nexus-types.workspace = true omicron-common.workspace = true slog.workspace = true uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/nexus/test-utils-macros/Cargo.toml b/nexus/test-utils-macros/Cargo.toml index 1bfa25017a..d3d28a7640 100644 --- a/nexus/test-utils-macros/Cargo.toml +++ b/nexus/test-utils-macros/Cargo.toml @@ -11,4 +11,4 @@ proc-macro = true proc-macro2.workspace = true quote.workspace = true syn = { workspace = true, features = [ "fold", "parsing" ] } -omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/nexus/test-utils/Cargo.toml b/nexus/test-utils/Cargo.toml index a2e7600e93..8eb8df4a5b 100644 --- a/nexus/test-utils/Cargo.toml +++ b/nexus/test-utils/Cargo.toml @@ -38,4 +38,4 @@ tempfile.workspace = true trust-dns-proto.workspace = true trust-dns-resolver.workspace = true uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/nexus/types/Cargo.toml b/nexus/types/Cargo.toml index f7ffafec52..c499714c31 100644 --- a/nexus/types/Cargo.toml +++ b/nexus/types/Cargo.toml @@ -25,4 +25,4 @@ api_identity.workspace = true dns-service-client.workspace = true omicron-common.workspace = true omicron-passwords.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/oxide-client/Cargo.toml b/oxide-client/Cargo.toml index 
df34ab9721..3cb411729d 100644 --- a/oxide-client/Cargo.toml +++ b/oxide-client/Cargo.toml @@ -21,4 +21,4 @@ thiserror.workspace = true tokio = { workspace = true, features = [ "net" ] } trust-dns-resolver.workspace = true uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/oximeter-client/Cargo.toml b/oximeter-client/Cargo.toml index 297dfb6c92..a8aa7de02c 100644 --- a/oximeter-client/Cargo.toml +++ b/oximeter-client/Cargo.toml @@ -12,4 +12,4 @@ reqwest = { workspace = true, features = ["json", "rustls-tls", "stream"] } serde.workspace = true slog.workspace = true uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/oximeter/collector/Cargo.toml b/oximeter/collector/Cargo.toml index c8c4030dba..bc8cc19634 100644 --- a/oximeter/collector/Cargo.toml +++ b/oximeter/collector/Cargo.toml @@ -22,7 +22,7 @@ thiserror.workspace = true tokio.workspace = true toml.workspace = true uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] expectorate.workspace = true diff --git a/oximeter/db/Cargo.toml b/oximeter/db/Cargo.toml index 77bce09db9..ad6d584b1b 100644 --- a/oximeter/db/Cargo.toml +++ b/oximeter/db/Cargo.toml @@ -25,7 +25,7 @@ thiserror.workspace = true tokio = { workspace = true, features = [ "rt-multi-thread", "macros" ] } usdt.workspace = true uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] itertools.workspace = true diff --git a/oximeter/instruments/Cargo.toml b/oximeter/instruments/Cargo.toml index 4adff0463a..3653ab8011 100644 --- a/oximeter/instruments/Cargo.toml +++ b/oximeter/instruments/Cargo.toml @@ -12,7 +12,7 @@ oximeter.workspace = true tokio.workspace 
= true http = { workspace = true, optional = true } uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } +omicron-workspace-hack.workspace = true [features] default = ["http-instruments"] diff --git a/oximeter/oximeter-macro-impl/Cargo.toml b/oximeter/oximeter-macro-impl/Cargo.toml index ff116e1c9d..df9ed547ed 100644 --- a/oximeter/oximeter-macro-impl/Cargo.toml +++ b/oximeter/oximeter-macro-impl/Cargo.toml @@ -12,4 +12,4 @@ proc-macro = true proc-macro2.workspace = true quote.workspace = true syn = { workspace = true, features = [ "full", "extra-traits" ] } -omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/oximeter/oximeter/Cargo.toml b/oximeter/oximeter/Cargo.toml index b2aa15f85e..7d01b8f8be 100644 --- a/oximeter/oximeter/Cargo.toml +++ b/oximeter/oximeter/Cargo.toml @@ -15,7 +15,7 @@ schemars = { workspace = true, features = [ "uuid1", "bytes", "chrono" ] } serde.workspace = true thiserror.workspace = true uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] approx.workspace = true diff --git a/oximeter/producer/Cargo.toml b/oximeter/producer/Cargo.toml index f171f57e8a..3f74ba753f 100644 --- a/oximeter/producer/Cargo.toml +++ b/oximeter/producer/Cargo.toml @@ -19,4 +19,4 @@ slog-dtrace.workspace = true tokio.workspace = true thiserror.workspace = true uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/package/Cargo.toml b/package/Cargo.toml index 9fc4610020..b840938db0 100644 --- a/package/Cargo.toml +++ b/package/Cargo.toml @@ -34,7 +34,7 @@ tokio = { workspace = true, features = [ "full" ] } toml.workspace = true topological-sort.workspace = true walkdir.workspace = true -omicron-workspace-hack = { version = "0.1", path = 
"../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] expectorate.workspace = true diff --git a/passwords/Cargo.toml b/passwords/Cargo.toml index cbd569ef4c..8adcf75a2e 100644 --- a/passwords/Cargo.toml +++ b/passwords/Cargo.toml @@ -11,7 +11,7 @@ thiserror.workspace = true schemars.workspace = true serde.workspace = true serde_with.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] argon2alt = { package = "rust-argon2", version = "1.0" } diff --git a/rpaths/Cargo.toml b/rpaths/Cargo.toml index 7671be4968..45e6c9b925 100644 --- a/rpaths/Cargo.toml +++ b/rpaths/Cargo.toml @@ -5,4 +5,4 @@ edition = "2021" license = "MPL-2.0" [dependencies] -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/sled-agent-client/Cargo.toml b/sled-agent-client/Cargo.toml index 01c1032a51..b2ed07caba 100644 --- a/sled-agent-client/Cargo.toml +++ b/sled-agent-client/Cargo.toml @@ -15,4 +15,4 @@ reqwest = { workspace = true, features = [ "json", "rustls-tls", "stream" ] } serde.workspace = true slog.workspace = true uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/sled-agent/Cargo.toml b/sled-agent/Cargo.toml index d4ccfc97c8..82d7411d1a 100644 --- a/sled-agent/Cargo.toml +++ b/sled-agent/Cargo.toml @@ -76,7 +76,7 @@ uuid.workspace = true zeroize.workspace = true zone.workspace = true static_assertions.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [target.'cfg(target_os = "illumos")'.dependencies] opte-ioctl.workspace = true diff --git a/sled-hardware/Cargo.toml b/sled-hardware/Cargo.toml index 880f93441c..14ae15996b 100644 --- a/sled-hardware/Cargo.toml +++ b/sled-hardware/Cargo.toml @@ -24,7 +24,7 @@ 
thiserror.workspace = true tofino.workspace = true tokio.workspace = true uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [target.'cfg(target_os = "illumos")'.dependencies] illumos-devinfo = { git = "https://github.com/oxidecomputer/illumos-devinfo", branch = "main" } diff --git a/sp-sim/Cargo.toml b/sp-sim/Cargo.toml index 2a1ae19468..07d956e41e 100644 --- a/sp-sim/Cargo.toml +++ b/sp-sim/Cargo.toml @@ -21,7 +21,7 @@ sprockets-rot.workspace = true thiserror.workspace = true tokio = { workspace = true, features = [ "full" ] } toml.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [[bin]] name = "sp-sim" diff --git a/test-utils/Cargo.toml b/test-utils/Cargo.toml index a0227a4de2..9e21f3ca12 100644 --- a/test-utils/Cargo.toml +++ b/test-utils/Cargo.toml @@ -25,7 +25,7 @@ usdt.workspace = true rcgen.workspace = true regex.workspace = true reqwest.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] expectorate.workspace = true diff --git a/tufaceous-lib/Cargo.toml b/tufaceous-lib/Cargo.toml index 8b5c4fa7ca..bcfcee6b9c 100644 --- a/tufaceous-lib/Cargo.toml +++ b/tufaceous-lib/Cargo.toml @@ -32,7 +32,7 @@ toml.workspace = true tough.workspace = true url = "2.4.1" zip.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] omicron-test-utils.workspace = true diff --git a/tufaceous/Cargo.toml b/tufaceous/Cargo.toml index f3e3b815d2..e48513e24c 100644 --- a/tufaceous/Cargo.toml +++ b/tufaceous/Cargo.toml @@ -18,7 +18,7 @@ slog-async.workspace = true slog-envlogger.workspace = true slog-term.workspace = true tufaceous-lib.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } 
+omicron-workspace-hack.workspace = true [dev-dependencies] assert_cmd.workspace = true diff --git a/update-engine/Cargo.toml b/update-engine/Cargo.toml index 25ade83f34..af988bf091 100644 --- a/update-engine/Cargo.toml +++ b/update-engine/Cargo.toml @@ -21,7 +21,7 @@ schemars = { workspace = true, features = ["uuid1"] } slog.workspace = true tokio = { workspace = true, features = ["macros", "sync", "time", "rt-multi-thread"] } uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] buf-list.workspace = true diff --git a/wicket-common/Cargo.toml b/wicket-common/Cargo.toml index 229561cd38..b87e742133 100644 --- a/wicket-common/Cargo.toml +++ b/wicket-common/Cargo.toml @@ -13,4 +13,4 @@ serde.workspace = true serde_json.workspace = true thiserror.workspace = true update-engine.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/wicket-dbg/Cargo.toml b/wicket-dbg/Cargo.toml index bc22424c69..e7e8a58468 100644 --- a/wicket-dbg/Cargo.toml +++ b/wicket-dbg/Cargo.toml @@ -22,7 +22,7 @@ wicket.workspace = true # used only by wicket-dbg binary reedline = "0.23.0" -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [[bin]] name = "wicket-dbg" diff --git a/wicket/Cargo.toml b/wicket/Cargo.toml index 58605c8037..5392e72e9f 100644 --- a/wicket/Cargo.toml +++ b/wicket/Cargo.toml @@ -46,7 +46,7 @@ omicron-passwords.workspace = true update-engine.workspace = true wicket-common.workspace = true wicketd-client.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] assert_cmd.workspace = true diff --git a/wicketd-client/Cargo.toml b/wicketd-client/Cargo.toml index 2d959f1f8d..814309b975 100644 --- a/wicketd-client/Cargo.toml +++ 
b/wicketd-client/Cargo.toml @@ -18,4 +18,4 @@ slog.workspace = true update-engine.workspace = true uuid.workspace = true wicket-common.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/wicketd/Cargo.toml b/wicketd/Cargo.toml index 6df5e0e4e5..1044e1ff51 100644 --- a/wicketd/Cargo.toml +++ b/wicketd/Cargo.toml @@ -54,7 +54,7 @@ sled-hardware.workspace = true tufaceous-lib.workspace = true update-engine.workspace = true wicket-common.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [[bin]] name = "wicketd" From b0487d3777a87edf7cb7b3a11badf47a75931323 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Wed, 4 Oct 2023 17:08:49 -0500 Subject: [PATCH 17/35] Bump web console (#4204) The only functional change should be the form validation one. And one icon. But there are some tweaks to our build setup that I'd like to make sure we didn't mess up. https://github.com/oxidecomputer/console/compare/af6536d5...0cc1e03a * [0cc1e03a](https://github.com/oxidecomputer/console/commit/0cc1e03a) oxidecomputer/console#1770 * [48aea2f4](https://github.com/oxidecomputer/console/commit/48aea2f4) npm audit fix * [84aff1de](https://github.com/oxidecomputer/console/commit/84aff1de) oxidecomputer/console#1769 * [c127febd](https://github.com/oxidecomputer/console/commit/c127febd) oxidecomputer/console#1768 * [8c9513c1](https://github.com/oxidecomputer/console/commit/8c9513c1) oxidecomputer/console#1765 * [0314fd72](https://github.com/oxidecomputer/console/commit/0314fd72) oxidecomputer/console#1742 * [8918ffa9](https://github.com/oxidecomputer/console/commit/8918ffa9) skip the other flaky test in safari for now. 
I'm suffering * [b357246e](https://github.com/oxidecomputer/console/commit/b357246e) increase playwright total time to 20 minutes * [4f7d401d](https://github.com/oxidecomputer/console/commit/4f7d401d) be sneakier about PR numbers in commit messages in bump omicron PR --- tools/console_version | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/console_version b/tools/console_version index dba32c3e94..0c30c707e1 100644 --- a/tools/console_version +++ b/tools/console_version @@ -1,2 +1,2 @@ -COMMIT="af6536d587a17a65398407ca03d364345aa24342" -SHA2="00701652eb1e495fd22409dcdf74ebae2ba081529f65fb41c5ac3a2fef50a149" +COMMIT="0cc1e03a24b3f5da275d15b969978a385d6b3b27" +SHA2="46a186fc3bf919a3aa2871aeab8441e4a13ed134f912b5d76c7ff891fed66cee" From ce81dd12e5a69c50979f8c5049a17d84dbdc0a01 Mon Sep 17 00:00:00 2001 From: bnaecker Date: Wed, 4 Oct 2023 15:39:43 -0700 Subject: [PATCH 18/35] Adds functionality to run oximeter standalone (#4117) - Adds a "standalone" mode for the `oximeter-collector` crate, including the binary and main inner types. This runs in a slightly different mode, in which the ClickHouse database itself isn't strictly required. In this case, a task to simply print the results will be spawned in place of the normal results-sink task which inserts records into the database. - Creates a tiny fake Nexus server, which includes only the API needed to register collectors and producers. This is started automatically when running `oximeter standalone`, and used to assign producers / collectors as the real Nexus does, but without a database. The assignments are only in memory. - Adds internal `oximeter` API for listing / deleting a producer for each oximeter collector, and an `omdb` subcommand which exercises the listing. 
--- Cargo.lock | 12 + common/src/api/internal/nexus.rs | 2 +- dev-tools/omdb/Cargo.toml | 2 + dev-tools/omdb/src/bin/omdb/main.rs | 4 + dev-tools/omdb/src/bin/omdb/oximeter.rs | 94 ++++ dev-tools/omdb/tests/usage_errors.out | 2 + docs/how-to-run.adoc | 34 ++ openapi/oximeter.json | 130 +++++ oximeter-client/Cargo.toml | 1 + oximeter/collector/Cargo.toml | 7 + oximeter/collector/src/bin/oximeter.rs | 103 +++- oximeter/collector/src/lib.rs | 473 ++++++++++++++++-- oximeter/collector/src/standalone.rs | 263 ++++++++++ .../tests/output/cmd-oximeter-noargs-stderr | 8 +- oximeter/producer/Cargo.toml | 4 + oximeter/producer/examples/producer.rs | 45 +- oximeter/producer/src/lib.rs | 142 ++++-- 17 files changed, 1215 insertions(+), 111 deletions(-) create mode 100644 dev-tools/omdb/src/bin/omdb/oximeter.rs create mode 100644 oximeter/collector/src/standalone.rs diff --git a/Cargo.lock b/Cargo.lock index c6335cb32e..b931918b9c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5125,6 +5125,7 @@ dependencies = [ "diesel", "dropshot", "expectorate", + "futures", "humantime", "internal-dns 0.1.0", "ipnetwork", @@ -5139,6 +5140,7 @@ dependencies = [ "omicron-rpaths", "omicron-test-utils", "omicron-workspace-hack", + "oximeter-client", "pq-sys", "regex", "serde", @@ -5716,6 +5718,7 @@ name = "oximeter-client" version = "0.1.0" dependencies = [ "chrono", + "futures", "omicron-common 0.1.0", "omicron-workspace-hack", "progenitor", @@ -5729,24 +5732,31 @@ dependencies = [ name = "oximeter-collector" version = "0.1.0" dependencies = [ + "anyhow", "clap 4.4.3", "dropshot", "expectorate", "futures", "internal-dns 0.1.0", "nexus-client 0.1.0", + "nexus-types", "omicron-common 0.1.0", "omicron-test-utils", "omicron-workspace-hack", "openapi-lint", "openapiv3", "oximeter 0.1.0", + "oximeter-client", "oximeter-db", + "rand 0.8.5", "reqwest", + "schemars", "serde", "serde_json", "slog", + "slog-async", "slog-dtrace", + "slog-term", "subprocess", "thiserror", "tokio", @@ -5821,7 +5831,9 @@ 
dependencies = [ name = "oximeter-producer" version = "0.1.0" dependencies = [ + "anyhow", "chrono", + "clap 4.4.3", "dropshot", "nexus-client 0.1.0", "omicron-common 0.1.0", diff --git a/common/src/api/internal/nexus.rs b/common/src/api/internal/nexus.rs index 018869ce14..983976bbb7 100644 --- a/common/src/api/internal/nexus.rs +++ b/common/src/api/internal/nexus.rs @@ -67,7 +67,7 @@ pub struct InstanceRuntimeState { /// Information announced by a metric server, used so that clients can contact it and collect /// available metric data from it. -#[derive(Debug, Clone, JsonSchema, Serialize, Deserialize)] +#[derive(Clone, Debug, Deserialize, JsonSchema, Serialize, PartialEq)] pub struct ProducerEndpoint { pub id: Uuid, pub address: SocketAddr, diff --git a/dev-tools/omdb/Cargo.toml b/dev-tools/omdb/Cargo.toml index f865acff2b..cd4af6e947 100644 --- a/dev-tools/omdb/Cargo.toml +++ b/dev-tools/omdb/Cargo.toml @@ -16,11 +16,13 @@ diesel.workspace = true dropshot.workspace = true humantime.workspace = true internal-dns.workspace = true +futures.workspace = true nexus-client.workspace = true nexus-db-model.workspace = true nexus-db-queries.workspace = true nexus-types.workspace = true omicron-common.workspace = true +oximeter-client.workspace = true # See omicron-rpaths for more about the "pq-sys" dependency. 
pq-sys = "*" serde.workspace = true diff --git a/dev-tools/omdb/src/bin/omdb/main.rs b/dev-tools/omdb/src/bin/omdb/main.rs index 166ed3043f..d1a56e1d80 100644 --- a/dev-tools/omdb/src/bin/omdb/main.rs +++ b/dev-tools/omdb/src/bin/omdb/main.rs @@ -42,6 +42,7 @@ use std::net::SocketAddrV6; mod db; mod nexus; +mod oximeter; mod sled_agent; #[tokio::main] @@ -57,6 +58,7 @@ async fn main() -> Result<(), anyhow::Error> { match &args.command { OmdbCommands::Db(db) => db.run_cmd(&args, &log).await, OmdbCommands::Nexus(nexus) => nexus.run_cmd(&args, &log).await, + OmdbCommands::Oximeter(oximeter) => oximeter.run_cmd(&log).await, OmdbCommands::SledAgent(sled) => sled.run_cmd(&args, &log).await, } } @@ -155,6 +157,8 @@ enum OmdbCommands { Db(db::DbArgs), /// Debug a specific Nexus instance Nexus(nexus::NexusArgs), + /// Query oximeter collector state + Oximeter(oximeter::OximeterArgs), /// Debug a specific Sled SledAgent(sled_agent::SledAgentArgs), } diff --git a/dev-tools/omdb/src/bin/omdb/oximeter.rs b/dev-tools/omdb/src/bin/omdb/oximeter.rs new file mode 100644 index 0000000000..e0f20556a2 --- /dev/null +++ b/dev-tools/omdb/src/bin/omdb/oximeter.rs @@ -0,0 +1,94 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! 
omdb commands that query oximeter + +use anyhow::Context; +use clap::Args; +use clap::Subcommand; +use futures::TryStreamExt; +use oximeter_client::types::ProducerEndpoint; +use oximeter_client::Client; +use slog::Logger; +use std::net::SocketAddr; +use std::time::Duration; +use tabled::Table; +use tabled::Tabled; +use uuid::Uuid; + +#[derive(Debug, Args)] +pub struct OximeterArgs { + /// URL of the oximeter collector to query + #[arg(long, env("OMDB_OXIMETER_URL"))] + oximeter_url: String, + + #[command(subcommand)] + command: OximeterCommands, +} + +/// Subcommands that query oximeter collector state +#[derive(Debug, Subcommand)] +enum OximeterCommands { + /// List the producers the collector is assigned to poll + ListProducers, +} + +impl OximeterArgs { + fn client(&self, log: &Logger) -> Client { + Client::new( + &self.oximeter_url, + log.new(slog::o!("component" => "oximeter-client")), + ) + } + + pub async fn run_cmd(&self, log: &Logger) -> anyhow::Result<()> { + let client = self.client(log); + match self.command { + OximeterCommands::ListProducers => { + self.list_producers(client).await + } + } + } + + async fn list_producers(&self, client: Client) -> anyhow::Result<()> { + let info = client + .collector_info() + .await + .context("failed to fetch collector info")?; + let producers: Vec = client + .producers_list_stream(None) + .map_ok(Producer::from) + .try_collect() + .await + .context("failed to list producers")?; + let table = Table::new(producers) + .with(tabled::settings::Style::empty()) + .with(tabled::settings::Padding::new(0, 1, 0, 0)) + .to_string(); + println!("Collector ID: {}\n", info.id); + println!("{table}"); + Ok(()) + } +} + +#[derive(Tabled)] +#[tabled(rename_all = "SCREAMING_SNAKE_CASE")] +struct Producer { + id: Uuid, + address: SocketAddr, + base_route: String, + interval: String, +} + +impl From for Producer { + fn from(p: ProducerEndpoint) -> Self { + let interval = Duration::new(p.interval.secs, p.interval.nanos); + Self { + id: 
p.id, + address: p.address.parse().unwrap(), + base_route: p.base_route, + interval: humantime::format_duration(interval).to_string(), + } + } +} diff --git a/dev-tools/omdb/tests/usage_errors.out b/dev-tools/omdb/tests/usage_errors.out index b5421b76af..dc2a16bc47 100644 --- a/dev-tools/omdb/tests/usage_errors.out +++ b/dev-tools/omdb/tests/usage_errors.out @@ -11,6 +11,7 @@ Usage: omdb [OPTIONS] Commands: db Query the control plane database (CockroachDB) nexus Debug a specific Nexus instance + oximeter Query oximeter collector state sled-agent Debug a specific Sled help Print this message or the help of the given subcommand(s) @@ -33,6 +34,7 @@ Usage: omdb [OPTIONS] Commands: db Query the control plane database (CockroachDB) nexus Debug a specific Nexus instance + oximeter Query oximeter collector state sled-agent Debug a specific Sled help Print this message or the help of the given subcommand(s) diff --git a/docs/how-to-run.adoc b/docs/how-to-run.adoc index 7539c5183f..aa1ee3c73d 100644 --- a/docs/how-to-run.adoc +++ b/docs/how-to-run.adoc @@ -697,3 +697,37 @@ To build a recovery host image: ---- $ ./tools/build-host-image.sh -R $HELIOS_PATH /work/trampoline-global-zone-packages.tar.gz ---- + + +== Running `oximeter` in standalone mode + +`oximeter` is the program used to collect metrics from producers in the control +plane. Normally, the producers register themselves with `nexus`, which creates a +durable assignment between the producer and an `oximeter` collector in the +database. That allows components to survive restarts, while still producing +metrics. + +To ease development, `oximeter` can be run in "standalone" mode. In this case, a +mock `nexus` server is started, with only the minimal subset of the internal API +needed to register producers and collectors. Neither CockroachDB nor ClickHouse +is required, although ClickHouse _can_ be used, if one wants to see how data is +inserted into the database. 
+ +To run `oximeter` in standalone, use: + +[source,console] +---- +$ cargo run --bin oximeter -- standalone +---- + +The producer should still register with `nexus` as normal, which is usually done +with an explicit IP address and port. This defaults to `[::1]:12221`. + +When run this way, `oximeter` will print the samples it collects from the +producers to its logs, like so: + +[source,console] +---- +Sep 26 17:48:56.006 INFO sample: Sample { measurement: Measurement { timestamp: 2023-09-26T17:48:56.004565890Z, datum: CumulativeF64(Cumulative { start_time: 2023-09-26T17:48:45.997404777Z, value: 10.007154703 }) }, timeseries_name: "virtual_machine:cpu_busy", target: FieldSet { name: "virtual_machine", fields: {"instance_id": Field { name: "instance_id", value: Uuid(564ef6df-d5f6-4204-88f7-5c615859cfa7) }, "project_id": Field { name: "project_id", value: Uuid(2dc7e1c9-f8ac-49d7-8292-46e9e2b1a61d) }} }, metric: FieldSet { name: "cpu_busy", fields: {"cpu_id": Field { name: "cpu_id", value: I64(0) }} } }, component: results-sink, collector_id: 78c7c9a5-1569-460a-8899-aada9ad5db6c, component: oximeter-standalone, component: nexus-standalone, file: oximeter/collector/src/lib.rs:280 +Sep 26 17:48:56.006 INFO sample: Sample { measurement: Measurement { timestamp: 2023-09-26T17:48:56.004700841Z, datum: CumulativeF64(Cumulative { start_time: 2023-09-26T17:48:45.997405187Z, value: 10.007154703 }) }, timeseries_name: "virtual_machine:cpu_busy", target: FieldSet { name: "virtual_machine", fields: {"instance_id": Field { name: "instance_id", value: Uuid(564ef6df-d5f6-4204-88f7-5c615859cfa7) }, "project_id": Field { name: "project_id", value: Uuid(2dc7e1c9-f8ac-49d7-8292-46e9e2b1a61d) }} }, metric: FieldSet { name: "cpu_busy", fields: {"cpu_id": Field { name: "cpu_id", value: I64(1) }} } }, component: results-sink, collector_id: 78c7c9a5-1569-460a-8899-aada9ad5db6c, component: oximeter-standalone, component: nexus-standalone, file: oximeter/collector/src/lib.rs:280 +---- diff 
--git a/openapi/oximeter.json b/openapi/oximeter.json index 6781b77892..ebc7957c2e 100644 --- a/openapi/oximeter.json +++ b/openapi/oximeter.json @@ -10,7 +10,76 @@ "version": "0.0.1" }, "paths": { + "/info": { + "get": { + "operationId": "collector_info", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CollectorInfo" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, "/producers": { + "get": { + "operationId": "producers_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ProducerEndpointResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + }, "post": { "operationId": "producers_post", "requestBody": { @@ -35,6 +104,33 @@ } } } + }, + "/producers/{producer_id}": { + "delete": { + "operationId": "producer_delete", + "parameters": [ + { + "in": "path", + "name": "producer_id", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } } }, "components": { @@ -51,6 +147,19 @@ } }, "schemas": 
{ + "CollectorInfo": { + "type": "object", + "properties": { + "id": { + "description": "The collector's UUID.", + "type": "string", + "format": "uuid" + } + }, + "required": [ + "id" + ] + }, "Duration": { "type": "object", "properties": { @@ -113,6 +222,27 @@ "id", "interval" ] + }, + "ProducerEndpointResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/ProducerEndpoint" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] } } } diff --git a/oximeter-client/Cargo.toml b/oximeter-client/Cargo.toml index a8aa7de02c..e54b152415 100644 --- a/oximeter-client/Cargo.toml +++ b/oximeter-client/Cargo.toml @@ -6,6 +6,7 @@ license = "MPL-2.0" [dependencies] chrono.workspace = true +futures.workspace = true omicron-common.workspace = true progenitor.workspace = true reqwest = { workspace = true, features = ["json", "rustls-tls", "stream"] } diff --git a/oximeter/collector/Cargo.toml b/oximeter/collector/Cargo.toml index bc8cc19634..470d9db312 100644 --- a/oximeter/collector/Cargo.toml +++ b/oximeter/collector/Cargo.toml @@ -6,18 +6,25 @@ description = "The oximeter metric collection server" license = "MPL-2.0" [dependencies] +anyhow.workspace = true clap.workspace = true dropshot.workspace = true futures.workspace = true internal-dns.workspace = true nexus-client.workspace = true +nexus-types.workspace = true omicron-common.workspace = true oximeter.workspace = true +oximeter-client.workspace = true oximeter-db.workspace = true +rand.workspace = true reqwest = { workspace = true, features = [ "json" ] } +schemars.workspace = true serde.workspace = true slog.workspace = true +slog-async.workspace = true slog-dtrace.workspace = true +slog-term.workspace = true 
thiserror.workspace = true tokio.workspace = true toml.workspace = true diff --git a/oximeter/collector/src/bin/oximeter.rs b/oximeter/collector/src/bin/oximeter.rs index bf54cf33fa..8c4bf0e27c 100644 --- a/oximeter/collector/src/bin/oximeter.rs +++ b/oximeter/collector/src/bin/oximeter.rs @@ -3,12 +3,21 @@ // file, You can obtain one at https://mozilla.org/MPL/2.0/. //! Main entry point to run an `oximeter` server in the control plane. -// Copyright 2021 Oxide Computer Company + +// Copyright 2023 Oxide Computer Company use clap::Parser; use omicron_common::cmd::fatal; use omicron_common::cmd::CmdError; -use oximeter_collector::{oximeter_api, Config, Oximeter, OximeterArguments}; +use oximeter_collector::oximeter_api; +use oximeter_collector::standalone_nexus_api; +use oximeter_collector::Config; +use oximeter_collector::Oximeter; +use oximeter_collector::OximeterArguments; +use oximeter_collector::StandaloneNexus; +use slog::Level; +use std::net::Ipv6Addr; +use std::net::SocketAddr; use std::net::SocketAddrV6; use std::path::PathBuf; use uuid::Uuid; @@ -23,6 +32,16 @@ pub fn run_openapi() -> Result<(), String> { .map_err(|e| e.to_string()) } +pub fn run_standalone_openapi() -> Result<(), String> { + standalone_nexus_api() + .openapi("Oxide Nexus API", "0.0.1") + .description("API for interacting with Nexus") + .contact_url("https://oxide.computer") + .contact_email("api@oxide.computer") + .write(&mut std::io::stdout()) + .map_err(|e| e.to_string()) +} + /// Run an oximeter metric collection server in the Oxide Control Plane. #[derive(Parser)] #[clap(name = "oximeter", about = "See README.adoc for more information")] @@ -36,12 +55,71 @@ enum Args { #[clap(name = "CONFIG_FILE", action)] config_file: PathBuf, + /// The UUID for this instance of the `oximeter` collector. #[clap(short, long, action)] id: Uuid, + /// The socket address at which `oximeter`'s HTTP server runs. 
#[clap(short, long, action)] address: SocketAddrV6, }, + + /// Run `oximeter` in standalone mode for development. + /// + /// In this mode, `oximeter` can be used to test the collection of metrics + /// from producers, without requiring all the normal machinery of the + /// control plane. The collector is run as usual, but additionally starts a + /// API server to stand-in for Nexus. The registrations of the producers and + /// collectors occurs through the normal code path, but uses this mock Nexus + /// instead of the real thing. + Standalone { + /// The ID for the collector. + /// + /// Default is to generate a new, random UUID. + #[arg(long, default_value_t = Uuid::new_v4())] + id: Uuid, + + /// Address at which `oximeter` itself listens. + /// + /// This address can be used to register new producers, after the + /// program has already started. + #[arg( + long, + default_value_t = SocketAddrV6::new(Ipv6Addr::LOCALHOST, 12223, 0, 0) + )] + address: SocketAddrV6, + + /// The address for the mock Nexus server used to register. + /// + /// This program starts a mock version of Nexus, which is used only to + /// register the producers and collectors. This allows them to operate + /// as they usually would, registering each other with Nexus so that an + /// assignment between them can be made. + #[arg( + long, + default_value_t = SocketAddrV6::new(Ipv6Addr::LOCALHOST, 12221, 0, 0) + )] + nexus: SocketAddrV6, + + /// The address for ClickHouse. + /// + /// If not provided, `oximeter` will not attempt to insert records into + /// the database at all. In this mode, the program will print the + /// collected samples, instead of inserting them into the database. + #[arg(long)] + clickhouse: Option, + + /// The log-level. + #[arg(long, default_value_t = Level::Info, value_parser = parse_log_level)] + log_level: Level, + }, + + /// Print the fake Nexus's standalone API. 
+ StandaloneOpenapi, +} + +fn parse_log_level(s: &str) -> Result { + s.parse().map_err(|_| "Invalid log level".to_string()) } #[tokio::main] @@ -65,5 +143,26 @@ async fn do_run() -> Result<(), CmdError> { .await .map_err(|e| CmdError::Failure(e.to_string())) } + Args::Standalone { id, address, nexus, clickhouse, log_level } => { + // Start the standalone Nexus server, for registration of both the + // collector and producers. + let nexus_server = StandaloneNexus::new(nexus.into(), log_level) + .map_err(|e| CmdError::Failure(e.to_string()))?; + let args = OximeterArguments { id, address }; + Oximeter::new_standalone( + nexus_server.log(), + &args, + nexus_server.local_addr(), + clickhouse, + ) + .await + .unwrap() + .serve_forever() + .await + .map_err(|e| CmdError::Failure(e.to_string())) + } + Args::StandaloneOpenapi => { + run_standalone_openapi().map_err(CmdError::Failure) + } } } diff --git a/oximeter/collector/src/lib.rs b/oximeter/collector/src/lib.rs index bf75b567ea..6674d65ecd 100644 --- a/oximeter/collector/src/lib.rs +++ b/oximeter/collector/src/lib.rs @@ -4,35 +4,71 @@ //! Implementation of the `oximeter` metric collection server. 
-// Copyright 2021 Oxide Computer Company - -use dropshot::{ - endpoint, ApiDescription, ConfigDropshot, ConfigLogging, HttpError, - HttpResponseUpdatedNoContent, HttpServer, HttpServerStarter, - RequestContext, TypedBody, -}; -use internal_dns::resolver::{ResolveError, Resolver}; +// Copyright 2023 Oxide Computer Company + +use anyhow::anyhow; +use anyhow::Context; +use dropshot::endpoint; +use dropshot::ApiDescription; +use dropshot::ConfigDropshot; +use dropshot::ConfigLogging; +use dropshot::EmptyScanParams; +use dropshot::HttpError; +use dropshot::HttpResponseDeleted; +use dropshot::HttpResponseOk; +use dropshot::HttpResponseUpdatedNoContent; +use dropshot::HttpServer; +use dropshot::HttpServerStarter; +use dropshot::PaginationParams; +use dropshot::Query; +use dropshot::RequestContext; +use dropshot::ResultsPage; +use dropshot::TypedBody; +use dropshot::WhichPage; +use internal_dns::resolver::ResolveError; +use internal_dns::resolver::Resolver; use internal_dns::ServiceName; -use omicron_common::address::{CLICKHOUSE_PORT, NEXUS_INTERNAL_PORT}; +use omicron_common::address::CLICKHOUSE_PORT; +use omicron_common::address::NEXUS_INTERNAL_PORT; use omicron_common::api::internal::nexus::ProducerEndpoint; -use omicron_common::{backoff, FileKv}; -use oximeter::types::{ProducerResults, ProducerResultsItem}; -use oximeter_db::{Client, DbWrite}; -use serde::{Deserialize, Serialize}; -use slog::{debug, error, info, o, trace, warn, Drain, Logger}; -use std::collections::{btree_map::Entry, BTreeMap}; -use std::net::{SocketAddr, SocketAddrV6}; +use omicron_common::backoff; +use omicron_common::FileKv; +use oximeter::types::ProducerResults; +use oximeter::types::ProducerResultsItem; +use oximeter_db::Client; +use oximeter_db::DbWrite; +use serde::Deserialize; +use serde::Serialize; +use slog::debug; +use slog::error; +use slog::info; +use slog::o; +use slog::trace; +use slog::warn; +use slog::Drain; +use slog::Logger; +use std::collections::btree_map::Entry; +use 
std::collections::BTreeMap; +use std::net::SocketAddr; +use std::net::SocketAddrV6; +use std::ops::Bound; use std::path::Path; use std::sync::Arc; use std::time::Duration; use thiserror::Error; -use tokio::{ - sync::mpsc, sync::oneshot, sync::Mutex, task::JoinHandle, time::interval, -}; +use tokio::sync::mpsc; +use tokio::sync::oneshot; +use tokio::sync::Mutex; +use tokio::task::JoinHandle; +use tokio::time::interval; use uuid::Uuid; +mod standalone; +pub use standalone::standalone_nexus_api; +pub use standalone::Server as StandaloneNexus; + /// Errors collecting metric data -#[derive(Debug, Clone, Error)] +#[derive(Debug, Error)] pub enum Error { #[error("Error running Oximeter collector server: {0}")] Server(String), @@ -45,6 +81,48 @@ pub enum Error { #[error(transparent)] ResolveError(#[from] ResolveError), + + #[error("No producer is registered with ID")] + NoSuchProducer(Uuid), + + #[error("Error running standalone")] + Standalone(#[from] anyhow::Error), +} + +impl From for HttpError { + fn from(e: Error) -> Self { + match e { + Error::NoSuchProducer(id) => HttpError::for_not_found( + None, + format!("No such producer: {id}"), + ), + _ => HttpError::for_internal_error(e.to_string()), + } + } +} + +/// A simple representation of a producer, used mostly for standalone mode. +/// +/// These are usually specified as a structured string, formatted like: +/// `"@
"`. +#[derive(Copy, Clone, Debug)] +pub struct ProducerInfo { + /// The ID of the producer. + pub id: Uuid, + /// The address on which the producer listens. + pub address: SocketAddr, +} + +impl std::str::FromStr for ProducerInfo { + type Err = anyhow::Error; + fn from_str(s: &str) -> Result { + let (id, addr) = s + .split_once('@') + .context("Producer info should written as @
")?; + let id = id.parse().context("Invalid UUID")?; + let address = addr.parse().context("Invalid address")?; + Ok(Self { id, address }) + } } type CollectionToken = oneshot::Sender<()>; @@ -61,7 +139,6 @@ enum CollectionMessage { // from its producer. Update(ProducerEndpoint), // Request that the task exit - #[allow(dead_code)] Shutdown, } @@ -72,7 +149,7 @@ async fn perform_collection( outbox: &mpsc::Sender<(Option, ProducerResults)>, token: Option, ) { - info!(log, "collecting from producer"); + debug!(log, "collecting from producer"); let res = client .get(format!( "http://{}{}", @@ -187,6 +264,44 @@ struct CollectionTask { pub task: JoinHandle<()>, } +// A task run by `oximeter` in standalone mode, which simply prints results as +// they're received. +async fn results_printer( + log: Logger, + mut rx: mpsc::Receiver<(Option, ProducerResults)>, +) { + loop { + match rx.recv().await { + Some((_, results)) => { + for res in results.into_iter() { + match res { + ProducerResultsItem::Ok(samples) => { + for sample in samples.into_iter() { + info!( + log, + ""; + "sample" => ?sample, + ); + } + } + ProducerResultsItem::Err(e) => { + error!( + log, + "received error from a producer"; + "err" => ?e, + ); + } + } + } + } + None => { + debug!(log, "result queue closed, exiting"); + return; + } + } + } +} + // Aggregation point for all results, from all collection tasks. async fn results_sink( log: Logger, @@ -286,6 +401,20 @@ pub struct DbConfig { pub batch_interval: u64, } +impl DbConfig { + pub const DEFAULT_BATCH_SIZE: usize = 1000; + pub const DEFAULT_BATCH_INTERVAL: u64 = 5; + + // Construct config with an address, using the defaults for other fields + fn with_address(address: SocketAddr) -> Self { + Self { + address: Some(address), + batch_size: Self::DEFAULT_BATCH_SIZE, + batch_interval: Self::DEFAULT_BATCH_INTERVAL, + } + } +} + /// The internal agent the oximeter server uses to collect metrics from producers. 
#[derive(Debug)] pub struct OximeterAgent { @@ -295,7 +424,8 @@ pub struct OximeterAgent { // Handle to the TX-side of a channel for collecting results from the collection tasks result_sender: mpsc::Sender<(Option, ProducerResults)>, // The actual tokio tasks running the collection on a timer. - collection_tasks: Arc>>, + collection_tasks: + Arc>>, } impl OximeterAgent { @@ -307,7 +437,10 @@ impl OximeterAgent { log: &Logger, ) -> Result { let (result_sender, result_receiver) = mpsc::channel(8); - let log = log.new(o!("component" => "oximeter-agent", "collector_id" => id.to_string())); + let log = log.new(o!( + "component" => "oximeter-agent", + "collector_id" => id.to_string(), + )); let insertion_log = log.new(o!("component" => "results-sink")); // Construct the ClickHouse client first, propagate an error if we can't reach the @@ -347,6 +480,61 @@ impl OximeterAgent { }) } + /// Construct a new standalone `oximeter` collector. + pub async fn new_standalone( + id: Uuid, + db_config: Option, + log: &Logger, + ) -> Result { + let (result_sender, result_receiver) = mpsc::channel(8); + let log = log.new(o!( + "component" => "oximeter-standalone", + "collector_id" => id.to_string(), + )); + + // If we have configuration for ClickHouse, we'll spawn the results + // sink task as usual. If not, we'll spawn a dummy task that simply + // prints the results as they're received. 
+ let insertion_log = log.new(o!("component" => "results-sink")); + if let Some(db_config) = db_config { + let Some(address) = db_config.address else { + return Err(Error::Standalone(anyhow!( + "Must provide explicit IP address in standalone mode" + ))); + }; + let client = Client::new(address, &log); + let replicated = client.is_oximeter_cluster().await?; + if !replicated { + client.init_single_node_db().await?; + } else { + client.init_replicated_db().await?; + } + + // Spawn the task for aggregating and inserting all metrics + tokio::spawn(async move { + results_sink( + insertion_log, + client, + db_config.batch_size, + Duration::from_secs(db_config.batch_interval), + result_receiver, + ) + .await + }); + } else { + tokio::spawn(results_printer(insertion_log, result_receiver)); + } + + // Construct the ClickHouse client first, propagate an error if we can't reach the + // database. + Ok(Self { + id, + log, + result_sender, + collection_tasks: Arc::new(Mutex::new(BTreeMap::new())), + }) + } + /// Register a new producer with this oximeter instance. pub async fn register_producer( &self, @@ -355,30 +543,36 @@ impl OximeterAgent { let id = info.id; match self.collection_tasks.lock().await.entry(id) { Entry::Vacant(value) => { - info!(self.log, "registered new metric producer"; - "producer_id" => id.to_string(), - "address" => info.address, + debug!( + self.log, + "registered new metric producer"; + "producer_id" => id.to_string(), + "address" => info.address, ); // Build channel to control the task and receive results. 
let (tx, rx) = mpsc::channel(4); let q = self.result_sender.clone(); let log = self.log.new(o!("component" => "collection-task", "producer_id" => id.to_string())); + let info_clone = info.clone(); let task = tokio::spawn(async move { - collection_task(log, info, rx, q).await; + collection_task(log, info_clone, rx, q).await; }); - value.insert(CollectionTask { inbox: tx, task }); + value.insert((info, CollectionTask { inbox: tx, task })); } - Entry::Occupied(value) => { - info!( + Entry::Occupied(mut value) => { + debug!( self.log, - "received request to register existing metric producer, updating collection information"; + "received request to register existing metric \ + producer, updating collection information"; "producer_id" => id.to_string(), "interval" => ?info.interval, "address" => info.address, ); + value.get_mut().0 = info.clone(); value .get() + .1 .inbox .send(CollectionMessage::Update(info)) .await @@ -395,10 +589,10 @@ impl OximeterAgent { pub async fn force_collection(&self) { let mut collection_oneshots = vec![]; let collection_tasks = self.collection_tasks.lock().await; - for task in collection_tasks.iter() { + for (_id, (_endpoint, task)) in collection_tasks.iter() { let (tx, rx) = oneshot::channel(); // Scrape from each producer, into oximeter... - task.1.inbox.send(CollectionMessage::Collect(tx)).await.unwrap(); + task.inbox.send(CollectionMessage::Collect(tx)).await.unwrap(); // ... and keep track of the token that indicates once the metric // has made it into Clickhouse. collection_oneshots.push(rx); @@ -412,6 +606,55 @@ impl OximeterAgent { // successfully, or an error occurred in the collection pathway. futures::future::join_all(collection_oneshots).await; } + + /// List existing producers. 
+ pub async fn list_producers( + &self, + start_id: Option, + limit: usize, + ) -> Vec { + let start = if let Some(id) = start_id { + Bound::Excluded(id) + } else { + Bound::Unbounded + }; + self.collection_tasks + .lock() + .await + .range((start, Bound::Unbounded)) + .take(limit) + .map(|(_id, (info, _t))| info.clone()) + .collect() + } + + /// Delete a producer by ID, stopping its collection task. + pub async fn delete_producer(&self, id: Uuid) -> Result<(), Error> { + let (_info, task) = self + .collection_tasks + .lock() + .await + .remove(&id) + .ok_or_else(|| Error::NoSuchProducer(id))?; + debug!( + self.log, + "removed collection task from set"; + "producer_id" => %id, + ); + match task.inbox.send(CollectionMessage::Shutdown).await { + Ok(_) => debug!( + self.log, + "shut down collection task"; + "producer_id" => %id, + ), + Err(e) => error!( + self.log, + "failed to shut down collection task"; + "producer_id" => %id, + "error" => ?e, + ), + } + Ok(()) + } } /// Configuration used to initialize an oximeter server @@ -440,6 +683,7 @@ impl Config { } } +/// Arguments for running the `oximeter` collector. pub struct OximeterArguments { pub id: Uuid, pub address: SocketAddrV6, @@ -447,7 +691,7 @@ pub struct OximeterArguments { /// A server used to collect metrics from components in the control plane. pub struct Oximeter { - _agent: Arc, + agent: Arc, server: HttpServer>, } @@ -572,7 +816,67 @@ impl Oximeter { .expect("Expected an infinite retry loop contacting Nexus"); info!(log, "oximeter registered with nexus"; "id" => ?agent.id); - Ok(Self { _agent: agent, server }) + Ok(Self { agent, server }) + } + + /// Create a new `oximeter` collector running in standalone mode. 
+ pub async fn new_standalone( + log: &Logger, + args: &OximeterArguments, + nexus: SocketAddr, + clickhouse: Option, + ) -> Result { + let db_config = clickhouse.map(DbConfig::with_address); + let agent = Arc::new( + OximeterAgent::new_standalone(args.id, db_config, &log).await?, + ); + + let dropshot_log = log.new(o!("component" => "dropshot")); + let server = HttpServerStarter::new( + &ConfigDropshot { + bind_address: SocketAddr::V6(args.address), + ..Default::default() + }, + oximeter_api(), + Arc::clone(&agent), + &dropshot_log, + ) + .map_err(|e| Error::Server(e.to_string()))? + .start(); + info!(log, "started oximeter standalone server"); + + // Notify the standalone nexus. + let client = reqwest::Client::new(); + let notify_nexus = || async { + debug!(log, "contacting nexus"); + client + .post(format!("http://{}/metrics/collectors", nexus)) + .json(&nexus_client::types::OximeterInfo { + address: server.local_addr().to_string(), + collector_id: agent.id, + }) + .send() + .await + .map_err(|e| backoff::BackoffError::transient(e.to_string()))? + .error_for_status() + .map_err(|e| backoff::BackoffError::transient(e.to_string())) + }; + let log_notification_failure = |error, delay| { + warn!( + log, + "failed to contact nexus, will retry in {:?}", delay; + "error" => ?error + ); + }; + backoff::retry_notify( + backoff::retry_policy_internal_service(), + notify_nexus, + log_notification_failure, + ) + .await + .expect("Expected an infinite retry loop contacting Nexus"); + + Ok(Self { agent, server }) } /// Serve requests forever, consuming the server. @@ -592,6 +896,20 @@ impl Oximeter { pub async fn force_collect(&self) { self.server.app_private().force_collection().await } + + /// List producers. + pub async fn list_producers( + &self, + start: Option, + limit: usize, + ) -> Vec { + self.agent.list_producers(start, limit).await + } + + /// Delete a producer by ID, stopping its collection task. 
+ pub async fn delete_producer(&self, id: Uuid) -> Result<(), Error> { + self.agent.delete_producer(id).await + } } // Build the HTTP API internal to the control plane @@ -599,6 +917,12 @@ pub fn oximeter_api() -> ApiDescription> { let mut api = ApiDescription::new(); api.register(producers_post) .expect("Could not register producers_post API handler"); + api.register(producers_list) + .expect("Could not register producers_list API handler"); + api.register(producer_delete) + .expect("Could not register producers_delete API handler"); + api.register(collector_info) + .expect("Could not register collector_info API handler"); api } @@ -616,6 +940,79 @@ async fn producers_post( agent .register_producer(producer_info) .await - .map_err(|e| HttpError::for_internal_error(e.to_string()))?; - Ok(HttpResponseUpdatedNoContent()) + .map_err(HttpError::from) + .map(|_| HttpResponseUpdatedNoContent()) +} + +// Parameters for paginating the list of producers. +#[derive(Clone, Copy, Debug, Deserialize, schemars::JsonSchema, Serialize)] +struct ProducerPage { + id: Uuid, +} + +// List all producers +#[endpoint { + method = GET, + path = "/producers", +}] +async fn producers_list( + request_context: RequestContext>, + query: Query>, +) -> Result>, HttpError> { + let agent = request_context.context(); + let pagination = query.into_inner(); + let limit = request_context.page_limit(&pagination)?.get() as usize; + let start = match &pagination.page { + WhichPage::First(..) => None, + WhichPage::Next(ProducerPage { id }) => Some(*id), + }; + let producers = agent.list_producers(start, limit).await; + ResultsPage::new( + producers, + &EmptyScanParams {}, + |info: &ProducerEndpoint, _| ProducerPage { id: info.id }, + ) + .map(HttpResponseOk) +} + +#[derive(Clone, Copy, Debug, Deserialize, schemars::JsonSchema, Serialize)] +struct ProducerIdPathParams { + producer_id: Uuid, +} + +// Delete a producer by ID. 
+#[endpoint { + method = DELETE, + path = "/producers/{producer_id}", +}] +async fn producer_delete( + request_context: RequestContext>, + path: dropshot::Path, +) -> Result { + let agent = request_context.context(); + let producer_id = path.into_inner().producer_id; + agent + .delete_producer(producer_id) + .await + .map_err(HttpError::from) + .map(|_| HttpResponseDeleted()) +} + +#[derive(Clone, Copy, Debug, Deserialize, schemars::JsonSchema, Serialize)] +pub struct CollectorInfo { + /// The collector's UUID. + pub id: Uuid, +} + +// Return identifying information about this collector +#[endpoint { + method = GET, + path = "/info", +}] +async fn collector_info( + request_context: RequestContext>, +) -> Result, HttpError> { + let agent = request_context.context(); + let info = CollectorInfo { id: agent.id }; + Ok(HttpResponseOk(info)) } diff --git a/oximeter/collector/src/standalone.rs b/oximeter/collector/src/standalone.rs new file mode 100644 index 0000000000..826a5f4663 --- /dev/null +++ b/oximeter/collector/src/standalone.rs @@ -0,0 +1,263 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Implementation of a standalone fake Nexus, simply for registering producers +//! and collectors with one another. 
+ +// Copyright 2023 Oxide Computer Company + +use crate::Error; +use dropshot::endpoint; +use dropshot::ApiDescription; +use dropshot::ConfigDropshot; +use dropshot::HttpError; +use dropshot::HttpResponseUpdatedNoContent; +use dropshot::HttpServer; +use dropshot::HttpServerStarter; +use dropshot::RequestContext; +use dropshot::TypedBody; +use nexus_types::internal_api::params::OximeterInfo; +use omicron_common::api::internal::nexus::ProducerEndpoint; +use omicron_common::FileKv; +use oximeter_client::Client; +use rand::seq::IteratorRandom; +use slog::debug; +use slog::error; +use slog::info; +use slog::o; +use slog::Drain; +use slog::Level; +use slog::Logger; +use std::collections::HashMap; +use std::net::SocketAddr; +use std::sync::Arc; +use tokio::sync::Mutex; +use uuid::Uuid; + +// An assignment of a producer to an oximeter collector. +#[derive(Debug)] +struct ProducerAssignment { + producer: ProducerEndpoint, + collector_id: Uuid, +} + +#[derive(Debug)] +struct Inner { + // Map of producers by ID to their information and assigned oximeter + // collector. + producers: HashMap, + // Map of available oximeter collectors. + collectors: HashMap, +} + +impl Inner { + fn random_collector(&self) -> Option<(Uuid, OximeterInfo)> { + self.collectors + .iter() + .choose(&mut rand::thread_rng()) + .map(|(id, info)| (*id, *info)) + } +} + +// A stripped-down Nexus server, with only the APIs for registering metric +// producers and collectors. +#[derive(Debug)] +pub struct StandaloneNexus { + pub log: Logger, + inner: Mutex, +} + +impl StandaloneNexus { + fn new(log: Logger) -> Self { + Self { + log, + inner: Mutex::new(Inner { + producers: HashMap::new(), + collectors: HashMap::new(), + }), + } + } + + async fn register_producer( + &self, + info: &ProducerEndpoint, + ) -> Result<(), HttpError> { + let mut inner = self.inner.lock().await; + let assignment = match inner.producers.get_mut(&info.id) { + None => { + // There is no record for this producer. 
+ // + // Select a random collector, and assign it to the producer. + // We'll return the assignment from this match block. + let Some((collector_id, collector_info)) = + inner.random_collector() + else { + return Err(HttpError::for_unavail( + None, + String::from("No collectors available"), + )); + }; + let client = Client::new( + format!("http://{}", collector_info.address).as_str(), + self.log.clone(), + ); + client.producers_post(&info.into()).await.map_err(|e| { + HttpError::for_internal_error(e.to_string()) + })?; + let assignment = + ProducerAssignment { producer: info.clone(), collector_id }; + assignment + } + Some(existing_assignment) => { + // We have a record, first check if it matches the assignment we + // have. + if &existing_assignment.producer == info { + return Ok(()); + } + + // This appears to be a re-registration, e.g., the producer + // changed its IP address. Re-register it with the collector to + // which it's already assigned. + let collector_id = existing_assignment.collector_id; + let collector_info = + inner.collectors.get(&collector_id).unwrap(); + let client = Client::new( + format!("http://{}", collector_info.address).as_str(), + self.log.clone(), + ); + client.producers_post(&info.into()).await.map_err(|e| { + HttpError::for_internal_error(e.to_string()) + })?; + ProducerAssignment { producer: info.clone(), collector_id } + } + }; + inner.producers.insert(info.id, assignment); + Ok(()) + } + + async fn register_collector( + &self, + info: OximeterInfo, + ) -> Result<(), HttpError> { + // If this is being registered again, send all its assignments again. 
+ let mut inner = self.inner.lock().await; + if inner.collectors.insert(info.collector_id, info).is_some() { + let client = Client::new( + format!("http://{}", info.address).as_str(), + self.log.clone(), + ); + for producer_info in + inner.producers.values().filter_map(|assignment| { + if assignment.collector_id == info.collector_id { + Some(&assignment.producer) + } else { + None + } + }) + { + client.producers_post(&producer_info.into()).await.map_err( + |e| HttpError::for_internal_error(e.to_string()), + )?; + } + } + Ok(()) + } +} + +// Build the HTTP API of the fake Nexus for registration. +pub fn standalone_nexus_api() -> ApiDescription> { + let mut api = ApiDescription::new(); + api.register(cpapi_producers_post) + .expect("Could not register cpapi_producers_post API handler"); + api.register(cpapi_collectors_post) + .expect("Could not register cpapi_collectors_post API handler"); + api +} + +/// Accept a registration from a new metric producer +#[endpoint { + method = POST, + path = "/metrics/producers", + }] +async fn cpapi_producers_post( + request_context: RequestContext>, + producer_info: TypedBody, +) -> Result { + let context = request_context.context(); + let producer_info = producer_info.into_inner(); + context + .register_producer(&producer_info) + .await + .map(|_| HttpResponseUpdatedNoContent()) + .map_err(|e| HttpError::for_internal_error(e.to_string())) +} + +/// Accept a notification of a new oximeter collection server. 
+#[endpoint { + method = POST, + path = "/metrics/collectors", + }] +async fn cpapi_collectors_post( + request_context: RequestContext>, + oximeter_info: TypedBody, +) -> Result { + let context = request_context.context(); + let oximeter_info = oximeter_info.into_inner(); + context + .register_collector(oximeter_info) + .await + .map(|_| HttpResponseUpdatedNoContent()) + .map_err(|e| HttpError::for_internal_error(e.to_string())) +} + +/// A standalone Nexus server, with APIs only for registering metric collectors +/// and producers. +pub struct Server { + server: HttpServer>, +} + +impl Server { + /// Create a new server listening on the provided address. + pub fn new(address: SocketAddr, log_level: Level) -> Result { + let decorator = slog_term::TermDecorator::new().build(); + let drain = slog_term::FullFormat::new(decorator).build().fuse(); + let drain = slog_async::Async::new(drain).build().fuse(); + let drain = slog::LevelFilter::new(drain, log_level).fuse(); + let (drain, registration) = slog_dtrace::with_drain(drain); + let log = slog::Logger::root(drain.fuse(), o!(FileKv)); + if let slog_dtrace::ProbeRegistration::Failed(e) = registration { + let msg = format!("failed to register DTrace probes: {}", e); + error!(log, "{}", msg); + return Err(Error::Server(msg)); + } else { + debug!(log, "registered DTrace probes"); + } + + let nexus = Arc::new(StandaloneNexus::new( + log.new(slog::o!("component" => "nexus-standalone")), + )); + let server = HttpServerStarter::new( + &ConfigDropshot { bind_address: address, ..Default::default() }, + standalone_nexus_api(), + Arc::clone(&nexus), + &log, + ) + .map_err(|e| Error::Server(e.to_string()))? 
+ .start(); + info!( + log, + "created standalone nexus server for metric collections"; + "address" => %address, + ); + Ok(Self { server }) + } + + pub fn log(&self) -> &Logger { + &self.server.app_private().log + } + + pub fn local_addr(&self) -> SocketAddr { + self.server.local_addr() + } +} diff --git a/oximeter/collector/tests/output/cmd-oximeter-noargs-stderr b/oximeter/collector/tests/output/cmd-oximeter-noargs-stderr index 7b736fe8a1..3f0fd4726d 100644 --- a/oximeter/collector/tests/output/cmd-oximeter-noargs-stderr +++ b/oximeter/collector/tests/output/cmd-oximeter-noargs-stderr @@ -3,9 +3,11 @@ See README.adoc for more information Usage: oximeter Commands: - openapi Print the external OpenAPI Spec document and exit - run Start an Oximeter server - help Print this message or the help of the given subcommand(s) + openapi Print the external OpenAPI Spec document and exit + run Start an Oximeter server + standalone Run `oximeter` in standalone mode for development + standalone-openapi Print the fake Nexus's standalone API + help Print this message or the help of the given subcommand(s) Options: -h, --help Print help diff --git a/oximeter/producer/Cargo.toml b/oximeter/producer/Cargo.toml index 3f74ba753f..ef2f16c8ad 100644 --- a/oximeter/producer/Cargo.toml +++ b/oximeter/producer/Cargo.toml @@ -20,3 +20,7 @@ tokio.workspace = true thiserror.workspace = true uuid.workspace = true omicron-workspace-hack.workspace = true + +[dev-dependencies] +anyhow.workspace = true +clap.workspace = true diff --git a/oximeter/producer/examples/producer.rs b/oximeter/producer/examples/producer.rs index 9ff30032ca..dd9722c80a 100644 --- a/oximeter/producer/examples/producer.rs +++ b/oximeter/producer/examples/producer.rs @@ -6,14 +6,17 @@ // Copyright 2023 Oxide Computer Company +use anyhow::Context; use chrono::DateTime; use chrono::Utc; +use clap::Parser; use dropshot::ConfigDropshot; use dropshot::ConfigLogging; use dropshot::ConfigLoggingLevel; use dropshot::HandlerTaskMode; 
use omicron_common::api::internal::nexus::ProducerEndpoint; use oximeter::types::Cumulative; +use oximeter::types::ProducerRegistry; use oximeter::types::Sample; use oximeter::Metric; use oximeter::MetricsError; @@ -22,9 +25,22 @@ use oximeter::Target; use oximeter_producer::Config; use oximeter_producer::LogConfig; use oximeter_producer::Server; +use std::net::SocketAddr; use std::time::Duration; use uuid::Uuid; +/// Run an example oximeter metric producer. +#[derive(Parser)] +struct Args { + /// The address to use for the producer server. + #[arg(long, default_value = "[::1]:0")] + address: SocketAddr, + + /// The address of nexus at which to register. + #[arg(long, default_value = "[::1]:12221")] + nexus: SocketAddr, +} + /// Example target describing a virtual machine. #[derive(Debug, Clone, Target)] pub struct VirtualMachine { @@ -93,30 +109,29 @@ impl Producer for CpuBusyProducer { } #[tokio::main] -async fn main() { - let address = "[::1]:0".parse().unwrap(); +async fn main() -> anyhow::Result<()> { + let args = Args::parse(); let dropshot = ConfigDropshot { - bind_address: address, + bind_address: args.address, request_body_max_bytes: 2048, default_handler_task_mode: HandlerTaskMode::Detached, }; let log = LogConfig::Config(ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Debug, }); + let registry = ProducerRegistry::new(); + let producer = CpuBusyProducer::new(4); + registry.register_producer(producer).unwrap(); let server_info = ProducerEndpoint { - id: Uuid::new_v4(), - address, + id: registry.producer_id(), + address: args.address, base_route: "/collect".to_string(), interval: Duration::from_secs(10), }; - let config = Config { - server_info, - registration_address: "[::1]:12221".parse().unwrap(), - dropshot, - log, - }; - let server = Server::start(&config).await.unwrap(); - let producer = CpuBusyProducer::new(4); - server.registry().register_producer(producer).unwrap(); - server.serve_forever().await.unwrap(); + let config = + Config { 
server_info, registration_address: args.nexus, dropshot, log }; + let server = Server::with_registry(registry, &config) + .await + .context("failed to create producer")?; + server.serve_forever().await.context("server failed") } diff --git a/oximeter/producer/src/lib.rs b/oximeter/producer/src/lib.rs index 01910af8e8..2354f9c217 100644 --- a/oximeter/producer/src/lib.rs +++ b/oximeter/producer/src/lib.rs @@ -40,6 +40,9 @@ pub enum Error { #[error("Error registering as metric producer: {0}")] RegistrationError(String), + + #[error("Producer registry and config UUIDs do not match")] + UuidMismatch, } /// Either configuration for building a logger, or an actual logger already @@ -82,14 +85,59 @@ impl Server { /// Start a new metric server, registering it with the chosen endpoint, and listening for /// requests on the associated address and route. pub async fn start(config: &Config) -> Result { - // Clone mutably, as we may update the address after the server starts, see below. - let mut config = config.clone(); + Self::with_registry( + ProducerRegistry::with_id(config.server_info.id), + &config, + ) + .await + } + + /// Create a new metric producer server, with an existing registry. + pub async fn with_registry( + registry: ProducerRegistry, + config: &Config, + ) -> Result { + Self::new_impl( + registry, + config.server_info.clone(), + &config.registration_address, + &config.dropshot, + &config.log, + ) + .await + } + + /// Serve requests for metrics. + pub async fn serve_forever(self) -> Result<(), Error> { + self.server.await.map_err(Error::Server) + } + + /// Close the server + pub async fn close(self) -> Result<(), Error> { + self.server.close().await.map_err(Error::Server) + } + + /// Return the [`ProducerRegistry`] managed by this server. + /// + /// The registry is thread-safe and clonable, so the returned reference can be used throughout + /// an application to register types implementing the [`Producer`](oximeter::traits::Producer) + /// trait. 
The samples generated by the registered producers will be included in response to a + /// request on the collection endpoint. + pub fn registry(&self) -> &ProducerRegistry { + &self.registry + } + + /// Return the server's local listening address + pub fn address(&self) -> std::net::SocketAddr { + self.server.local_addr() + } + fn build_logger(log: &LogConfig) -> Result { // Build a logger, either using the configuration or actual logger // provided. First build the base logger from the configuration or a // clone of the provided logger, and then add the DTrace and Dropshot // loggers on top of it. - let base_logger = match config.log { + let base_logger = match log { LogConfig::Config(conf) => conf .to_logger("metric-server") .map_err(|msg| Error::Server(msg.to_string()))?, @@ -104,74 +152,64 @@ impl Server { } else { debug!(log, "registered DTrace probes"); } - let dropshot_log = log.new(o!("component" => "dropshot")); + Ok(log) + } - // Build the producer registry and server that uses it as its context. - let registry = ProducerRegistry::with_id(config.server_info.id); - let server = HttpServerStarter::new( - &config.dropshot, + fn build_dropshot_server( + log: &Logger, + registry: &ProducerRegistry, + dropshot: &ConfigDropshot, + ) -> Result, Error> { + let dropshot_log = log.new(o!("component" => "dropshot")); + HttpServerStarter::new( + dropshot, metric_server_api(), registry.clone(), &dropshot_log, ) - .map_err(|e| Error::Server(e.to_string()))? - .start(); - - // Client code may decide to assign a specific address and/or port, or to listen on any - // available address and port, assigned by the OS. For example, `[::1]:0` would assign any - // port on localhost. If needed, update the address in the `ProducerEndpoint` with the - // actual address the server has bound. - // - // TODO-robustness: Is there a better way to do this? We'd like to support users picking an - // exact address or using whatever's available. 
The latter is useful during tests or other - // situations in which we don't know which ports are available. - if config.server_info.address != server.local_addr() { - assert_eq!(config.server_info.address.port(), 0); + .map_err(|e| Error::Server(e.to_string())) + .map(HttpServerStarter::start) + } + + // Create a new server registering with Nexus. + async fn new_impl( + registry: ProducerRegistry, + mut server_info: ProducerEndpoint, + registration_address: &SocketAddr, + dropshot: &ConfigDropshot, + log: &LogConfig, + ) -> Result { + if registry.producer_id() != server_info.id { + return Err(Error::UuidMismatch); + } + let log = Self::build_logger(log)?; + let server = Self::build_dropshot_server(&log, ®istry, dropshot)?; + + // Update the producer endpoint address with the actual server's + // address, to handle cases where client listens on any available + // address. + if server_info.address != server.local_addr() { + assert_eq!(server_info.address.port(), 0); debug!( log, "Requested any available port, Dropshot server has been bound to {}", server.local_addr(), ); - config.server_info.address = server.local_addr(); + server_info.address = server.local_addr(); } debug!(log, "registering metric server as a producer"); - register(config.registration_address, &log, &config.server_info) - .await?; + register(*registration_address, &log, &server_info).await?; info!( log, - "starting oximeter metric server"; - "route" => config.server_info.collection_route(), + "starting oximeter metric producer server"; + "route" => server_info.collection_route(), "producer_id" => ?registry.producer_id(), - "address" => config.server_info.address, + "address" => server.local_addr(), + "interval" => ?server_info.interval, ); Ok(Self { registry, server }) } - - /// Serve requests for metrics. 
- pub async fn serve_forever(self) -> Result<(), Error> { - self.server.await.map_err(Error::Server) - } - - /// Close the server - pub async fn close(self) -> Result<(), Error> { - self.server.close().await.map_err(Error::Server) - } - - /// Return the [`ProducerRegistry`] managed by this server. - /// - /// The registry is thread-safe and clonable, so the returned reference can be used throughout - /// an application to register types implementing the [`Producer`](oximeter::traits::Producer) - /// trait. The samples generated by the registered producers will be included in response to a - /// request on the collection endpoint. - pub fn registry(&self) -> &ProducerRegistry { - &self.registry - } - - /// Return the server's local listening address - pub fn address(&self) -> std::net::SocketAddr { - self.server.local_addr() - } } // Register API endpoints of the `Server`. From bb4e0cc64814d8ed6b43bfc20301abd0adad5b5c Mon Sep 17 00:00:00 2001 From: Alan Hanson Date: Wed, 4 Oct 2023 18:00:49 -0700 Subject: [PATCH 19/35] Update Propolis and Crucible to latest (#4195) Crucible updates all Crucible connections should set TCP_NODELAY (#983) Use a fixed size for tag and nonce (#957) Log crucible opts on start, order crutest options (#974) Lock the Downstairs less (#966) Cache dirty flag locally, reducing SQLite operations (#970) Make stats mutex synchronous (#961) Optimize requeue during flow control conditions (#962) Update Rust crate base64 to 0.21.4 (#950) Do less in control (#949) Fix --flush-per-blocks (#959) Fast dependency checking (#916) Update actions/checkout action to v4 (#960) Use `cargo hakari` for better workspace deps (#956) Update actions/checkout digest to 8ade135 (#939) Cache block size in Guest (#947) Update Rust crate ringbuffer to 0.15.0 (#954) Update Rust crate toml to 0.8 (#955) Update Rust crate reedline to 0.24.0 (#953) Update Rust crate libc to 0.2.148 (#952) Update Rust crate indicatif to 0.17.7 (#951) Remove unused async (#943) Use a 
synchronous mutex for bw/iop_tokens (#946) Make flush ID non-locking (#945) Use `oneshot` channels instead of `mpsc` for notification (#918) Use a strong type for upstairs negotiation (#941) Add a "dynamometer" option to crucible-downstairs (#931) Get new work and active count in one lock (#938) A bunch of misc test cleanup stuff (#937) Wait for a snapshot to finish on all downstairs (#920) dsc and clippy cleanup. (#935) No need to sort ackable_work (#934) Use a strong type for repair ID (#928) Keep new jobs sorted (#929) Remove state_count function on Downstairs (#927) Small cleanup to IOStateCount (#932) let cmon and IOStateCount use ClientId (#930) Fast return for zero length IOs (#926) Use a strong type for client ID (#925) A few Crucible Agent fixes (#922) Use a newtype for `JobId` (#919) Don't pass MutexGuard into functions (#917) Crutest updates, rename tests, new options (#911) Propolis updates Update tungstenite crates to 0.20 Use `strum` crate for enum-related utilities Wire up bits for CPUID customization PHD: improve artifact store (#529) Revert abort-on-panic in 'dev' cargo profile --------- Co-authored-by: Alan Hanson --- Cargo.lock | 220 ++++++++++++++++++++----------- Cargo.toml | 14 +- package-manifest.toml | 12 +- sled-agent/src/sim/sled_agent.rs | 10 +- workspace-hack/Cargo.toml | 32 +++-- 5 files changed, 182 insertions(+), 106 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b931918b9c..27a165c307 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -67,6 +67,17 @@ dependencies = [ "zeroize", ] +[[package]] +name = "ahash" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" +dependencies = [ + "getrandom 0.2.10", + "once_cell", + "version_check", +] + [[package]] name = "ahash" version = "0.8.3" @@ -474,20 +485,20 @@ dependencies = [ [[package]] name = "bhyve_api" version = "0.0.0" -source = 
"git+https://github.com/oxidecomputer/propolis?rev=de6369aa45a255f896da0a3ddd2b7152c036a4e9#de6369aa45a255f896da0a3ddd2b7152c036a4e9" +source = "git+https://github.com/oxidecomputer/propolis?rev=42c878b71a58d430dfc306126af5d40ca816d70f#42c878b71a58d430dfc306126af5d40ca816d70f" dependencies = [ "bhyve_api_sys", "libc", - "num_enum 0.5.11", + "strum", ] [[package]] name = "bhyve_api_sys" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=de6369aa45a255f896da0a3ddd2b7152c036a4e9#de6369aa45a255f896da0a3ddd2b7152c036a4e9" +source = "git+https://github.com/oxidecomputer/propolis?rev=42c878b71a58d430dfc306126af5d40ca816d70f#42c878b71a58d430dfc306126af5d40ca816d70f" dependencies = [ "libc", - "num_enum 0.5.11", + "strum", ] [[package]] @@ -1211,6 +1222,18 @@ dependencies = [ "libc", ] +[[package]] +name = "cpuid_profile_config" +version = "0.0.0" +source = "git+https://github.com/oxidecomputer/propolis?rev=42c878b71a58d430dfc306126af5d40ca816d70f#42c878b71a58d430dfc306126af5d40ca816d70f" +dependencies = [ + "propolis", + "serde", + "serde_derive", + "thiserror", + "toml 0.7.8", +] + [[package]] name = "crc" version = "3.0.1" @@ -1413,7 +1436,7 @@ dependencies = [ [[package]] name = "crucible" version = "0.0.1" -source = "git+https://github.com/oxidecomputer/crucible?rev=aeb69dda26c7e1a8b6eada425670cd4b83f91c07#aeb69dda26c7e1a8b6eada425670cd4b83f91c07" +source = "git+https://github.com/oxidecomputer/crucible?rev=20273bcca1fd5834ebc3e67dfa7020f0e99ad681#20273bcca1fd5834ebc3e67dfa7020f0e99ad681" dependencies = [ "aes-gcm-siv", "anyhow", @@ -1447,17 +1470,18 @@ dependencies = [ "tokio", "tokio-rustls", "tokio-util", - "toml 0.7.8", + "toml 0.8.0", "tracing", "usdt", "uuid", "version_check", + "workspace-hack", ] [[package]] name = "crucible-agent-client" version = "0.0.1" -source = "git+https://github.com/oxidecomputer/crucible?rev=aeb69dda26c7e1a8b6eada425670cd4b83f91c07#aeb69dda26c7e1a8b6eada425670cd4b83f91c07" +source = 
"git+https://github.com/oxidecomputer/crucible?rev=20273bcca1fd5834ebc3e67dfa7020f0e99ad681#20273bcca1fd5834ebc3e67dfa7020f0e99ad681" dependencies = [ "anyhow", "chrono", @@ -1467,24 +1491,26 @@ dependencies = [ "schemars", "serde", "serde_json", + "workspace-hack", ] [[package]] name = "crucible-client-types" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/crucible?rev=aeb69dda26c7e1a8b6eada425670cd4b83f91c07#aeb69dda26c7e1a8b6eada425670cd4b83f91c07" +source = "git+https://github.com/oxidecomputer/crucible?rev=20273bcca1fd5834ebc3e67dfa7020f0e99ad681#20273bcca1fd5834ebc3e67dfa7020f0e99ad681" dependencies = [ "base64 0.21.4", "schemars", "serde", "serde_json", "uuid", + "workspace-hack", ] [[package]] name = "crucible-common" version = "0.0.1" -source = "git+https://github.com/oxidecomputer/crucible?rev=aeb69dda26c7e1a8b6eada425670cd4b83f91c07#aeb69dda26c7e1a8b6eada425670cd4b83f91c07" +source = "git+https://github.com/oxidecomputer/crucible?rev=20273bcca1fd5834ebc3e67dfa7020f0e99ad681#20273bcca1fd5834ebc3e67dfa7020f0e99ad681" dependencies = [ "anyhow", "atty", @@ -1502,16 +1528,17 @@ dependencies = [ "tempfile", "thiserror", "tokio-rustls", - "toml 0.7.8", + "toml 0.8.0", "twox-hash", "uuid", "vergen", + "workspace-hack", ] [[package]] name = "crucible-pantry-client" version = "0.0.1" -source = "git+https://github.com/oxidecomputer/crucible?rev=aeb69dda26c7e1a8b6eada425670cd4b83f91c07#aeb69dda26c7e1a8b6eada425670cd4b83f91c07" +source = "git+https://github.com/oxidecomputer/crucible?rev=20273bcca1fd5834ebc3e67dfa7020f0e99ad681#20273bcca1fd5834ebc3e67dfa7020f0e99ad681" dependencies = [ "anyhow", "chrono", @@ -1522,32 +1549,36 @@ dependencies = [ "serde", "serde_json", "uuid", + "workspace-hack", ] [[package]] name = "crucible-protocol" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/crucible?rev=aeb69dda26c7e1a8b6eada425670cd4b83f91c07#aeb69dda26c7e1a8b6eada425670cd4b83f91c07" +source = 
"git+https://github.com/oxidecomputer/crucible?rev=20273bcca1fd5834ebc3e67dfa7020f0e99ad681#20273bcca1fd5834ebc3e67dfa7020f0e99ad681" dependencies = [ "anyhow", "bincode", "bytes", "crucible-common", "num_enum 0.7.0", + "schemars", "serde", "tokio-util", "uuid", + "workspace-hack", ] [[package]] name = "crucible-smf" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/crucible?rev=aeb69dda26c7e1a8b6eada425670cd4b83f91c07#aeb69dda26c7e1a8b6eada425670cd4b83f91c07" +source = "git+https://github.com/oxidecomputer/crucible?rev=20273bcca1fd5834ebc3e67dfa7020f0e99ad681#20273bcca1fd5834ebc3e67dfa7020f0e99ad681" dependencies = [ "libc", "num-derive", "num-traits", "thiserror", + "workspace-hack", ] [[package]] @@ -1985,10 +2016,10 @@ checksum = "7e1a8646b2c125eeb9a84ef0faa6d2d102ea0d5da60b824ade2743263117b848" [[package]] name = "dladm" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=de6369aa45a255f896da0a3ddd2b7152c036a4e9#de6369aa45a255f896da0a3ddd2b7152c036a4e9" +source = "git+https://github.com/oxidecomputer/propolis?rev=42c878b71a58d430dfc306126af5d40ca816d70f#42c878b71a58d430dfc306126af5d40ca816d70f" dependencies = [ "libc", - "num_enum 0.5.11", + "strum", ] [[package]] @@ -2322,26 +2353,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "enum-iterator" -version = "1.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7add3873b5dd076766ee79c8e406ad1a472c385476b9e38849f8eec24f1be689" -dependencies = [ - "enum-iterator-derive", -] - -[[package]] -name = "enum-iterator-derive" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eecf8589574ce9b895052fa12d69af7a233f99e6107f5cb8dd1044f2a17bfdcb" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.32", -] - [[package]] name = "env_logger" version = "0.9.3" @@ -2965,6 +2976,9 @@ name = "hashbrown" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +dependencies = [ + "ahash 0.7.6", +] [[package]] name = "hashbrown" @@ -2972,7 +2986,7 @@ version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" dependencies = [ - "ahash", + "ahash 0.8.3", ] [[package]] @@ -2981,7 +2995,7 @@ version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" dependencies = [ - "ahash", + "ahash 0.8.3", "allocator-api2", ] @@ -5372,6 +5386,7 @@ dependencies = [ "futures", "futures-channel", "futures-core", + "futures-executor", "futures-io", "futures-sink", "futures-task", @@ -5379,11 +5394,13 @@ dependencies = [ "gateway-messages", "generic-array", "getrandom 0.2.10", + "hashbrown 0.12.3", "hashbrown 0.13.2", "hashbrown 0.14.0", "hex", "hyper", "hyper-rustls", + "indexmap 1.9.3", "indexmap 2.0.0", "inout", "ipnetwork", @@ -5401,7 +5418,9 @@ dependencies = [ "num-traits", "once_cell", "openapiv3", + "parking_lot 0.12.1", "petgraph", + "phf_shared 0.11.2", "postgres-types", "ppv-lite86", "predicates 3.0.3", @@ -5435,6 +5454,7 @@ dependencies = [ "toml_datetime", "toml_edit 0.19.15", "tracing", + "tracing-core", "trust-dns-proto", "unicode-bidi", "unicode-normalization", @@ -6568,7 +6588,7 @@ dependencies = [ [[package]] name = "propolis" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=de6369aa45a255f896da0a3ddd2b7152c036a4e9#de6369aa45a255f896da0a3ddd2b7152c036a4e9" +source = "git+https://github.com/oxidecomputer/propolis?rev=42c878b71a58d430dfc306126af5d40ca816d70f#42c878b71a58d430dfc306126af5d40ca816d70f" dependencies = [ "anyhow", "bhyve_api", @@ -6583,7 +6603,6 @@ dependencies = [ "lazy_static", "libc", "nexus-client 0.1.0 (git+https://github.com/oxidecomputer/omicron?branch=main)", - "num_enum 0.5.11", "oximeter 0.1.0 
(git+https://github.com/oxidecomputer/omicron?branch=main)", "propolis_types", "rfb", @@ -6591,6 +6610,7 @@ dependencies = [ "serde_arrays", "serde_json", "slog", + "strum", "thiserror", "tokio", "usdt", @@ -6601,7 +6621,7 @@ dependencies = [ [[package]] name = "propolis-client" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=de6369aa45a255f896da0a3ddd2b7152c036a4e9#de6369aa45a255f896da0a3ddd2b7152c036a4e9" +source = "git+https://github.com/oxidecomputer/propolis?rev=42c878b71a58d430dfc306126af5d40ca816d70f#42c878b71a58d430dfc306126af5d40ca816d70f" dependencies = [ "async-trait", "base64 0.21.4", @@ -6618,14 +6638,14 @@ dependencies = [ "slog", "thiserror", "tokio", - "tokio-tungstenite 0.17.2", + "tokio-tungstenite 0.20.1", "uuid", ] [[package]] name = "propolis-server" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=de6369aa45a255f896da0a3ddd2b7152c036a4e9#de6369aa45a255f896da0a3ddd2b7152c036a4e9" +source = "git+https://github.com/oxidecomputer/propolis?rev=42c878b71a58d430dfc306126af5d40ca816d70f#42c878b71a58d430dfc306126af5d40ca816d70f" dependencies = [ "anyhow", "async-trait", @@ -6640,7 +6660,6 @@ dependencies = [ "const_format", "crucible-client-types", "dropshot", - "enum-iterator", "erased-serde", "futures", "http", @@ -6648,7 +6667,6 @@ dependencies = [ "internal-dns 0.1.0 (git+https://github.com/oxidecomputer/omicron?branch=main)", "lazy_static", "nexus-client 0.1.0 (git+https://github.com/oxidecomputer/omicron?branch=main)", - "num_enum 0.5.11", "omicron-common 0.1.0 (git+https://github.com/oxidecomputer/omicron?branch=main)", "oximeter 0.1.0 (git+https://github.com/oxidecomputer/omicron?branch=main)", "oximeter-producer 0.1.0 (git+https://github.com/oxidecomputer/omicron?branch=main)", @@ -6666,9 +6684,10 @@ dependencies = [ "slog-bunyan", "slog-dtrace", "slog-term", + "strum", "thiserror", "tokio", - "tokio-tungstenite 0.17.2", + "tokio-tungstenite 0.20.1", "tokio-util", "toml 0.7.8", 
"usdt", @@ -6678,8 +6697,9 @@ dependencies = [ [[package]] name = "propolis-server-config" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=de6369aa45a255f896da0a3ddd2b7152c036a4e9#de6369aa45a255f896da0a3ddd2b7152c036a4e9" +source = "git+https://github.com/oxidecomputer/propolis?rev=42c878b71a58d430dfc306126af5d40ca816d70f#42c878b71a58d430dfc306126af5d40ca816d70f" dependencies = [ + "cpuid_profile_config", "serde", "serde_derive", "thiserror", @@ -6689,7 +6709,7 @@ dependencies = [ [[package]] name = "propolis_types" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=de6369aa45a255f896da0a3ddd2b7152c036a4e9#de6369aa45a255f896da0a3ddd2b7152c036a4e9" +source = "git+https://github.com/oxidecomputer/propolis?rev=42c878b71a58d430dfc306126af5d40ca816d70f#42c878b71a58d430dfc306126af5d40ca816d70f" dependencies = [ "schemars", "serde", @@ -7156,9 +7176,9 @@ dependencies = [ [[package]] name = "ringbuffer" -version = "0.14.2" +version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4eba9638e96ac5a324654f8d47fb71c5e21abef0f072740ed9c1d4b0801faa37" +checksum = "3df6368f71f205ff9c33c076d170dd56ebf68e8161c733c0caa07a7a5509ed53" [[package]] name = "ron" @@ -7920,17 +7940,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "sha-1" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5058ada175748e33390e40e872bd0fe59a19f265d0158daa551c5a88a76009c" -dependencies = [ - "cfg-if 1.0.0", - "cpufeatures", - "digest", -] - [[package]] name = "sha1" version = "0.10.5" @@ -9031,26 +9040,26 @@ dependencies = [ [[package]] name = "tokio-tungstenite" -version = "0.17.2" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f714dd15bead90401d77e04243611caec13726c2408afd5b31901dfcdcb3b181" +checksum = "54319c93411147bced34cb5609a80e0a8e44c5999c93903a81cd866630ec0bfd" dependencies = [ "futures-util", 
"log", "tokio", - "tungstenite 0.17.3", + "tungstenite 0.18.0", ] [[package]] name = "tokio-tungstenite" -version = "0.18.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54319c93411147bced34cb5609a80e0a8e44c5999c93903a81cd866630ec0bfd" +checksum = "212d5dcb2a1ce06d81107c3d0ffa3121fe974b73f068c8282cb1c32328113b6c" dependencies = [ "futures-util", "log", "tokio", - "tungstenite 0.18.0", + "tungstenite 0.20.1", ] [[package]] @@ -9218,6 +9227,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" dependencies = [ "once_cell", + "valuable", ] [[package]] @@ -9399,9 +9409,9 @@ dependencies = [ [[package]] name = "tungstenite" -version = "0.17.3" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e27992fd6a8c29ee7eef28fc78349aa244134e10ad447ce3b9f0ac0ed0fa4ce0" +checksum = "30ee6ab729cd4cf0fd55218530c4522ed30b7b6081752839b68fcec8d0960788" dependencies = [ "base64 0.13.1", "byteorder", @@ -9410,7 +9420,7 @@ dependencies = [ "httparse", "log", "rand 0.8.5", - "sha-1", + "sha1", "thiserror", "url", "utf-8", @@ -9418,13 +9428,13 @@ dependencies = [ [[package]] name = "tungstenite" -version = "0.18.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30ee6ab729cd4cf0fd55218530c4522ed30b7b6081752839b68fcec8d0960788" +checksum = "9e3dac10fd62eaf6617d3a904ae222845979aec67c615d1c842b4002c7666fb9" dependencies = [ - "base64 0.13.1", "byteorder", "bytes", + "data-encoding", "http", "httparse", "log", @@ -9706,6 +9716,12 @@ dependencies = [ "serde", ] +[[package]] +name = "valuable" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" + [[package]] name = "vcpkg" version = "0.2.15" @@ -9740,17 +9756,16 @@ checksum = 
"49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "viona_api" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=de6369aa45a255f896da0a3ddd2b7152c036a4e9#de6369aa45a255f896da0a3ddd2b7152c036a4e9" +source = "git+https://github.com/oxidecomputer/propolis?rev=42c878b71a58d430dfc306126af5d40ca816d70f#42c878b71a58d430dfc306126af5d40ca816d70f" dependencies = [ "libc", - "num_enum 0.5.11", "viona_api_sys", ] [[package]] name = "viona_api_sys" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=de6369aa45a255f896da0a3ddd2b7152c036a4e9#de6369aa45a255f896da0a3ddd2b7152c036a4e9" +source = "git+https://github.com/oxidecomputer/propolis?rev=42c878b71a58d430dfc306126af5d40ca816d70f#42c878b71a58d430dfc306126af5d40ca816d70f" dependencies = [ "libc", ] @@ -10329,6 +10344,61 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "workspace-hack" +version = "0.1.0" +source = "git+https://github.com/oxidecomputer/crucible?rev=20273bcca1fd5834ebc3e67dfa7020f0e99ad681#20273bcca1fd5834ebc3e67dfa7020f0e99ad681" +dependencies = [ + "bitflags 2.4.0", + "bytes", + "cc", + "chrono", + "console", + "crossbeam-utils", + "crypto-common", + "digest", + "either", + "futures-channel", + "futures-core", + "futures-executor", + "futures-sink", + "futures-util", + "getrandom 0.2.10", + "hashbrown 0.12.3", + "hex", + "hyper", + "indexmap 1.9.3", + "libc", + "log", + "mio", + "num-traits", + "once_cell", + "openapiv3", + "parking_lot 0.12.1", + "phf_shared 0.11.2", + "rand 0.8.5", + "rand_chacha 0.3.1", + "rand_core 0.6.4", + "reqwest", + "rustls", + "schemars", + "semver 1.0.18", + "serde", + "slog", + "syn 1.0.109", + "syn 2.0.32", + "time", + "time-macros", + "tokio", + "tokio-util", + "toml_datetime", + "toml_edit 0.19.15", + "tracing", + "tracing-core", + "usdt", + "uuid", +] + [[package]] name = "wyz" version = "0.5.1" diff --git a/Cargo.toml b/Cargo.toml index fb610128ed..2af44b5559 100644 --- 
a/Cargo.toml +++ b/Cargo.toml @@ -161,10 +161,10 @@ cookie = "0.16" criterion = { version = "0.5.1", features = [ "async_tokio" ] } crossbeam = "0.8" crossterm = { version = "0.27.0", features = ["event-stream"] } -crucible-agent-client = { git = "https://github.com/oxidecomputer/crucible", rev = "aeb69dda26c7e1a8b6eada425670cd4b83f91c07" } -crucible-client-types = { git = "https://github.com/oxidecomputer/crucible", rev = "aeb69dda26c7e1a8b6eada425670cd4b83f91c07" } -crucible-pantry-client = { git = "https://github.com/oxidecomputer/crucible", rev = "aeb69dda26c7e1a8b6eada425670cd4b83f91c07" } -crucible-smf = { git = "https://github.com/oxidecomputer/crucible", rev = "aeb69dda26c7e1a8b6eada425670cd4b83f91c07" } +crucible-agent-client = { git = "https://github.com/oxidecomputer/crucible", rev = "20273bcca1fd5834ebc3e67dfa7020f0e99ad681" } +crucible-client-types = { git = "https://github.com/oxidecomputer/crucible", rev = "20273bcca1fd5834ebc3e67dfa7020f0e99ad681" } +crucible-pantry-client = { git = "https://github.com/oxidecomputer/crucible", rev = "20273bcca1fd5834ebc3e67dfa7020f0e99ad681" } +crucible-smf = { git = "https://github.com/oxidecomputer/crucible", rev = "20273bcca1fd5834ebc3e67dfa7020f0e99ad681" } curve25519-dalek = "4" datatest-stable = "0.1.3" display-error-chain = "0.1.1" @@ -277,9 +277,9 @@ pretty-hex = "0.3.0" proc-macro2 = "1.0" progenitor = { git = "https://github.com/oxidecomputer/progenitor", branch = "main" } progenitor-client = { git = "https://github.com/oxidecomputer/progenitor", branch = "main" } -bhyve_api = { git = "https://github.com/oxidecomputer/propolis", rev = "de6369aa45a255f896da0a3ddd2b7152c036a4e9" } -propolis-client = { git = "https://github.com/oxidecomputer/propolis", rev = "de6369aa45a255f896da0a3ddd2b7152c036a4e9", features = [ "generated-migration" ] } -propolis-server = { git = "https://github.com/oxidecomputer/propolis", rev = "de6369aa45a255f896da0a3ddd2b7152c036a4e9", default-features = false, features = ["mock-only"] 
} +bhyve_api = { git = "https://github.com/oxidecomputer/propolis", rev = "42c878b71a58d430dfc306126af5d40ca816d70f" } +propolis-client = { git = "https://github.com/oxidecomputer/propolis", rev = "42c878b71a58d430dfc306126af5d40ca816d70f", features = [ "generated-migration" ] } +propolis-server = { git = "https://github.com/oxidecomputer/propolis", rev = "42c878b71a58d430dfc306126af5d40ca816d70f", default-features = false, features = ["mock-only"] } proptest = "1.2.0" quote = "1.0" rand = "0.8.5" diff --git a/package-manifest.toml b/package-manifest.toml index c776f6d96d..ff229e5def 100644 --- a/package-manifest.toml +++ b/package-manifest.toml @@ -381,10 +381,10 @@ only_for_targets.image = "standard" # 3. Use source.type = "manual" instead of "prebuilt" source.type = "prebuilt" source.repo = "crucible" -source.commit = "aeb69dda26c7e1a8b6eada425670cd4b83f91c07" +source.commit = "20273bcca1fd5834ebc3e67dfa7020f0e99ad681" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/crucible/image//crucible.sha256.txt -source.sha256 = "3845327bde9df585ee8771c85eefc3e63a48981f14298d5fca62f4f6fe25c917" +source.sha256 = "0671570dfed8bff8e64c42a41269d961426bdd07e72b9ca8c2e3f28e7ead3c1c" output.type = "zone" [package.crucible-pantry] @@ -392,10 +392,10 @@ service_name = "crucible_pantry" only_for_targets.image = "standard" source.type = "prebuilt" source.repo = "crucible" -source.commit = "aeb69dda26c7e1a8b6eada425670cd4b83f91c07" +source.commit = "20273bcca1fd5834ebc3e67dfa7020f0e99ad681" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/crucible/image//crucible-pantry.sha256.txt -source.sha256 = "a3f2fc92d9ae184a66c402dfe33b1d1c128f356d6be70671de421be600d4064a" +source.sha256 = "c35cc24945d047f8d77e438ee606e6a83be64f0f97356fdc3308be716dcf3718" output.type = "zone" # Refer to @@ -406,10 +406,10 @@ service_name = "propolis-server" only_for_targets.image = 
"standard" source.type = "prebuilt" source.repo = "propolis" -source.commit = "de6369aa45a255f896da0a3ddd2b7152c036a4e9" +source.commit = "42c878b71a58d430dfc306126af5d40ca816d70f" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/propolis/image//propolis-server.sha256.txt -source.sha256 = "182597a153793096826992f499a94be54c746e346a3566802e1fe7e78b2ccf2f" +source.sha256 = "dce4d82bb936e990262abcaa279eee7e33a19930880b23f49fa3851cded18567" output.type = "zone" [package.maghemite] diff --git a/sled-agent/src/sim/sled_agent.rs b/sled-agent/src/sim/sled_agent.rs index e53295f823..42fff355a5 100644 --- a/sled-agent/src/sim/sled_agent.rs +++ b/sled-agent/src/sim/sled_agent.rs @@ -617,14 +617,8 @@ impl SledAgent { ..Default::default() }; let propolis_log = log.new(o!("component" => "propolis-server-mock")); - let config = propolis_server::config::Config { - bootrom: Default::default(), - pci_bridges: Default::default(), - chipset: Default::default(), - devices: Default::default(), - block_devs: Default::default(), - }; - let private = Arc::new(PropolisContext::new(config, propolis_log)); + let private = + Arc::new(PropolisContext::new(Default::default(), propolis_log)); info!(log, "Starting mock propolis-server..."); let dropshot_log = log.new(o!("component" => "dropshot")); let mock_api = propolis_server::mock_server::api(); diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index 820b2d2336..8854ef27bc 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -39,6 +39,7 @@ flate2 = { version = "1.0.27" } futures = { version = "0.3.28" } futures-channel = { version = "0.3.28", features = ["sink"] } futures-core = { version = "0.3.28" } +futures-executor = { version = "0.3.28" } futures-io = { version = "0.3.28", default-features = false, features = ["std"] } futures-sink = { version = "0.3.28" } futures-task = { version = "0.3.28", default-features = false, features = ["std"] } 
@@ -48,9 +49,11 @@ generic-array = { version = "0.14.7", default-features = false, features = ["mor getrandom = { version = "0.2.10", default-features = false, features = ["js", "rdrand", "std"] } hashbrown-582f2526e08bb6a0 = { package = "hashbrown", version = "0.14.0", features = ["raw"] } hashbrown-594e8ee84c453af0 = { package = "hashbrown", version = "0.13.2" } +hashbrown-5ef9efb8ec2df382 = { package = "hashbrown", version = "0.12.3", features = ["raw"] } hex = { version = "0.4.3", features = ["serde"] } hyper = { version = "0.14.27", features = ["full"] } -indexmap = { version = "2.0.0", features = ["serde"] } +indexmap-dff4ba8e3ae991db = { package = "indexmap", version = "1.9.3", default-features = false, features = ["serde-1", "std"] } +indexmap-f595c2ba2a3f28df = { package = "indexmap", version = "2.0.0", features = ["serde"] } inout = { version = "0.1.3", default-features = false, features = ["std"] } ipnetwork = { version = "0.20.0", features = ["schemars"] } itertools = { version = "0.10.5" } @@ -65,11 +68,13 @@ num-integer = { version = "0.1.45", features = ["i128"] } num-iter = { version = "0.1.43", default-features = false, features = ["i128"] } num-traits = { version = "0.2.16", features = ["i128", "libm"] } openapiv3 = { version = "1.0.3", default-features = false, features = ["skip_serializing_defaults"] } +parking_lot = { version = "0.12.1", features = ["send_guard"] } petgraph = { version = "0.6.4", features = ["serde-1"] } +phf_shared = { version = "0.11.2" } postgres-types = { version = "0.2.6", default-features = false, features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } ppv-lite86 = { version = "0.2.17", default-features = false, features = ["simd", "std"] } predicates = { version = "3.0.3" } -rand = { version = "0.8.5", features = ["min_const_gen"] } +rand = { version = "0.8.5", features = ["min_const_gen", "small_rng"] } rand_chacha = { version = "0.3.1" } regex = { version = "1.9.5" } regex-automata = { version = "0.3.8", 
default-features = false, features = ["dfa-onepass", "dfa-search", "hybrid", "meta", "nfa-backtrack", "perf-inline", "perf-literal", "unicode"] } @@ -86,7 +91,7 @@ slog = { version = "2.7.0", features = ["dynamic-keys", "max_level_trace", "rele spin = { version = "0.9.8" } string_cache = { version = "0.8.7" } subtle = { version = "2.5.0" } -syn-dff4ba8e3ae991db = { package = "syn", version = "1.0.109", features = ["extra-traits", "fold", "full", "visit"] } +syn-dff4ba8e3ae991db = { package = "syn", version = "1.0.109", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } syn-f595c2ba2a3f28df = { package = "syn", version = "2.0.32", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } textwrap = { version = "0.16.0" } time = { version = "0.3.27", features = ["formatting", "local-offset", "macros", "parsing"] } @@ -94,9 +99,8 @@ tokio = { version = "1.32.0", features = ["full", "test-util"] } tokio-postgres = { version = "0.7.10", features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } tokio-stream = { version = "0.1.14", features = ["net"] } toml = { version = "0.7.8" } -toml_datetime = { version = "0.6.3", default-features = false, features = ["serde"] } -toml_edit = { version = "0.19.15", features = ["serde"] } tracing = { version = "0.1.37", features = ["log"] } +tracing-core = { version = "0.1.31" } trust-dns-proto = { version = "0.22.0" } unicode-bidi = { version = "0.3.13" } unicode-normalization = { version = "0.1.22" } @@ -133,6 +137,7 @@ flate2 = { version = "1.0.27" } futures = { version = "0.3.28" } futures-channel = { version = "0.3.28", features = ["sink"] } futures-core = { version = "0.3.28" } +futures-executor = { version = "0.3.28" } futures-io = { version = "0.3.28", default-features = false, features = ["std"] } futures-sink = { version = "0.3.28" } futures-task = { version = "0.3.28", default-features = false, features = ["std"] } @@ -142,9 +147,11 @@ generic-array = { version = "0.14.7", 
default-features = false, features = ["mor getrandom = { version = "0.2.10", default-features = false, features = ["js", "rdrand", "std"] } hashbrown-582f2526e08bb6a0 = { package = "hashbrown", version = "0.14.0", features = ["raw"] } hashbrown-594e8ee84c453af0 = { package = "hashbrown", version = "0.13.2" } +hashbrown-5ef9efb8ec2df382 = { package = "hashbrown", version = "0.12.3", features = ["raw"] } hex = { version = "0.4.3", features = ["serde"] } hyper = { version = "0.14.27", features = ["full"] } -indexmap = { version = "2.0.0", features = ["serde"] } +indexmap-dff4ba8e3ae991db = { package = "indexmap", version = "1.9.3", default-features = false, features = ["serde-1", "std"] } +indexmap-f595c2ba2a3f28df = { package = "indexmap", version = "2.0.0", features = ["serde"] } inout = { version = "0.1.3", default-features = false, features = ["std"] } ipnetwork = { version = "0.20.0", features = ["schemars"] } itertools = { version = "0.10.5" } @@ -159,11 +166,13 @@ num-integer = { version = "0.1.45", features = ["i128"] } num-iter = { version = "0.1.43", default-features = false, features = ["i128"] } num-traits = { version = "0.2.16", features = ["i128", "libm"] } openapiv3 = { version = "1.0.3", default-features = false, features = ["skip_serializing_defaults"] } +parking_lot = { version = "0.12.1", features = ["send_guard"] } petgraph = { version = "0.6.4", features = ["serde-1"] } +phf_shared = { version = "0.11.2" } postgres-types = { version = "0.2.6", default-features = false, features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } ppv-lite86 = { version = "0.2.17", default-features = false, features = ["simd", "std"] } predicates = { version = "3.0.3" } -rand = { version = "0.8.5", features = ["min_const_gen"] } +rand = { version = "0.8.5", features = ["min_const_gen", "small_rng"] } rand_chacha = { version = "0.3.1" } regex = { version = "1.9.5" } regex-automata = { version = "0.3.8", default-features = false, features = ["dfa-onepass", 
"dfa-search", "hybrid", "meta", "nfa-backtrack", "perf-inline", "perf-literal", "unicode"] } @@ -180,7 +189,7 @@ slog = { version = "2.7.0", features = ["dynamic-keys", "max_level_trace", "rele spin = { version = "0.9.8" } string_cache = { version = "0.8.7" } subtle = { version = "2.5.0" } -syn-dff4ba8e3ae991db = { package = "syn", version = "1.0.109", features = ["extra-traits", "fold", "full", "visit"] } +syn-dff4ba8e3ae991db = { package = "syn", version = "1.0.109", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } syn-f595c2ba2a3f28df = { package = "syn", version = "2.0.32", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } textwrap = { version = "0.16.0" } time = { version = "0.3.27", features = ["formatting", "local-offset", "macros", "parsing"] } @@ -189,9 +198,8 @@ tokio = { version = "1.32.0", features = ["full", "test-util"] } tokio-postgres = { version = "0.7.10", features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } tokio-stream = { version = "0.1.14", features = ["net"] } toml = { version = "0.7.8" } -toml_datetime = { version = "0.6.3", default-features = false, features = ["serde"] } -toml_edit = { version = "0.19.15", features = ["serde"] } tracing = { version = "0.1.37", features = ["log"] } +tracing-core = { version = "0.1.31" } trust-dns-proto = { version = "0.22.0" } unicode-bidi = { version = "0.3.13" } unicode-normalization = { version = "0.1.22" } @@ -250,6 +258,8 @@ hyper-rustls = { version = "0.24.1" } mio = { version = "0.8.8", features = ["net", "os-ext"] } once_cell = { version = "1.18.0", features = ["unstable"] } rustix = { version = "0.38.9", features = ["fs", "termios"] } +toml_datetime = { version = "0.6.3", default-features = false, features = ["serde"] } +toml_edit = { version = "0.19.15", features = ["serde"] } [target.x86_64-unknown-illumos.build-dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } @@ 
-257,5 +267,7 @@ hyper-rustls = { version = "0.24.1" } mio = { version = "0.8.8", features = ["net", "os-ext"] } once_cell = { version = "1.18.0", features = ["unstable"] } rustix = { version = "0.38.9", features = ["fs", "termios"] } +toml_datetime = { version = "0.6.3", default-features = false, features = ["serde"] } +toml_edit = { version = "0.19.15", features = ["serde"] } ### END HAKARI SECTION From 9aabe2aee4bbfe9af1cb9424a93cf6a59f13b9a6 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 5 Oct 2023 09:46:26 -0700 Subject: [PATCH 20/35] Add transaction retry to schema upgrade integration tests (#4209) Fixes https://github.com/oxidecomputer/omicron/issues/4207 --- nexus/tests/integration_tests/schema.rs | 51 ++++++++++++++++++++----- 1 file changed, 42 insertions(+), 9 deletions(-) diff --git a/nexus/tests/integration_tests/schema.rs b/nexus/tests/integration_tests/schema.rs index 2c62f156e1..1d4556e8ed 100644 --- a/nexus/tests/integration_tests/schema.rs +++ b/nexus/tests/integration_tests/schema.rs @@ -62,6 +62,47 @@ async fn test_setup<'a>( builder } +// Attempts to apply an update as a transaction. +// +// Only returns an error if the transaction failed to commit. +async fn apply_update_as_transaction_inner( + client: &omicron_test_utils::dev::db::Client, + sql: &str, +) -> Result<(), tokio_postgres::Error> { + client.batch_execute("BEGIN;").await.expect("Failed to BEGIN transaction"); + client.batch_execute(&sql).await.expect("Failed to execute update"); + client.batch_execute("COMMIT;").await?; + Ok(()) +} + +// Applies an update as a transaction. +// +// Automatically retries transactions that can be retried client-side. 
+async fn apply_update_as_transaction( + log: &Logger, + client: &omicron_test_utils::dev::db::Client, + sql: &str, +) { + loop { + match apply_update_as_transaction_inner(client, sql).await { + Ok(()) => break, + Err(err) => { + client + .batch_execute("ROLLBACK;") + .await + .expect("Failed to ROLLBACK failed transaction"); + if let Some(code) = err.code() { + if code == &tokio_postgres::error::SqlState::T_R_SERIALIZATION_FAILURE { + warn!(log, "Transaction retrying"); + continue; + } + } + panic!("Failed to apply update: {err}"); + } + } + } +} + async fn apply_update( log: &Logger, crdb: &CockroachInstance, @@ -87,15 +128,7 @@ async fn apply_update( for _ in 0..times_to_apply { for sql in sqls.iter() { - client - .batch_execute("BEGIN;") - .await - .expect("Failed to BEGIN update"); - client.batch_execute(&sql).await.expect("Failed to execute update"); - client - .batch_execute("COMMIT;") - .await - .expect("Failed to COMMIT update"); + apply_update_as_transaction(log, &client, sql).await; } } From 6cf8181ba678855e3f131ad2914e90d06de02ac3 Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Thu, 5 Oct 2023 10:28:21 -0700 Subject: [PATCH 21/35] top-level cleanup: move `thing-flinger` into `dev-tools` (#4213) --- Cargo.toml | 4 ++-- {deploy => dev-tools/thing-flinger}/.gitignore | 0 {deploy => dev-tools/thing-flinger}/Cargo.toml | 0 {deploy => dev-tools/thing-flinger}/README.adoc | 0 .../thing-flinger}/src/bin/deployment-example.toml | 0 {deploy => dev-tools/thing-flinger}/src/bin/thing-flinger.rs | 0 6 files changed, 2 insertions(+), 2 deletions(-) rename {deploy => dev-tools/thing-flinger}/.gitignore (100%) rename {deploy => dev-tools/thing-flinger}/Cargo.toml (100%) rename {deploy => dev-tools/thing-flinger}/README.adoc (100%) rename {deploy => dev-tools/thing-flinger}/src/bin/deployment-example.toml (100%) rename {deploy => dev-tools/thing-flinger}/src/bin/thing-flinger.rs (100%) diff --git a/Cargo.toml b/Cargo.toml index 2af44b5559..29291e8a19 100644 --- 
a/Cargo.toml +++ b/Cargo.toml @@ -8,9 +8,9 @@ members = [ "common", "crdb-seed", "ddm-admin-client", - "deploy", "dev-tools/omdb", "dev-tools/omicron-dev", + "dev-tools/thing-flinger", "dev-tools/xtask", "dns-server", "dns-service-client", @@ -75,9 +75,9 @@ default-members = [ "common", "ddm-admin-client", "dpd-client", - "deploy", "dev-tools/omdb", "dev-tools/omicron-dev", + "dev-tools/thing-flinger", "dev-tools/xtask", "dns-server", "dns-service-client", diff --git a/deploy/.gitignore b/dev-tools/thing-flinger/.gitignore similarity index 100% rename from deploy/.gitignore rename to dev-tools/thing-flinger/.gitignore diff --git a/deploy/Cargo.toml b/dev-tools/thing-flinger/Cargo.toml similarity index 100% rename from deploy/Cargo.toml rename to dev-tools/thing-flinger/Cargo.toml diff --git a/deploy/README.adoc b/dev-tools/thing-flinger/README.adoc similarity index 100% rename from deploy/README.adoc rename to dev-tools/thing-flinger/README.adoc diff --git a/deploy/src/bin/deployment-example.toml b/dev-tools/thing-flinger/src/bin/deployment-example.toml similarity index 100% rename from deploy/src/bin/deployment-example.toml rename to dev-tools/thing-flinger/src/bin/deployment-example.toml diff --git a/deploy/src/bin/thing-flinger.rs b/dev-tools/thing-flinger/src/bin/thing-flinger.rs similarity index 100% rename from deploy/src/bin/thing-flinger.rs rename to dev-tools/thing-flinger/src/bin/thing-flinger.rs From ba291b8ab2293eb3e4cdf85a1bae072d75343b5e Mon Sep 17 00:00:00 2001 From: Jordan Hendricks Date: Thu, 5 Oct 2023 14:06:04 -0700 Subject: [PATCH 22/35] Add example of using an SSH tunnel to access the console in development deployments (#4200) --- docs/how-to-run.adoc | 32 ++++++++++++++++++++++++++++++-- 1 file changed, 30 insertions(+), 2 deletions(-) diff --git a/docs/how-to-run.adoc b/docs/how-to-run.adoc index aa1ee3c73d..04d274da8b 100644 --- a/docs/how-to-run.adoc +++ b/docs/how-to-run.adoc @@ -143,7 +143,10 @@ $ svcadm enable ipfilter Other network 
configurations are possible but beyond the scope of this doc. -When making this choice, note that **in order to use the system once it's set up, you will need to be able to access it from a web browser.** If you go with option 2 here, you may need to use an ssh tunnel or the like to do this. +When making this choice, note that **in order to use the system once it's set +up, you will need to be able to access it from a web browser.** If you go with +option 2 here, you may need to use an SSH tunnel (see: +<>) or the like to do this. === Picking a "machine" type @@ -433,7 +436,32 @@ Where did 192.168.1.20 come from? That's the external address of the external DNS server. We knew that because it's listed in the `external_dns_ips` entry of the `config-rss.toml` file we're using. -Having looked this up, the easiest thing will be to use `http://192.168.1.21` for your URL (replacing with `https` if you used a certificate, and replacing that IP if needed). If you've set up networking right, you should be able to reach this from your web browser. You may have to instruct the browser to accept a self-signed TLS certificate. See also <<_connecting_securely_with_tls_using_the_cli>>. +Having looked this up, the easiest thing will be to use `http://192.168.1.21` for your URL (replacing with `https` if you used a certificate, and replacing that IP if needed). If you've set up networking right, you should be able to reach this from your web browser. You may have to instruct the browser to accept a self-signed TLS certificate. See also <>. + +=== Setting up an SSH tunnel for console access + +If you set up a fake external network (method 2 in <>), one +way to be able to access the console of your deployment is by setting up an SSH +tunnel. Console access is required to use the CLI for device authentication. +The following is an example of how to access the console with an SSH tunnel. + +Nexus serves the console, so first get a nexus IP from the instructions above. 
+ +In this example, Omicron is running on the lab machine `dunkin`. Usually, you'll +want to set up the tunnel from the machine where you run a browser, to the +machine running Omicron. In this example, one would run this on the machine +running the browser: + +``` +$ ssh -L 1234:192.168.1.22:80 dunkin.eng.oxide.computer +``` + +The above command configures `ssh` to bind to the TCP port `1234` on the machine +running the browser, forward packets through the ssh connection, and redirect +them to 192.168.1.22 port 80 *as seen from the other side of the connection*. + +Now you should be able to access the console from the browser on this machine, +via something like: `127.0.0.1:1234`, using the port from the `ssh` command. === Using the CLI From a2bb889cd21aeef7c287ee2da469a771bc684c01 Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Thu, 5 Oct 2023 14:29:43 -0700 Subject: [PATCH 23/35] top-level cleanup: consolidate clients (#4214) --- Cargo.toml | 78 +++++++++---------- .../bootstrap-agent-client}/Cargo.toml | 0 .../bootstrap-agent-client}/src/lib.rs | 2 +- .../ddm-admin-client}/Cargo.toml | 0 .../ddm-admin-client}/build.rs | 14 ++-- .../ddm-admin-client}/src/lib.rs | 0 .../dns-service-client}/Cargo.toml | 0 .../dns-service-client}/src/lib.rs | 2 +- {dpd-client => clients/dpd-client}/Cargo.toml | 0 {dpd-client => clients/dpd-client}/build.rs | 18 ++--- {dpd-client => clients/dpd-client}/src/lib.rs | 0 .../gateway-client}/Cargo.toml | 0 .../gateway-client}/src/lib.rs | 2 +- .../installinator-artifact-client}/Cargo.toml | 0 .../installinator-artifact-client}/src/lib.rs | 2 +- .../nexus-client}/Cargo.toml | 0 .../nexus-client}/src/lib.rs | 2 +- .../oxide-client}/Cargo.toml | 0 .../oxide-client}/src/lib.rs | 2 +- .../oximeter-client}/Cargo.toml | 0 .../oximeter-client}/src/lib.rs | 2 +- .../sled-agent-client}/Cargo.toml | 0 .../sled-agent-client}/src/lib.rs | 2 +- .../wicketd-client}/Cargo.toml | 0 .../wicketd-client}/src/lib.rs | 2 +- 25 files changed, 64 
insertions(+), 64 deletions(-) rename {bootstrap-agent-client => clients/bootstrap-agent-client}/Cargo.toml (100%) rename {bootstrap-agent-client => clients/bootstrap-agent-client}/src/lib.rs (97%) rename {ddm-admin-client => clients/ddm-admin-client}/Cargo.toml (100%) rename {ddm-admin-client => clients/ddm-admin-client}/build.rs (87%) rename {ddm-admin-client => clients/ddm-admin-client}/src/lib.rs (100%) rename {dns-service-client => clients/dns-service-client}/Cargo.toml (100%) rename {dns-service-client => clients/dns-service-client}/src/lib.rs (98%) rename {dpd-client => clients/dpd-client}/Cargo.toml (100%) rename {dpd-client => clients/dpd-client}/build.rs (87%) rename {dpd-client => clients/dpd-client}/src/lib.rs (100%) rename {gateway-client => clients/gateway-client}/Cargo.toml (100%) rename {gateway-client => clients/gateway-client}/src/lib.rs (98%) rename {installinator-artifact-client => clients/installinator-artifact-client}/Cargo.toml (100%) rename {installinator-artifact-client => clients/installinator-artifact-client}/src/lib.rs (96%) rename {nexus-client => clients/nexus-client}/Cargo.toml (100%) rename {nexus-client => clients/nexus-client}/src/lib.rs (99%) rename {oxide-client => clients/oxide-client}/Cargo.toml (100%) rename {oxide-client => clients/oxide-client}/src/lib.rs (99%) rename {oximeter-client => clients/oximeter-client}/Cargo.toml (100%) rename {oximeter-client => clients/oximeter-client}/src/lib.rs (93%) rename {sled-agent-client => clients/sled-agent-client}/Cargo.toml (100%) rename {sled-agent-client => clients/sled-agent-client}/src/lib.rs (99%) rename {wicketd-client => clients/wicketd-client}/Cargo.toml (100%) rename {wicketd-client => clients/wicketd-client}/src/lib.rs (99%) diff --git a/Cargo.toml b/Cargo.toml index 29291e8a19..1ca8a02886 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,26 +2,31 @@ members = [ "api_identity", "bootstore", - "bootstrap-agent-client", "caboose-util", "certificates", + 
"clients/bootstrap-agent-client", + "clients/ddm-admin-client", + "clients/dns-service-client", + "clients/dpd-client", + "clients/gateway-client", + "clients/installinator-artifact-client", + "clients/nexus-client", + "clients/oxide-client", + "clients/oximeter-client", + "clients/sled-agent-client", + "clients/wicketd-client", "common", "crdb-seed", - "ddm-admin-client", "dev-tools/omdb", "dev-tools/omicron-dev", "dev-tools/thing-flinger", "dev-tools/xtask", "dns-server", - "dns-service-client", - "dpd-client", "end-to-end-tests", "gateway-cli", - "gateway-client", "gateway-test-utils", "gateway", "illumos-utils", - "installinator-artifact-client", "installinator-artifactd", "installinator-common", "installinator", @@ -29,7 +34,6 @@ members = [ "internal-dns", "ipcc-key-value", "key-manager", - "nexus-client", "nexus", "nexus/authz-macros", "nexus/db-macros", @@ -40,8 +44,6 @@ members = [ "nexus/test-utils-macros", "nexus/test-utils", "nexus/types", - "oxide-client", - "oximeter-client", "oximeter/collector", "oximeter/db", "oximeter/instruments", @@ -51,7 +53,6 @@ members = [ "package", "passwords", "rpaths", - "sled-agent-client", "sled-agent", "sled-hardware", "sp-sim", @@ -62,70 +63,69 @@ members = [ "wicket-common", "wicket-dbg", "wicket", - "wicketd-client", "wicketd", "workspace-hack", ] default-members = [ - "bootstrap-agent-client", "bootstore", "caboose-util", "certificates", + "clients/bootstrap-agent-client", + "clients/ddm-admin-client", + "clients/dns-service-client", + "clients/dpd-client", + "clients/gateway-client", + "clients/installinator-artifact-client", + "clients/nexus-client", + "clients/oxide-client", + "clients/oximeter-client", + "clients/sled-agent-client", + "clients/wicketd-client", "common", - "ddm-admin-client", - "dpd-client", "dev-tools/omdb", "dev-tools/omicron-dev", "dev-tools/thing-flinger", "dev-tools/xtask", "dns-server", - "dns-service-client", - "gateway", "gateway-cli", - "gateway-client", "gateway-test-utils", + 
"gateway", "illumos-utils", - "installinator", - "installinator-artifact-client", "installinator-artifactd", "installinator-common", - "internal-dns", + "installinator", "internal-dns-cli", + "internal-dns", "ipcc-key-value", "key-manager", "nexus", - "nexus-client", "nexus/authz-macros", "nexus/db-macros", "nexus/db-model", "nexus/db-queries", "nexus/defaults", "nexus/types", - "oxide-client", - "oximeter-client", "oximeter/collector", "oximeter/db", "oximeter/instruments", - "oximeter/oximeter", "oximeter/oximeter-macro-impl", + "oximeter/oximeter", "oximeter/producer", "package", "passwords", "rpaths", "sled-agent", - "sled-agent-client", "sled-hardware", "sp-sim", "test-utils", - "tufaceous", "tufaceous-lib", + "tufaceous", "update-engine", - "wicket", "wicket-common", "wicket-dbg", + "wicket", "wicketd", - "wicketd-client", ] resolver = "2" @@ -144,7 +144,7 @@ bb8 = "0.8.1" bcs = "0.1.5" bincode = "1.3.3" bootstore = { path = "bootstore" } -bootstrap-agent-client = { path = "bootstrap-agent-client" } +bootstrap-agent-client = { path = "clients/bootstrap-agent-client" } buf-list = { version = "1.0.3", features = ["tokio1"] } byteorder = "1.4.3" bytes = "1.5.0" @@ -168,7 +168,7 @@ crucible-smf = { git = "https://github.com/oxidecomputer/crucible", rev = "20273 curve25519-dalek = "4" datatest-stable = "0.1.3" display-error-chain = "0.1.1" -ddm-admin-client = { path = "ddm-admin-client" } +ddm-admin-client = { path = "clients/ddm-admin-client" } db-macros = { path = "nexus/db-macros" } debug-ignore = "1.0.5" derive_more = "0.99.17" @@ -176,8 +176,8 @@ derive-where = "1.2.5" diesel = { version = "2.1.1", features = ["postgres", "r2d2", "chrono", "serde_json", "network-address", "uuid"] } diesel-dtrace = { git = "https://github.com/oxidecomputer/diesel-dtrace", branch = "main" } dns-server = { path = "dns-server" } -dns-service-client = { path = "dns-service-client" } -dpd-client = { path = "dpd-client" } +dns-service-client = { path = "clients/dns-service-client" } 
+dpd-client = { path = "clients/dpd-client" } dropshot = { git = "https://github.com/oxidecomputer/dropshot", branch = "main", features = [ "usdt-probes" ] } either = "1.9.0" expectorate = "1.1.0" @@ -187,7 +187,7 @@ flume = "0.11.0" foreign-types = "0.3.2" fs-err = "2.9.0" futures = "0.3.28" -gateway-client = { path = "gateway-client" } +gateway-client = { path = "clients/gateway-client" } gateway-messages = { git = "https://github.com/oxidecomputer/management-gateway-service", rev = "1e180ae55e56bd17af35cb868ffbd18ce487351d", default-features = false, features = ["std"] } gateway-sp-comms = { git = "https://github.com/oxidecomputer/management-gateway-service", rev = "1e180ae55e56bd17af35cb868ffbd18ce487351d" } gateway-test-utils = { path = "gateway-test-utils" } @@ -209,7 +209,7 @@ indexmap = "2.0.0" indicatif = { version = "0.17.6", features = ["rayon"] } installinator = { path = "installinator" } installinator-artifactd = { path = "installinator-artifactd" } -installinator-artifact-client = { path = "installinator-artifact-client" } +installinator-artifact-client = { path = "clients/installinator-artifact-client" } installinator-common = { path = "installinator-common" } internal-dns = { path = "internal-dns" } ipcc-key-value = { path = "ipcc-key-value" } @@ -223,7 +223,7 @@ macaddr = { version = "1.0.1", features = ["serde_std"] } mime_guess = "2.0.4" mockall = "0.11" newtype_derive = "0.1.6" -nexus-client = { path = "nexus-client" } +nexus-client = { path = "clients/nexus-client" } nexus-db-model = { path = "nexus/db-model" } nexus-db-queries = { path = "nexus/db-queries" } nexus-defaults = { path = "nexus/defaults" } @@ -244,7 +244,7 @@ omicron-rpaths = { path = "rpaths" } omicron-sled-agent = { path = "sled-agent" } omicron-test-utils = { path = "test-utils" } omicron-zone-package = "0.8.3" -oxide-client = { path = "oxide-client" } +oxide-client = { path = "clients/oxide-client" } oxide-vpc = { git = "https://github.com/oxidecomputer/opte", rev = 
"98d33125413f01722947e322f82caf9d22209434", features = [ "api", "std" ] } once_cell = "1.18.0" openapi-lint = { git = "https://github.com/oxidecomputer/openapi-lint", branch = "main" } @@ -257,7 +257,7 @@ opte-ioctl = { git = "https://github.com/oxidecomputer/opte", rev = "98d33125413 oso = "0.26" owo-colors = "3.5.0" oximeter = { path = "oximeter/oximeter" } -oximeter-client = { path = "oximeter-client" } +oximeter-client = { path = "clients/oximeter-client" } oximeter-db = { path = "oximeter/db/" } oximeter-collector = { path = "oximeter/collector" } oximeter-instruments = { path = "oximeter/instruments" } @@ -315,7 +315,7 @@ signal-hook = "0.3" signal-hook-tokio = { version = "0.3", features = [ "futures-v0_3" ] } similar-asserts = "1.5.0" sled = "0.34" -sled-agent-client = { path = "sled-agent-client" } +sled-agent-client = { path = "clients/sled-agent-client" } sled-hardware = { path = "sled-hardware" } slog = { version = "2.7", features = [ "dynamic-keys", "max_level_trace", "release_max_level_debug" ] } slog-async = "2.8" @@ -370,7 +370,7 @@ usdt = "0.3" walkdir = "2.4" wicket = { path = "wicket" } wicket-common = { path = "wicket-common" } -wicketd-client = { path = "wicketd-client" } +wicketd-client = { path = "clients/wicketd-client" } zeroize = { version = "1.6.0", features = ["zeroize_derive", "std"] } zip = { version = "0.6.6", default-features = false, features = ["deflate","bzip2"] } zone = { version = "0.2", default-features = false, features = ["async"] } diff --git a/bootstrap-agent-client/Cargo.toml b/clients/bootstrap-agent-client/Cargo.toml similarity index 100% rename from bootstrap-agent-client/Cargo.toml rename to clients/bootstrap-agent-client/Cargo.toml diff --git a/bootstrap-agent-client/src/lib.rs b/clients/bootstrap-agent-client/src/lib.rs similarity index 97% rename from bootstrap-agent-client/src/lib.rs rename to clients/bootstrap-agent-client/src/lib.rs index 5a159e299a..3f8b20e1f5 100644 --- a/bootstrap-agent-client/src/lib.rs +++ 
b/clients/bootstrap-agent-client/src/lib.rs @@ -5,7 +5,7 @@ //! Interface for making API requests to a Bootstrap Agent progenitor::generate_api!( - spec = "../openapi/bootstrap-agent.json", + spec = "../../openapi/bootstrap-agent.json", inner_type = slog::Logger, pre_hook = (|log: &slog::Logger, request: &reqwest::Request| { slog::debug!(log, "client request"; diff --git a/ddm-admin-client/Cargo.toml b/clients/ddm-admin-client/Cargo.toml similarity index 100% rename from ddm-admin-client/Cargo.toml rename to clients/ddm-admin-client/Cargo.toml diff --git a/ddm-admin-client/build.rs b/clients/ddm-admin-client/build.rs similarity index 87% rename from ddm-admin-client/build.rs rename to clients/ddm-admin-client/build.rs index ef4183fee3..e3c1345eda 100644 --- a/ddm-admin-client/build.rs +++ b/clients/ddm-admin-client/build.rs @@ -16,23 +16,23 @@ use std::path::Path; fn main() -> Result<()> { // Find the current maghemite repo commit from our package manifest. - let manifest = fs::read_to_string("../package-manifest.toml") - .context("failed to read ../package-manifest.toml")?; - println!("cargo:rerun-if-changed=../package-manifest.toml"); + let manifest = fs::read_to_string("../../package-manifest.toml") + .context("failed to read ../../package-manifest.toml")?; + println!("cargo:rerun-if-changed=../../package-manifest.toml"); let config: Config = toml::from_str(&manifest) - .context("failed to parse ../package-manifest.toml")?; + .context("failed to parse ../../package-manifest.toml")?; let maghemite = config .packages .get("maghemite") - .context("missing maghemite package in ../package-manifest.toml")?; + .context("missing maghemite package in ../../package-manifest.toml")?; let local_path = match &maghemite.source { PackageSource::Prebuilt { commit, .. } => { // Report a relatively verbose error if we haven't downloaded the requisite // openapi spec. 
let local_path = - format!("../out/downloads/ddm-admin-{commit}.json"); + format!("../../out/downloads/ddm-admin-{commit}.json"); if !Path::new(&local_path).exists() { bail!("{local_path} doesn't exist; rerun `tools/ci_download_maghemite_openapi` (after updating `tools/maghemite_openapi_version` if the maghemite commit in package-manifest.toml has changed)"); } @@ -42,7 +42,7 @@ fn main() -> Result<()> { PackageSource::Manual => { let local_path = - "../out/downloads/ddm-admin-manual.json".to_string(); + "../../out/downloads/ddm-admin-manual.json".to_string(); if !Path::new(&local_path).exists() { bail!("{local_path} doesn't exist, please copy manually built ddm-admin.json there!"); } diff --git a/ddm-admin-client/src/lib.rs b/clients/ddm-admin-client/src/lib.rs similarity index 100% rename from ddm-admin-client/src/lib.rs rename to clients/ddm-admin-client/src/lib.rs diff --git a/dns-service-client/Cargo.toml b/clients/dns-service-client/Cargo.toml similarity index 100% rename from dns-service-client/Cargo.toml rename to clients/dns-service-client/Cargo.toml diff --git a/dns-service-client/src/lib.rs b/clients/dns-service-client/src/lib.rs similarity index 98% rename from dns-service-client/src/lib.rs rename to clients/dns-service-client/src/lib.rs index 9b729b1c5c..931e68322f 100644 --- a/dns-service-client/src/lib.rs +++ b/clients/dns-service-client/src/lib.rs @@ -3,7 +3,7 @@ // file, You can obtain one at https://mozilla.org/MPL/2.0/. 
progenitor::generate_api!( - spec = "../openapi/dns-server.json", + spec = "../../openapi/dns-server.json", inner_type = slog::Logger, derives = [schemars::JsonSchema, Eq, PartialEq], pre_hook = (|log: &slog::Logger, request: &reqwest::Request| { diff --git a/dpd-client/Cargo.toml b/clients/dpd-client/Cargo.toml similarity index 100% rename from dpd-client/Cargo.toml rename to clients/dpd-client/Cargo.toml diff --git a/dpd-client/build.rs b/clients/dpd-client/build.rs similarity index 87% rename from dpd-client/build.rs rename to clients/dpd-client/build.rs index 2aaa8437e7..6a65ab9495 100644 --- a/dpd-client/build.rs +++ b/clients/dpd-client/build.rs @@ -22,23 +22,23 @@ use std::path::Path; fn main() -> Result<()> { // Find the current dendrite repo commit from our package manifest. - let manifest = fs::read_to_string("../package-manifest.toml") - .context("failed to read ../package-manifest.toml")?; - println!("cargo:rerun-if-changed=../package-manifest.toml"); + let manifest = fs::read_to_string("../../package-manifest.toml") + .context("failed to read ../../package-manifest.toml")?; + println!("cargo:rerun-if-changed=../../package-manifest.toml"); let config: Config = toml::from_str(&manifest) - .context("failed to parse ../package-manifest.toml")?; + .context("failed to parse ../../package-manifest.toml")?; let dendrite = config .packages .get("dendrite-asic") - .context("missing dendrite package in ../package-manifest.toml")?; + .context("missing dendrite package in ../../package-manifest.toml")?; let local_path = match &dendrite.source { PackageSource::Prebuilt { commit, .. } => { - // Report a relatively verbose error if we haven't downloaded the requisite - // openapi spec. - let local_path = format!("../out/downloads/dpd-{commit}.json"); + // Report a relatively verbose error if we haven't downloaded the + // requisite openapi spec. 
+ let local_path = format!("../../out/downloads/dpd-{commit}.json"); if !Path::new(&local_path).exists() { bail!("{local_path} doesn't exist; rerun `tools/ci_download_dendrite_openapi` (after updating `tools/dendrite_openapi_version` if the dendrite commit in package-manifest.toml has changed)"); } @@ -47,7 +47,7 @@ fn main() -> Result<()> { } PackageSource::Manual => { - let local_path = "../out/downloads/dpd-manual.json".to_string(); + let local_path = "../../out/downloads/dpd-manual.json".to_string(); if !Path::new(&local_path).exists() { bail!("{local_path} doesn't exist, please copy manually built dpd.json there!"); } diff --git a/dpd-client/src/lib.rs b/clients/dpd-client/src/lib.rs similarity index 100% rename from dpd-client/src/lib.rs rename to clients/dpd-client/src/lib.rs diff --git a/gateway-client/Cargo.toml b/clients/gateway-client/Cargo.toml similarity index 100% rename from gateway-client/Cargo.toml rename to clients/gateway-client/Cargo.toml diff --git a/gateway-client/src/lib.rs b/clients/gateway-client/src/lib.rs similarity index 98% rename from gateway-client/src/lib.rs rename to clients/gateway-client/src/lib.rs index 7992eff9e4..800254b197 100644 --- a/gateway-client/src/lib.rs +++ b/clients/gateway-client/src/lib.rs @@ -34,7 +34,7 @@ // it is no longer useful to directly expose the JsonSchema types, we can go // back to reusing `omicron_common`. 
progenitor::generate_api!( - spec = "../openapi/gateway.json", + spec = "../../openapi/gateway.json", inner_type = slog::Logger, pre_hook = (|log: &slog::Logger, request: &reqwest::Request| { slog::debug!(log, "client request"; diff --git a/installinator-artifact-client/Cargo.toml b/clients/installinator-artifact-client/Cargo.toml similarity index 100% rename from installinator-artifact-client/Cargo.toml rename to clients/installinator-artifact-client/Cargo.toml diff --git a/installinator-artifact-client/src/lib.rs b/clients/installinator-artifact-client/src/lib.rs similarity index 96% rename from installinator-artifact-client/src/lib.rs rename to clients/installinator-artifact-client/src/lib.rs index aa5ceb863a..de3072a34a 100644 --- a/installinator-artifact-client/src/lib.rs +++ b/clients/installinator-artifact-client/src/lib.rs @@ -5,7 +5,7 @@ //! Interface for making API requests to installinator-artifactd. progenitor::generate_api!( - spec = "../openapi/installinator-artifactd.json", + spec = "../../openapi/installinator-artifactd.json", inner_type = slog::Logger, pre_hook = (|log: &slog::Logger, request: &reqwest::Request| { slog::debug!(log, "client request"; diff --git a/nexus-client/Cargo.toml b/clients/nexus-client/Cargo.toml similarity index 100% rename from nexus-client/Cargo.toml rename to clients/nexus-client/Cargo.toml diff --git a/nexus-client/src/lib.rs b/clients/nexus-client/src/lib.rs similarity index 99% rename from nexus-client/src/lib.rs rename to clients/nexus-client/src/lib.rs index e5cec83f39..412ca70497 100644 --- a/nexus-client/src/lib.rs +++ b/clients/nexus-client/src/lib.rs @@ -8,7 +8,7 @@ use std::collections::HashMap; progenitor::generate_api!( - spec = "../openapi/nexus-internal.json", + spec = "../../openapi/nexus-internal.json", derives = [schemars::JsonSchema, PartialEq], inner_type = slog::Logger, pre_hook = (|log: &slog::Logger, request: &reqwest::Request| { diff --git a/oxide-client/Cargo.toml b/clients/oxide-client/Cargo.toml 
similarity index 100% rename from oxide-client/Cargo.toml rename to clients/oxide-client/Cargo.toml diff --git a/oxide-client/src/lib.rs b/clients/oxide-client/src/lib.rs similarity index 99% rename from oxide-client/src/lib.rs rename to clients/oxide-client/src/lib.rs index 7d34697002..07a190c38e 100644 --- a/oxide-client/src/lib.rs +++ b/clients/oxide-client/src/lib.rs @@ -16,7 +16,7 @@ use trust_dns_resolver::config::{ use trust_dns_resolver::TokioAsyncResolver; progenitor::generate_api!( - spec = "../openapi/nexus.json", + spec = "../../openapi/nexus.json", interface = Builder, tags = Separate, ); diff --git a/oximeter-client/Cargo.toml b/clients/oximeter-client/Cargo.toml similarity index 100% rename from oximeter-client/Cargo.toml rename to clients/oximeter-client/Cargo.toml diff --git a/oximeter-client/src/lib.rs b/clients/oximeter-client/src/lib.rs similarity index 93% rename from oximeter-client/src/lib.rs rename to clients/oximeter-client/src/lib.rs index 9f326fdee8..7bd17d7e76 100644 --- a/oximeter-client/src/lib.rs +++ b/clients/oximeter-client/src/lib.rs @@ -6,7 +6,7 @@ //! 
Interface for API requests to an Oximeter metric collection server -omicron_common::generate_logging_api!("../openapi/oximeter.json"); +omicron_common::generate_logging_api!("../../openapi/oximeter.json"); impl omicron_common::api::external::ClientError for types::Error { fn message(&self) -> String { diff --git a/sled-agent-client/Cargo.toml b/clients/sled-agent-client/Cargo.toml similarity index 100% rename from sled-agent-client/Cargo.toml rename to clients/sled-agent-client/Cargo.toml diff --git a/sled-agent-client/src/lib.rs b/clients/sled-agent-client/src/lib.rs similarity index 99% rename from sled-agent-client/src/lib.rs rename to clients/sled-agent-client/src/lib.rs index 98e7f207e3..68e60e8d95 100644 --- a/sled-agent-client/src/lib.rs +++ b/clients/sled-agent-client/src/lib.rs @@ -9,7 +9,7 @@ use omicron_common::generate_logging_api; use std::convert::TryFrom; use uuid::Uuid; -generate_logging_api!("../openapi/sled-agent.json"); +generate_logging_api!("../../openapi/sled-agent.json"); impl omicron_common::api::external::ClientError for types::Error { fn message(&self) -> String { diff --git a/wicketd-client/Cargo.toml b/clients/wicketd-client/Cargo.toml similarity index 100% rename from wicketd-client/Cargo.toml rename to clients/wicketd-client/Cargo.toml diff --git a/wicketd-client/src/lib.rs b/clients/wicketd-client/src/lib.rs similarity index 99% rename from wicketd-client/src/lib.rs rename to clients/wicketd-client/src/lib.rs index 3f113ea271..ff45232520 100644 --- a/wicketd-client/src/lib.rs +++ b/clients/wicketd-client/src/lib.rs @@ -5,7 +5,7 @@ //! 
Interface for making API requests to wicketd progenitor::generate_api!( - spec = "../openapi/wicketd.json", + spec = "../../openapi/wicketd.json", inner_type = slog::Logger, pre_hook = (|log: &slog::Logger, request: &reqwest::Request| { slog::debug!(log, "client request"; From fb2b4a1d3286bb0c1bb6207fd1aa541037709295 Mon Sep 17 00:00:00 2001 From: Luqman Aden Date: Thu, 5 Oct 2023 15:16:22 -0700 Subject: [PATCH 24/35] tools/install_opte: Freeze the package to the pinned version. (#4215) --- tools/install_opte.sh | 27 +++++++++++++++++++++++++++ tools/uninstall_opte.sh | 14 ++++++++++++++ 2 files changed, 41 insertions(+) diff --git a/tools/install_opte.sh b/tools/install_opte.sh index f670adf163..20a33b05a5 100755 --- a/tools/install_opte.sh +++ b/tools/install_opte.sh @@ -51,6 +51,26 @@ fi # Grab the version of the opte package to install OPTE_VERSION="$(cat "$OMICRON_TOP/tools/opte_version")" +OMICRON_FROZEN_PKG_COMMENT="OMICRON-PINNED-PACKAGE" + +# Once we install, we mark the package as frozen at that particular version. +# This makes sure that a `pkg update` won't automatically move us forward +# (and hence defeat the whole point of pinning). +# But this also prevents us from installig the next version so we must +# unfreeze first. +if PKG_FROZEN=$(pkg freeze | grep driver/network/opte); then + FROZEN_COMMENT=$(echo "$PKG_FROZEN" | awk '{ print $(NF) }') + + # Compare the comment to make sure this is indeed our previous doing + if [ "$FROZEN_COMMENT" != "$OMICRON_FROZEN_PKG_COMMENT" ]; then + echo "Found driver/network/opte previously frozen but not by us:" + echo $PKG_FROZEN + exit 1 + fi + + pfexec pkg unfreeze driver/network/opte +fi + # Actually install the xde kernel module and opteadm tool RC=0 pfexec pkg install -v pkg://helios-dev/driver/network/opte@"$OPTE_VERSION" || RC=$? @@ -63,6 +83,13 @@ else exit "$RC" fi +RC=0 +pfexec pkg freeze -c "$OMICRON_FROZEN_PKG_COMMENT" driver/network/opte@"$OPTE_VERSION" || RC=$? 
+if [[ "$RC" -ne 0 ]]; then + echo "Failed to pin opte package to $OPTE_VERSION" + exit $RC +fi + # Check the user's path RC=0 which opteadm > /dev/null || RC=$? diff --git a/tools/uninstall_opte.sh b/tools/uninstall_opte.sh index a833d029aa..c8ee0f5b28 100755 --- a/tools/uninstall_opte.sh +++ b/tools/uninstall_opte.sh @@ -165,6 +165,19 @@ function restore_xde_and_opte { fi } +function unfreeze_opte_pkg { + OMICRON_FROZEN_PKG_COMMENT="OMICRON-PINNED-PACKAGE" + + # If we've frozen a particular version, let's be good citizens + # and clear that as well. + if PKG_FROZEN=$(pkg freeze | grep driver/network/opte); then + FROZEN_COMMENT=$(echo "$PKG_FROZEN" | awk '{ print $(NF) }') + if [ "$FROZEN_COMMENT" == "$OMICRON_FROZEN_PKG_COMMENT" ]; then + pkg unfreeze driver/network/opte + fi + fi +} + function ensure_not_already_on_helios { local RC=0 pkg list "$STOCK_CONSOLIDATION"* || RC=$? @@ -179,5 +192,6 @@ uninstall_xde_and_opte for PUBLISHER in "${PUBLISHERS[@]}"; do remove_publisher "$PUBLISHER" done +unfreeze_opte_pkg ensure_not_already_on_helios to_stock_helios "$CONSOLIDATION" From c76ca69d34729d430f2779bfabb556c33eaf9bd6 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Thu, 5 Oct 2023 18:22:25 -0500 Subject: [PATCH 25/35] [trivial] add doc comments to snapshot ID and image ID on disk response type (#4217) Noticed these fields on the disk response, wondered what they are, and was surprised to see no description in the docs. 
https://docs-2meozyi0z-oxidecomputer.vercel.app/api/disk_view image --- common/src/api/external/mod.rs | 2 ++ openapi/nexus.json | 2 ++ 2 files changed, 4 insertions(+) diff --git a/common/src/api/external/mod.rs b/common/src/api/external/mod.rs index 1d7e6884d1..91ed7e4240 100644 --- a/common/src/api/external/mod.rs +++ b/common/src/api/external/mod.rs @@ -952,7 +952,9 @@ pub struct Disk { #[serde(flatten)] pub identity: IdentityMetadata, pub project_id: Uuid, + /// ID of snapshot from which disk was created, if any pub snapshot_id: Option, + /// ID of image from which disk was created, if any pub image_id: Option, pub size: ByteCount, pub block_size: ByteCount, diff --git a/openapi/nexus.json b/openapi/nexus.json index 779b1f556c..9330b0ef47 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -9615,6 +9615,7 @@ }, "image_id": { "nullable": true, + "description": "ID of image from which disk was created, if any", "type": "string", "format": "uuid" }, @@ -9635,6 +9636,7 @@ }, "snapshot_id": { "nullable": true, + "description": "ID of snapshot from which disk was created, if any", "type": "string", "format": "uuid" }, From 230637ab5c22e4e0d6c82d5198887e2240289958 Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Thu, 5 Oct 2023 19:25:47 -0700 Subject: [PATCH 26/35] `omdb` support for showing devices visible to MGS (#4162) --- Cargo.lock | 5 + clients/gateway-client/src/lib.rs | 16 +- dev-tools/omdb/Cargo.toml | 5 +- dev-tools/omdb/src/bin/omdb/main.rs | 4 + dev-tools/omdb/src/bin/omdb/mgs.rs | 488 ++++++++++++++++++ dev-tools/omdb/tests/successes.out | 105 ++++ dev-tools/omdb/tests/test_all_output.rs | 16 +- dev-tools/omdb/tests/usage_errors.out | 20 + dev-tools/omicron-dev/Cargo.toml | 2 + dev-tools/omicron-dev/src/bin/omicron-dev.rs | 38 ++ .../output/cmd-omicron-dev-noargs-stderr | 1 + 11 files changed, 697 insertions(+), 3 deletions(-) create mode 100644 dev-tools/omdb/src/bin/omdb/mgs.rs diff --git a/Cargo.lock b/Cargo.lock index 27a165c307..aad3a8782a 
100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4951,6 +4951,8 @@ dependencies = [ "dropshot", "expectorate", "futures", + "gateway-messages", + "gateway-test-utils", "libc", "nexus-test-interface", "nexus-test-utils", @@ -5140,6 +5142,9 @@ dependencies = [ "dropshot", "expectorate", "futures", + "gateway-client", + "gateway-messages", + "gateway-test-utils", "humantime", "internal-dns 0.1.0", "ipnetwork", diff --git a/clients/gateway-client/src/lib.rs b/clients/gateway-client/src/lib.rs index 800254b197..b071d34975 100644 --- a/clients/gateway-client/src/lib.rs +++ b/clients/gateway-client/src/lib.rs @@ -48,7 +48,7 @@ progenitor::generate_api!( }), derives = [schemars::JsonSchema], patch = { - SpIdentifier = { derives = [Copy, PartialEq, Hash, Eq, PartialOrd, Ord, Serialize, Deserialize] }, + SpIdentifier = { derives = [Copy, PartialEq, Hash, Eq, Serialize, Deserialize] }, SpIgnition = { derives = [PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize] }, SpIgnitionSystemType = { derives = [Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize] }, SpState = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize] }, @@ -59,3 +59,17 @@ progenitor::generate_api!( HostPhase2RecoveryImageId = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize] }, }, ); + +// Override the impl of Ord for SpIdentifier because the default one orders the +// fields in a different order than people are likely to want. 
+impl Ord for crate::types::SpIdentifier { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.type_.cmp(&other.type_).then(self.slot.cmp(&other.slot)) + } +} + +impl PartialOrd for crate::types::SpIdentifier { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} diff --git a/dev-tools/omdb/Cargo.toml b/dev-tools/omdb/Cargo.toml index cd4af6e947..ff3c650d6d 100644 --- a/dev-tools/omdb/Cargo.toml +++ b/dev-tools/omdb/Cargo.toml @@ -14,9 +14,12 @@ chrono.workspace = true clap.workspace = true diesel.workspace = true dropshot.workspace = true +futures.workspace = true +gateway-client.workspace = true +gateway-messages.workspace = true +gateway-test-utils.workspace = true humantime.workspace = true internal-dns.workspace = true -futures.workspace = true nexus-client.workspace = true nexus-db-model.workspace = true nexus-db-queries.workspace = true diff --git a/dev-tools/omdb/src/bin/omdb/main.rs b/dev-tools/omdb/src/bin/omdb/main.rs index d1a56e1d80..32141d2809 100644 --- a/dev-tools/omdb/src/bin/omdb/main.rs +++ b/dev-tools/omdb/src/bin/omdb/main.rs @@ -41,6 +41,7 @@ use std::net::SocketAddr; use std::net::SocketAddrV6; mod db; +mod mgs; mod nexus; mod oximeter; mod sled_agent; @@ -57,6 +58,7 @@ async fn main() -> Result<(), anyhow::Error> { match &args.command { OmdbCommands::Db(db) => db.run_cmd(&args, &log).await, + OmdbCommands::Mgs(mgs) => mgs.run_cmd(&args, &log).await, OmdbCommands::Nexus(nexus) => nexus.run_cmd(&args, &log).await, OmdbCommands::Oximeter(oximeter) => oximeter.run_cmd(&log).await, OmdbCommands::SledAgent(sled) => sled.run_cmd(&args, &log).await, @@ -155,6 +157,8 @@ impl Omdb { enum OmdbCommands { /// Query the control plane database (CockroachDB) Db(db::DbArgs), + /// Debug a specific Management Gateway Service instance + Mgs(mgs::MgsArgs), /// Debug a specific Nexus instance Nexus(nexus::NexusArgs), /// Query oximeter collector state diff --git a/dev-tools/omdb/src/bin/omdb/mgs.rs 
b/dev-tools/omdb/src/bin/omdb/mgs.rs new file mode 100644 index 0000000000..d2938418e1 --- /dev/null +++ b/dev-tools/omdb/src/bin/omdb/mgs.rs @@ -0,0 +1,488 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Prototype code for collecting information from systems in the rack + +use crate::Omdb; +use anyhow::Context; +use clap::Args; +use clap::Subcommand; +use futures::StreamExt; +use gateway_client::types::PowerState; +use gateway_client::types::RotSlot; +use gateway_client::types::RotState; +use gateway_client::types::SpComponentCaboose; +use gateway_client::types::SpComponentInfo; +use gateway_client::types::SpIdentifier; +use gateway_client::types::SpIgnition; +use gateway_client::types::SpIgnitionInfo; +use gateway_client::types::SpIgnitionSystemType; +use gateway_client::types::SpState; +use gateway_client::types::SpType; +use tabled::Tabled; + +/// Arguments to the "omdb mgs" subcommand +#[derive(Debug, Args)] +pub struct MgsArgs { + /// URL of an MGS instance to query + #[clap(long, env("OMDB_MGS_URL"))] + mgs_url: Option, + + #[command(subcommand)] + command: MgsCommands, +} + +#[derive(Debug, Subcommand)] +enum MgsCommands { + /// Show information about devices and components visible to MGS + Inventory(InventoryArgs), +} + +#[derive(Debug, Args)] +struct InventoryArgs {} + +impl MgsArgs { + pub(crate) async fn run_cmd( + &self, + omdb: &Omdb, + log: &slog::Logger, + ) -> Result<(), anyhow::Error> { + let mgs_url = match &self.mgs_url { + Some(cli_or_env_url) => cli_or_env_url.clone(), + None => { + eprintln!( + "note: MGS URL not specified. Will pick one from DNS." 
+ ); + let addrs = omdb + .dns_lookup_all( + log.clone(), + internal_dns::ServiceName::ManagementGatewayService, + ) + .await?; + let addr = addrs.into_iter().next().expect( + "expected at least one MGS address from \ + successful DNS lookup", + ); + format!("http://{}", addr) + } + }; + eprintln!("note: using MGS URL {}", &mgs_url); + let mgs_client = gateway_client::Client::new(&mgs_url, log.clone()); + + match &self.command { + MgsCommands::Inventory(inventory_args) => { + cmd_mgs_inventory(&mgs_client, inventory_args).await + } + } + } +} + +/// Runs `omdb mgs inventory` +/// +/// Shows devices and components that are visible to an MGS instance. +async fn cmd_mgs_inventory( + mgs_client: &gateway_client::Client, + _args: &InventoryArgs, +) -> Result<(), anyhow::Error> { + // Report all the SP identifiers that MGS is configured to talk to. + println!("ALL CONFIGURED SPs\n"); + let mut sp_ids = mgs_client + .sp_all_ids() + .await + .context("listing SP identifiers")? + .into_inner(); + sp_ids.sort(); + show_sp_ids(&sp_ids)?; + println!(""); + + // Report which SPs are visible via Ignition. + println!("SPs FOUND THROUGH IGNITION\n"); + let mut sp_list_ignition = mgs_client + .ignition_list() + .await + .context("listing ignition")? + .into_inner(); + sp_list_ignition.sort_by(|a, b| a.id.cmp(&b.id)); + show_sps_from_ignition(&sp_list_ignition)?; + println!(""); + + // Print basic state about each SP that's visible to ignition. + println!("SERVICE PROCESSOR STATES\n"); + let mgs_client = std::sync::Arc::new(mgs_client); + let c = &mgs_client; + let mut sp_infos = + futures::stream::iter(sp_list_ignition.iter().filter_map(|ignition| { + if matches!(ignition.details, SpIgnition::Yes { .. 
}) { + Some(ignition.id) + } else { + None + } + })) + .then(|sp_id| async move { + c.sp_get(sp_id.type_, sp_id.slot) + .await + .with_context(|| format!("fetching info about SP {:?}", sp_id)) + .map(|s| (sp_id, s)) + }) + .collect::>>() + .await + .into_iter() + .filter_map(|r| match r { + Ok((sp_id, v)) => Some((sp_id, v.into_inner())), + Err(error) => { + eprintln!("error: {:?}", error); + None + } + }) + .collect::>(); + sp_infos.sort(); + show_sp_states(&sp_infos)?; + println!(""); + + // Print detailed information about each SP that we've found so far. + for (sp_id, sp_state) in &sp_infos { + show_sp_details(&mgs_client, sp_id, sp_state).await?; + } + + Ok(()) +} + +fn sp_type_to_str(s: &SpType) -> &'static str { + match s { + SpType::Sled => "Sled", + SpType::Power => "Power", + SpType::Switch => "Switch", + } +} + +fn show_sp_ids(sp_ids: &[SpIdentifier]) -> Result<(), anyhow::Error> { + #[derive(Tabled)] + #[tabled(rename_all = "SCREAMING_SNAKE_CASE")] + struct SpIdRow { + #[tabled(rename = "TYPE")] + type_: &'static str, + slot: u32, + } + + impl<'a> From<&'a SpIdentifier> for SpIdRow { + fn from(id: &SpIdentifier) -> Self { + SpIdRow { type_: sp_type_to_str(&id.type_), slot: id.slot } + } + } + + let table_rows = sp_ids.iter().map(SpIdRow::from); + let table = tabled::Table::new(table_rows) + .with(tabled::settings::Style::empty()) + .with(tabled::settings::Padding::new(0, 1, 0, 0)) + .to_string(); + println!("{}", textwrap::indent(&table.to_string(), " ")); + Ok(()) +} + +fn show_sps_from_ignition( + sp_list_ignition: &[SpIgnitionInfo], +) -> Result<(), anyhow::Error> { + #[derive(Tabled)] + #[tabled(rename_all = "SCREAMING_SNAKE_CASE")] + struct IgnitionRow { + #[tabled(rename = "TYPE")] + type_: &'static str, + slot: u32, + system_type: String, + } + + impl<'a> From<&'a SpIgnitionInfo> for IgnitionRow { + fn from(value: &SpIgnitionInfo) -> Self { + IgnitionRow { + type_: sp_type_to_str(&value.id.type_), + slot: value.id.slot, + system_type: match 
value.details { + SpIgnition::No => "-".to_string(), + SpIgnition::Yes { + id: SpIgnitionSystemType::Gimlet, + .. + } => "Gimlet".to_string(), + SpIgnition::Yes { + id: SpIgnitionSystemType::Sidecar, + .. + } => "Sidecar".to_string(), + SpIgnition::Yes { + id: SpIgnitionSystemType::Psc, .. + } => "PSC".to_string(), + SpIgnition::Yes { + id: SpIgnitionSystemType::Unknown(v), + .. + } => format!("unknown: type {}", v), + }, + } + } + } + + let table_rows = sp_list_ignition.iter().map(IgnitionRow::from); + let table = tabled::Table::new(table_rows) + .with(tabled::settings::Style::empty()) + .with(tabled::settings::Padding::new(0, 1, 0, 0)) + .to_string(); + println!("{}", textwrap::indent(&table.to_string(), " ")); + Ok(()) +} + +fn show_sp_states( + sp_states: &[(SpIdentifier, SpState)], +) -> Result<(), anyhow::Error> { + #[derive(Tabled)] + #[tabled(rename_all = "SCREAMING_SNAKE_CASE")] + struct SpStateRow<'a> { + #[tabled(rename = "TYPE")] + type_: &'static str, + slot: u32, + model: String, + serial: String, + rev: u32, + hubris: &'a str, + pwr: &'static str, + rot_active: String, + } + + impl<'a> From<&'a (SpIdentifier, SpState)> for SpStateRow<'a> { + fn from((id, v): &'a (SpIdentifier, SpState)) -> Self { + SpStateRow { + type_: sp_type_to_str(&id.type_), + slot: id.slot, + model: v.model.clone(), + serial: v.serial_number.clone(), + rev: v.revision, + hubris: &v.hubris_archive_id, + pwr: match v.power_state { + PowerState::A0 => "A0", + PowerState::A1 => "A1", + PowerState::A2 => "A2", + }, + rot_active: match &v.rot { + RotState::CommunicationFailed { message } => { + format!("error: {}", message) + } + RotState::Enabled { active: RotSlot::A, .. } => { + "slot A".to_string() + } + RotState::Enabled { active: RotSlot::B, .. 
} => { + "slot B".to_string() + } + }, + } + } + } + + let table_rows = sp_states.iter().map(SpStateRow::from); + let table = tabled::Table::new(table_rows) + .with(tabled::settings::Style::empty()) + .with(tabled::settings::Padding::new(0, 1, 0, 0)) + .to_string(); + println!("{}", textwrap::indent(&table.to_string(), " ")); + Ok(()) +} + +const COMPONENTS_WITH_CABOOSES: &'static [&'static str] = &["sp", "rot"]; + +async fn show_sp_details( + mgs_client: &gateway_client::Client, + sp_id: &SpIdentifier, + sp_state: &SpState, +) -> Result<(), anyhow::Error> { + println!( + "SP DETAILS: type {:?} slot {}\n", + sp_type_to_str(&sp_id.type_), + sp_id.slot + ); + + println!(" ROOT OF TRUST\n"); + match &sp_state.rot { + RotState::CommunicationFailed { message } => { + println!(" error: {}", message); + } + RotState::Enabled { + active, + pending_persistent_boot_preference, + persistent_boot_preference, + slot_a_sha3_256_digest, + slot_b_sha3_256_digest, + transient_boot_preference, + } => { + #[derive(Tabled)] + #[tabled(rename_all = "SCREAMING_SNAKE_CASE")] + struct Row { + name: &'static str, + value: String, + } + + let rows = vec![ + Row { + name: "active slot", + value: format!("slot {:?}", active), + }, + Row { + name: "persistent boot preference", + value: format!("slot {:?}", persistent_boot_preference), + }, + Row { + name: "pending persistent boot preference", + value: pending_persistent_boot_preference + .map(|s| format!("slot {:?}", s)) + .unwrap_or_else(|| "-".to_string()), + }, + Row { + name: "transient boot preference", + value: transient_boot_preference + .map(|s| format!("slot {:?}", s)) + .unwrap_or_else(|| "-".to_string()), + }, + Row { + name: "slot A SHA3 256 digest", + value: slot_a_sha3_256_digest + .clone() + .unwrap_or_else(|| "-".to_string()), + }, + Row { + name: "slot B SHA3 256 digest", + value: slot_b_sha3_256_digest + .clone() + .unwrap_or_else(|| "-".to_string()), + }, + ]; + + let table = tabled::Table::new(rows) + 
.with(tabled::settings::Style::empty()) + .with(tabled::settings::Padding::new(0, 1, 0, 0)) + .to_string(); + println!("{}", textwrap::indent(&table.to_string(), " ")); + println!(""); + } + } + + let component_list = mgs_client + .sp_component_list(sp_id.type_, sp_id.slot) + .await + .with_context(|| format!("fetching components for SP {:?}", sp_id)); + let list = match component_list { + Ok(l) => l.into_inner(), + Err(e) => { + eprintln!("error: {:#}", e); + return Ok(()); + } + }; + + #[derive(Tabled)] + #[tabled(rename_all = "SCREAMING_SNAKE_CASE")] + struct SpComponentRow<'a> { + name: &'a str, + description: &'a str, + device: &'a str, + presence: String, + serial: String, + } + + impl<'a> From<&'a SpComponentInfo> for SpComponentRow<'a> { + fn from(v: &'a SpComponentInfo) -> Self { + SpComponentRow { + name: &v.component, + description: &v.description, + device: &v.device, + presence: format!("{:?}", v.presence), + serial: format!("{:?}", v.serial_number), + } + } + } + + if list.components.is_empty() { + println!(" COMPONENTS: none found\n"); + return Ok(()); + } + + let table_rows = list.components.iter().map(SpComponentRow::from); + let table = tabled::Table::new(table_rows) + .with(tabled::settings::Style::empty()) + .with(tabled::settings::Padding::new(0, 1, 0, 0)) + .to_string(); + println!(" COMPONENTS\n"); + println!("{}", textwrap::indent(&table.to_string(), " ")); + println!(""); + + #[derive(Tabled)] + #[tabled(rename_all = "SCREAMING_SNAKE_CASE")] + struct CabooseRow { + component: String, + board: String, + git_commit: String, + name: String, + version: String, + } + + impl<'a> From<(&'a SpIdentifier, &'a SpComponentInfo, SpComponentCaboose)> + for CabooseRow + { + fn from( + (_sp_id, component, caboose): ( + &'a SpIdentifier, + &'a SpComponentInfo, + SpComponentCaboose, + ), + ) -> Self { + CabooseRow { + component: component.component.clone(), + board: caboose.board, + git_commit: caboose.git_commit, + name: caboose.name, + version: 
caboose.version.unwrap_or_else(|| "-".to_string()), + } + } + } + + let mut cabooses = Vec::new(); + for c in &list.components { + if !COMPONENTS_WITH_CABOOSES.contains(&c.component.as_str()) { + continue; + } + + for i in 0..1 { + let r = mgs_client + .sp_component_caboose_get( + sp_id.type_, + sp_id.slot, + &c.component, + i, + ) + .await + .with_context(|| { + format!( + "get caboose for sp type {:?} sp slot {} \ + component {:?} slot {}", + sp_id.type_, sp_id.slot, &c.component, i + ) + }); + match r { + Ok(v) => { + cabooses.push(CabooseRow::from((sp_id, c, v.into_inner()))) + } + Err(error) => { + eprintln!("warn: {:#}", error); + } + } + } + } + + if cabooses.is_empty() { + println!(" CABOOSES: none found\n"); + return Ok(()); + } + + let table = tabled::Table::new(cabooses) + .with(tabled::settings::Style::empty()) + .with(tabled::settings::Padding::new(0, 1, 0, 0)) + .to_string(); + println!(" COMPONENT CABOOSES\n"); + println!("{}", textwrap::indent(&table.to_string(), " ")); + println!(""); + + Ok(()) +} diff --git a/dev-tools/omdb/tests/successes.out b/dev-tools/omdb/tests/successes.out index b1464cb824..eb075a84ea 100644 --- a/dev-tools/omdb/tests/successes.out +++ b/dev-tools/omdb/tests/successes.out @@ -84,6 +84,111 @@ stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable note: database schema version matches expected (5.0.0) ============================================= +EXECUTING COMMAND: omdb ["mgs", "inventory"] +termination: Exited(0) +--------------------------------------------- +stdout: +ALL CONFIGURED SPs + + TYPE SLOT + Sled 0 + Sled 1 + Switch 0 + Switch 1 + +SPs FOUND THROUGH IGNITION + + TYPE SLOT SYSTEM_TYPE + Sled 0 Gimlet + Sled 1 Gimlet + Switch 0 Sidecar + Switch 1 Sidecar + +SERVICE PROCESSOR STATES + + TYPE SLOT MODEL SERIAL REV HUBRIS PWR ROT_ACTIVE + Sled 0 FAKE_SIM_GIMLET SimGimlet00 0 0000000000000000 A2 slot A + Sled 1 FAKE_SIM_GIMLET SimGimlet01 0 0000000000000000 A2 slot A + Switch 0 
FAKE_SIM_SIDECAR SimSidecar0 0 0000000000000000 A2 slot A + Switch 1 FAKE_SIM_SIDECAR SimSidecar1 0 0000000000000000 A2 slot A + +SP DETAILS: type "Sled" slot 0 + + ROOT OF TRUST + + NAME VALUE + active slot slot A + persistent boot preference slot A + pending persistent boot preference - + transient boot preference - + slot A SHA3 256 digest - + slot B SHA3 256 digest - + + COMPONENTS + + NAME DESCRIPTION DEVICE PRESENCE SERIAL + sp3-host-cpu FAKE host cpu sp3-host-cpu Present None + dev-0 FAKE temperature sensor fake-tmp-sensor Failed None + + CABOOSES: none found + +SP DETAILS: type "Sled" slot 1 + + ROOT OF TRUST + + NAME VALUE + active slot slot A + persistent boot preference slot A + pending persistent boot preference - + transient boot preference - + slot A SHA3 256 digest - + slot B SHA3 256 digest - + + COMPONENTS + + NAME DESCRIPTION DEVICE PRESENCE SERIAL + sp3-host-cpu FAKE host cpu sp3-host-cpu Present None + + CABOOSES: none found + +SP DETAILS: type "Switch" slot 0 + + ROOT OF TRUST + + NAME VALUE + active slot slot A + persistent boot preference slot A + pending persistent boot preference - + transient boot preference - + slot A SHA3 256 digest - + slot B SHA3 256 digest - + + COMPONENTS + + NAME DESCRIPTION DEVICE PRESENCE SERIAL + dev-0 FAKE temperature sensor 1 fake-tmp-sensor Present None + dev-1 FAKE temperature sensor 2 fake-tmp-sensor Failed None + + CABOOSES: none found + +SP DETAILS: type "Switch" slot 1 + + ROOT OF TRUST + + NAME VALUE + active slot slot A + persistent boot preference slot A + pending persistent boot preference - + transient boot preference - + slot A SHA3 256 digest - + slot B SHA3 256 digest - + + COMPONENTS: none found + +--------------------------------------------- +stderr: +note: using MGS URL http://[::1]:REDACTED_PORT/ +============================================= EXECUTING COMMAND: omdb ["nexus", "background-tasks", "doc"] termination: Exited(0) --------------------------------------------- diff --git 
a/dev-tools/omdb/tests/test_all_output.rs b/dev-tools/omdb/tests/test_all_output.rs index d757369ead..90e93ee429 100644 --- a/dev-tools/omdb/tests/test_all_output.rs +++ b/dev-tools/omdb/tests/test_all_output.rs @@ -42,6 +42,7 @@ async fn test_omdb_usage_errors() { &["db", "dns", "names"], &["db", "services"], &["db", "network"], + &["mgs"], &["nexus"], &["nexus", "background-tasks"], &["sled-agent"], @@ -58,10 +59,16 @@ async fn test_omdb_usage_errors() { #[nexus_test] async fn test_omdb_success_cases(cptestctx: &ControlPlaneTestContext) { + let gwtestctx = gateway_test_utils::setup::test_setup( + "test_omdb_success_case", + gateway_messages::SpPort::One, + ) + .await; let cmd_path = path_to_executable(CMD_OMDB); let postgres_url = cptestctx.database.listen_url(); let nexus_internal_url = format!("http://{}/", cptestctx.internal_client.bind_address); + let mgs_url = format!("http://{}/", gwtestctx.client.bind_address); let mut output = String::new(); let invocations: &[&[&'static str]] = &[ &["db", "dns", "show"], @@ -70,6 +77,7 @@ async fn test_omdb_success_cases(cptestctx: &ControlPlaneTestContext) { &["db", "services", "list-instances"], &["db", "services", "list-by-sled"], &["db", "sleds"], + &["mgs", "inventory"], &["nexus", "background-tasks", "doc"], &["nexus", "background-tasks", "show"], // We can't easily test the sled agent output because that's only @@ -81,9 +89,14 @@ async fn test_omdb_success_cases(cptestctx: &ControlPlaneTestContext) { println!("running commands with args: {:?}", args); let p = postgres_url.to_string(); let u = nexus_internal_url.clone(); + let g = mgs_url.clone(); do_run( &mut output, - move |exec| exec.env("OMDB_DB_URL", &p).env("OMDB_NEXUS_URL", &u), + move |exec| { + exec.env("OMDB_DB_URL", &p) + .env("OMDB_NEXUS_URL", &u) + .env("OMDB_MGS_URL", &g) + }, &cmd_path, args, ) @@ -91,6 +104,7 @@ async fn test_omdb_success_cases(cptestctx: &ControlPlaneTestContext) { } assert_contents("tests/successes.out", &output); + 
gwtestctx.teardown().await; } /// Verify that we properly deal with cases where: diff --git a/dev-tools/omdb/tests/usage_errors.out b/dev-tools/omdb/tests/usage_errors.out index dc2a16bc47..7bedc3ecbc 100644 --- a/dev-tools/omdb/tests/usage_errors.out +++ b/dev-tools/omdb/tests/usage_errors.out @@ -10,6 +10,7 @@ Usage: omdb [OPTIONS] Commands: db Query the control plane database (CockroachDB) + mgs Debug a specific Management Gateway Service instance nexus Debug a specific Nexus instance oximeter Query oximeter collector state sled-agent Debug a specific Sled @@ -33,6 +34,7 @@ Usage: omdb [OPTIONS] Commands: db Query the control plane database (CockroachDB) + mgs Debug a specific Management Gateway Service instance nexus Debug a specific Nexus instance oximeter Query oximeter collector state sled-agent Debug a specific Sled @@ -208,6 +210,24 @@ Options: --verbose Print out raw data structures from the data store -h, --help Print help ============================================= +EXECUTING COMMAND: omdb ["mgs"] +termination: Exited(2) +--------------------------------------------- +stdout: +--------------------------------------------- +stderr: +Debug a specific Management Gateway Service instance + +Usage: omdb mgs [OPTIONS] + +Commands: + inventory Show information about devices and components visible to MGS + help Print this message or the help of the given subcommand(s) + +Options: + --mgs-url URL of an MGS instance to query [env: OMDB_MGS_URL=] + -h, --help Print help +============================================= EXECUTING COMMAND: omdb ["nexus"] termination: Exited(2) --------------------------------------------- diff --git a/dev-tools/omicron-dev/Cargo.toml b/dev-tools/omicron-dev/Cargo.toml index 5439b69c76..251ee16c01 100644 --- a/dev-tools/omicron-dev/Cargo.toml +++ b/dev-tools/omicron-dev/Cargo.toml @@ -13,6 +13,8 @@ camino.workspace = true clap.workspace = true dropshot.workspace = true futures.workspace = true +gateway-messages.workspace = true 
+gateway-test-utils.workspace = true libc.workspace = true nexus-test-utils.workspace = true nexus-test-interface.workspace = true diff --git a/dev-tools/omicron-dev/src/bin/omicron-dev.rs b/dev-tools/omicron-dev/src/bin/omicron-dev.rs index 14617d6ba4..9107766d8a 100644 --- a/dev-tools/omicron-dev/src/bin/omicron-dev.rs +++ b/dev-tools/omicron-dev/src/bin/omicron-dev.rs @@ -30,6 +30,7 @@ async fn main() -> Result<(), anyhow::Error> { OmicronDb::DbPopulate { ref args } => cmd_db_populate(args).await, OmicronDb::DbWipe { ref args } => cmd_db_wipe(args).await, OmicronDb::ChRun { ref args } => cmd_clickhouse_run(args).await, + OmicronDb::MgsRun { ref args } => cmd_mgs_run(args).await, OmicronDb::RunAll { ref args } => cmd_run_all(args).await, OmicronDb::CertCreate { ref args } => cmd_cert_create(args).await, }; @@ -68,6 +69,12 @@ enum OmicronDb { args: ChRunArgs, }, + /// Run a simulated Management Gateway Service for development + MgsRun { + #[clap(flatten)] + args: MgsRunArgs, + }, + /// Run a full simulated control plane RunAll { #[clap(flatten)] @@ -465,3 +472,34 @@ fn write_private_file( .with_context(|| format!("open {:?} for writing", path))?; file.write_all(contents).with_context(|| format!("write to {:?}", path)) } + +#[derive(Clone, Debug, Args)] +struct MgsRunArgs {} + +async fn cmd_mgs_run(_args: &MgsRunArgs) -> Result<(), anyhow::Error> { + // Start a stream listening for SIGINT + let signals = Signals::new(&[SIGINT]).expect("failed to wait for SIGINT"); + let mut signal_stream = signals.fuse(); + + println!("omicron-dev: setting up MGS ... "); + let gwtestctx = gateway_test_utils::setup::test_setup( + "omicron-dev", + gateway_messages::SpPort::One, + ) + .await; + println!("omicron-dev: MGS is running."); + + let addr = gwtestctx.client.bind_address; + println!("omicron-dev: MGS API: http://{:?}", addr); + + // Wait for a signal. 
+ let caught_signal = signal_stream.next().await; + assert_eq!(caught_signal.unwrap(), SIGINT); + eprintln!( + "omicron-dev: caught signal, shutting down and removing \ + temporary directory" + ); + + gwtestctx.teardown().await; + Ok(()) +} diff --git a/dev-tools/omicron-dev/tests/output/cmd-omicron-dev-noargs-stderr b/dev-tools/omicron-dev/tests/output/cmd-omicron-dev-noargs-stderr index f3c28e1ab9..ac1c87e165 100644 --- a/dev-tools/omicron-dev/tests/output/cmd-omicron-dev-noargs-stderr +++ b/dev-tools/omicron-dev/tests/output/cmd-omicron-dev-noargs-stderr @@ -7,6 +7,7 @@ Commands: db-populate Populate an existing CockroachDB cluster with the Omicron schema db-wipe Wipe the Omicron schema (and all data) from an existing CockroachDB cluster ch-run Run a ClickHouse database server for development + mgs-run Run a simulated Management Gateway Service for development run-all Run a full simulated control plane cert-create Create a self-signed certificate for use with Omicron help Print this message or the help of the given subcommand(s) From 7ab9c1936a714a10e3b51ee1214cb21e08ec0af8 Mon Sep 17 00:00:00 2001 From: Kyle Simpson Date: Fri, 6 Oct 2023 15:36:11 +0900 Subject: [PATCH 27/35] Chore: Networking stack update (#4169) Automated dendrite updates have been stalled on: * a breaking API change in oxidecomputer/dendrite#0933cb0, * a breaking behavioural change in oxidecomputer/dendrite#616862d and its accompanying sidecar-lite/npuzone change. This PR updates these dependencies and pulls in the OPTE version needed to handle the new switch logic on ingress traffic. Once merged, Helios users will need to reinstall dependencies. 
--- Cargo.lock | 12 ++++++------ Cargo.toml | 4 ++-- nexus/src/app/sagas/switch_port_settings_apply.rs | 4 ++-- package-manifest.toml | 12 ++++++------ sled-agent/src/bootstrap/early_networking.rs | 3 +-- tools/ci_download_softnpu_machinery | 2 +- tools/dendrite_openapi_version | 4 ++-- tools/dendrite_stub_checksums | 6 +++--- tools/opte_version | 2 +- wicketd/src/preflight_check/uplink.rs | 2 +- 10 files changed, 25 insertions(+), 26 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index aad3a8782a..18e0e15c3b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3390,7 +3390,7 @@ dependencies = [ [[package]] name = "illumos-sys-hdrs" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=98d33125413f01722947e322f82caf9d22209434#98d33125413f01722947e322f82caf9d22209434" +source = "git+https://github.com/oxidecomputer/opte?rev=631c2017f19cafb1535f621e9e5aa9198ccad869#631c2017f19cafb1535f621e9e5aa9198ccad869" [[package]] name = "illumos-utils" @@ -3811,7 +3811,7 @@ dependencies = [ [[package]] name = "kstat-macro" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=98d33125413f01722947e322f82caf9d22209434#98d33125413f01722947e322f82caf9d22209434" +source = "git+https://github.com/oxidecomputer/opte?rev=631c2017f19cafb1535f621e9e5aa9198ccad869#631c2017f19cafb1535f621e9e5aa9198ccad869" dependencies = [ "quote", "syn 1.0.109", @@ -5583,7 +5583,7 @@ dependencies = [ [[package]] name = "opte" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=98d33125413f01722947e322f82caf9d22209434#98d33125413f01722947e322f82caf9d22209434" +source = "git+https://github.com/oxidecomputer/opte?rev=631c2017f19cafb1535f621e9e5aa9198ccad869#631c2017f19cafb1535f621e9e5aa9198ccad869" dependencies = [ "cfg-if 0.1.10", "dyn-clone", @@ -5600,7 +5600,7 @@ dependencies = [ [[package]] name = "opte-api" version = "0.1.0" -source = 
"git+https://github.com/oxidecomputer/opte?rev=98d33125413f01722947e322f82caf9d22209434#98d33125413f01722947e322f82caf9d22209434" +source = "git+https://github.com/oxidecomputer/opte?rev=631c2017f19cafb1535f621e9e5aa9198ccad869#631c2017f19cafb1535f621e9e5aa9198ccad869" dependencies = [ "cfg-if 0.1.10", "illumos-sys-hdrs", @@ -5613,7 +5613,7 @@ dependencies = [ [[package]] name = "opte-ioctl" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=98d33125413f01722947e322f82caf9d22209434#98d33125413f01722947e322f82caf9d22209434" +source = "git+https://github.com/oxidecomputer/opte?rev=631c2017f19cafb1535f621e9e5aa9198ccad869#631c2017f19cafb1535f621e9e5aa9198ccad869" dependencies = [ "libc", "libnet", @@ -5693,7 +5693,7 @@ dependencies = [ [[package]] name = "oxide-vpc" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=98d33125413f01722947e322f82caf9d22209434#98d33125413f01722947e322f82caf9d22209434" +source = "git+https://github.com/oxidecomputer/opte?rev=631c2017f19cafb1535f621e9e5aa9198ccad869#631c2017f19cafb1535f621e9e5aa9198ccad869" dependencies = [ "cfg-if 0.1.10", "illumos-sys-hdrs", diff --git a/Cargo.toml b/Cargo.toml index 1ca8a02886..3b83b2f7c5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -245,7 +245,7 @@ omicron-sled-agent = { path = "sled-agent" } omicron-test-utils = { path = "test-utils" } omicron-zone-package = "0.8.3" oxide-client = { path = "clients/oxide-client" } -oxide-vpc = { git = "https://github.com/oxidecomputer/opte", rev = "98d33125413f01722947e322f82caf9d22209434", features = [ "api", "std" ] } +oxide-vpc = { git = "https://github.com/oxidecomputer/opte", rev = "631c2017f19cafb1535f621e9e5aa9198ccad869", features = [ "api", "std" ] } once_cell = "1.18.0" openapi-lint = { git = "https://github.com/oxidecomputer/openapi-lint", branch = "main" } openapiv3 = "1.0" @@ -253,7 +253,7 @@ openapiv3 = "1.0" openssl = "0.10" openssl-sys = "0.9" openssl-probe = "0.1.2" -opte-ioctl = { git = 
"https://github.com/oxidecomputer/opte", rev = "98d33125413f01722947e322f82caf9d22209434" } +opte-ioctl = { git = "https://github.com/oxidecomputer/opte", rev = "631c2017f19cafb1535f621e9e5aa9198ccad869" } oso = "0.26" owo-colors = "3.5.0" oximeter = { path = "oximeter/oximeter" } diff --git a/nexus/src/app/sagas/switch_port_settings_apply.rs b/nexus/src/app/sagas/switch_port_settings_apply.rs index 07d4dd17fb..687613f0cc 100644 --- a/nexus/src/app/sagas/switch_port_settings_apply.rs +++ b/nexus/src/app/sagas/switch_port_settings_apply.rs @@ -175,7 +175,7 @@ pub(crate) fn api_to_dpd_port_settings( .to_string(), RouteSettingsV4 { link_id: link_id.0, - nexthop: Some(gw), + nexthop: gw, vid: r.vid.map(Into::into), }, ); @@ -194,7 +194,7 @@ pub(crate) fn api_to_dpd_port_settings( .to_string(), RouteSettingsV6 { link_id: link_id.0, - nexthop: Some(gw), + nexthop: gw, vid: r.vid.map(Into::into), }, ); diff --git a/package-manifest.toml b/package-manifest.toml index ff229e5def..a7f8683eee 100644 --- a/package-manifest.toml +++ b/package-manifest.toml @@ -458,8 +458,8 @@ only_for_targets.image = "standard" # 2. Copy dendrite.tar.gz from dendrite/out to omicron/out source.type = "prebuilt" source.repo = "dendrite" -source.commit = "363e365135cfa46d7f7558d8670f35aa8fe412e9" -source.sha256 = "2dc34eaac7eb9d320594f3ac125df6a601fe020e0b3c7f16eb0a5ebddc8e18b9" +source.commit = "7712104585266a2898da38c1345210ad26f9e71d" +source.sha256 = "486b0b016c0df06947810b90f3a3dd40423f0ee6f255ed079dc8e5618c9a7281" output.type = "zone" output.intermediate_only = true @@ -483,8 +483,8 @@ only_for_targets.image = "standard" # 2. 
Copy the output zone image from dendrite/out to omicron/out source.type = "prebuilt" source.repo = "dendrite" -source.commit = "363e365135cfa46d7f7558d8670f35aa8fe412e9" -source.sha256 = "1616eb25ab3d3a8b678b6cf3675af7ba61d455c3e6c2ba2a2d35a663861bc8e8" +source.commit = "7712104585266a2898da38c1345210ad26f9e71d" +source.sha256 = "76ff76d3526323c3fcbe2351cf9fbda4840e0dc11cd0eb6b71a3e0bd36c5e5e8" output.type = "zone" output.intermediate_only = true @@ -501,8 +501,8 @@ only_for_targets.image = "standard" # 2. Copy dendrite.tar.gz from dendrite/out to omicron/out/dendrite-softnpu.tar.gz source.type = "prebuilt" source.repo = "dendrite" -source.commit = "363e365135cfa46d7f7558d8670f35aa8fe412e9" -source.sha256 = "a045e6dbb84dbceaf3a8a7dc33d283449fbeaf081442d0ae14ce8d8ffcdda4e9" +source.commit = "7712104585266a2898da38c1345210ad26f9e71d" +source.sha256 = "b8e5c176070f9bc9ea0028de1999c77d66ea3438913664163975964effe4481b" output.type = "zone" output.intermediate_only = true diff --git a/sled-agent/src/bootstrap/early_networking.rs b/sled-agent/src/bootstrap/early_networking.rs index 78e54b3db4..61d4c84af3 100644 --- a/sled-agent/src/bootstrap/early_networking.rs +++ b/sled-agent/src/bootstrap/early_networking.rs @@ -495,14 +495,13 @@ impl<'a> EarlyNetworkSetup<'a> { e )) })?; - let nexthop = Some(uplink_config.gateway_ip); dpd_port_settings.v4_routes.insert( Ipv4Cidr { prefix: "0.0.0.0".parse().unwrap(), prefix_len: 0 } .to_string(), RouteSettingsV4 { link_id: link_id.0, vid: uplink_config.uplink_vid, - nexthop, + nexthop: uplink_config.gateway_ip, }, ); Ok((ipv6_entry, dpd_port_settings, port_id)) diff --git a/tools/ci_download_softnpu_machinery b/tools/ci_download_softnpu_machinery index 2575d6a186..d37d428476 100755 --- a/tools/ci_download_softnpu_machinery +++ b/tools/ci_download_softnpu_machinery @@ -15,7 +15,7 @@ OUT_DIR="out/npuzone" # Pinned commit for softnpu ASIC simulator SOFTNPU_REPO="softnpu" -SOFTNPU_COMMIT="64beaff129b7f63a04a53dd5ed0ec09f012f5756" 
+SOFTNPU_COMMIT="41b3a67b3d44f51528816ff8e539b4001df48305" # This is the softnpu ASIC simulator echo "fetching npuzone" diff --git a/tools/dendrite_openapi_version b/tools/dendrite_openapi_version index cbdbca7662..b1f210a647 100644 --- a/tools/dendrite_openapi_version +++ b/tools/dendrite_openapi_version @@ -1,2 +1,2 @@ -COMMIT="363e365135cfa46d7f7558d8670f35aa8fe412e9" -SHA2="4da5edf1571a550a90aa8679a25c1535d2b02154dfb6034f170e421c2633bc31" +COMMIT="7712104585266a2898da38c1345210ad26f9e71d" +SHA2="cb3f0cfbe6216d2441d34e0470252e0fb142332e47b33b65c24ef7368a694b6d" diff --git a/tools/dendrite_stub_checksums b/tools/dendrite_stub_checksums index acff400104..9538bc0d00 100644 --- a/tools/dendrite_stub_checksums +++ b/tools/dendrite_stub_checksums @@ -1,3 +1,3 @@ -CIDL_SHA256_ILLUMOS="2dc34eaac7eb9d320594f3ac125df6a601fe020e0b3c7f16eb0a5ebddc8e18b9" -CIDL_SHA256_LINUX_DPD="5a976d1e43031f4790d1cd2f42d226b47c1be9c998917666f21cfaa3a7b13939" -CIDL_SHA256_LINUX_SWADM="38680e69364ffbfc43fea524786580d151ff45ce2f1802bd5179599f7c80e5f8" +CIDL_SHA256_ILLUMOS="486b0b016c0df06947810b90f3a3dd40423f0ee6f255ed079dc8e5618c9a7281" +CIDL_SHA256_LINUX_DPD="af97aaf7e1046a5c651d316c384171df6387b4c54c8ae4a3ef498e532eaa5a4c" +CIDL_SHA256_LINUX_SWADM="909e400dcc9880720222c6dc3919404d83687f773f668160f66f38b51a81c188" diff --git a/tools/opte_version b/tools/opte_version index 83a91f78b4..2dbaeb7154 100644 --- a/tools/opte_version +++ b/tools/opte_version @@ -1 +1 @@ -0.23.173 +0.23.181 diff --git a/wicketd/src/preflight_check/uplink.rs b/wicketd/src/preflight_check/uplink.rs index c0f5d0c6bb..58955d04d6 100644 --- a/wicketd/src/preflight_check/uplink.rs +++ b/wicketd/src/preflight_check/uplink.rs @@ -777,7 +777,7 @@ fn build_port_settings( DPD_DEFAULT_IPV4_CIDR.parse().unwrap(), RouteSettingsV4 { link_id: link_id.0, - nexthop: Some(uplink.gateway_ip), + nexthop: uplink.gateway_ip, vid: uplink.uplink_vid, }, ); From c03029373982a528d372c68f3015506f15dd2a07 Mon Sep 17 00:00:00 2001 From: Laura 
Abbott Date: Fri, 6 Oct 2023 07:07:06 -0700 Subject: [PATCH 28/35] Pick up correctly signed RoT sidecar images (#4216) --- tools/dvt_dock_version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/dvt_dock_version b/tools/dvt_dock_version index e2151b846f..790bd3ec26 100644 --- a/tools/dvt_dock_version +++ b/tools/dvt_dock_version @@ -1 +1 @@ -COMMIT=65f1979c1d3f4d0874a64144941cc41b46a70c80 +COMMIT=7cbfa19bad077a3c42976357a317d18291533ba2 From cf3bdaee3885dc34c838c5587e92787b772133a9 Mon Sep 17 00:00:00 2001 From: Patrick Mooney Date: Fri, 6 Oct 2023 16:40:00 -0500 Subject: [PATCH 29/35] Update Propolis to include UART fix This updates the Propolis packaging to use a version with the fix to oxidecomputer/propolis#540. --- Cargo.lock | 22 +++++++++++----------- Cargo.toml | 6 +++--- package-manifest.toml | 4 ++-- 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 18e0e15c3b..ec4efec0fc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -485,7 +485,7 @@ dependencies = [ [[package]] name = "bhyve_api" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=42c878b71a58d430dfc306126af5d40ca816d70f#42c878b71a58d430dfc306126af5d40ca816d70f" +source = "git+https://github.com/oxidecomputer/propolis?rev=901b710b6e5bd05a94a323693c2b971e7e7b240e#901b710b6e5bd05a94a323693c2b971e7e7b240e" dependencies = [ "bhyve_api_sys", "libc", @@ -495,7 +495,7 @@ dependencies = [ [[package]] name = "bhyve_api_sys" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=42c878b71a58d430dfc306126af5d40ca816d70f#42c878b71a58d430dfc306126af5d40ca816d70f" +source = "git+https://github.com/oxidecomputer/propolis?rev=901b710b6e5bd05a94a323693c2b971e7e7b240e#901b710b6e5bd05a94a323693c2b971e7e7b240e" dependencies = [ "libc", "strum", @@ -1225,7 +1225,7 @@ dependencies = [ [[package]] name = "cpuid_profile_config" version = "0.0.0" -source = 
"git+https://github.com/oxidecomputer/propolis?rev=42c878b71a58d430dfc306126af5d40ca816d70f#42c878b71a58d430dfc306126af5d40ca816d70f" +source = "git+https://github.com/oxidecomputer/propolis?rev=901b710b6e5bd05a94a323693c2b971e7e7b240e#901b710b6e5bd05a94a323693c2b971e7e7b240e" dependencies = [ "propolis", "serde", @@ -2016,7 +2016,7 @@ checksum = "7e1a8646b2c125eeb9a84ef0faa6d2d102ea0d5da60b824ade2743263117b848" [[package]] name = "dladm" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=42c878b71a58d430dfc306126af5d40ca816d70f#42c878b71a58d430dfc306126af5d40ca816d70f" +source = "git+https://github.com/oxidecomputer/propolis?rev=901b710b6e5bd05a94a323693c2b971e7e7b240e#901b710b6e5bd05a94a323693c2b971e7e7b240e" dependencies = [ "libc", "strum", @@ -6593,7 +6593,7 @@ dependencies = [ [[package]] name = "propolis" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=42c878b71a58d430dfc306126af5d40ca816d70f#42c878b71a58d430dfc306126af5d40ca816d70f" +source = "git+https://github.com/oxidecomputer/propolis?rev=901b710b6e5bd05a94a323693c2b971e7e7b240e#901b710b6e5bd05a94a323693c2b971e7e7b240e" dependencies = [ "anyhow", "bhyve_api", @@ -6626,7 +6626,7 @@ dependencies = [ [[package]] name = "propolis-client" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=42c878b71a58d430dfc306126af5d40ca816d70f#42c878b71a58d430dfc306126af5d40ca816d70f" +source = "git+https://github.com/oxidecomputer/propolis?rev=901b710b6e5bd05a94a323693c2b971e7e7b240e#901b710b6e5bd05a94a323693c2b971e7e7b240e" dependencies = [ "async-trait", "base64 0.21.4", @@ -6650,7 +6650,7 @@ dependencies = [ [[package]] name = "propolis-server" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=42c878b71a58d430dfc306126af5d40ca816d70f#42c878b71a58d430dfc306126af5d40ca816d70f" +source = 
"git+https://github.com/oxidecomputer/propolis?rev=901b710b6e5bd05a94a323693c2b971e7e7b240e#901b710b6e5bd05a94a323693c2b971e7e7b240e" dependencies = [ "anyhow", "async-trait", @@ -6702,7 +6702,7 @@ dependencies = [ [[package]] name = "propolis-server-config" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=42c878b71a58d430dfc306126af5d40ca816d70f#42c878b71a58d430dfc306126af5d40ca816d70f" +source = "git+https://github.com/oxidecomputer/propolis?rev=901b710b6e5bd05a94a323693c2b971e7e7b240e#901b710b6e5bd05a94a323693c2b971e7e7b240e" dependencies = [ "cpuid_profile_config", "serde", @@ -6714,7 +6714,7 @@ dependencies = [ [[package]] name = "propolis_types" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=42c878b71a58d430dfc306126af5d40ca816d70f#42c878b71a58d430dfc306126af5d40ca816d70f" +source = "git+https://github.com/oxidecomputer/propolis?rev=901b710b6e5bd05a94a323693c2b971e7e7b240e#901b710b6e5bd05a94a323693c2b971e7e7b240e" dependencies = [ "schemars", "serde", @@ -9761,7 +9761,7 @@ checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "viona_api" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=42c878b71a58d430dfc306126af5d40ca816d70f#42c878b71a58d430dfc306126af5d40ca816d70f" +source = "git+https://github.com/oxidecomputer/propolis?rev=901b710b6e5bd05a94a323693c2b971e7e7b240e#901b710b6e5bd05a94a323693c2b971e7e7b240e" dependencies = [ "libc", "viona_api_sys", @@ -9770,7 +9770,7 @@ dependencies = [ [[package]] name = "viona_api_sys" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=42c878b71a58d430dfc306126af5d40ca816d70f#42c878b71a58d430dfc306126af5d40ca816d70f" +source = "git+https://github.com/oxidecomputer/propolis?rev=901b710b6e5bd05a94a323693c2b971e7e7b240e#901b710b6e5bd05a94a323693c2b971e7e7b240e" dependencies = [ "libc", ] diff --git a/Cargo.toml b/Cargo.toml index 3b83b2f7c5..9388b2c7d6 100644 --- 
a/Cargo.toml +++ b/Cargo.toml @@ -277,9 +277,9 @@ pretty-hex = "0.3.0" proc-macro2 = "1.0" progenitor = { git = "https://github.com/oxidecomputer/progenitor", branch = "main" } progenitor-client = { git = "https://github.com/oxidecomputer/progenitor", branch = "main" } -bhyve_api = { git = "https://github.com/oxidecomputer/propolis", rev = "42c878b71a58d430dfc306126af5d40ca816d70f" } -propolis-client = { git = "https://github.com/oxidecomputer/propolis", rev = "42c878b71a58d430dfc306126af5d40ca816d70f", features = [ "generated-migration" ] } -propolis-server = { git = "https://github.com/oxidecomputer/propolis", rev = "42c878b71a58d430dfc306126af5d40ca816d70f", default-features = false, features = ["mock-only"] } +bhyve_api = { git = "https://github.com/oxidecomputer/propolis", rev = "901b710b6e5bd05a94a323693c2b971e7e7b240e" } +propolis-client = { git = "https://github.com/oxidecomputer/propolis", rev = "901b710b6e5bd05a94a323693c2b971e7e7b240e", features = [ "generated-migration" ] } +propolis-server = { git = "https://github.com/oxidecomputer/propolis", rev = "901b710b6e5bd05a94a323693c2b971e7e7b240e", default-features = false, features = ["mock-only"] } proptest = "1.2.0" quote = "1.0" rand = "0.8.5" diff --git a/package-manifest.toml b/package-manifest.toml index a7f8683eee..7cf235c24a 100644 --- a/package-manifest.toml +++ b/package-manifest.toml @@ -406,10 +406,10 @@ service_name = "propolis-server" only_for_targets.image = "standard" source.type = "prebuilt" source.repo = "propolis" -source.commit = "42c878b71a58d430dfc306126af5d40ca816d70f" +source.commit = "901b710b6e5bd05a94a323693c2b971e7e7b240e" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/propolis/image//propolis-server.sha256.txt -source.sha256 = "dce4d82bb936e990262abcaa279eee7e33a19930880b23f49fa3851cded18567" +source.sha256 = "0f681cdbe7312f66fd3c99fe033b379e49c59fa4ad04d307f68b12514307e976" output.type = "zone" 
[package.maghemite] From 9f004d2759b7ae827bc5e49d6cc8b5c5956573a8 Mon Sep 17 00:00:00 2001 From: Rain Date: Fri, 6 Oct 2023 21:11:29 -0700 Subject: [PATCH 30/35] [crdb-seed] use a tarball, fix omicron-dev run-all (#4208) Several changes: 1. In https://github.com/oxidecomputer/omicron/issues/4193, @david-crespo observed some missing files in the crdb-seed generated directory. My suspicion is that that is due to the `/tmp` cleaner that runs on macOS. @davepacheco suggested using a tarball to get atomicity (either the file exists or it doesn't), and it ended up being pretty simple to do that at the end. 2. Teach nexus-test-utils to ensure that the seed tarball exists, fixing `omicron-dev run-all` and anything else that uses nexus-test-utils (and avoiding a dependency on the environment). 3. Move `crdb-seed` to `dev-tools` (thanks Dave for pointing it out!) 4. Add a way to invalidate the cache with `CRDB_SEED_INVALIDATE=1` in the environment. 5. Add a readme for `crdb-seed`. Fixes #4206. Hopefully addresses #4193. 
--- .config/nextest.toml | 4 +- Cargo.lock | 27 +- Cargo.toml | 5 +- crdb-seed/src/main.rs | 92 ------- {crdb-seed => dev-tools/crdb-seed}/Cargo.toml | 8 +- dev-tools/crdb-seed/README.md | 11 + dev-tools/crdb-seed/src/main.rs | 39 +++ dev-tools/omicron-dev/Cargo.toml | 2 +- dev-tools/omicron-dev/src/bin/omicron-dev.rs | 10 +- .../omicron-dev/tests/test_omicron_dev.rs | 11 + nexus/test-utils/Cargo.toml | 3 + nexus/test-utils/src/db.rs | 35 ++- nexus/test-utils/src/lib.rs | 105 +++++++- test-utils/Cargo.toml | 11 +- test-utils/src/dev/mod.rs | 107 +++----- test-utils/src/dev/seed.rs | 239 ++++++++++++++++++ workspace-hack/Cargo.toml | 24 +- 17 files changed, 518 insertions(+), 215 deletions(-) delete mode 100644 crdb-seed/src/main.rs rename {crdb-seed => dev-tools/crdb-seed}/Cargo.toml (60%) create mode 100644 dev-tools/crdb-seed/README.md create mode 100644 dev-tools/crdb-seed/src/main.rs create mode 100644 test-utils/src/dev/seed.rs diff --git a/.config/nextest.toml b/.config/nextest.toml index b2a8b360bb..ba07186c8a 100644 --- a/.config/nextest.toml +++ b/.config/nextest.toml @@ -8,7 +8,9 @@ nextest-version = { required = "0.9.59", recommended = "0.9.59" } experimental = ["setup-scripts"] [[profile.default.scripts]] -filter = 'rdeps(nexus-test-utils)' +# Exclude omicron-dev tests from crdb-seed as we explicitly want to simulate an +# environment where the seed file doesn't exist. 
+filter = 'rdeps(nexus-test-utils) - package(omicron-dev)' setup = 'crdb-seed' [profile.ci] diff --git a/Cargo.lock b/Cargo.lock index ec4efec0fc..306e953049 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -366,6 +366,17 @@ version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1181e1e0d1fce796a03db1ae795d67167da795f9cf4a39c37589e85ef57f26d3" +[[package]] +name = "atomicwrites" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1163d9d7c51de51a2b79d6df5e8888d11e9df17c752ce4a285fb6ca1580734e" +dependencies = [ + "rustix 0.37.23", + "tempfile", + "windows-sys 0.48.0", +] + [[package]] name = "atty" version = "0.2.14" @@ -1268,13 +1279,10 @@ dependencies = [ name = "crdb-seed" version = "0.1.0" dependencies = [ - "camino", - "camino-tempfile", + "anyhow", "dropshot", - "hex", "omicron-test-utils", "omicron-workspace-hack", - "ring", "slog", "tokio", ] @@ -5338,11 +5346,15 @@ name = "omicron-test-utils" version = "0.1.0" dependencies = [ "anyhow", + "atomicwrites", "camino", + "camino-tempfile", "dropshot", "expectorate", + "filetime", "futures", "headers", + "hex", "http", "libc", "omicron-common 0.1.0", @@ -5351,9 +5363,11 @@ dependencies = [ "rcgen", "regex", "reqwest", + "ring", "rustls", "slog", "subprocess", + "tar", "tempfile", "thiserror", "tokio", @@ -5436,6 +5450,7 @@ dependencies = [ "regex-syntax 0.7.5", "reqwest", "ring", + "rustix 0.37.23", "rustix 0.38.9", "schemars", "semver 1.0.18", @@ -9456,8 +9471,8 @@ version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ - "cfg-if 0.1.10", - "rand 0.4.6", + "cfg-if 1.0.0", + "rand 0.8.5", "static_assertions", ] diff --git a/Cargo.toml b/Cargo.toml index 9388b2c7d6..da7b582fe3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,7 +16,7 @@ members = [ "clients/sled-agent-client", "clients/wicketd-client", "common", - 
"crdb-seed", + "dev-tools/crdb-seed", "dev-tools/omdb", "dev-tools/omicron-dev", "dev-tools/thing-flinger", @@ -83,6 +83,7 @@ default-members = [ "clients/sled-agent-client", "clients/wicketd-client", "common", + "dev-tools/crdb-seed", "dev-tools/omdb", "dev-tools/omicron-dev", "dev-tools/thing-flinger", @@ -137,6 +138,7 @@ assert_matches = "1.5.0" assert_cmd = "2.0.12" async-bb8-diesel = { git = "https://github.com/oxidecomputer/async-bb8-diesel", rev = "da04c087f835a51e0441addb19c5ef4986e1fcf2" } async-trait = "0.1.73" +atomicwrites = "0.4.1" authz-macros = { path = "nexus/authz-macros" } backoff = { version = "0.4.0", features = [ "tokio" ] } base64 = "0.21.4" @@ -182,6 +184,7 @@ dropshot = { git = "https://github.com/oxidecomputer/dropshot", branch = "main", either = "1.9.0" expectorate = "1.1.0" fatfs = "0.3.6" +filetime = "0.2.22" flate2 = "1.0.27" flume = "0.11.0" foreign-types = "0.3.2" diff --git a/crdb-seed/src/main.rs b/crdb-seed/src/main.rs deleted file mode 100644 index b8572bd886..0000000000 --- a/crdb-seed/src/main.rs +++ /dev/null @@ -1,92 +0,0 @@ -use camino::Utf8PathBuf; -use dropshot::{test_util::LogContext, ConfigLogging, ConfigLoggingLevel}; -use omicron_test_utils::dev; -use slog::Logger; -use std::io::Write; - -// Creates a string identifier for the current DB schema and version. -// -// The goal here is to allow to create different "seed" directories -// for each revision of the DB. 
-fn digest_unique_to_schema() -> String { - let schema = include_str!("../../schema/crdb/dbinit.sql"); - let crdb_version = include_str!("../../tools/cockroachdb_version"); - let mut ctx = ring::digest::Context::new(&ring::digest::SHA256); - ctx.update(&schema.as_bytes()); - ctx.update(&crdb_version.as_bytes()); - let digest = ctx.finish(); - hex::encode(digest.as_ref()) -} - -enum SeedDirectoryStatus { - Created, - Existing, -} - -async fn ensure_seed_directory_exists( - log: &Logger, -) -> (Utf8PathBuf, SeedDirectoryStatus) { - let base_seed_dir = Utf8PathBuf::from_path_buf(std::env::temp_dir()) - .expect("Not a UTF-8 path") - .join("crdb-base"); - std::fs::create_dir_all(&base_seed_dir).unwrap(); - let desired_seed_dir = base_seed_dir.join(digest_unique_to_schema()); - - if desired_seed_dir.exists() { - return (desired_seed_dir, SeedDirectoryStatus::Existing); - } - - // The directory didn't exist when we started, so try to create it. - // - // Nextest will execute it just once, but it is possible for a user to start - // up multiple nextest processes to be running at the same time. So we - // should consider it possible for another caller to create this seed - // directory before we finish setting it up ourselves. - let tmp_seed_dir = - camino_tempfile::Utf8TempDir::new_in(base_seed_dir).unwrap(); - dev::test_setup_database_seed(log, tmp_seed_dir.path()).await; - - // If we can successfully perform the rename, there was either no - // contention or we won a creation race. - // - // If we couldn't perform the rename, the directory might already exist. - // Check that this is the error we encountered -- otherwise, we're - // struggling. 
- if let Err(err) = std::fs::rename(tmp_seed_dir.path(), &desired_seed_dir) { - if !desired_seed_dir.exists() { - panic!("Cannot rename seed directory for CockroachDB: {err}"); - } - } - - (desired_seed_dir, SeedDirectoryStatus::Created) -} - -#[tokio::main] -async fn main() { - // TODO: dropshot is v heavyweight for this, we should be able to pull in a - // smaller binary - let logctx = LogContext::new( - "crdb_seeding", - &ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Info }, - ); - let (dir, status) = ensure_seed_directory_exists(&logctx.log).await; - match status { - SeedDirectoryStatus::Created => { - slog::info!(logctx.log, "Created seed directory: `{dir}`"); - } - SeedDirectoryStatus::Existing => { - slog::info!(logctx.log, "Using existing seed directory: `{dir}`"); - } - } - if let Ok(env_path) = std::env::var("NEXTEST_ENV") { - let mut file = std::fs::File::create(&env_path) - .expect("failed to open NEXTEST_ENV file"); - writeln!(file, "CRDB_SEED_DIR={dir}") - .expect("failed to write to NEXTEST_ENV file"); - } else { - slog::warn!( - logctx.log, - "NEXTEST_ENV not set (is this script running under nextest?)" - ); - } -} diff --git a/crdb-seed/Cargo.toml b/dev-tools/crdb-seed/Cargo.toml similarity index 60% rename from crdb-seed/Cargo.toml rename to dev-tools/crdb-seed/Cargo.toml index 8d6d570d08..aff26995dc 100644 --- a/crdb-seed/Cargo.toml +++ b/dev-tools/crdb-seed/Cargo.toml @@ -3,14 +3,12 @@ name = "crdb-seed" version = "0.1.0" edition = "2021" license = "MPL-2.0" +readme = "README.md" [dependencies] -camino.workspace = true -camino-tempfile.workspace = true +anyhow.workspace = true dropshot.workspace = true -hex.workspace = true -omicron-test-utils.workspace = true -ring.workspace = true +omicron-test-utils = { workspace = true, features = ["seed-gen"] } slog.workspace = true tokio.workspace = true omicron-workspace-hack.workspace = true diff --git a/dev-tools/crdb-seed/README.md b/dev-tools/crdb-seed/README.md new file mode 100644 index 
0000000000..3b77f23066 --- /dev/null +++ b/dev-tools/crdb-seed/README.md @@ -0,0 +1,11 @@ +# crdb-seed + +This is a small utility that creates a seed tarball for our CockroachDB instance +in the temporary directory. It is used as a setup script for nextest (see +`.config/nextest.toml`). + +This utility hashes inputs and attempts to reuse a tarball if it already exists +(see `digest_unique_to_schema` in `omicron/test-utils/src/dev/seed.rs`). + +To invalidate the tarball and cause it to be recreated from scratch, set +`CRDB_SEED_INVALIDATE=1` in the environment. diff --git a/dev-tools/crdb-seed/src/main.rs b/dev-tools/crdb-seed/src/main.rs new file mode 100644 index 0000000000..26b0e19410 --- /dev/null +++ b/dev-tools/crdb-seed/src/main.rs @@ -0,0 +1,39 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use anyhow::{Context, Result}; +use dropshot::{test_util::LogContext, ConfigLogging, ConfigLoggingLevel}; +use omicron_test_utils::dev::seed::{ + ensure_seed_tarball_exists, should_invalidate_seed, +}; +use omicron_test_utils::dev::CRDB_SEED_TAR_ENV; +use std::io::Write; + +#[tokio::main] +async fn main() -> Result<()> { + // TODO: dropshot is v heavyweight for this, we should be able to pull in a + // smaller binary + let logctx = LogContext::new( + "crdb_seeding", + &ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Info }, + ); + let (seed_tar, status) = + ensure_seed_tarball_exists(&logctx.log, should_invalidate_seed()) + .await?; + status.log(&logctx.log, &seed_tar); + + if let Ok(env_path) = std::env::var("NEXTEST_ENV") { + let mut file = std::fs::File::create(&env_path) + .context("failed to open NEXTEST_ENV file")?; + writeln!(file, "{CRDB_SEED_TAR_ENV}={seed_tar}") + .context("failed to write to NEXTEST_ENV file")?; + } else { + slog::warn!( + logctx.log, + "NEXTEST_ENV not set (is this script 
running under nextest?)" + ); + } + + Ok(()) +} diff --git a/dev-tools/omicron-dev/Cargo.toml b/dev-tools/omicron-dev/Cargo.toml index 251ee16c01..ec7cafb559 100644 --- a/dev-tools/omicron-dev/Cargo.toml +++ b/dev-tools/omicron-dev/Cargo.toml @@ -16,7 +16,7 @@ futures.workspace = true gateway-messages.workspace = true gateway-test-utils.workspace = true libc.workspace = true -nexus-test-utils.workspace = true +nexus-test-utils = { workspace = true, features = ["omicron-dev"] } nexus-test-interface.workspace = true omicron-common.workspace = true omicron-nexus.workspace = true diff --git a/dev-tools/omicron-dev/src/bin/omicron-dev.rs b/dev-tools/omicron-dev/src/bin/omicron-dev.rs index 9107766d8a..e79184f7e5 100644 --- a/dev-tools/omicron-dev/src/bin/omicron-dev.rs +++ b/dev-tools/omicron-dev/src/bin/omicron-dev.rs @@ -14,7 +14,6 @@ use futures::stream::StreamExt; use nexus_test_interface::NexusServer; use omicron_common::cmd::fatal; use omicron_common::cmd::CmdError; -use omicron_sled_agent::sim; use omicron_test_utils::dev; use signal_hook::consts::signal::SIGINT; use signal_hook_tokio::Signals; @@ -348,13 +347,12 @@ async fn cmd_run_all(args: &RunAllArgs) -> Result<(), anyhow::Error> { config.deployment.dropshot_external.dropshot.bind_address.set_port(p); } - // Start up a ControlPlaneTestContext, which tautologically sets up - // everything needed for a simulated control plane. println!("omicron-dev: setting up all services ... "); - let cptestctx = nexus_test_utils::test_setup_with_config::< + let cptestctx = nexus_test_utils::omicron_dev_setup_with_config::< omicron_nexus::Server, - >("omicron-dev", &mut config, sim::SimMode::Auto, None) - .await; + >(&mut config) + .await + .context("error setting up services")?; println!("omicron-dev: services are running."); // Print out basic information about what was started. 
diff --git a/dev-tools/omicron-dev/tests/test_omicron_dev.rs b/dev-tools/omicron-dev/tests/test_omicron_dev.rs index f855d8935d..f1e8177243 100644 --- a/dev-tools/omicron-dev/tests/test_omicron_dev.rs +++ b/dev-tools/omicron-dev/tests/test_omicron_dev.rs @@ -13,6 +13,7 @@ use omicron_test_utils::dev::test_cmds::path_to_executable; use omicron_test_utils::dev::test_cmds::run_command; use omicron_test_utils::dev::test_cmds::EXIT_SUCCESS; use omicron_test_utils::dev::test_cmds::EXIT_USAGE; +use omicron_test_utils::dev::CRDB_SEED_TAR_ENV; use oxide_client::ClientHiddenExt; use std::io::BufRead; use std::path::Path; @@ -389,6 +390,16 @@ async fn test_db_run() { // This mirrors the `test_db_run()` test. #[tokio::test] async fn test_run_all() { + // Ensure that the CRDB_SEED_TAR environment variable is not set. We want to + // simulate a user running omicron-dev without the test environment. + // Check if CRDB_SEED_TAR_ENV is set and panic if it is + if let Ok(val) = std::env::var(CRDB_SEED_TAR_ENV) { + panic!( + "CRDB_SEED_TAR_ENV should not be set here, but is set to {}", + val + ); + } + let cmd_path = path_to_omicron_dev(); let cmdstr = format!( diff --git a/nexus/test-utils/Cargo.toml b/nexus/test-utils/Cargo.toml index 8eb8df4a5b..8cd25582be 100644 --- a/nexus/test-utils/Cargo.toml +++ b/nexus/test-utils/Cargo.toml @@ -39,3 +39,6 @@ trust-dns-proto.workspace = true trust-dns-resolver.workspace = true uuid.workspace = true omicron-workspace-hack.workspace = true + +[features] +omicron-dev = ["omicron-test-utils/seed-gen"] diff --git a/nexus/test-utils/src/db.rs b/nexus/test-utils/src/db.rs index 37d7128c49..ff23f35df0 100644 --- a/nexus/test-utils/src/db.rs +++ b/nexus/test-utils/src/db.rs @@ -8,7 +8,7 @@ use camino::Utf8PathBuf; use omicron_test_utils::dev; use slog::Logger; -/// Path to the "seed" CockroachDB directory. +/// Path to the "seed" CockroachDB tarball. 
/// /// Populating CockroachDB unfortunately isn't free - creation of /// tables, indices, and users takes several seconds to complete. @@ -16,20 +16,39 @@ use slog::Logger; /// By creating a "seed" version of the database, we can cut down /// on the time spent performing this operation. Instead, we opt /// to copy the database from this seed location. -fn seed_dir() -> Utf8PathBuf { +fn seed_tar() -> Utf8PathBuf { // The setup script should set this environment variable. - let seed_dir = std::env::var("CRDB_SEED_DIR") - .expect("CRDB_SEED_DIR missing -- are you running this test with `cargo nextest run`?"); + let seed_dir = std::env::var(dev::CRDB_SEED_TAR_ENV).unwrap_or_else(|_| { + panic!( + "{} missing -- are you running this test \ + with `cargo nextest run`?", + dev::CRDB_SEED_TAR_ENV, + ) + }); seed_dir.into() } -/// Wrapper around [`dev::test_setup_database`] which uses a a -/// seed directory provided at build-time. +/// Wrapper around [`dev::test_setup_database`] which uses a seed tarball +/// provided from the environment. pub async fn test_setup_database(log: &Logger) -> dev::db::CockroachInstance { - let dir = seed_dir(); + let input_tar = seed_tar(); dev::test_setup_database( log, - dev::StorageSource::CopyFromSeed { input_dir: dir }, + dev::StorageSource::CopyFromSeed { input_tar }, + ) + .await +} + +/// Wrapper around [`dev::test_setup_database`] which uses a seed tarball +/// provided as an argument. +#[cfg(feature = "omicron-dev")] +pub async fn test_setup_database_from_seed( + log: &Logger, + input_tar: Utf8PathBuf, +) -> dev::db::CockroachInstance { + dev::test_setup_database( + log, + dev::StorageSource::CopyFromSeed { input_tar }, ) .await } diff --git a/nexus/test-utils/src/lib.rs b/nexus/test-utils/src/lib.rs index d219da7e96..34c218b3e2 100644 --- a/nexus/test-utils/src/lib.rs +++ b/nexus/test-utils/src/lib.rs @@ -5,6 +5,7 @@ //! 
Integration testing facilities for Nexus use anyhow::Context; +use anyhow::Result; use camino::Utf8Path; use dns_service_client::types::DnsConfigParams; use dropshot::test_util::ClientTestContext; @@ -284,14 +285,30 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { } pub async fn start_crdb(&mut self, populate: bool) { + let populate = if populate { + PopulateCrdb::FromEnvironmentSeed + } else { + PopulateCrdb::Empty + }; + self.start_crdb_impl(populate).await; + } + + /// Private implementation of `start_crdb` that allows for a seed tarball to + /// be passed in. See [`PopulateCrdb`] for more details. + async fn start_crdb_impl(&mut self, populate: PopulateCrdb) { let log = &self.logctx.log; debug!(log, "Starting CRDB"); // Start up CockroachDB. - let database = if populate { - db::test_setup_database(log).await - } else { - db::test_setup_database_empty(log).await + let database = match populate { + PopulateCrdb::FromEnvironmentSeed => { + db::test_setup_database(log).await + } + #[cfg(feature = "omicron-dev")] + PopulateCrdb::FromSeed { input_tar } => { + db::test_setup_database_from_seed(log, input_tar).await + } + PopulateCrdb::Empty => db::test_setup_database_empty(log).await, }; eprintln!("DB URL: {}", database.pg_config()); @@ -759,17 +776,89 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { } } +/// How to populate CockroachDB. +/// +/// This is private because we want to ensure that tests use the setup script +/// rather than trying to create their own seed tarballs. This may need to be +/// revisited if circumstances change. +#[derive(Clone, Debug)] +enum PopulateCrdb { + /// Populate Cockroach from the `CRDB_SEED_TAR_ENV` environment variable. + /// + /// Any tests that depend on nexus-test-utils should have this environment + /// variable available. + FromEnvironmentSeed, + + /// Populate Cockroach from the seed located at this path. 
+ #[cfg(feature = "omicron-dev")] + FromSeed { input_tar: camino::Utf8PathBuf }, + + /// Do not populate Cockroach. + Empty, +} + +/// Setup routine to use for `omicron-dev`. Use [`test_setup_with_config`] for +/// tests. +/// +/// The main difference from tests is that this routine ensures the seed tarball +/// exists (or creates a seed tarball if it doesn't exist). For tests, this +/// should be done in the `crdb-seed` setup script. +#[cfg(feature = "omicron-dev")] +pub async fn omicron_dev_setup_with_config( + config: &mut omicron_common::nexus_config::Config, +) -> Result> { + let builder = + ControlPlaneTestContextBuilder::::new("omicron-dev", config); + + let log = &builder.logctx.log; + debug!(log, "Ensuring seed tarball exists"); + + // Start up a ControlPlaneTestContext, which tautologically sets up + // everything needed for a simulated control plane. + let why_invalidate = + omicron_test_utils::dev::seed::should_invalidate_seed(); + let (seed_tar, status) = + omicron_test_utils::dev::seed::ensure_seed_tarball_exists( + log, + why_invalidate, + ) + .await + .context("error ensuring seed tarball exists")?; + status.log(log, &seed_tar); + + Ok(setup_with_config_impl( + builder, + PopulateCrdb::FromSeed { input_tar: seed_tar }, + sim::SimMode::Auto, + None, + ) + .await) +} + +/// Setup routine to use for tests. 
pub async fn test_setup_with_config( test_name: &str, config: &mut omicron_common::nexus_config::Config, sim_mode: sim::SimMode, initial_cert: Option, ) -> ControlPlaneTestContext { - let mut builder = - ControlPlaneTestContextBuilder::::new(test_name, config); + let builder = ControlPlaneTestContextBuilder::::new(test_name, config); + setup_with_config_impl( + builder, + PopulateCrdb::FromEnvironmentSeed, + sim_mode, + initial_cert, + ) + .await +} - let populate = true; - builder.start_crdb(populate).await; +async fn setup_with_config_impl( + mut builder: ControlPlaneTestContextBuilder<'_, N>, + populate: PopulateCrdb, + sim_mode: sim::SimMode, + initial_cert: Option, +) -> ControlPlaneTestContext { + builder.start_crdb_impl(populate).await; builder.start_clickhouse().await; builder.start_dendrite(SwitchLocation::Switch0).await; builder.start_dendrite(SwitchLocation::Switch1).await; diff --git a/test-utils/Cargo.toml b/test-utils/Cargo.toml index 9e21f3ca12..7b1f70c79e 100644 --- a/test-utils/Cargo.toml +++ b/test-utils/Cargo.toml @@ -6,20 +6,26 @@ license = "MPL-2.0" [dependencies] anyhow.workspace = true +atomicwrites.workspace = true camino.workspace = true +camino-tempfile.workspace = true dropshot.workspace = true +filetime = { workspace = true, optional = true } futures.workspace = true headers.workspace = true +hex.workspace = true http.workspace = true libc.workspace = true omicron-common.workspace = true pem.workspace = true +ring.workspace = true rustls.workspace = true slog.workspace = true subprocess.workspace = true tempfile.workspace = true thiserror.workspace = true -tokio = { workspace = true, features = [ "full" ] } +tar.workspace = true +tokio = { workspace = true, features = ["full"] } tokio-postgres.workspace = true usdt.workspace = true rcgen.workspace = true @@ -29,3 +35,6 @@ omicron-workspace-hack.workspace = true [dev-dependencies] expectorate.workspace = true + +[features] +seed-gen = ["dep:filetime"] diff --git 
a/test-utils/src/dev/mod.rs b/test-utils/src/dev/mod.rs index ea95a1de76..dbd66fe1f8 100644 --- a/test-utils/src/dev/mod.rs +++ b/test-utils/src/dev/mod.rs @@ -9,55 +9,21 @@ pub mod clickhouse; pub mod db; pub mod dendrite; pub mod poll; +#[cfg(feature = "seed-gen")] +pub mod seed; pub mod test_cmds; -use anyhow::Context; -use camino::Utf8Path; +use anyhow::{Context, Result}; use camino::Utf8PathBuf; pub use dropshot::test_util::LogContext; use dropshot::ConfigLogging; use dropshot::ConfigLoggingIfExists; use dropshot::ConfigLoggingLevel; use slog::Logger; -use std::path::Path; +use std::io::BufReader; -// Helper for copying all the files in one directory to another. -fn copy_dir( - src: impl AsRef, - dst: impl AsRef, -) -> Result<(), anyhow::Error> { - let src = src.as_ref(); - let dst = dst.as_ref(); - std::fs::create_dir_all(&dst) - .with_context(|| format!("Failed to create dst {}", dst.display()))?; - for entry in std::fs::read_dir(src) - .with_context(|| format!("Failed to read_dir {}", src.display()))? - { - let entry = entry.with_context(|| { - format!("Failed to read entry in {}", src.display()) - })?; - let ty = entry.file_type().context("Failed to access file type")?; - let target = dst.join(entry.file_name()); - if ty.is_dir() { - copy_dir(entry.path(), &target).with_context(|| { - format!( - "Failed to copy subdirectory {} to {}", - entry.path().display(), - target.display() - ) - })?; - } else { - std::fs::copy(entry.path(), &target).with_context(|| { - format!( - "Failed to copy file at {} to {}", - entry.path().display(), - target.display() - ) - })?; - } - } - Ok(()) -} +/// The environment variable via which the path to the seed tarball is passed. +pub static CRDB_SEED_TAR_ENV: &str = "CRDB_SEED_TAR"; /// Set up a [`dropshot::test_util::LogContext`] appropriate for a test named /// `test_name` @@ -80,36 +46,9 @@ pub enum StorageSource { DoNotPopulate, /// Populate the latest version of the database. 
PopulateLatest { output_dir: Utf8PathBuf }, - /// Copy the database from a seed directory, which has previously + /// Copy the database from a seed tarball, which has previously /// been created with `PopulateLatest`. - CopyFromSeed { input_dir: Utf8PathBuf }, -} - -/// Creates a [`db::CockroachInstance`] with a populated storage directory. -/// -/// This is intended to optimize subsequent calls to [`test_setup_database`] -/// by reducing the latency of populating the storage directory. -pub async fn test_setup_database_seed(log: &Logger, dir: &Utf8Path) { - let _ = std::fs::remove_dir_all(dir); - std::fs::create_dir_all(dir).unwrap(); - let mut db = setup_database( - log, - StorageSource::PopulateLatest { output_dir: dir.to_owned() }, - ) - .await; - db.cleanup().await.unwrap(); - - // See https://github.com/cockroachdb/cockroach/issues/74231 for context on - // this. We use this assertion to check that our seed directory won't point - // back to itself, even if it is copied elsewhere. - assert_eq!( - 0, - dir.join("temp-dirs-record.txt") - .metadata() - .expect("Cannot access metadata") - .len(), - "Temporary directory record should be empty after graceful shutdown", - ); + CopyFromSeed { input_tar: Utf8PathBuf }, } /// Set up a [`db::CockroachInstance`] for running tests. @@ -118,13 +57,15 @@ pub async fn test_setup_database( source: StorageSource, ) -> db::CockroachInstance { usdt::register_probes().expect("Failed to register USDT DTrace probes"); - setup_database(log, source).await + setup_database(log, source).await.unwrap() } +// TODO: switch to anyhow entirely -- this function is currently a mishmash of +// anyhow and unwrap/expect calls. async fn setup_database( log: &Logger, storage_source: StorageSource, -) -> db::CockroachInstance { +) -> Result { let builder = db::CockroachStarterBuilder::new(); let mut builder = match &storage_source { StorageSource::DoNotPopulate | StorageSource::CopyFromSeed { .. 
} => { @@ -135,7 +76,7 @@ async fn setup_database( } }; builder.redirect_stdio_to_files(); - let starter = builder.build().unwrap(); + let starter = builder.build().context("error building CockroachStarter")?; info!( &log, "cockroach temporary directory: {}", @@ -147,13 +88,22 @@ async fn setup_database( match &storage_source { StorageSource::DoNotPopulate | StorageSource::PopulateLatest { .. } => { } - StorageSource::CopyFromSeed { input_dir } => { + StorageSource::CopyFromSeed { input_tar } => { info!(&log, - "cockroach: copying from seed directory ({}) to storage directory ({})", - input_dir, starter.store_dir().to_string_lossy(), + "cockroach: copying from seed tarball ({}) to storage directory ({})", + input_tar, starter.store_dir().to_string_lossy(), ); - copy_dir(input_dir, starter.store_dir()) - .expect("Cannot copy storage from seed directory"); + let reader = std::fs::File::open(input_tar).with_context(|| { + format!("cannot open input tar {}", input_tar) + })?; + let mut tar = tar::Archive::new(BufReader::new(reader)); + tar.unpack(starter.store_dir()).with_context(|| { + format!( + "cannot unpack input tar {} into {}", + input_tar, + starter.store_dir().display() + ) + })?; } } @@ -184,7 +134,8 @@ async fn setup_database( info!(&log, "cockroach: populated"); } } - database + + Ok(database) } /// Returns whether the given process is currently running diff --git a/test-utils/src/dev/seed.rs b/test-utils/src/dev/seed.rs new file mode 100644 index 0000000000..841ecd5f35 --- /dev/null +++ b/test-utils/src/dev/seed.rs @@ -0,0 +1,239 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+ +use std::io::{BufWriter, Write}; + +use anyhow::{ensure, Context, Result}; +use camino::{Utf8Path, Utf8PathBuf}; +use filetime::FileTime; +use slog::Logger; + +use super::CRDB_SEED_TAR_ENV; + +/// Creates a string identifier for the current DB schema and version. +// +/// The goal here is to allow to create different "seed" tarballs +/// for each revision of the DB. +pub fn digest_unique_to_schema() -> String { + let schema = include_str!("../../../schema/crdb/dbinit.sql"); + let crdb_version = include_str!("../../../tools/cockroachdb_version"); + let mut ctx = ring::digest::Context::new(&ring::digest::SHA256); + ctx.update(&schema.as_bytes()); + ctx.update(&crdb_version.as_bytes()); + let digest = ctx.finish(); + hex::encode(digest.as_ref()) +} + +/// Looks up the standard environment variable `CRDB_SEED_INVALIDATE` to check +/// if a seed should be invalidated. Returns a string to pass in as the +/// `why_invalidate` argument of [`ensure_seed_tarball_exists`]. +pub fn should_invalidate_seed() -> Option<&'static str> { + (std::env::var("CRDB_SEED_INVALIDATE").as_deref() == Ok("1")) + .then_some("CRDB_SEED_INVALIDATE=1 set in environment") +} + +/// The return value of [`ensure_seed_tarball_exists`]. +#[derive(Clone, Copy, Debug)] +pub enum SeedTarballStatus { + Created, + Invalidated, + Existing, +} + +impl SeedTarballStatus { + pub fn log(self, log: &Logger, seed_tar: &Utf8Path) { + match self { + SeedTarballStatus::Created => { + info!(log, "Created CRDB seed tarball: `{seed_tar}`"); + } + SeedTarballStatus::Invalidated => { + info!( + log, + "Invalidated and created new CRDB seed tarball: `{seed_tar}`", + ); + } + SeedTarballStatus::Existing => { + info!(log, "Using existing CRDB seed tarball: `{seed_tar}`"); + } + } + } +} + +/// Ensures that a seed tarball corresponding to the schema returned by +/// [`digest_unique_to_schema`] exists, recreating it if necessary. 
+/// +/// This used to create a directory rather than a tarball, but that was changed +/// due to [Omicron issue +/// #4193](https://github.com/oxidecomputer/omicron/issues/4193). +/// +/// If `why_invalidate` is `Some`, then if the seed tarball exists, it will be +/// deleted before being recreated. +/// +/// # Notes +/// +/// This method should _not_ be used by tests. Instead, rely on the `crdb-seed` +/// setup script. +pub async fn ensure_seed_tarball_exists( + log: &Logger, + why_invalidate: Option<&str>, +) -> Result<(Utf8PathBuf, SeedTarballStatus)> { + // If the CRDB_SEED_TAR_ENV variable is set, return an error. + // + // Even though this module is gated behind a feature flag, omicron-dev needs + // this function -- and so, if you're doing a top-level `cargo nextest run` + // like CI does, feature unification would mean this gets included in test + // binaries anyway. So this acts as a belt-and-suspenders check. + if let Ok(val) = std::env::var(CRDB_SEED_TAR_ENV) { + anyhow::bail!( + "{CRDB_SEED_TAR_ENV} is set to `{val}` -- implying that a test called \ + ensure_seed_tarball_exists. Instead, tests should rely on the `crdb-seed` \ + setup script." + ); + } + + // XXX: we aren't considering cross-user permissions for this file. Might be + // worth setting more restrictive permissions on it, or using a per-user + // cache dir. + let base_seed_dir = Utf8PathBuf::from_path_buf(std::env::temp_dir()) + .expect("Not a UTF-8 path") + .join("crdb-base"); + std::fs::create_dir_all(&base_seed_dir).unwrap(); + let mut desired_seed_tar = base_seed_dir.join(digest_unique_to_schema()); + desired_seed_tar.set_extension("tar"); + + let invalidated = match (desired_seed_tar.exists(), why_invalidate) { + (true, Some(why)) => { + slog::info!( + log, + "{why}: invalidating seed tarball: `{desired_seed_tar}`", + ); + std::fs::remove_file(&desired_seed_tar) + .context("failed to remove seed tarball")?; + true + } + (true, None) => { + // The tarball exists. 
Update its atime and mtime (i.e. `touch` it) + // to ensure that it doesn't get deleted by a /tmp cleaner. + let now = FileTime::now(); + filetime::set_file_times(&desired_seed_tar, now, now) + .context("failed to update seed tarball atime and mtime")?; + return Ok((desired_seed_tar, SeedTarballStatus::Existing)); + } + (false, Some(why)) => { + slog::info!( + log, + "{why}, but seed tarball does not exist: `{desired_seed_tar}`", + ); + false + } + (false, None) => { + // The tarball doesn't exist. + false + } + }; + + // At this point the tarball does not exist (either because it didn't exist + // in the first place or because it was deleted above), so try to create it. + // + // Nextest will execute this function just once via the `crdb-seed` binary, + // but it is possible for a user to start up multiple nextest processes to + // be running at the same time. So we should consider it possible for + // another caller to create this seed tarball before we finish setting it up + // ourselves. + test_setup_database_seed(log, &desired_seed_tar) + .await + .context("failed to setup seed tarball")?; + + let status = if invalidated { + SeedTarballStatus::Invalidated + } else { + SeedTarballStatus::Created + }; + Ok((desired_seed_tar, status)) +} + +/// Creates a seed file for a Cockroach database at the output tarball. +/// +/// This is intended to optimize subsequent calls to +/// [`test_setup_database`](super::test_setup_database) by reducing the latency +/// of populating the storage directory. 
+pub async fn test_setup_database_seed( + log: &Logger, + output_tar: &Utf8Path, +) -> Result<()> { + let base_seed_dir = output_tar.parent().unwrap(); + let tmp_seed_dir = camino_tempfile::Utf8TempDir::new_in(base_seed_dir) + .context("failed to create temporary seed directory")?; + + let mut db = super::setup_database( + log, + super::StorageSource::PopulateLatest { + output_dir: tmp_seed_dir.path().to_owned(), + }, + ) + .await + .context("failed to setup database")?; + db.cleanup().await.context("failed to cleanup database")?; + + // See https://github.com/cockroachdb/cockroach/issues/74231 for context on + // this. We use this assertion to check that our seed directory won't point + // back to itself, even if it is copied elsewhere. + let dirs_record_path = tmp_seed_dir.path().join("temp-dirs-record.txt"); + let dirs_record_len = dirs_record_path + .metadata() + .with_context(|| { + format!("cannot access metadata for {dirs_record_path}") + })? + .len(); + ensure!( + dirs_record_len == 0, + "Temporary directory record should be empty (was {dirs_record_len}) \ + after graceful shutdown", + ); + + let output_tar = output_tar.to_owned(); + + tokio::task::spawn_blocking(move || { + // Tar up the directory -- this prevents issues where some but not all of + // the files get cleaned up by /tmp cleaners. See + // https://github.com/oxidecomputer/omicron/issues/4193. + let atomic_file = atomicwrites::AtomicFile::new( + &output_tar, + // We don't expect this to exist, but if it does, we want to overwrite + // it. That is because there's a remote possibility that multiple + // instances of test_setup_database_seed are running simultaneously. + atomicwrites::OverwriteBehavior::AllowOverwrite, + ); + let res = atomic_file.write(|f| { + // Tar up the directory here. 
+ let writer = BufWriter::new(f); + let mut tar = tar::Builder::new(writer); + tar.follow_symlinks(false); + tar.append_dir_all(".", tmp_seed_dir.path()).with_context( + || { + format!( + "failed to append directory `{}` to tarball", + tmp_seed_dir.path(), + ) + }, + )?; + + let mut writer = + tar.into_inner().context("failed to finish writing tarball")?; + writer.flush().context("failed to flush tarball")?; + + Ok::<_, anyhow::Error>(()) + }); + match res { + Ok(()) => Ok(()), + Err(atomicwrites::Error::Internal(error)) => Err(error) + .with_context(|| { + format!("failed to write seed tarball: `{}`", output_tar) + }), + Err(atomicwrites::Error::User(error)) => Err(error), + } + }) + .await + .context("error in task to tar up contents")? +} diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index 8854ef27bc..106da92f62 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -215,49 +215,56 @@ bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-f hyper-rustls = { version = "0.24.1" } mio = { version = "0.8.8", features = ["net", "os-ext"] } once_cell = { version = "1.18.0", features = ["unstable"] } -rustix = { version = "0.38.9", features = ["fs", "termios"] } +rustix-d585fab2519d2d1 = { package = "rustix", version = "0.38.9", features = ["fs", "termios"] } +rustix-d736d0ac4424f0f1 = { package = "rustix", version = "0.37.23", features = ["fs", "termios"] } [target.x86_64-unknown-linux-gnu.build-dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } hyper-rustls = { version = "0.24.1" } mio = { version = "0.8.8", features = ["net", "os-ext"] } once_cell = { version = "1.18.0", features = ["unstable"] } -rustix = { version = "0.38.9", features = ["fs", "termios"] } +rustix-d585fab2519d2d1 = { package = "rustix", version = "0.38.9", features = ["fs", "termios"] } +rustix-d736d0ac4424f0f1 = { package = "rustix", version = "0.37.23", 
features = ["fs", "termios"] } [target.x86_64-apple-darwin.dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } hyper-rustls = { version = "0.24.1" } mio = { version = "0.8.8", features = ["net", "os-ext"] } once_cell = { version = "1.18.0", features = ["unstable"] } -rustix = { version = "0.38.9", features = ["fs", "termios"] } +rustix-d585fab2519d2d1 = { package = "rustix", version = "0.38.9", features = ["fs", "termios"] } +rustix-d736d0ac4424f0f1 = { package = "rustix", version = "0.37.23", features = ["fs", "termios"] } [target.x86_64-apple-darwin.build-dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } hyper-rustls = { version = "0.24.1" } mio = { version = "0.8.8", features = ["net", "os-ext"] } once_cell = { version = "1.18.0", features = ["unstable"] } -rustix = { version = "0.38.9", features = ["fs", "termios"] } +rustix-d585fab2519d2d1 = { package = "rustix", version = "0.38.9", features = ["fs", "termios"] } +rustix-d736d0ac4424f0f1 = { package = "rustix", version = "0.37.23", features = ["fs", "termios"] } [target.aarch64-apple-darwin.dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } hyper-rustls = { version = "0.24.1" } mio = { version = "0.8.8", features = ["net", "os-ext"] } once_cell = { version = "1.18.0", features = ["unstable"] } -rustix = { version = "0.38.9", features = ["fs", "termios"] } +rustix-d585fab2519d2d1 = { package = "rustix", version = "0.38.9", features = ["fs", "termios"] } +rustix-d736d0ac4424f0f1 = { package = "rustix", version = "0.37.23", features = ["fs", "termios"] } [target.aarch64-apple-darwin.build-dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } hyper-rustls = { version = "0.24.1" } mio = { version = 
"0.8.8", features = ["net", "os-ext"] } once_cell = { version = "1.18.0", features = ["unstable"] } -rustix = { version = "0.38.9", features = ["fs", "termios"] } +rustix-d585fab2519d2d1 = { package = "rustix", version = "0.38.9", features = ["fs", "termios"] } +rustix-d736d0ac4424f0f1 = { package = "rustix", version = "0.37.23", features = ["fs", "termios"] } [target.x86_64-unknown-illumos.dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } hyper-rustls = { version = "0.24.1" } mio = { version = "0.8.8", features = ["net", "os-ext"] } once_cell = { version = "1.18.0", features = ["unstable"] } -rustix = { version = "0.38.9", features = ["fs", "termios"] } +rustix-d585fab2519d2d1 = { package = "rustix", version = "0.38.9", features = ["fs", "termios"] } +rustix-d736d0ac4424f0f1 = { package = "rustix", version = "0.37.23", features = ["fs", "termios"] } toml_datetime = { version = "0.6.3", default-features = false, features = ["serde"] } toml_edit = { version = "0.19.15", features = ["serde"] } @@ -266,7 +273,8 @@ bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-f hyper-rustls = { version = "0.24.1" } mio = { version = "0.8.8", features = ["net", "os-ext"] } once_cell = { version = "1.18.0", features = ["unstable"] } -rustix = { version = "0.38.9", features = ["fs", "termios"] } +rustix-d585fab2519d2d1 = { package = "rustix", version = "0.38.9", features = ["fs", "termios"] } +rustix-d736d0ac4424f0f1 = { package = "rustix", version = "0.37.23", features = ["fs", "termios"] } toml_datetime = { version = "0.6.3", default-features = false, features = ["serde"] } toml_edit = { version = "0.19.15", features = ["serde"] } From d624bce9af25e5bc1141de4dcdb50a15223f106d Mon Sep 17 00:00:00 2001 From: Andy Fiddaman Date: Mon, 9 Oct 2023 20:02:08 +0100 Subject: [PATCH 31/35] Back /var/fm/fmd with a dataset from the boot M.2 (#4212) `/var/fm/fmd` is where the illumos 
fault management system records data. We want to preserve this data across system reboots and in real time rather than via periodic data copying, so that the information is available should the system panic shortly thereafter. Fixes: https://github.com/oxidecomputer/omicron/issues/4211 --- illumos-utils/src/zfs.rs | 41 ++++-- sled-agent/src/backing_fs.rs | 178 +++++++++++++++++++++++++ sled-agent/src/bootstrap/pre_server.rs | 1 + sled-agent/src/lib.rs | 1 + sled-agent/src/sled_agent.rs | 22 ++- sled-agent/src/storage_manager.rs | 1 + sled-agent/src/swap_device.rs | 3 - sled-hardware/src/disk.rs | 15 ++- 8 files changed, 242 insertions(+), 20 deletions(-) create mode 100644 sled-agent/src/backing_fs.rs diff --git a/illumos-utils/src/zfs.rs b/illumos-utils/src/zfs.rs index ba8cd8c84a..9118a9a3cd 100644 --- a/illumos-utils/src/zfs.rs +++ b/illumos-utils/src/zfs.rs @@ -61,6 +61,9 @@ enum EnsureFilesystemErrorRaw { #[error("Failed to mount encrypted filesystem: {0}")] MountEncryptedFsFailed(crate::ExecutionError), + + #[error("Failed to mount overlay filesystem: {0}")] + MountOverlayFsFailed(crate::ExecutionError), } /// Error returned by [`Zfs::ensure_filesystem`]. @@ -202,6 +205,7 @@ impl Zfs { /// Creates a new ZFS filesystem named `name`, unless one already exists. /// /// Applies an optional quota, provided _in bytes_. 
+ #[allow(clippy::too_many_arguments)] pub fn ensure_filesystem( name: &str, mountpoint: Mountpoint, @@ -209,6 +213,7 @@ impl Zfs { do_format: bool, encryption_details: Option, size_details: Option, + additional_options: Option>, ) -> Result<(), EnsureFilesystemError> { let (exists, mounted) = Self::dataset_exists(name, &mountpoint)?; if exists { @@ -261,7 +266,14 @@ impl Zfs { ]); } + if let Some(opts) = additional_options { + for o in &opts { + cmd.args(&["-o", &o]); + } + } + cmd.args(&["-o", &format!("mountpoint={}", mountpoint), name]); + execute(cmd).map_err(|err| EnsureFilesystemError { name: name.to_string(), mountpoint: mountpoint.clone(), @@ -322,6 +334,20 @@ impl Zfs { Ok(()) } + pub fn mount_overlay_dataset( + name: &str, + mountpoint: &Mountpoint, + ) -> Result<(), EnsureFilesystemError> { + let mut command = std::process::Command::new(PFEXEC); + let cmd = command.args(&[ZFS, "mount", "-O", name]); + execute(cmd).map_err(|err| EnsureFilesystemError { + name: name.to_string(), + mountpoint: mountpoint.clone(), + err: EnsureFilesystemErrorRaw::MountOverlayFsFailed(err), + })?; + Ok(()) + } + // Return (true, mounted) if the dataset exists, (false, false) otherwise, // where mounted is if the dataset is mounted. fn dataset_exists( @@ -385,7 +411,7 @@ impl Zfs { Zfs::get_value(filesystem_name, &format!("oxide:{}", name)) } - fn get_value( + pub fn get_value( filesystem_name: &str, name: &str, ) -> Result { @@ -422,13 +448,12 @@ pub fn get_all_omicron_datasets_for_delete() -> anyhow::Result> { let internal = pool.kind() == crate::zpool::ZpoolKind::Internal; let pool = pool.to_string(); for dataset in &Zfs::list_datasets(&pool)? { - // Avoid erasing crashdump datasets on internal pools - if dataset == "crash" && internal { - continue; - } - - // The swap device might be in use, so don't assert that it can be deleted. - if dataset == "swap" && internal { + // Avoid erasing crashdump, backing data and swap datasets on + // internal pools. 
The swap device may be in use. + if internal + && (["crash", "backing", "swap"].contains(&dataset.as_str()) + || dataset.starts_with("backing/")) + { continue; } diff --git a/sled-agent/src/backing_fs.rs b/sled-agent/src/backing_fs.rs new file mode 100644 index 0000000000..5014ac5999 --- /dev/null +++ b/sled-agent/src/backing_fs.rs @@ -0,0 +1,178 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Operations for dealing with persistent backing mounts for OS data + +// On Oxide hardware, the root filesystem is backed by a ramdisk and +// non-persistent. However, there are several things within the root filesystem +// which are useful to preserve across reboots, and these are backed persistent +// datasets on the boot disk. +// +// Each boot disk contains a dataset sled_hardware::disk::M2_BACKING_DATASET +// and for each backing mount, a child dataset is created under there that +// is configured with the desired mountpoint in the root filesystem. Since +// there are multiple disks which can be used to boot, these datasets are also +// marked with the "canmount=noauto" attribute so that they do not all try to +// mount automatically and race -- only one could ever succeed. This allows us +// to come along later and specifically mount the one that we want (the one from +// the current boot disk) and also perform an overlay mount so that it succeeds +// even if there is content from the ramdisk image or early boot services +// present underneath. The overlay mount action is optionally bracketed with a +// service stop/start. 
+ +use camino::Utf8PathBuf; +use illumos_utils::zfs::{ + EnsureFilesystemError, GetValueError, Mountpoint, SizeDetails, Zfs, +}; + +#[derive(Debug, thiserror::Error)] +pub enum BackingFsError { + #[error("Error administering service: {0}")] + Adm(#[from] smf::AdmError), + + #[error("Error retrieving dataset property: {0}")] + DatasetProperty(#[from] GetValueError), + + #[error("Error initializing dataset: {0}")] + Mount(#[from] EnsureFilesystemError), +} + +struct BackingFs { + // Dataset name + name: &'static str, + // Mountpoint + mountpoint: &'static str, + // Optional quota, in _bytes_ + quota: Option, + // Optional compression mode + compression: Option<&'static str>, + // Linked service + service: Option<&'static str>, +} + +impl BackingFs { + const fn new(name: &'static str) -> Self { + Self { + name, + mountpoint: "legacy", + quota: None, + compression: None, + service: None, + } + } + + const fn mountpoint(mut self, mountpoint: &'static str) -> Self { + self.mountpoint = mountpoint; + self + } + + const fn quota(mut self, quota: usize) -> Self { + self.quota = Some(quota); + self + } + + const fn compression(mut self, compression: &'static str) -> Self { + self.compression = Some(compression); + self + } + + const fn service(mut self, service: &'static str) -> Self { + self.service = Some(service); + self + } +} + +const BACKING_FMD_DATASET: &'static str = "fmd"; +const BACKING_FMD_MOUNTPOINT: &'static str = "/var/fm/fmd"; +const BACKING_FMD_SERVICE: &'static str = "svc:/system/fmd:default"; +const BACKING_FMD_QUOTA: usize = 500 * (1 << 20); // 500 MiB + +const BACKING_COMPRESSION: &'static str = "on"; + +const BACKINGFS_COUNT: usize = 1; +static BACKINGFS: [BackingFs; BACKINGFS_COUNT] = + [BackingFs::new(BACKING_FMD_DATASET) + .mountpoint(BACKING_FMD_MOUNTPOINT) + .quota(BACKING_FMD_QUOTA) + .compression(BACKING_COMPRESSION) + .service(BACKING_FMD_SERVICE)]; + +/// Ensure that the backing filesystems are mounted. 
+/// If the underlying dataset for a backing fs does not exist on the specified +/// boot disk then it will be created. +pub(crate) fn ensure_backing_fs( + log: &slog::Logger, + boot_zpool_name: &illumos_utils::zpool::ZpoolName, +) -> Result<(), BackingFsError> { + let log = log.new(o!( + "component" => "BackingFs", + )); + for bfs in BACKINGFS.iter() { + info!(log, "Processing {}", bfs.name); + + let dataset = format!( + "{}/{}/{}", + boot_zpool_name, + sled_hardware::disk::M2_BACKING_DATASET, + bfs.name + ); + let mountpoint = Mountpoint::Path(Utf8PathBuf::from(bfs.mountpoint)); + + info!(log, "Ensuring dataset {}", dataset); + + let size_details = Some(SizeDetails { + quota: bfs.quota, + compression: bfs.compression, + }); + + Zfs::ensure_filesystem( + &dataset, + mountpoint.clone(), + false, // zoned + true, // do_format + None, // encryption_details, + size_details, + Some(vec!["canmount=noauto".to_string()]), // options + )?; + + // Check if a ZFS filesystem is already mounted on bfs.mountpoint by + // retrieving the ZFS `mountpoint` property and comparing it. This + // might seem counter-intuitive but if there is a filesystem mounted + // there, its mountpoint will match, and if not then we will retrieve + // the mountpoint of a higher level filesystem, such as '/'. If we + // can't retrieve the property at all, then there is definitely no ZFS + // filesystem mounted there - most likely we are running with a non-ZFS + // root, such as when net booted during CI. 
+ if Zfs::get_value(&bfs.mountpoint, "mountpoint") + .unwrap_or("not-zfs".to_string()) + == bfs.mountpoint + { + info!(log, "{} is already mounted", bfs.mountpoint); + return Ok(()); + } + + if let Some(service) = bfs.service { + info!(log, "Stopping service {}", service); + smf::Adm::new() + .disable() + .temporary() + .synchronous() + .run(smf::AdmSelection::ByPattern(&[service]))?; + } + + info!(log, "Mounting {} on {}", dataset, mountpoint); + + Zfs::mount_overlay_dataset(&dataset, &mountpoint)?; + + if let Some(service) = bfs.service { + info!(log, "Starting service {}", service); + smf::Adm::new() + .enable() + .synchronous() + .run(smf::AdmSelection::ByPattern(&[service]))?; + } + } + + Ok(()) +} diff --git a/sled-agent/src/bootstrap/pre_server.rs b/sled-agent/src/bootstrap/pre_server.rs index 0899bdd82f..71325fef3d 100644 --- a/sled-agent/src/bootstrap/pre_server.rs +++ b/sled-agent/src/bootstrap/pre_server.rs @@ -381,6 +381,7 @@ fn ensure_zfs_ramdisk_dataset() -> Result<(), StartError> { do_format, encryption_details, quota, + None, ) .map_err(StartError::EnsureZfsRamdiskDataset) } diff --git a/sled-agent/src/lib.rs b/sled-agent/src/lib.rs index 5c4dbd8310..4e7921c605 100644 --- a/sled-agent/src/lib.rs +++ b/sled-agent/src/lib.rs @@ -17,6 +17,7 @@ pub mod sim; pub mod common; // Modules for the non-simulated sled agent. 
+mod backing_fs; pub mod bootstrap; pub mod config; mod http_entrypoints; diff --git a/sled-agent/src/sled_agent.rs b/sled-agent/src/sled_agent.rs index 7e62f6a8a7..5574edca55 100644 --- a/sled-agent/src/sled_agent.rs +++ b/sled-agent/src/sled_agent.rs @@ -59,9 +59,15 @@ use illumos_utils::{dladm::MockDladm as Dladm, zone::MockZones as Zones}; #[derive(thiserror::Error, Debug)] pub enum Error { + #[error("Could not find boot disk")] + BootDiskNotFound, + #[error("Configuration error: {0}")] Config(#[from] crate::config::ConfigError), + #[error("Error setting up backing filesystems: {0}")] + BackingFs(#[from] crate::backing_fs::BackingFsError), + #[error("Error setting up swap device: {0}")] SwapDevice(#[from] crate::swap_device::SwapDeviceError), @@ -268,14 +274,17 @@ impl SledAgent { )); info!(&log, "SledAgent::new(..) starting"); - // Configure a swap device of the configured size before other system setup. + let boot_disk = storage + .resources() + .boot_disk() + .await + .ok_or_else(|| Error::BootDiskNotFound)?; + + // Configure a swap device of the configured size before other system + // setup. match config.swap_device_size_gb { Some(sz) if sz > 0 => { info!(log, "Requested swap device of size {} GiB", sz); - let boot_disk = - storage.resources().boot_disk().await.ok_or_else(|| { - crate::swap_device::SwapDeviceError::BootDiskNotFound - })?; crate::swap_device::ensure_swap_device( &parent_log, &boot_disk.1, @@ -290,6 +299,9 @@ impl SledAgent { } } + info!(log, "Mounting backing filesystems"); + crate::backing_fs::ensure_backing_fs(&parent_log, &boot_disk.1)?; + // Ensure we have a thread that automatically reaps process contracts // when they become empty. See the comments in // illumos-utils/src/running_zone.rs for more detail. 
diff --git a/sled-agent/src/storage_manager.rs b/sled-agent/src/storage_manager.rs index bd71371396..c31a4dc0bc 100644 --- a/sled-agent/src/storage_manager.rs +++ b/sled-agent/src/storage_manager.rs @@ -417,6 +417,7 @@ impl StorageWorker { do_format, encryption_details, size_details, + None, )?; // Ensure the dataset has a usable UUID. if let Ok(id_str) = Zfs::get_oxide_value(&fs_name, "uuid") { diff --git a/sled-agent/src/swap_device.rs b/sled-agent/src/swap_device.rs index 5a8f40adbd..6a00b42672 100644 --- a/sled-agent/src/swap_device.rs +++ b/sled-agent/src/swap_device.rs @@ -9,9 +9,6 @@ use zeroize::Zeroize; #[derive(Debug, thiserror::Error)] pub enum SwapDeviceError { - #[error("Could not find boot disk")] - BootDiskNotFound, - #[error("Error running ZFS command: {0}")] Zfs(illumos_utils::ExecutionError), diff --git a/sled-hardware/src/disk.rs b/sled-hardware/src/disk.rs index aec99ae3f8..e3078cbeea 100644 --- a/sled-hardware/src/disk.rs +++ b/sled-hardware/src/disk.rs @@ -256,6 +256,7 @@ pub const CRASH_DATASET: &'static str = "crash"; pub const CLUSTER_DATASET: &'static str = "cluster"; pub const CONFIG_DATASET: &'static str = "config"; pub const M2_DEBUG_DATASET: &'static str = "debug"; +pub const M2_BACKING_DATASET: &'static str = "backing"; // TODO-correctness: This value of 100GiB is a pretty wild guess, and should be // tuned as needed. pub const DEBUG_DATASET_QUOTA: usize = 100 * (1 << 30); @@ -282,7 +283,7 @@ static U2_EXPECTED_DATASETS: [ExpectedDataset; U2_EXPECTED_DATASET_COUNT] = [ .compression(DUMP_DATASET_COMPRESSION), ]; -const M2_EXPECTED_DATASET_COUNT: usize = 5; +const M2_EXPECTED_DATASET_COUNT: usize = 6; static M2_EXPECTED_DATASETS: [ExpectedDataset; M2_EXPECTED_DATASET_COUNT] = [ // Stores software images. // @@ -290,7 +291,11 @@ static M2_EXPECTED_DATASETS: [ExpectedDataset; M2_EXPECTED_DATASET_COUNT] = [ ExpectedDataset::new(INSTALL_DATASET), // Stores crash dumps. 
ExpectedDataset::new(CRASH_DATASET), - // Stores cluter configuration information. + // Backing store for OS data that should be persisted across reboots. + // Its children are selectively overlay mounted onto parts of the ramdisk + // root. + ExpectedDataset::new(M2_BACKING_DATASET), + // Stores cluster configuration information. + // // Should be duplicated to both M.2s. ExpectedDataset::new(CLUSTER_DATASET), @@ -524,6 +529,7 @@ impl Disk { do_format, Some(encryption_details), None, + None, ); keyfile.zero_and_unlink().await.map_err(|error| { @@ -562,8 +568,8 @@ impl Disk { "Automatically destroying dataset: {}", name ); Zfs::destroy_dataset(name).or_else(|err| { - // If we can't find the dataset, that's fine -- it might - // not have been formatted yet. + // If we can't find the dataset, that's fine -- it + // might not have been formatted yet. if let DestroyDatasetErrorVariant::NotFound = err.err { @@ -588,6 +594,7 @@ impl Disk { do_format, encryption_details, size_details, + None, )?; if dataset.wipe { From 47a6b42c986c65292ee61b0c79090fa57dec5fe9 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Mon, 9 Oct 2023 16:35:01 -0500 Subject: [PATCH 32/35] [nexus] Add `/v1/ping` endpoint (#3925) Closes #3923 Adds `/v1/ping` that always returns `{ "status": "ok" }` if it returns anything at all. I went with `ping` over the initial `/v1/system/health` because the latter is vague about its meaning, whereas everyone knows ping means a trivial request and response. I also thought it was weird to put an endpoint with no auth check under `/v1/system`, where ~all the other endpoints require fleet-level perms. This doesn't add too much over hitting an existing endpoint, but I think it's worth it because * It doesn't hit the DB * It has no auth check * It gives a very simple answer to "what endpoint should I use to ping the API?"
(a question we have gotten at least once) * It's easy (I already did it) Questions that occurred to me while working through this: - Should we actually attempt to do something in the handler that would tell us, e.g., whether the DB is up? - No, that would be more than a ping - Raises DoS questions if not auth gated - Could add a db status endpoint or you could use any endpoint that returns data - What tag should this be under? - Initially added a `system` tag because a) this doesn't fit under existing `system/blah` tags and b) it really does feel miscellaneous - Changed to `system/status`, with the idea that if we add other kinds of checks, they would be new endpoints under this tag. --- nexus/src/external_api/http_entrypoints.rs | 16 ++++++ nexus/src/external_api/tag-config.json | 6 ++ nexus/tests/integration_tests/basic.rs | 13 ++++- nexus/tests/integration_tests/endpoints.rs | 1 - nexus/tests/output/nexus_tags.txt | 4 ++ .../output/uncovered-authz-endpoints.txt | 1 + nexus/types/src/external_api/views.rs | 15 +++++ openapi/nexus.json | 57 +++++++++++++++++++ 8 files changed, 111 insertions(+), 2 deletions(-) diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index 6e614d5644..ac5cf76775 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -98,6 +98,8 @@ type NexusApiDescription = ApiDescription>; /// Returns a description of the external nexus API pub(crate) fn external_api() -> NexusApiDescription { fn register_endpoints(api: &mut NexusApiDescription) -> Result<(), String> { + api.register(ping)?; + api.register(system_policy_view)?; api.register(system_policy_update)?; @@ -364,6 +366,20 @@ pub(crate) fn external_api() -> NexusApiDescription { // clients. Client generators use operationId to name API methods, so changing // a function name is a breaking change from a client perspective.
+/// Ping API +/// +/// Always responds with Ok if it responds at all. +#[endpoint { + method = GET, + path = "/v1/ping", + tags = ["system/status"], +}] +async fn ping( + _rqctx: RequestContext>, +) -> Result, HttpError> { + Ok(HttpResponseOk(views::Ping { status: views::PingStatus::Ok })) +} + /// Fetch the top-level IAM policy #[endpoint { method = GET, diff --git a/nexus/src/external_api/tag-config.json b/nexus/src/external_api/tag-config.json index e985ea7db4..07eb198016 100644 --- a/nexus/src/external_api/tag-config.json +++ b/nexus/src/external_api/tag-config.json @@ -80,6 +80,12 @@ "url": "http://docs.oxide.computer/api/vpcs" } }, + "system/status": { + "description": "Endpoints related to system health", + "external_docs": { + "url": "http://docs.oxide.computer/api/system-status" + } + }, "system/hardware": { "description": "These operations pertain to hardware inventory and management. Racks are the unit of expansion of an Oxide deployment. Racks are in turn composed of sleds, switches, power supplies, and a cabled backplane.", "external_docs": { diff --git a/nexus/tests/integration_tests/basic.rs b/nexus/tests/integration_tests/basic.rs index ab54c97197..282ec0cd96 100644 --- a/nexus/tests/integration_tests/basic.rs +++ b/nexus/tests/integration_tests/basic.rs @@ -10,7 +10,8 @@ use dropshot::HttpErrorResponseBody; use http::method::Method; use http::StatusCode; -use nexus_types::external_api::{params, views::Project}; +use nexus_types::external_api::params; +use nexus_types::external_api::views::{self, Project}; use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_common::api::external::IdentityMetadataUpdateParams; use omicron_common::api::external::Name; @@ -546,3 +547,13 @@ async fn test_projects_list(cptestctx: &ControlPlaneTestContext) { .collect::>() ); } + +#[nexus_test] +async fn test_ping(cptestctx: &ControlPlaneTestContext) { + let client = &cptestctx.external_client; + + let health = NexusRequest::object_get(client, 
"/v1/ping") + .execute_and_parse_unwrap::() + .await; + assert_eq!(health.status, views::PingStatus::Ok); +} diff --git a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs index e04d26cc45..e9ae11c21f 100644 --- a/nexus/tests/integration_tests/endpoints.rs +++ b/nexus/tests/integration_tests/endpoints.rs @@ -1876,6 +1876,5 @@ lazy_static! { AllowedMethod::GetNonexistent ], }, - ]; } diff --git a/nexus/tests/output/nexus_tags.txt b/nexus/tests/output/nexus_tags.txt index ca2f737cb0..1d7f5556c2 100644 --- a/nexus/tests/output/nexus_tags.txt +++ b/nexus/tests/output/nexus_tags.txt @@ -172,6 +172,10 @@ silo_view GET /v1/system/silos/{silo} user_builtin_list GET /v1/system/users-builtin user_builtin_view GET /v1/system/users-builtin/{user} +API operations found with tag "system/status" +OPERATION ID METHOD URL PATH +ping GET /v1/ping + API operations found with tag "vpcs" OPERATION ID METHOD URL PATH vpc_create POST /v1/vpcs diff --git a/nexus/tests/output/uncovered-authz-endpoints.txt b/nexus/tests/output/uncovered-authz-endpoints.txt index 0e53222a8a..d76d9c5495 100644 --- a/nexus/tests/output/uncovered-authz-endpoints.txt +++ b/nexus/tests/output/uncovered-authz-endpoints.txt @@ -1,4 +1,5 @@ API endpoints with no coverage in authz tests: +ping (get "/v1/ping") device_auth_request (post "/device/auth") device_auth_confirm (post "/device/confirm") device_access_token (post "/device/token") diff --git a/nexus/types/src/external_api/views.rs b/nexus/types/src/external_api/views.rs index 4b30b0be1c..ef3835c618 100644 --- a/nexus/types/src/external_api/views.rs +++ b/nexus/types/src/external_api/views.rs @@ -522,3 +522,18 @@ pub struct UpdateDeployment { pub version: SemverVersion, pub status: UpdateStatus, } + +// SYSTEM HEALTH + +#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize, JsonSchema)] +#[serde(rename_all = "snake_case")] +pub enum PingStatus { + Ok, +} + +#[derive(Clone, Debug, Eq, PartialEq, Serialize, 
Deserialize, JsonSchema)] +pub struct Ping { + /// Whether the external API is reachable. Will always be Ok if the endpoint + /// returns anything at all. + pub status: PingStatus, +} diff --git a/openapi/nexus.json b/openapi/nexus.json index 9330b0ef47..9dda94f283 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -2816,6 +2816,34 @@ } } }, + "/v1/ping": { + "get": { + "tags": [ + "system/status" + ], + "summary": "Ping API", + "description": "Always responds with Ok if it responds at all.", + "operationId": "ping", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ping" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, "/v1/policy": { "get": { "tags": [ @@ -12031,6 +12059,28 @@ "items" ] }, + "Ping": { + "type": "object", + "properties": { + "status": { + "description": "Whether the external API is reachable. Will always be Ok if the endpoint returns anything at all.", + "allOf": [ + { + "$ref": "#/components/schemas/PingStatus" + } + ] + } + }, + "required": [ + "status" + ] + }, + "PingStatus": { + "type": "string", + "enum": [ + "ok" + ] + }, "Project": { "description": "View of a Project", "type": "object", @@ -15277,6 +15327,13 @@ "url": "http://docs.oxide.computer/api/system-silos" } }, + { + "name": "system/status", + "description": "Endpoints related to system health", + "externalDocs": { + "url": "http://docs.oxide.computer/api/system-status" + } + }, { "name": "system/update" }, From d9d39531991cc8843ef38c4d0afc03afe1a58722 Mon Sep 17 00:00:00 2001 From: James MacMahon Date: Tue, 10 Oct 2023 12:19:23 -0400 Subject: [PATCH 33/35] Do not double count region snapshots records! 
(#4095) `decrease_crucible_resource_count_and_soft_delete_volume` does not disambiguate cases where the snapshot_addr of a region_snapshot is duplicated with another one, which can occur due to the Crucible Agent reclaiming ports from destroyed daemons (see also #4049, which makes the simulated Crucible agent do this). Several invocations of the snapshot create and snapshot delete sagas could race in such a way that one of these ports would be reclaimed, and then be used in a different snapshot, and the lifetime of both of these would overlap! This would confuse our reference counting, which was written with a naive assumption that this port reuse **wouldn't** occur with these overlapping lifetimes. Spoiler alert, it can: root@[fd00:1122:3344:101::3]:32221/omicron> select * from region_snapshot where snapshot_addr = '[fd00:1122:3344:102::7]:19016'; dataset_id | region_id | snapshot_id | snapshot_addr | volume_references ---------------------------------------+--------------------------------------+--------------------------------------+-------------------------------+-------------------- 80790bfd-4b81-4381-9262-20912e3826cc | 0387bbb7-1d54-4683-943c-6c17d6804de9 | 1a800928-8f93-4cd3-9df1-4129582ffc20 | [fd00:1122:3344:102::7]:19016 | 0 80790bfd-4b81-4381-9262-20912e3826cc | ff20e066-8815-4eb6-ac84-fab9b9103462 | bdd9614e-f089-4a94-ae46-e10b96b79ba3 | [fd00:1122:3344:102::7]:19016 | 0 (2 rows) One way to solve this would be to create a UNIQUE INDEX on `snapshot_addr` here, but then in these cases the snapshot creation would return a 500 error to the user. This commit adds a sixth column: `deleting`, a boolean that is true when the region snapshot is part of a volume's `resources_to_clean_up`, and false otherwise. This is used to select (as part of the transaction for `decrease_crucible_resource_count_and_soft_delete_volume`) only the region_snapshot records that were decremented as part of that transaction, and skip re-deleting them otherwise. 
This works because the overlapping lifetime of the records in the DB is **not** the overlapping lifetime of the actual read-only downstairs daemon: for the port to be reclaimed, the original daemon has to be DELETEd, which happens after the decrement transaction has already computed which resources to clean up: 1) a snapshot record is created: ``` snapshot_id | snapshot_addr | volume_references | deleted | -------------------------------------+-------------------------------+-------------------+---------- 1a800928-8f93-4cd3-9df1-4129582ffc20 | [fd00:1122:3344:102::7]:19016 | 0 | false | ``` 2) it is incremented as part of `volume_create`: ``` snapshot_id | snapshot_addr | volume_references | deleted | -------------------------------------+-------------------------------+-------------------+---------- 1a800928-8f93-4cd3-9df1-4129582ffc20 | [fd00:1122:3344:102::7]:19016 | 1 | false | ``` 3) when the volume is deleted, then the decrement transaction will: a) decrease `volume_references` by 1 ``` snapshot_id | snapshot_addr | volume_references | deleted | -------------------------------------+-------------------------------+-------------------+---------- 1a800928-8f93-4cd3-9df1-4129582ffc20 | [fd00:1122:3344:102::7]:19016 | 0 | false | ``` b) note any `region_snapshot` records whose `volume_references` went to 0 and have `deleted` = false, and return those in the list of resources to clean up: [ 1a800928-8f93-4cd3-9df1-4129582ffc20 ] c) set deleted = true for any region_snapshot records whose `volume_references` went to 0 and have deleted = false ``` snapshot_id | snapshot_addr | volume_references | deleted | -------------------------------------+-------------------------------+-------------------+---------- 1a800928-8f93-4cd3-9df1-4129582ffc20 | [fd00:1122:3344:102::7]:19016 | 0 | true | ``` 4) That read-only snapshot daemon is DELETEd, freeing up the port. 
Another snapshot creation occurs, using that reclaimed port: ``` snapshot_id | snapshot_addr | volume_references | deleted | -------------------------------------+-------------------------------+-------------------+---------- 1a800928-8f93-4cd3-9df1-4129582ffc20 | [fd00:1122:3344:102::7]:19016 | 0 | true | bdd9614e-f089-4a94-ae46-e10b96b79ba3 | [fd00:1122:3344:102::7]:19016 | 0 | false | ``` 5) That new snapshot is incremented as part of `volume_create`: ``` snapshot_id | snapshot_addr | volume_references | deleted | -------------------------------------+-------------------------------+-------------------+---------- 1a800928-8f93-4cd3-9df1-4129582ffc20 | [fd00:1122:3344:102::7]:19016 | 0 | true | bdd9614e-f089-4a94-ae46-e10b96b79ba3 | [fd00:1122:3344:102::7]:19016 | 1 | false | ``` 6) It is later deleted, and the decrement transaction will: a) decrease `volume_references` by 1: ``` snapshot_id | snapshot_addr | volume_references | deleted | -------------------------------------+-------------------------------+-------------------+---------- 1a800928-8f93-4cd3-9df1-4129582ffc20 | [fd00:1122:3344:102::7]:19016 | 0 | true | bdd9614e-f089-4a94-ae46-e10b96b79ba3 | [fd00:1122:3344:102::7]:19016 | 0 | false | ``` b) note any `region_snapshot` records whose `volume_references` went to 0 and have `deleted` = false, and return those in the list of resources to clean up: [ bdd9614e-f089-4a94-ae46-e10b96b79ba3 ] c) set deleted = true for any region_snapshot records whose `volume_references` went to 0 and have deleted = false ``` snapshot_id | snapshot_addr | volume_references | deleted | -------------------------------------+-------------------------------+-------------------+---------- 1a800928-8f93-4cd3-9df1-4129582ffc20 | [fd00:1122:3344:102::7]:19016 | 0 | true | bdd9614e-f089-4a94-ae46-e10b96b79ba3 | [fd00:1122:3344:102::7]:19016 | 0 | true | ``` --- dev-tools/omdb/tests/env.out | 6 +- dev-tools/omdb/tests/successes.out | 12 +- nexus/db-model/src/region_snapshot.rs | 3 +
nexus/db-model/src/schema.rs | 3 +- nexus/db-queries/src/db/datastore/dataset.rs | 16 + .../src/db/datastore/region_snapshot.rs | 23 ++ nexus/db-queries/src/db/datastore/volume.rs | 100 +++--- nexus/src/app/sagas/snapshot_create.rs | 1 + nexus/src/app/sagas/volume_delete.rs | 177 ++++++---- nexus/tests/integration_tests/snapshots.rs | 36 +- .../integration_tests/volume_management.rs | 308 ++++++++++++++++++ schema/crdb/6.0.0/up1.sql | 1 + schema/crdb/6.0.0/up2.sql | 1 + schema/crdb/dbinit.sql | 5 +- 14 files changed, 563 insertions(+), 129 deletions(-) create mode 100644 schema/crdb/6.0.0/up1.sql create mode 100644 schema/crdb/6.0.0/up2.sql diff --git a/dev-tools/omdb/tests/env.out b/dev-tools/omdb/tests/env.out index eb4cd0d32d..07a6d3fae5 100644 --- a/dev-tools/omdb/tests/env.out +++ b/dev-tools/omdb/tests/env.out @@ -7,7 +7,7 @@ sim-b6d65341 [::1]:REDACTED_PORT - REDACTED_UUID_REDACTED_UUID_REDACTED --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (5.0.0) +note: database schema version matches expected (6.0.0) ============================================= EXECUTING COMMAND: omdb ["db", "--db-url", "junk", "sleds"] termination: Exited(2) @@ -172,7 +172,7 @@ stderr: note: database URL not specified. Will search DNS. note: (override with --db-url or OMDB_DB_URL) note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (5.0.0) +note: database schema version matches expected (6.0.0) ============================================= EXECUTING COMMAND: omdb ["--dns-server", "[::1]:REDACTED_PORT", "db", "sleds"] termination: Exited(0) @@ -185,5 +185,5 @@ stderr: note: database URL not specified. Will search DNS. 
note: (override with --db-url or OMDB_DB_URL) note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (5.0.0) +note: database schema version matches expected (6.0.0) ============================================= diff --git a/dev-tools/omdb/tests/successes.out b/dev-tools/omdb/tests/successes.out index eb075a84ea..038f365e8e 100644 --- a/dev-tools/omdb/tests/successes.out +++ b/dev-tools/omdb/tests/successes.out @@ -8,7 +8,7 @@ external oxide-dev.test 2 create silo: "tes --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (5.0.0) +note: database schema version matches expected (6.0.0) ============================================= EXECUTING COMMAND: omdb ["db", "dns", "diff", "external", "2"] termination: Exited(0) @@ -24,7 +24,7 @@ changes: names added: 1, names removed: 0 --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (5.0.0) +note: database schema version matches expected (6.0.0) ============================================= EXECUTING COMMAND: omdb ["db", "dns", "names", "external", "2"] termination: Exited(0) @@ -36,7 +36,7 @@ External zone: oxide-dev.test --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (5.0.0) +note: database schema version matches expected (6.0.0) ============================================= EXECUTING COMMAND: omdb ["db", "services", "list-instances"] termination: Exited(0) @@ -52,7 +52,7 @@ Nexus REDACTED_UUID_REDACTED_UUID_REDACTED [::ffff:127.0.0.1]:REDACTED_ --------------------------------------------- stderr: note: using database URL 
postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (5.0.0) +note: database schema version matches expected (6.0.0) ============================================= EXECUTING COMMAND: omdb ["db", "services", "list-by-sled"] termination: Exited(0) @@ -71,7 +71,7 @@ sled: sim-b6d65341 (id REDACTED_UUID_REDACTED_UUID_REDACTED) --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (5.0.0) +note: database schema version matches expected (6.0.0) ============================================= EXECUTING COMMAND: omdb ["db", "sleds"] termination: Exited(0) @@ -82,7 +82,7 @@ sim-b6d65341 [::1]:REDACTED_PORT - REDACTED_UUID_REDACTED_UUID_REDACTED --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (5.0.0) +note: database schema version matches expected (6.0.0) ============================================= EXECUTING COMMAND: omdb ["mgs", "inventory"] termination: Exited(0) diff --git a/nexus/db-model/src/region_snapshot.rs b/nexus/db-model/src/region_snapshot.rs index 9addeb83e3..af1cf8b2b3 100644 --- a/nexus/db-model/src/region_snapshot.rs +++ b/nexus/db-model/src/region_snapshot.rs @@ -32,4 +32,7 @@ pub struct RegionSnapshot { // how many volumes reference this? pub volume_references: i64, + + // true if part of a volume's `resources_to_clean_up` already + pub deleting: bool, } diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 94a770e2ca..0165ab1568 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -856,6 +856,7 @@ table! { snapshot_id -> Uuid, snapshot_addr -> Text, volume_references -> Int8, + deleting -> Bool, } } @@ -1130,7 +1131,7 @@ table! 
{ /// /// This should be updated whenever the schema is changed. For more details, /// refer to: schema/crdb/README.adoc -pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(5, 0, 0); +pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(6, 0, 0); allow_tables_to_appear_in_same_query!( system_update, diff --git a/nexus/db-queries/src/db/datastore/dataset.rs b/nexus/db-queries/src/db/datastore/dataset.rs index 99972459c8..0b26789e8f 100644 --- a/nexus/db-queries/src/db/datastore/dataset.rs +++ b/nexus/db-queries/src/db/datastore/dataset.rs @@ -13,15 +13,31 @@ use crate::db::error::ErrorHandler; use crate::db::identity::Asset; use crate::db::model::Dataset; use crate::db::model::Zpool; +use async_bb8_diesel::AsyncRunQueryDsl; use chrono::Utc; use diesel::prelude::*; use diesel::upsert::excluded; use omicron_common::api::external::CreateResult; use omicron_common::api::external::Error; +use omicron_common::api::external::LookupResult; use omicron_common::api::external::LookupType; use omicron_common::api::external::ResourceType; +use uuid::Uuid; impl DataStore { + pub async fn dataset_get(&self, dataset_id: Uuid) -> LookupResult { + use db::schema::dataset::dsl; + + dsl::dataset + .filter(dsl::id.eq(dataset_id)) + .select(Dataset::as_select()) + .first_async::( + &*self.pool_connection_unauthorized().await?, + ) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + /// Stores a new dataset in the database. 
pub async fn dataset_upsert( &self, diff --git a/nexus/db-queries/src/db/datastore/region_snapshot.rs b/nexus/db-queries/src/db/datastore/region_snapshot.rs index 0a707e4504..148cfe4812 100644 --- a/nexus/db-queries/src/db/datastore/region_snapshot.rs +++ b/nexus/db-queries/src/db/datastore/region_snapshot.rs @@ -10,9 +10,11 @@ use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::model::RegionSnapshot; use async_bb8_diesel::AsyncRunQueryDsl; +use async_bb8_diesel::OptionalExtension; use diesel::prelude::*; use omicron_common::api::external::CreateResult; use omicron_common::api::external::DeleteResult; +use omicron_common::api::external::LookupResult; use uuid::Uuid; impl DataStore { @@ -31,6 +33,27 @@ impl DataStore { .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } + pub async fn region_snapshot_get( + &self, + dataset_id: Uuid, + region_id: Uuid, + snapshot_id: Uuid, + ) -> LookupResult> { + use db::schema::region_snapshot::dsl; + + dsl::region_snapshot + .filter(dsl::dataset_id.eq(dataset_id)) + .filter(dsl::region_id.eq(region_id)) + .filter(dsl::snapshot_id.eq(snapshot_id)) + .select(RegionSnapshot::as_select()) + .first_async::( + &*self.pool_connection_unauthorized().await?, + ) + .await + .optional() + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + pub async fn region_snapshot_remove( &self, dataset_id: Uuid, diff --git a/nexus/db-queries/src/db/datastore/volume.rs b/nexus/db-queries/src/db/datastore/volume.rs index b3e82886de..b97b8451cf 100644 --- a/nexus/db-queries/src/db/datastore/volume.rs +++ b/nexus/db-queries/src/db/datastore/volume.rs @@ -119,6 +119,7 @@ impl DataStore { .filter( rs_dsl::snapshot_addr.eq(read_only_target.clone()), ) + .filter(rs_dsl::deleting.eq(false)) .set( rs_dsl::volume_references .eq(rs_dsl::volume_references + 1), @@ -573,9 +574,7 @@ impl DataStore { // multiple times, and that is done by soft-deleting the volume during // the transaction, 
and returning the previously serialized list of // resources to clean up if a soft-delete has already occurred. - // - // TODO it would be nice to make this transaction_async, but I couldn't - // get the async optional extension to work. + self.pool_connection_unauthorized() .await? .transaction_async(|conn| async move { @@ -639,7 +638,9 @@ impl DataStore { } }; - // Decrease the number of uses for each referenced region snapshot. + // Decrease the number of uses for each non-deleted referenced + // region snapshot. + use db::schema::region_snapshot::dsl; diesel::update(dsl::region_snapshot) @@ -647,12 +648,40 @@ impl DataStore { dsl::snapshot_addr .eq_any(crucible_targets.read_only_targets.clone()), ) + .filter(dsl::volume_references.gt(0)) + .filter(dsl::deleting.eq(false)) .set(dsl::volume_references.eq(dsl::volume_references - 1)) .execute_async(&conn) .await?; + // Then, note anything that was set to zero from the above + // UPDATE, and then mark all those as deleted. + let snapshots_to_delete: Vec = + dsl::region_snapshot + .filter( + dsl::snapshot_addr.eq_any( + crucible_targets.read_only_targets.clone(), + ), + ) + .filter(dsl::volume_references.eq(0)) + .filter(dsl::deleting.eq(false)) + .select(RegionSnapshot::as_select()) + .load_async(&conn) + .await?; + + diesel::update(dsl::region_snapshot) + .filter( + dsl::snapshot_addr + .eq_any(crucible_targets.read_only_targets.clone()), + ) + .filter(dsl::volume_references.eq(0)) + .filter(dsl::deleting.eq(false)) + .set(dsl::deleting.eq(true)) + .execute_async(&conn) + .await?; + // Return what results can be cleaned up - let result = CrucibleResources::V1(CrucibleResourcesV1 { + let result = CrucibleResources::V2(CrucibleResourcesV2 { // The only use of a read-write region will be at the top level of a // Volume. These are not shared, but if any snapshots are taken this // will prevent deletion of the region. 
Filter out any regions that @@ -681,6 +710,7 @@ impl DataStore { .eq(0) // Despite the SQL specifying that this column is NOT NULL, // this null check is required for this function to work! + // The left join of region_snapshot might cause a null here. .or(dsl::volume_references.is_null()), ) .select((Dataset::as_select(), Region::as_select())) @@ -688,46 +718,17 @@ impl DataStore { .await? }, - // A volume (for a disk or snapshot) may reference another nested - // volume as a read-only parent, and this may be arbitrarily deep. - // After decrementing volume_references above, get the region - // snapshot records for these read_only_targets where the - // volume_references has gone to 0. Consumers of this struct will - // be responsible for deleting the read-only downstairs running - // for the snapshot and the snapshot itself. - datasets_and_snapshots: { - use db::schema::dataset::dsl as dataset_dsl; - - dsl::region_snapshot - // Only return region_snapshot records related to - // this volume that have zero references. This will - // only happen one time, on the last decrease of a - // volume containing these read-only targets. - // - // It's important to not return *every* region - // snapshot with zero references: multiple volume - // delete sub-sagas will then be issues duplicate - // DELETE calls to Crucible agents, and a request to - // delete a read-only downstairs running for a - // snapshot that doesn't exist will return a 404, - // causing the saga to error and unwind. - .filter(dsl::snapshot_addr.eq_any( - crucible_targets.read_only_targets.clone(), - )) - .filter(dsl::volume_references.eq(0)) - .inner_join( - dataset_dsl::dataset - .on(dsl::dataset_id.eq(dataset_dsl::id)), - ) - .select(( - Dataset::as_select(), - RegionSnapshot::as_select(), - )) - .get_results_async::<(Dataset, RegionSnapshot)>( - &conn, - ) - .await? 
- }, + // Consumers of this struct will be responsible for deleting + // the read-only downstairs running for the snapshot and the + // snapshot itself. + // + // It's important to not return *every* region snapshot with + // zero references: multiple volume delete sub-sagas will + // then be issues duplicate DELETE calls to Crucible agents, + // and a request to delete a read-only downstairs running + // for a snapshot that doesn't exist will return a 404, + // causing the saga to error and unwind. + snapshots_to_delete, }); // Soft delete this volume, and serialize the resources that are to @@ -967,7 +968,7 @@ impl DataStore { #[derive(Default, Debug, Serialize, Deserialize)] pub struct CrucibleTargets { - read_only_targets: Vec, + pub read_only_targets: Vec, } // Serialize this enum into the `resources_to_clean_up` column to handle @@ -975,6 +976,7 @@ pub struct CrucibleTargets { #[derive(Debug, Serialize, Deserialize)] pub enum CrucibleResources { V1(CrucibleResourcesV1), + V2(CrucibleResourcesV2), } #[derive(Debug, Default, Serialize, Deserialize)] @@ -983,6 +985,12 @@ pub struct CrucibleResourcesV1 { pub datasets_and_snapshots: Vec<(Dataset, RegionSnapshot)>, } +#[derive(Debug, Default, Serialize, Deserialize)] +pub struct CrucibleResourcesV2 { + pub datasets_and_regions: Vec<(Dataset, Region)>, + pub snapshots_to_delete: Vec, +} + /// Return the targets from a VolumeConstructionRequest. /// /// The targets of a volume construction request map to resources. 
diff --git a/nexus/src/app/sagas/snapshot_create.rs b/nexus/src/app/sagas/snapshot_create.rs index eeabf64894..9c8a33fb17 100644 --- a/nexus/src/app/sagas/snapshot_create.rs +++ b/nexus/src/app/sagas/snapshot_create.rs @@ -1280,6 +1280,7 @@ async fn ssc_start_running_snapshot( snapshot_id, snapshot_addr, volume_references: 0, // to be filled later + deleting: false, }) .await .map_err(ActionError::action_failed)?; diff --git a/nexus/src/app/sagas/volume_delete.rs b/nexus/src/app/sagas/volume_delete.rs index 4cd633f575..d6358d5435 100644 --- a/nexus/src/app/sagas/volume_delete.rs +++ b/nexus/src/app/sagas/volume_delete.rs @@ -155,39 +155,39 @@ async fn svd_delete_crucible_regions( sagactx.lookup::("crucible_resources_to_delete")?; // Send DELETE calls to the corresponding Crucible agents - match crucible_resources_to_delete { + let datasets_and_regions = match crucible_resources_to_delete { CrucibleResources::V1(crucible_resources_to_delete) => { - delete_crucible_regions( - log, - crucible_resources_to_delete.datasets_and_regions.clone(), - ) - .await - .map_err(|e| { - ActionError::action_failed(format!( - "failed to delete_crucible_regions: {:?}", - e, - )) - })?; + crucible_resources_to_delete.datasets_and_regions + } - // Remove DB records - let region_ids_to_delete = crucible_resources_to_delete - .datasets_and_regions - .iter() - .map(|(_, r)| r.id()) - .collect(); - - osagactx - .datastore() - .regions_hard_delete(log, region_ids_to_delete) - .await - .map_err(|e| { - ActionError::action_failed(format!( - "failed to regions_hard_delete: {:?}", - e, - )) - })?; + CrucibleResources::V2(crucible_resources_to_delete) => { + crucible_resources_to_delete.datasets_and_regions } - } + }; + + delete_crucible_regions(log, datasets_and_regions.clone()).await.map_err( + |e| { + ActionError::action_failed(format!( + "failed to delete_crucible_regions: {:?}", + e, + )) + }, + )?; + + // Remove DB records + let region_ids_to_delete = + datasets_and_regions.iter().map(|(_, 
r)| r.id()).collect(); + + osagactx + .datastore() + .regions_hard_delete(log, region_ids_to_delete) + .await + .map_err(|e| { + ActionError::action_failed(format!( + "failed to regions_hard_delete: {:?}", + e, + )) + })?; Ok(()) } @@ -202,26 +202,46 @@ async fn svd_delete_crucible_running_snapshots( sagactx: NexusActionContext, ) -> Result<(), ActionError> { let log = sagactx.user_data().log(); + let osagactx = sagactx.user_data(); let crucible_resources_to_delete = sagactx.lookup::("crucible_resources_to_delete")?; // Send DELETE calls to the corresponding Crucible agents - match crucible_resources_to_delete { + let datasets_and_snapshots = match crucible_resources_to_delete { CrucibleResources::V1(crucible_resources_to_delete) => { - delete_crucible_running_snapshots( - log, - crucible_resources_to_delete.datasets_and_snapshots.clone(), - ) - .await - .map_err(|e| { - ActionError::action_failed(format!( - "failed to delete_crucible_running_snapshots: {:?}", - e, - )) - })?; + crucible_resources_to_delete.datasets_and_snapshots } - } + + CrucibleResources::V2(crucible_resources_to_delete) => { + let mut datasets_and_snapshots: Vec<_> = Vec::with_capacity( + crucible_resources_to_delete.snapshots_to_delete.len(), + ); + + for region_snapshot in + crucible_resources_to_delete.snapshots_to_delete + { + let dataset = osagactx + .datastore() + .dataset_get(region_snapshot.dataset_id) + .await + .map_err(ActionError::action_failed)?; + + datasets_and_snapshots.push((dataset, region_snapshot)); + } + + datasets_and_snapshots + } + }; + + delete_crucible_running_snapshots(log, datasets_and_snapshots.clone()) + .await + .map_err(|e| { + ActionError::action_failed(format!( + "failed to delete_crucible_running_snapshots: {:?}", + e, + )) + })?; Ok(()) } @@ -235,26 +255,46 @@ async fn svd_delete_crucible_snapshots( sagactx: NexusActionContext, ) -> Result<(), ActionError> { let log = sagactx.user_data().log(); + let osagactx = sagactx.user_data(); let 
crucible_resources_to_delete = sagactx.lookup::("crucible_resources_to_delete")?; // Send DELETE calls to the corresponding Crucible agents - match crucible_resources_to_delete { + let datasets_and_snapshots = match crucible_resources_to_delete { CrucibleResources::V1(crucible_resources_to_delete) => { - delete_crucible_snapshots( - log, - crucible_resources_to_delete.datasets_and_snapshots.clone(), - ) - .await - .map_err(|e| { - ActionError::action_failed(format!( - "failed to delete_crucible_snapshots: {:?}", - e, - )) - })?; + crucible_resources_to_delete.datasets_and_snapshots } - } + + CrucibleResources::V2(crucible_resources_to_delete) => { + let mut datasets_and_snapshots: Vec<_> = Vec::with_capacity( + crucible_resources_to_delete.snapshots_to_delete.len(), + ); + + for region_snapshot in + crucible_resources_to_delete.snapshots_to_delete + { + let dataset = osagactx + .datastore() + .dataset_get(region_snapshot.dataset_id) + .await + .map_err(ActionError::action_failed)?; + + datasets_and_snapshots.push((dataset, region_snapshot)); + } + + datasets_and_snapshots + } + }; + + delete_crucible_snapshots(log, datasets_and_snapshots.clone()) + .await + .map_err(|e| { + ActionError::action_failed(format!( + "failed to delete_crucible_snapshots: {:?}", + e, + )) + })?; Ok(()) } @@ -293,6 +333,31 @@ async fn svd_delete_crucible_snapshot_records( })?; } } + + CrucibleResources::V2(crucible_resources_to_delete) => { + // Remove DB records + for region_snapshot in + &crucible_resources_to_delete.snapshots_to_delete + { + osagactx + .datastore() + .region_snapshot_remove( + region_snapshot.dataset_id, + region_snapshot.region_id, + region_snapshot.snapshot_id, + ) + .await + .map_err(|e| { + ActionError::action_failed(format!( + "failed to region_snapshot_remove {} {} {}: {:?}", + region_snapshot.dataset_id, + region_snapshot.region_id, + region_snapshot.snapshot_id, + e, + )) + })?; + } + } } Ok(()) diff --git a/nexus/tests/integration_tests/snapshots.rs 
b/nexus/tests/integration_tests/snapshots.rs index d212175415..68f4cdadd2 100644 --- a/nexus/tests/integration_tests/snapshots.rs +++ b/nexus/tests/integration_tests/snapshots.rs @@ -1094,6 +1094,7 @@ async fn test_region_snapshot_create_idempotent( snapshot_addr: "[::]:12345".to_string(), volume_references: 1, + deleting: false, }; datastore.region_snapshot_create(region_snapshot.clone()).await.unwrap(); @@ -1287,13 +1288,16 @@ async fn test_multiple_deletes_not_sent(cptestctx: &ControlPlaneTestContext) { .unwrap(); let resources_1 = match resources_1 { - db::datastore::CrucibleResources::V1(resources_1) => resources_1, + db::datastore::CrucibleResources::V1(_) => panic!("using old style!"), + db::datastore::CrucibleResources::V2(resources_1) => resources_1, }; let resources_2 = match resources_2 { - db::datastore::CrucibleResources::V1(resources_2) => resources_2, + db::datastore::CrucibleResources::V1(_) => panic!("using old style!"), + db::datastore::CrucibleResources::V2(resources_2) => resources_2, }; let resources_3 = match resources_3 { - db::datastore::CrucibleResources::V1(resources_3) => resources_3, + db::datastore::CrucibleResources::V1(_) => panic!("using old style!"), + db::datastore::CrucibleResources::V2(resources_3) => resources_3, }; // No region deletions yet, these are just snapshot deletes @@ -1304,24 +1308,24 @@ async fn test_multiple_deletes_not_sent(cptestctx: &ControlPlaneTestContext) { // But there are snapshots to delete - assert!(!resources_1.datasets_and_snapshots.is_empty()); - assert!(!resources_2.datasets_and_snapshots.is_empty()); - assert!(!resources_3.datasets_and_snapshots.is_empty()); + assert!(!resources_1.snapshots_to_delete.is_empty()); + assert!(!resources_2.snapshots_to_delete.is_empty()); + assert!(!resources_3.snapshots_to_delete.is_empty()); - // Assert there are no overlaps in the datasets_and_snapshots to delete. + // Assert there are no overlaps in the snapshots_to_delete to delete. 
- for tuple in &resources_1.datasets_and_snapshots { - assert!(!resources_2.datasets_and_snapshots.contains(tuple)); - assert!(!resources_3.datasets_and_snapshots.contains(tuple)); + for tuple in &resources_1.snapshots_to_delete { + assert!(!resources_2.snapshots_to_delete.contains(tuple)); + assert!(!resources_3.snapshots_to_delete.contains(tuple)); } - for tuple in &resources_2.datasets_and_snapshots { - assert!(!resources_1.datasets_and_snapshots.contains(tuple)); - assert!(!resources_3.datasets_and_snapshots.contains(tuple)); + for tuple in &resources_2.snapshots_to_delete { + assert!(!resources_1.snapshots_to_delete.contains(tuple)); + assert!(!resources_3.snapshots_to_delete.contains(tuple)); } - for tuple in &resources_3.datasets_and_snapshots { - assert!(!resources_1.datasets_and_snapshots.contains(tuple)); - assert!(!resources_2.datasets_and_snapshots.contains(tuple)); + for tuple in &resources_3.snapshots_to_delete { + assert!(!resources_1.snapshots_to_delete.contains(tuple)); + assert!(!resources_2.snapshots_to_delete.contains(tuple)); } } diff --git a/nexus/tests/integration_tests/volume_management.rs b/nexus/tests/integration_tests/volume_management.rs index 70d34fb778..e263593def 100644 --- a/nexus/tests/integration_tests/volume_management.rs +++ b/nexus/tests/integration_tests/volume_management.rs @@ -19,6 +19,7 @@ use nexus_test_utils::resource_helpers::DiskTest; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params; use nexus_types::external_api::views; +use nexus_types::identity::Asset; use omicron_common::api::external::ByteCount; use omicron_common::api::external::Disk; use omicron_common::api::external::IdentityMetadataCreateParams; @@ -1813,6 +1814,313 @@ async fn test_volume_checkout_updates_sparse_mid_multiple_gen( volume_match_gen(new_vol, vec![Some(8), None, Some(10)]); } +/// Test that the Crucible agent's port reuse does not confuse +/// `decrease_crucible_resource_count_and_soft_delete_volume`, due to the +/// 
`[ipv6]:port` targets being reused. +#[nexus_test] +async fn test_keep_your_targets_straight(cptestctx: &ControlPlaneTestContext) { + let nexus = &cptestctx.server.apictx().nexus; + let datastore = nexus.datastore(); + + // Four zpools, one dataset each + let mut disk_test = DiskTest::new(&cptestctx).await; + disk_test + .add_zpool_with_dataset(&cptestctx, DiskTest::DEFAULT_ZPOOL_SIZE_GIB) + .await; + + // This bug occurs when region_snapshot records share a snapshot_addr, so + // insert those here manually. + + // (dataset_id, region_id, snapshot_id, snapshot_addr) + let region_snapshots = vec![ + // first snapshot-create + ( + disk_test.zpools[0].datasets[0].id, + Uuid::new_v4(), + Uuid::new_v4(), + String::from("[fd00:1122:3344:101:7]:19016"), + ), + ( + disk_test.zpools[1].datasets[0].id, + Uuid::new_v4(), + Uuid::new_v4(), + String::from("[fd00:1122:3344:102:7]:19016"), + ), + ( + disk_test.zpools[2].datasets[0].id, + Uuid::new_v4(), + Uuid::new_v4(), + String::from("[fd00:1122:3344:103:7]:19016"), + ), + // second snapshot-create + ( + disk_test.zpools[0].datasets[0].id, + Uuid::new_v4(), + Uuid::new_v4(), + String::from("[fd00:1122:3344:101:7]:19016"), // duplicate! 
+ ), + ( + disk_test.zpools[3].datasets[0].id, + Uuid::new_v4(), + Uuid::new_v4(), + String::from("[fd00:1122:3344:104:7]:19016"), + ), + ( + disk_test.zpools[2].datasets[0].id, + Uuid::new_v4(), + Uuid::new_v4(), + String::from("[fd00:1122:3344:103:7]:19017"), + ), + ]; + + // First, three `region_snapshot` records created in the snapshot-create + // saga, which are then used to make snapshot's volume construction request + + for i in 0..3 { + let (dataset_id, region_id, snapshot_id, snapshot_addr) = + ®ion_snapshots[i]; + datastore + .region_snapshot_create(nexus_db_model::RegionSnapshot { + dataset_id: *dataset_id, + region_id: *region_id, + snapshot_id: *snapshot_id, + snapshot_addr: snapshot_addr.clone(), + volume_references: 0, + deleting: false, + }) + .await + .unwrap(); + } + + let volume_id = Uuid::new_v4(); + let volume = datastore + .volume_create(nexus_db_model::Volume::new( + volume_id, + serde_json::to_string(&VolumeConstructionRequest::Volume { + id: volume_id, + block_size: 512, + sub_volumes: vec![], + read_only_parent: Some(Box::new( + VolumeConstructionRequest::Region { + block_size: 512, + blocks_per_extent: 1, + extent_count: 1, + gen: 1, + opts: CrucibleOpts { + id: Uuid::new_v4(), + target: vec![ + region_snapshots[0].3.clone(), + region_snapshots[1].3.clone(), + region_snapshots[2].3.clone(), + ], + lossy: false, + flush_timeout: None, + key: None, + cert_pem: None, + key_pem: None, + root_cert_pem: None, + control: None, + read_only: true, + }, + }, + )), + }) + .unwrap(), + )) + .await + .unwrap(); + + // Sanity check + + assert_eq!(volume.id(), volume_id); + + // Make sure the volume has only three read-only targets: + + let crucible_targets = datastore + .read_only_resources_associated_with_volume(volume_id) + .await + .unwrap(); + assert_eq!(crucible_targets.read_only_targets.len(), 3); + + // Also validate the volume's region_snapshots got incremented by + // volume_create + + for i in 0..3 { + let (dataset_id, region_id, snapshot_id, 
_) = region_snapshots[i]; + let region_snapshot = datastore + .region_snapshot_get(dataset_id, region_id, snapshot_id) + .await + .unwrap() + .unwrap(); + + assert_eq!(region_snapshot.volume_references, 1); + assert_eq!(region_snapshot.deleting, false); + } + + // Soft delete the volume, and validate that only three region_snapshot + // records are returned. + + let cr = datastore + .decrease_crucible_resource_count_and_soft_delete_volume(volume_id) + .await + .unwrap(); + + for i in 0..3 { + let (dataset_id, region_id, snapshot_id, _) = region_snapshots[i]; + let region_snapshot = datastore + .region_snapshot_get(dataset_id, region_id, snapshot_id) + .await + .unwrap() + .unwrap(); + + assert_eq!(region_snapshot.volume_references, 0); + assert_eq!(region_snapshot.deleting, true); + } + + match cr { + nexus_db_queries::db::datastore::CrucibleResources::V1(cr) => { + assert!(cr.datasets_and_regions.is_empty()); + assert_eq!(cr.datasets_and_snapshots.len(), 3); + } + + nexus_db_queries::db::datastore::CrucibleResources::V2(cr) => { + assert!(cr.datasets_and_regions.is_empty()); + assert_eq!(cr.snapshots_to_delete.len(), 3); + } + } + + // Now, let's say we're at a spot where the running snapshots have been + // deleted, but before volume_hard_delete or region_snapshot_remove are + // called. Pretend another snapshot-create and snapshot-delete snuck in + // here, and the second snapshot hits a agent that reuses the first target. 
+ + for i in 3..6 { + let (dataset_id, region_id, snapshot_id, snapshot_addr) = + ®ion_snapshots[i]; + datastore + .region_snapshot_create(nexus_db_model::RegionSnapshot { + dataset_id: *dataset_id, + region_id: *region_id, + snapshot_id: *snapshot_id, + snapshot_addr: snapshot_addr.clone(), + volume_references: 0, + deleting: false, + }) + .await + .unwrap(); + } + + let volume_id = Uuid::new_v4(); + let volume = datastore + .volume_create(nexus_db_model::Volume::new( + volume_id, + serde_json::to_string(&VolumeConstructionRequest::Volume { + id: volume_id, + block_size: 512, + sub_volumes: vec![], + read_only_parent: Some(Box::new( + VolumeConstructionRequest::Region { + block_size: 512, + blocks_per_extent: 1, + extent_count: 1, + gen: 1, + opts: CrucibleOpts { + id: Uuid::new_v4(), + target: vec![ + region_snapshots[3].3.clone(), + region_snapshots[4].3.clone(), + region_snapshots[5].3.clone(), + ], + lossy: false, + flush_timeout: None, + key: None, + cert_pem: None, + key_pem: None, + root_cert_pem: None, + control: None, + read_only: true, + }, + }, + )), + }) + .unwrap(), + )) + .await + .unwrap(); + + // Sanity check + + assert_eq!(volume.id(), volume_id); + + // Make sure the volume has only three read-only targets: + + let crucible_targets = datastore + .read_only_resources_associated_with_volume(volume_id) + .await + .unwrap(); + assert_eq!(crucible_targets.read_only_targets.len(), 3); + + // Also validate only the volume's region_snapshots got incremented by + // volume_create. 
+ + for i in 0..3 { + let (dataset_id, region_id, snapshot_id, _) = region_snapshots[i]; + let region_snapshot = datastore + .region_snapshot_get(dataset_id, region_id, snapshot_id) + .await + .unwrap() + .unwrap(); + + assert_eq!(region_snapshot.volume_references, 0); + assert_eq!(region_snapshot.deleting, true); + } + for i in 3..6 { + let (dataset_id, region_id, snapshot_id, _) = region_snapshots[i]; + let region_snapshot = datastore + .region_snapshot_get(dataset_id, region_id, snapshot_id) + .await + .unwrap() + .unwrap(); + + assert_eq!(region_snapshot.volume_references, 1); + assert_eq!(region_snapshot.deleting, false); + } + + // Soft delete the volume, and validate that only three region_snapshot + // records are returned. + + let cr = datastore + .decrease_crucible_resource_count_and_soft_delete_volume(volume_id) + .await + .unwrap(); + + // Make sure every region_snapshot is now 0, and deleting + + for i in 0..6 { + let (dataset_id, region_id, snapshot_id, _) = region_snapshots[i]; + let region_snapshot = datastore + .region_snapshot_get(dataset_id, region_id, snapshot_id) + .await + .unwrap() + .unwrap(); + + assert_eq!(region_snapshot.volume_references, 0); + assert_eq!(region_snapshot.deleting, true); + } + + match cr { + nexus_db_queries::db::datastore::CrucibleResources::V1(cr) => { + assert!(cr.datasets_and_regions.is_empty()); + assert_eq!(cr.datasets_and_snapshots.len(), 3); + } + + nexus_db_queries::db::datastore::CrucibleResources::V2(cr) => { + assert!(cr.datasets_and_regions.is_empty()); + assert_eq!(cr.snapshots_to_delete.len(), 3); + } + } +} + #[nexus_test] async fn test_disk_create_saga_unwinds_correctly( cptestctx: &ControlPlaneTestContext, diff --git a/schema/crdb/6.0.0/up1.sql b/schema/crdb/6.0.0/up1.sql new file mode 100644 index 0000000000..4a3cdc302e --- /dev/null +++ b/schema/crdb/6.0.0/up1.sql @@ -0,0 +1 @@ +ALTER TABLE omicron.public.region_snapshot ADD COLUMN IF NOT EXISTS deleting BOOL NOT NULL DEFAULT false; diff --git 
a/schema/crdb/6.0.0/up2.sql b/schema/crdb/6.0.0/up2.sql new file mode 100644 index 0000000000..77c136a3bf --- /dev/null +++ b/schema/crdb/6.0.0/up2.sql @@ -0,0 +1 @@ +ALTER TABLE omicron.public.region_snapshot ALTER COLUMN deleting DROP DEFAULT; diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index ad09092f8f..a62cbae5ea 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -505,6 +505,9 @@ CREATE TABLE IF NOT EXISTS omicron.public.region_snapshot ( /* How many volumes reference this? */ volume_references INT8 NOT NULL, + /* Is this currently part of some resources_to_delete? */ + deleting BOOL NOT NULL, + PRIMARY KEY (dataset_id, region_id, snapshot_id) ); @@ -2574,7 +2577,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - ( TRUE, NOW(), NOW(), '5.0.0', NULL) + ( TRUE, NOW(), NOW(), '6.0.0', NULL) ON CONFLICT DO NOTHING; COMMIT; From 3e9f46c057b223ad390c742f882ef05e09366b77 Mon Sep 17 00:00:00 2001 From: Ryan Goodfellow Date: Tue, 10 Oct 2023 12:57:14 -0700 Subject: [PATCH 34/35] update softnpu version (#4227) This pulls in a new version of the `npuzone` tool from the softnpu repo that automatically pulls the latest sidecar-lite code. 
--- tools/ci_download_softnpu_machinery | 2 +- tools/create_virtual_hardware.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/ci_download_softnpu_machinery b/tools/ci_download_softnpu_machinery index d37d428476..7975a310f0 100755 --- a/tools/ci_download_softnpu_machinery +++ b/tools/ci_download_softnpu_machinery @@ -15,7 +15,7 @@ OUT_DIR="out/npuzone" # Pinned commit for softnpu ASIC simulator SOFTNPU_REPO="softnpu" -SOFTNPU_COMMIT="41b3a67b3d44f51528816ff8e539b4001df48305" +SOFTNPU_COMMIT="eb27e6a00f1082c9faac7cf997e57d0609f7a309" # This is the softnpu ASIC simulator echo "fetching npuzone" diff --git a/tools/create_virtual_hardware.sh b/tools/create_virtual_hardware.sh index dd6d9af9dd..95c2aa63df 100755 --- a/tools/create_virtual_hardware.sh +++ b/tools/create_virtual_hardware.sh @@ -37,7 +37,7 @@ function ensure_simulated_links { dladm create-simnet -t "net$I" dladm create-simnet -t "sc${I}_0" dladm modify-simnet -t -p "net$I" "sc${I}_0" - dladm set-linkprop -p mtu=1600 "sc${I}_0" # encap headroom + dladm set-linkprop -p mtu=9000 "sc${I}_0" # match emulated devices fi success "Simnet net$I/sc${I}_0 exists" done From 97ddc7da3a5cdbded9097827f90151980755c1e4 Mon Sep 17 00:00:00 2001 From: Rain Date: Tue, 10 Oct 2023 13:12:23 -0700 Subject: [PATCH 35/35] [dependencies] add Renovate config (#4236) * Add configuration for automatically creating dependencies, and for pinning GitHub Actions digests * Add a post-upgrade script that runs cargo-hakari. Depends on https://github.com/oxidecomputer/renovate-config/pull/5. See [RFD 434](https://rfd.shared.oxide.computer/rfd/0434) and #4166. 
--- .github/renovate.json | 9 ++++++++ tools/renovate-post-upgrade.sh | 42 ++++++++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+) create mode 100644 .github/renovate.json create mode 100755 tools/renovate-post-upgrade.sh diff --git a/.github/renovate.json b/.github/renovate.json new file mode 100644 index 0000000000..405a3e282b --- /dev/null +++ b/.github/renovate.json @@ -0,0 +1,9 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "extends": [ + "local>oxidecomputer/renovate-config", + "local>oxidecomputer/renovate-config//rust/autocreate", + "local>oxidecomputer/renovate-config:post-upgrade", + "helpers:pinGitHubActionDigests" + ] +} diff --git a/tools/renovate-post-upgrade.sh b/tools/renovate-post-upgrade.sh new file mode 100755 index 0000000000..c21832e0a9 --- /dev/null +++ b/tools/renovate-post-upgrade.sh @@ -0,0 +1,42 @@ +#!/bin/bash + +# This script is run after Renovate upgrades dependencies or lock files. + +set -euo pipefail + +# Function to retry a command up to 3 times. +function retry_command { + local retries=3 + local delay=5 + local count=0 + until "$@"; do + exit_code=$? + count=$((count+1)) + if [ $count -lt $retries ]; then + echo "Command failed with exit code $exit_code. Retrying in $delay seconds..." + sleep $delay + else + echo "Command failed with exit code $exit_code after $count attempts." + return $exit_code + fi + done +} + +# Download and install cargo-hakari if it is not already installed. +if ! command -v cargo-hakari &> /dev/null; then + # Need cargo-binstall to install cargo-hakari. + if ! command -v cargo-binstall &> /dev/null; then + # Fetch cargo binstall. + echo "Installing cargo-binstall..." + curl --retry 3 -L --proto '=https' --tlsv1.2 -sSfO https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh + retry_command bash install-from-binstall-release.sh + fi + + # Install cargo-hakari. + echo "Installing cargo-hakari..." 
+ retry_command cargo binstall cargo-hakari --no-confirm +fi + +# Run cargo hakari to regenerate the workspace-hack file. +echo "Running cargo-hakari..." +cargo hakari generate