diff --git a/.config/hakari.toml b/.config/hakari.toml index 9562f92300..0d883dc6f6 100644 --- a/.config/hakari.toml +++ b/.config/hakari.toml @@ -6,6 +6,10 @@ hakari-package = "omicron-workspace-hack" # Format for `workspace-hack = ...` lines in other Cargo.tomls. Requires cargo-hakari 0.9.8 or above. dep-format-version = "4" +# Output lines as `omicron-workspace-hack.workspace = true`. Requires +# cargo-hakari 0.9.28 or above. +workspace-hack-line-style = "workspace-dotted" + # Setting workspace.resolver = "2" in the root Cargo.toml is HIGHLY recommended. # Hakari works much better with the new feature resolver. # For more about the new feature resolver, see: @@ -22,8 +26,8 @@ platforms = [ # "x86_64-pc-windows-msvc", ] +# Write out exact versions rather than a semver range. (Defaults to false.) +exact-versions = true + [traversal-excludes] workspace-members = ["xtask"] - -# Write out exact versions rather than a semver range. (Defaults to false.) -# exact-versions = true diff --git a/.github/buildomat/jobs/deploy.sh b/.github/buildomat/jobs/deploy.sh index 5d3dd8ec39..c2579d98ea 100755 --- a/.github/buildomat/jobs/deploy.sh +++ b/.github/buildomat/jobs/deploy.sh @@ -143,6 +143,7 @@ cd /opt/oxide/work ptime -m tar xvzf /input/package/work/package.tar.gz cp /input/package/work/zones/* out/ +mv out/omicron-nexus-single-sled.tar.gz out/omicron-nexus.tar.gz mkdir tests for p in /input/ci-tools/work/end-to-end-tests/*.gz; do ptime -m gunzip < "$p" > "tests/$(basename "${p%.gz}")" diff --git a/.github/buildomat/jobs/package.sh b/.github/buildomat/jobs/package.sh index fe5d6a9b7f..64c087524e 100755 --- a/.github/buildomat/jobs/package.sh +++ b/.github/buildomat/jobs/package.sh @@ -45,7 +45,7 @@ ptime -m ./tools/ci_download_softnpu_machinery # Build the test target ptime -m cargo run --locked --release --bin omicron-package -- \ - -t test target create -i standard -m non-gimlet -s softnpu + -t test target create -i standard -m non-gimlet -s softnpu -r single-sled ptime -m cargo run --locked --release --bin omicron-package -- \ -t test package @@ -81,9 +81,13 @@ stamp_packages() { done } +# Keep the single-sled Nexus zone around for the deploy job. (The global zone +# build below overwrites the file.) 
+mv out/omicron-nexus.tar.gz out/omicron-nexus-single-sled.tar.gz + # Build necessary for the global zone ptime -m cargo run --locked --release --bin omicron-package -- \ - -t host target create -i standard -m gimlet -s asic + -t host target create -i standard -m gimlet -s asic -r multi-sled ptime -m cargo run --locked --release --bin omicron-package -- \ -t host package stamp_packages omicron-sled-agent maghemite propolis-server overlay @@ -111,6 +115,7 @@ zones=( out/external-dns.tar.gz out/internal-dns.tar.gz out/omicron-nexus.tar.gz + out/omicron-nexus-single-sled.tar.gz out/oximeter-collector.tar.gz out/propolis-server.tar.gz out/switch-*.tar.gz diff --git a/.github/buildomat/jobs/tuf-repo.sh b/.github/buildomat/jobs/tuf-repo.sh index a06468c6b2..e169bebff6 100644 --- a/.github/buildomat/jobs/tuf-repo.sh +++ b/.github/buildomat/jobs/tuf-repo.sh @@ -77,10 +77,11 @@ done mkdir /work/package pushd /work/package tar xf /input/package/work/package.tar.gz out package-manifest.toml target/release/omicron-package -target/release/omicron-package -t default target create -i standard -m gimlet -s asic +target/release/omicron-package -t default target create -i standard -m gimlet -s asic -r multi-sled ln -s /input/package/work/zones/* out/ rm out/switch-softnpu.tar.gz # not used when target switch=asic rm out/omicron-gateway-softnpu.tar.gz # not used when target switch=asic +rm out/omicron-nexus-single-sled.tar.gz # only used for deploy tests for zone in out/*.tar.gz; do target/release/omicron-package stamp "$(basename "${zone%.tar.gz}")" "$VERSION" done @@ -218,7 +219,7 @@ EOF done } # usage: SERIES ROT_DIR ROT_VERSION BOARDS... -add_hubris_artifacts rot-staging-dev staging/dev cert-staging-dev-v1.0.0 "${ALL_BOARDS[@]}" +add_hubris_artifacts rot-staging-dev staging/dev cert-staging-dev-v1.0.2 "${ALL_BOARDS[@]}" add_hubris_artifacts rot-prod-rel prod/rel cert-prod-rel-v1.0.0 "${ALL_BOARDS[@]}" for series in "${SERIES_LIST[@]}"; do diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 722aacbe0f..f5cf1dc885 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -41,7 +41,7 @@ jobs: - name: Install Pre-Requisites run: ./tools/install_builder_prerequisites.sh -y - name: Set default target - run: cargo run --bin omicron-package -- -t default target create + run: cargo run --bin omicron-package -- -t default target create -r single-sled - name: Check build of deployed Omicron packages run: cargo run --bin omicron-package -- -t default check diff --git a/Cargo.lock b/Cargo.lock index 4680158258..7f5376e644 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -67,6 +67,17 @@ dependencies = [ "zeroize", ] +[[package]] +name = "ahash" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" +dependencies = [ + "getrandom 0.2.10", + "once_cell", + "version_check", +] + [[package]] name = "ahash" version = "0.8.3" @@ -474,20 +485,20 @@ dependencies = [ [[package]] name = "bhyve_api" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=de6369aa45a255f896da0a3ddd2b7152c036a4e9#de6369aa45a255f896da0a3ddd2b7152c036a4e9" +source = "git+https://github.com/oxidecomputer/propolis?rev=42c878b71a58d430dfc306126af5d40ca816d70f#42c878b71a58d430dfc306126af5d40ca816d70f" dependencies = [ "bhyve_api_sys", "libc", - "num_enum 0.5.11", + "strum", ] [[package]] name = "bhyve_api_sys" version = "0.0.0" -source = 
"git+https://github.com/oxidecomputer/propolis?rev=de6369aa45a255f896da0a3ddd2b7152c036a4e9#de6369aa45a255f896da0a3ddd2b7152c036a4e9" +source = "git+https://github.com/oxidecomputer/propolis?rev=42c878b71a58d430dfc306126af5d40ca816d70f#42c878b71a58d430dfc306126af5d40ca816d70f" dependencies = [ "libc", - "num_enum 0.5.11", + "strum", ] [[package]] @@ -1211,6 +1222,18 @@ dependencies = [ "libc", ] +[[package]] +name = "cpuid_profile_config" +version = "0.0.0" +source = "git+https://github.com/oxidecomputer/propolis?rev=42c878b71a58d430dfc306126af5d40ca816d70f#42c878b71a58d430dfc306126af5d40ca816d70f" +dependencies = [ + "propolis", + "serde", + "serde_derive", + "thiserror", + "toml 0.7.8", +] + [[package]] name = "crc" version = "3.0.1" @@ -1413,7 +1436,7 @@ dependencies = [ [[package]] name = "crucible" version = "0.0.1" -source = "git+https://github.com/oxidecomputer/crucible?rev=aeb69dda26c7e1a8b6eada425670cd4b83f91c07#aeb69dda26c7e1a8b6eada425670cd4b83f91c07" +source = "git+https://github.com/oxidecomputer/crucible?rev=20273bcca1fd5834ebc3e67dfa7020f0e99ad681#20273bcca1fd5834ebc3e67dfa7020f0e99ad681" dependencies = [ "aes-gcm-siv", "anyhow", @@ -1447,17 +1470,18 @@ dependencies = [ "tokio", "tokio-rustls", "tokio-util", - "toml 0.7.8", + "toml 0.8.0", "tracing", "usdt", "uuid", "version_check", + "workspace-hack", ] [[package]] name = "crucible-agent-client" version = "0.0.1" -source = "git+https://github.com/oxidecomputer/crucible?rev=aeb69dda26c7e1a8b6eada425670cd4b83f91c07#aeb69dda26c7e1a8b6eada425670cd4b83f91c07" +source = "git+https://github.com/oxidecomputer/crucible?rev=20273bcca1fd5834ebc3e67dfa7020f0e99ad681#20273bcca1fd5834ebc3e67dfa7020f0e99ad681" dependencies = [ "anyhow", "chrono", @@ -1467,24 +1491,26 @@ dependencies = [ "schemars", "serde", "serde_json", + "workspace-hack", ] [[package]] name = "crucible-client-types" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/crucible?rev=aeb69dda26c7e1a8b6eada425670cd4b83f91c07#aeb69dda26c7e1a8b6eada425670cd4b83f91c07" +source = "git+https://github.com/oxidecomputer/crucible?rev=20273bcca1fd5834ebc3e67dfa7020f0e99ad681#20273bcca1fd5834ebc3e67dfa7020f0e99ad681" dependencies = [ "base64 0.21.4", "schemars", "serde", "serde_json", "uuid", + "workspace-hack", ] [[package]] name = "crucible-common" version = "0.0.1" -source = "git+https://github.com/oxidecomputer/crucible?rev=aeb69dda26c7e1a8b6eada425670cd4b83f91c07#aeb69dda26c7e1a8b6eada425670cd4b83f91c07" +source = "git+https://github.com/oxidecomputer/crucible?rev=20273bcca1fd5834ebc3e67dfa7020f0e99ad681#20273bcca1fd5834ebc3e67dfa7020f0e99ad681" dependencies = [ "anyhow", "atty", @@ -1502,16 +1528,17 @@ dependencies = [ "tempfile", "thiserror", "tokio-rustls", - "toml 0.7.8", + "toml 0.8.0", "twox-hash", "uuid", "vergen", + "workspace-hack", ] [[package]] name = "crucible-pantry-client" version = "0.0.1" -source = "git+https://github.com/oxidecomputer/crucible?rev=aeb69dda26c7e1a8b6eada425670cd4b83f91c07#aeb69dda26c7e1a8b6eada425670cd4b83f91c07" +source = "git+https://github.com/oxidecomputer/crucible?rev=20273bcca1fd5834ebc3e67dfa7020f0e99ad681#20273bcca1fd5834ebc3e67dfa7020f0e99ad681" dependencies = [ "anyhow", "chrono", @@ -1522,32 +1549,36 @@ dependencies = [ "serde", "serde_json", "uuid", + "workspace-hack", ] [[package]] name = "crucible-protocol" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/crucible?rev=aeb69dda26c7e1a8b6eada425670cd4b83f91c07#aeb69dda26c7e1a8b6eada425670cd4b83f91c07" +source = 
"git+https://github.com/oxidecomputer/crucible?rev=20273bcca1fd5834ebc3e67dfa7020f0e99ad681#20273bcca1fd5834ebc3e67dfa7020f0e99ad681" dependencies = [ "anyhow", "bincode", "bytes", "crucible-common", "num_enum 0.7.0", + "schemars", "serde", "tokio-util", "uuid", + "workspace-hack", ] [[package]] name = "crucible-smf" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/crucible?rev=aeb69dda26c7e1a8b6eada425670cd4b83f91c07#aeb69dda26c7e1a8b6eada425670cd4b83f91c07" +source = "git+https://github.com/oxidecomputer/crucible?rev=20273bcca1fd5834ebc3e67dfa7020f0e99ad681#20273bcca1fd5834ebc3e67dfa7020f0e99ad681" dependencies = [ "libc", "num-derive", "num-traits", "thiserror", + "workspace-hack", ] [[package]] @@ -1985,10 +2016,10 @@ checksum = "7e1a8646b2c125eeb9a84ef0faa6d2d102ea0d5da60b824ade2743263117b848" [[package]] name = "dladm" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=de6369aa45a255f896da0a3ddd2b7152c036a4e9#de6369aa45a255f896da0a3ddd2b7152c036a4e9" +source = "git+https://github.com/oxidecomputer/propolis?rev=42c878b71a58d430dfc306126af5d40ca816d70f#42c878b71a58d430dfc306126af5d40ca816d70f" dependencies = [ "libc", - "num_enum 0.5.11", + "strum", ] [[package]] @@ -2322,26 +2353,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "enum-iterator" -version = "1.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7add3873b5dd076766ee79c8e406ad1a472c385476b9e38849f8eec24f1be689" -dependencies = [ - "enum-iterator-derive", -] - -[[package]] -name = "enum-iterator-derive" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eecf8589574ce9b895052fa12d69af7a233f99e6107f5cb8dd1044f2a17bfdcb" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.32", -] - [[package]] name = "env_logger" version = "0.9.3" @@ -2404,9 +2415,9 @@ dependencies = [ [[package]] name = "expectorate" -version = "1.0.7" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "710ab6a2d57038a835d66f78d5af3fa5d27c1ec4682f823b9203c48826cb0591" +checksum = "de6f19b25bdfa2747ae775f37cd109c31f1272d4e4c83095be0727840aa1d75f" dependencies = [ "console", "newline-converter", @@ -2965,6 +2976,9 @@ name = "hashbrown" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +dependencies = [ + "ahash 0.7.6", +] [[package]] name = "hashbrown" @@ -2972,7 +2986,7 @@ version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" dependencies = [ - "ahash", + "ahash 0.8.3", ] [[package]] @@ -2981,7 +2995,7 @@ version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" dependencies = [ - "ahash", + "ahash 0.8.3", "allocator-api2", ] @@ -3219,7 +3233,7 @@ dependencies = [ [[package]] name = "hubtools" version = "0.4.1" -source = "git+https://github.com/oxidecomputer/hubtools.git?branch=main#0c642f6e1f83b74725c7119a546bc26ac7452a48" +source = "git+https://github.com/oxidecomputer/hubtools.git?branch=main#2481445b80f8476041f62a1c8b6301e4918c63ed" dependencies = [ "lpc55_areas", "lpc55_sign", @@ -4008,7 +4022,7 @@ checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" [[package]] name = "lpc55_areas" version = "0.2.4" -source = 
"git+https://github.com/oxidecomputer/lpc55_support#4051a3b9421573dc36ed6098b292a7609a3cf98b" +source = "git+https://github.com/oxidecomputer/lpc55_support#96f064eaae5e95930efaab6c29fd1b2e22225dac" dependencies = [ "bitfield", "clap 4.4.3", @@ -4018,8 +4032,8 @@ dependencies = [ [[package]] name = "lpc55_sign" -version = "0.3.2" -source = "git+https://github.com/oxidecomputer/lpc55_support#4051a3b9421573dc36ed6098b292a7609a3cf98b" +version = "0.3.3" +source = "git+https://github.com/oxidecomputer/lpc55_support#96f064eaae5e95930efaab6c29fd1b2e22225dac" dependencies = [ "byteorder", "const-oid", @@ -5125,8 +5139,10 @@ dependencies = [ "diesel", "dropshot", "expectorate", + "futures", "humantime", "internal-dns 0.1.0", + "ipnetwork", "nexus-client 0.1.0", "nexus-db-model", "nexus-db-queries", @@ -5138,6 +5154,7 @@ dependencies = [ "omicron-rpaths", "omicron-test-utils", "omicron-workspace-hack", + "oximeter-client", "pq-sys", "regex", "serde", @@ -5299,6 +5316,7 @@ dependencies = [ "static_assertions", "subprocess", "tar", + "tempfile", "thiserror", "tofino", "tokio", @@ -5368,6 +5386,7 @@ dependencies = [ "futures", "futures-channel", "futures-core", + "futures-executor", "futures-io", "futures-sink", "futures-task", @@ -5375,11 +5394,13 @@ dependencies = [ "gateway-messages", "generic-array", "getrandom 0.2.10", + "hashbrown 0.12.3", "hashbrown 0.13.2", "hashbrown 0.14.0", "hex", "hyper", "hyper-rustls", + "indexmap 1.9.3", "indexmap 2.0.0", "inout", "ipnetwork", @@ -5397,7 +5418,9 @@ dependencies = [ "num-traits", "once_cell", "openapiv3", + "parking_lot 0.12.1", "petgraph", + "phf_shared 0.11.2", "postgres-types", "ppv-lite86", "predicates 3.0.3", @@ -5431,6 +5454,7 @@ dependencies = [ "toml_datetime", "toml_edit 0.19.15", "tracing", + "tracing-core", "trust-dns-proto", "unicode-bidi", "unicode-normalization", @@ -5714,6 +5738,7 @@ name = "oximeter-client" version = "0.1.0" dependencies = [ "chrono", + "futures", "omicron-common 0.1.0", "omicron-workspace-hack", "progenitor", @@ -5727,24 +5752,31 @@ dependencies = [ name = "oximeter-collector" version = "0.1.0" dependencies = [ + "anyhow", "clap 4.4.3", "dropshot", "expectorate", "futures", "internal-dns 0.1.0", "nexus-client 0.1.0", + "nexus-types", "omicron-common 0.1.0", "omicron-test-utils", "omicron-workspace-hack", "openapi-lint", "openapiv3", "oximeter 0.1.0", + "oximeter-client", "oximeter-db", + "rand 0.8.5", "reqwest", + "schemars", "serde", "serde_json", "slog", + "slog-async", "slog-dtrace", + "slog-term", "subprocess", "thiserror", "tokio", @@ -5819,7 +5851,9 @@ dependencies = [ name = "oximeter-producer" version = "0.1.0" dependencies = [ + "anyhow", "chrono", + "clap 4.4.3", "dropshot", "nexus-client 0.1.0", "omicron-common 0.1.0", @@ -6554,7 +6588,7 @@ dependencies = [ [[package]] name = "propolis" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=de6369aa45a255f896da0a3ddd2b7152c036a4e9#de6369aa45a255f896da0a3ddd2b7152c036a4e9" +source = "git+https://github.com/oxidecomputer/propolis?rev=42c878b71a58d430dfc306126af5d40ca816d70f#42c878b71a58d430dfc306126af5d40ca816d70f" dependencies = [ "anyhow", "bhyve_api", @@ -6569,7 +6603,6 @@ dependencies = [ "lazy_static", "libc", "nexus-client 0.1.0 (git+https://github.com/oxidecomputer/omicron?branch=main)", - "num_enum 0.5.11", "oximeter 0.1.0 (git+https://github.com/oxidecomputer/omicron?branch=main)", "propolis_types", "rfb", @@ -6577,6 +6610,7 @@ dependencies = [ "serde_arrays", "serde_json", "slog", + "strum", "thiserror", "tokio", "usdt", @@ 
-6587,7 +6621,7 @@ dependencies = [ [[package]] name = "propolis-client" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=de6369aa45a255f896da0a3ddd2b7152c036a4e9#de6369aa45a255f896da0a3ddd2b7152c036a4e9" +source = "git+https://github.com/oxidecomputer/propolis?rev=42c878b71a58d430dfc306126af5d40ca816d70f#42c878b71a58d430dfc306126af5d40ca816d70f" dependencies = [ "async-trait", "base64 0.21.4", @@ -6604,14 +6638,14 @@ dependencies = [ "slog", "thiserror", "tokio", - "tokio-tungstenite 0.17.2", + "tokio-tungstenite 0.20.1", "uuid", ] [[package]] name = "propolis-server" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=de6369aa45a255f896da0a3ddd2b7152c036a4e9#de6369aa45a255f896da0a3ddd2b7152c036a4e9" +source = "git+https://github.com/oxidecomputer/propolis?rev=42c878b71a58d430dfc306126af5d40ca816d70f#42c878b71a58d430dfc306126af5d40ca816d70f" dependencies = [ "anyhow", "async-trait", @@ -6626,7 +6660,6 @@ dependencies = [ "const_format", "crucible-client-types", "dropshot", - "enum-iterator", "erased-serde", "futures", "http", @@ -6634,7 +6667,6 @@ dependencies = [ "internal-dns 0.1.0 (git+https://github.com/oxidecomputer/omicron?branch=main)", "lazy_static", "nexus-client 0.1.0 (git+https://github.com/oxidecomputer/omicron?branch=main)", - "num_enum 0.5.11", "omicron-common 0.1.0 (git+https://github.com/oxidecomputer/omicron?branch=main)", "oximeter 0.1.0 (git+https://github.com/oxidecomputer/omicron?branch=main)", "oximeter-producer 0.1.0 (git+https://github.com/oxidecomputer/omicron?branch=main)", @@ -6652,9 +6684,10 @@ dependencies = [ "slog-bunyan", "slog-dtrace", "slog-term", + "strum", "thiserror", "tokio", - "tokio-tungstenite 0.17.2", + "tokio-tungstenite 0.20.1", "tokio-util", "toml 0.7.8", "usdt", @@ -6664,8 +6697,9 @@ dependencies = [ [[package]] name = "propolis-server-config" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=de6369aa45a255f896da0a3ddd2b7152c036a4e9#de6369aa45a255f896da0a3ddd2b7152c036a4e9" +source = "git+https://github.com/oxidecomputer/propolis?rev=42c878b71a58d430dfc306126af5d40ca816d70f#42c878b71a58d430dfc306126af5d40ca816d70f" dependencies = [ + "cpuid_profile_config", "serde", "serde_derive", "thiserror", @@ -6675,7 +6709,7 @@ dependencies = [ [[package]] name = "propolis_types" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=de6369aa45a255f896da0a3ddd2b7152c036a4e9#de6369aa45a255f896da0a3ddd2b7152c036a4e9" +source = "git+https://github.com/oxidecomputer/propolis?rev=42c878b71a58d430dfc306126af5d40ca816d70f#42c878b71a58d430dfc306126af5d40ca816d70f" dependencies = [ "schemars", "serde", @@ -7142,9 +7176,9 @@ dependencies = [ [[package]] name = "ringbuffer" -version = "0.14.2" +version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4eba9638e96ac5a324654f8d47fb71c5e21abef0f072740ed9c1d4b0801faa37" +checksum = "3df6368f71f205ff9c33c076d170dd56ebf68e8161c733c0caa07a7a5509ed53" [[package]] name = "ron" @@ -7906,17 +7940,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "sha-1" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5058ada175748e33390e40e872bd0fe59a19f265d0158daa551c5a88a76009c" -dependencies = [ - "cfg-if 1.0.0", - "cpufeatures", - "digest", -] - [[package]] name = "sha1" version = "0.10.5" @@ -8753,18 +8776,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.48" +version = "1.0.49" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d6d7a740b8a666a7e828dd00da9c0dc290dff53154ea77ac109281de90589b7" +checksum = "1177e8c6d7ede7afde3585fd2513e611227efd6481bd78d2e82ba1ce16557ed4" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.48" +version = "1.0.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" +checksum = "10712f02019e9288794769fba95cd6847df9874d49d871d062172f9dd41bc4cc" dependencies = [ "proc-macro2", "quote", @@ -9017,26 +9040,26 @@ dependencies = [ [[package]] name = "tokio-tungstenite" -version = "0.17.2" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f714dd15bead90401d77e04243611caec13726c2408afd5b31901dfcdcb3b181" +checksum = "54319c93411147bced34cb5609a80e0a8e44c5999c93903a81cd866630ec0bfd" dependencies = [ "futures-util", "log", "tokio", - "tungstenite 0.17.3", + "tungstenite 0.18.0", ] [[package]] name = "tokio-tungstenite" -version = "0.18.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54319c93411147bced34cb5609a80e0a8e44c5999c93903a81cd866630ec0bfd" +checksum = "212d5dcb2a1ce06d81107c3d0ffa3121fe974b73f068c8282cb1c32328113b6c" dependencies = [ "futures-util", "log", "tokio", - "tungstenite 0.18.0", + "tungstenite 0.20.1", ] [[package]] @@ -9204,6 +9227,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" dependencies = [ "once_cell", + "valuable", ] [[package]] @@ -9385,9 +9409,9 @@ dependencies = [ [[package]] name = "tungstenite" -version = "0.17.3" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e27992fd6a8c29ee7eef28fc78349aa244134e10ad447ce3b9f0ac0ed0fa4ce0" +checksum = "30ee6ab729cd4cf0fd55218530c4522ed30b7b6081752839b68fcec8d0960788" dependencies = [ "base64 0.13.1", "byteorder", @@ -9396,7 +9420,7 @@ dependencies = [ "httparse", "log", "rand 0.8.5", - "sha-1", + "sha1", "thiserror", "url", "utf-8", @@ -9404,13 +9428,13 @@ dependencies = [ [[package]] name = "tungstenite" -version = "0.18.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30ee6ab729cd4cf0fd55218530c4522ed30b7b6081752839b68fcec8d0960788" +checksum = "9e3dac10fd62eaf6617d3a904ae222845979aec67c615d1c842b4002c7666fb9" dependencies = [ - "base64 0.13.1", "byteorder", "bytes", + "data-encoding", "http", "httparse", "log", @@ -9427,8 +9451,8 @@ version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ - "cfg-if 1.0.0", - "rand 0.8.5", + "cfg-if 0.1.10", + "rand 0.4.6", "static_assertions", ] @@ -9692,6 +9716,12 @@ dependencies = [ "serde", ] +[[package]] +name = "valuable" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" + [[package]] name = "vcpkg" version = "0.2.15" @@ -9726,17 +9756,16 @@ checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "viona_api" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=de6369aa45a255f896da0a3ddd2b7152c036a4e9#de6369aa45a255f896da0a3ddd2b7152c036a4e9" +source = 
"git+https://github.com/oxidecomputer/propolis?rev=42c878b71a58d430dfc306126af5d40ca816d70f#42c878b71a58d430dfc306126af5d40ca816d70f" dependencies = [ "libc", - "num_enum 0.5.11", "viona_api_sys", ] [[package]] name = "viona_api_sys" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=de6369aa45a255f896da0a3ddd2b7152c036a4e9#de6369aa45a255f896da0a3ddd2b7152c036a4e9" +source = "git+https://github.com/oxidecomputer/propolis?rev=42c878b71a58d430dfc306126af5d40ca816d70f#42c878b71a58d430dfc306126af5d40ca816d70f" dependencies = [ "libc", ] @@ -10315,6 +10344,61 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "workspace-hack" +version = "0.1.0" +source = "git+https://github.com/oxidecomputer/crucible?rev=20273bcca1fd5834ebc3e67dfa7020f0e99ad681#20273bcca1fd5834ebc3e67dfa7020f0e99ad681" +dependencies = [ + "bitflags 2.4.0", + "bytes", + "cc", + "chrono", + "console", + "crossbeam-utils", + "crypto-common", + "digest", + "either", + "futures-channel", + "futures-core", + "futures-executor", + "futures-sink", + "futures-util", + "getrandom 0.2.10", + "hashbrown 0.12.3", + "hex", + "hyper", + "indexmap 1.9.3", + "libc", + "log", + "mio", + "num-traits", + "once_cell", + "openapiv3", + "parking_lot 0.12.1", + "phf_shared 0.11.2", + "rand 0.8.5", + "rand_chacha 0.3.1", + "rand_core 0.6.4", + "reqwest", + "rustls", + "schemars", + "semver 1.0.18", + "serde", + "slog", + "syn 1.0.109", + "syn 2.0.32", + "time", + "time-macros", + "tokio", + "tokio-util", + "toml_datetime", + "toml_edit 0.19.15", + "tracing", + "tracing-core", + "usdt", + "uuid", +] + [[package]] name = "wyz" version = "0.5.1" diff --git a/Cargo.toml b/Cargo.toml index 56c311c85d..90fc099b8d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,26 +2,31 @@ members = [ "api_identity", "bootstore", - "bootstrap-agent-client", "caboose-util", "certificates", + "clients/bootstrap-agent-client", + "clients/ddm-admin-client", + "clients/dns-service-client", + "clients/dpd-client", + "clients/gateway-client", + "clients/installinator-artifact-client", + "clients/nexus-client", + "clients/oxide-client", + "clients/oximeter-client", + "clients/sled-agent-client", + "clients/wicketd-client", "common", "crdb-seed", - "ddm-admin-client", - "deploy", "dev-tools/omdb", "dev-tools/omicron-dev", + "dev-tools/thing-flinger", "dev-tools/xtask", "dns-server", - "dns-service-client", - "dpd-client", "end-to-end-tests", "gateway-cli", - "gateway-client", "gateway-test-utils", "gateway", "illumos-utils", - "installinator-artifact-client", "installinator-artifactd", "installinator-common", "installinator", @@ -29,7 +34,6 @@ members = [ "internal-dns", "ipcc-key-value", "key-manager", - "nexus-client", "nexus", "nexus/authz-macros", "nexus/db-macros", @@ -40,8 +44,6 @@ members = [ "nexus/test-utils-macros", "nexus/test-utils", "nexus/types", - "oxide-client", - "oximeter-client", "oximeter/collector", "oximeter/db", "oximeter/instruments", @@ -51,7 +53,6 @@ members = [ "package", "passwords", "rpaths", - "sled-agent-client", "sled-agent", "sled-hardware", "sp-sim", @@ -62,70 +63,69 @@ members = [ "wicket-common", "wicket-dbg", "wicket", - "wicketd-client", "wicketd", "workspace-hack", ] default-members = [ - "bootstrap-agent-client", "bootstore", "caboose-util", "certificates", + "clients/bootstrap-agent-client", + "clients/ddm-admin-client", + "clients/dns-service-client", + "clients/dpd-client", + "clients/gateway-client", + "clients/installinator-artifact-client", + "clients/nexus-client", + "clients/oxide-client", + 
"clients/oximeter-client", + "clients/sled-agent-client", + "clients/wicketd-client", "common", - "ddm-admin-client", - "dpd-client", - "deploy", "dev-tools/omdb", "dev-tools/omicron-dev", + "dev-tools/thing-flinger", "dev-tools/xtask", "dns-server", - "dns-service-client", - "gateway", "gateway-cli", - "gateway-client", "gateway-test-utils", + "gateway", "illumos-utils", - "installinator", - "installinator-artifact-client", "installinator-artifactd", "installinator-common", - "internal-dns", + "installinator", "internal-dns-cli", + "internal-dns", "ipcc-key-value", "key-manager", "nexus", - "nexus-client", "nexus/authz-macros", "nexus/db-macros", "nexus/db-model", "nexus/db-queries", "nexus/defaults", "nexus/types", - "oxide-client", - "oximeter-client", "oximeter/collector", "oximeter/db", "oximeter/instruments", - "oximeter/oximeter", "oximeter/oximeter-macro-impl", + "oximeter/oximeter", "oximeter/producer", "package", "passwords", "rpaths", "sled-agent", - "sled-agent-client", "sled-hardware", "sp-sim", "test-utils", - "tufaceous", "tufaceous-lib", + "tufaceous", "update-engine", - "wicket", "wicket-common", "wicket-dbg", + "wicket", "wicketd", - "wicketd-client", ] resolver = "2" @@ -144,7 +144,7 @@ bb8 = "0.8.1" bcs = "0.1.5" bincode = "1.3.3" bootstore = { path = "bootstore" } -bootstrap-agent-client = { path = "bootstrap-agent-client" } +bootstrap-agent-client = { path = "clients/bootstrap-agent-client" } buf-list = { version = "1.0.3", features = ["tokio1"] } byteorder = "1.4.3" bytes = "1.5.0" @@ -161,14 +161,14 @@ cookie = "0.16" criterion = { version = "0.5.1", features = [ "async_tokio" ] } crossbeam = "0.8" crossterm = { version = "0.27.0", features = ["event-stream"] } -crucible-agent-client = { git = "https://github.com/oxidecomputer/crucible", rev = "aeb69dda26c7e1a8b6eada425670cd4b83f91c07" } -crucible-client-types = { git = "https://github.com/oxidecomputer/crucible", rev = "aeb69dda26c7e1a8b6eada425670cd4b83f91c07" } -crucible-pantry-client = { git = "https://github.com/oxidecomputer/crucible", rev = "aeb69dda26c7e1a8b6eada425670cd4b83f91c07" } -crucible-smf = { git = "https://github.com/oxidecomputer/crucible", rev = "aeb69dda26c7e1a8b6eada425670cd4b83f91c07" } +crucible-agent-client = { git = "https://github.com/oxidecomputer/crucible", rev = "20273bcca1fd5834ebc3e67dfa7020f0e99ad681" } +crucible-client-types = { git = "https://github.com/oxidecomputer/crucible", rev = "20273bcca1fd5834ebc3e67dfa7020f0e99ad681" } +crucible-pantry-client = { git = "https://github.com/oxidecomputer/crucible", rev = "20273bcca1fd5834ebc3e67dfa7020f0e99ad681" } +crucible-smf = { git = "https://github.com/oxidecomputer/crucible", rev = "20273bcca1fd5834ebc3e67dfa7020f0e99ad681" } curve25519-dalek = "4" datatest-stable = "0.1.3" display-error-chain = "0.1.1" -ddm-admin-client = { path = "ddm-admin-client" } +ddm-admin-client = { path = "clients/ddm-admin-client" } db-macros = { path = "nexus/db-macros" } debug-ignore = "1.0.5" derive_more = "0.99.17" @@ -176,18 +176,18 @@ derive-where = "1.2.5" diesel = { version = "2.1.1", features = ["postgres", "r2d2", "chrono", "serde_json", "network-address", "uuid"] } diesel-dtrace = { git = "https://github.com/oxidecomputer/diesel-dtrace", branch = "main" } dns-server = { path = "dns-server" } -dns-service-client = { path = "dns-service-client" } -dpd-client = { path = "dpd-client" } +dns-service-client = { path = "clients/dns-service-client" } +dpd-client = { path = "clients/dpd-client" } dropshot = { git = 
"https://github.com/oxidecomputer/dropshot", branch = "main", features = [ "usdt-probes" ] } either = "1.9.0" -expectorate = "1.0.7" +expectorate = "1.1.0" fatfs = "0.3.6" flate2 = "1.0.27" flume = "0.11.0" foreign-types = "0.3.2" fs-err = "2.9.0" futures = "0.3.28" -gateway-client = { path = "gateway-client" } +gateway-client = { path = "clients/gateway-client" } gateway-messages = { git = "https://github.com/oxidecomputer/management-gateway-service", rev = "1e180ae55e56bd17af35cb868ffbd18ce487351d", default-features = false, features = ["std"] } gateway-sp-comms = { git = "https://github.com/oxidecomputer/management-gateway-service", rev = "1e180ae55e56bd17af35cb868ffbd18ce487351d" } gateway-test-utils = { path = "gateway-test-utils" } @@ -209,7 +209,7 @@ indexmap = "2.0.0" indicatif = { version = "0.17.6", features = ["rayon"] } installinator = { path = "installinator" } installinator-artifactd = { path = "installinator-artifactd" } -installinator-artifact-client = { path = "installinator-artifact-client" } +installinator-artifact-client = { path = "clients/installinator-artifact-client" } installinator-common = { path = "installinator-common" } internal-dns = { path = "internal-dns" } ipcc-key-value = { path = "ipcc-key-value" } @@ -223,12 +223,13 @@ macaddr = { version = "1.0.1", features = ["serde_std"] } mime_guess = "2.0.4" mockall = "0.11" newtype_derive = "0.1.6" -nexus-client = { path = "nexus-client" } +nexus-client = { path = "clients/nexus-client" } nexus-db-model = { path = "nexus/db-model" } nexus-db-queries = { path = "nexus/db-queries" } nexus-defaults = { path = "nexus/defaults" } omicron-certificates = { path = "certificates" } omicron-passwords = { path = "passwords" } +omicron-workspace-hack = "0.1.0" nexus-test-interface = { path = "nexus/test-interface" } nexus-test-utils-macros = { path = "nexus/test-utils-macros" } nexus-test-utils = { path = "nexus/test-utils" } @@ -236,10 +237,8 @@ nexus-types = { path = "nexus/types" } num-integer = "0.1.45" num = { version = "0.4.1", default-features = false, features = [ "libm" ] } omicron-common = { path = "common" } -omicron-dev = { path = "dev-tools/omicron-dev" } omicron-gateway = { path = "gateway" } omicron-nexus = { path = "nexus" } -omicron-omdb = { path = "omdb" } omicron-package = { path = "package" } omicron-rpaths = { path = "rpaths" } omicron-sled-agent = { path = "sled-agent" } @@ -258,7 +257,7 @@ opte-ioctl = { git = "https://github.com/oxidecomputer/opte", rev = "631c2017f19 oso = "0.26" owo-colors = "3.5.0" oximeter = { path = "oximeter/oximeter" } -oximeter-client = { path = "oximeter-client" } +oximeter-client = { path = "clients/oximeter-client" } oximeter-db = { path = "oximeter/db/" } oximeter-collector = { path = "oximeter/collector" } oximeter-instruments = { path = "oximeter/instruments" } @@ -278,9 +277,9 @@ pretty-hex = "0.3.0" proc-macro2 = "1.0" progenitor = { git = "https://github.com/oxidecomputer/progenitor", branch = "main" } progenitor-client = { git = "https://github.com/oxidecomputer/progenitor", branch = "main" } -bhyve_api = { git = "https://github.com/oxidecomputer/propolis", rev = "de6369aa45a255f896da0a3ddd2b7152c036a4e9" } -propolis-client = { git = "https://github.com/oxidecomputer/propolis", rev = "de6369aa45a255f896da0a3ddd2b7152c036a4e9", features = [ "generated-migration" ] } -propolis-server = { git = "https://github.com/oxidecomputer/propolis", rev = "de6369aa45a255f896da0a3ddd2b7152c036a4e9", default-features = false, features = ["mock-only"] } +bhyve_api = { git = 
"https://github.com/oxidecomputer/propolis", rev = "42c878b71a58d430dfc306126af5d40ca816d70f" } +propolis-client = { git = "https://github.com/oxidecomputer/propolis", rev = "42c878b71a58d430dfc306126af5d40ca816d70f", features = [ "generated-migration" ] } +propolis-server = { git = "https://github.com/oxidecomputer/propolis", rev = "42c878b71a58d430dfc306126af5d40ca816d70f", default-features = false, features = ["mock-only"] } proptest = "1.2.0" quote = "1.0" rand = "0.8.5" @@ -316,7 +315,7 @@ signal-hook = "0.3" signal-hook-tokio = { version = "0.3", features = [ "futures-v0_3" ] } similar-asserts = "1.5.0" sled = "0.34" -sled-agent-client = { path = "sled-agent-client" } +sled-agent-client = { path = "clients/sled-agent-client" } sled-hardware = { path = "sled-hardware" } slog = { version = "2.7", features = [ "dynamic-keys", "max_level_trace", "release_max_level_debug" ] } slog-async = "2.8" @@ -371,7 +370,7 @@ usdt = "0.3" walkdir = "2.4" wicket = { path = "wicket" } wicket-common = { path = "wicket-common" } -wicketd-client = { path = "wicketd-client" } +wicketd-client = { path = "clients/wicketd-client" } zeroize = { version = "1.6.0", features = ["zeroize_derive", "std"] } zip = { version = "0.6.6", default-features = false, features = ["deflate","bzip2"] } zone = { version = "0.2", default-features = false, features = ["async"] } @@ -556,3 +555,10 @@ opt-level = 3 [patch.crates-io.pq-sys] git = 'https://github.com/oxidecomputer/pq-sys' branch = "oxide/omicron" + +# Using the workspace-hack via this patch directive means that it only applies +# while building within this workspace. If another workspace imports a crate +# from here via a git dependency, it will not have the workspace-hack applied +# to it. +[patch.crates-io.omicron-workspace-hack] +path = "workspace-hack" diff --git a/api_identity/Cargo.toml b/api_identity/Cargo.toml index 9faf2a1878..547defa7c5 100644 --- a/api_identity/Cargo.toml +++ b/api_identity/Cargo.toml @@ -14,4 +14,4 @@ proc-macro = true proc-macro2.workspace = true quote.workspace = true syn.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/bootstore/Cargo.toml b/bootstore/Cargo.toml index eefe05c8d6..18e3e3876b 100644 --- a/bootstore/Cargo.toml +++ b/bootstore/Cargo.toml @@ -36,7 +36,7 @@ zeroize.workspace = true # utils`. Unfortunately, it doesn't appear possible to put the `pq-sys` dep # only in `[dev-dependencies]`. 
pq-sys = "*" -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] assert_matches.workspace = true diff --git a/caboose-util/Cargo.toml b/caboose-util/Cargo.toml index 253d54643d..91bf00741e 100644 --- a/caboose-util/Cargo.toml +++ b/caboose-util/Cargo.toml @@ -7,4 +7,4 @@ license = "MPL-2.0" [dependencies] anyhow.workspace = true hubtools.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/certificates/Cargo.toml b/certificates/Cargo.toml index d20d257e4c..87b12fd167 100644 --- a/certificates/Cargo.toml +++ b/certificates/Cargo.toml @@ -12,7 +12,7 @@ openssl-sys.workspace = true thiserror.workspace = true omicron-common.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] omicron-test-utils.workspace = true diff --git a/bootstrap-agent-client/Cargo.toml b/clients/bootstrap-agent-client/Cargo.toml similarity index 86% rename from bootstrap-agent-client/Cargo.toml rename to clients/bootstrap-agent-client/Cargo.toml index 17989a5c5f..42ae59b7aa 100644 --- a/bootstrap-agent-client/Cargo.toml +++ b/clients/bootstrap-agent-client/Cargo.toml @@ -17,4 +17,4 @@ serde.workspace = true sled-hardware.workspace = true slog.workspace = true uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/bootstrap-agent-client/src/lib.rs b/clients/bootstrap-agent-client/src/lib.rs similarity index 97% rename from bootstrap-agent-client/src/lib.rs rename to clients/bootstrap-agent-client/src/lib.rs index 5a159e299a..3f8b20e1f5 100644 --- a/bootstrap-agent-client/src/lib.rs +++ b/clients/bootstrap-agent-client/src/lib.rs @@ -5,7 +5,7 @@ //! Interface for making API requests to a Bootstrap Agent progenitor::generate_api!( - spec = "../openapi/bootstrap-agent.json", + spec = "../../openapi/bootstrap-agent.json", inner_type = slog::Logger, pre_hook = (|log: &slog::Logger, request: &reqwest::Request| { slog::debug!(log, "client request"; diff --git a/ddm-admin-client/Cargo.toml b/clients/ddm-admin-client/Cargo.toml similarity index 89% rename from ddm-admin-client/Cargo.toml rename to clients/ddm-admin-client/Cargo.toml index 3814446b3e..4d00f329e7 100644 --- a/ddm-admin-client/Cargo.toml +++ b/clients/ddm-admin-client/Cargo.toml @@ -15,7 +15,7 @@ tokio.workspace = true omicron-common.workspace = true sled-hardware.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [build-dependencies] anyhow.workspace = true diff --git a/ddm-admin-client/build.rs b/clients/ddm-admin-client/build.rs similarity index 87% rename from ddm-admin-client/build.rs rename to clients/ddm-admin-client/build.rs index ef4183fee3..e3c1345eda 100644 --- a/ddm-admin-client/build.rs +++ b/clients/ddm-admin-client/build.rs @@ -16,23 +16,23 @@ use std::path::Path; fn main() -> Result<()> { // Find the current maghemite repo commit from our package manifest. 
- let manifest = fs::read_to_string("../package-manifest.toml") - .context("failed to read ../package-manifest.toml")?; - println!("cargo:rerun-if-changed=../package-manifest.toml"); + let manifest = fs::read_to_string("../../package-manifest.toml") + .context("failed to read ../../package-manifest.toml")?; + println!("cargo:rerun-if-changed=../../package-manifest.toml"); let config: Config = toml::from_str(&manifest) - .context("failed to parse ../package-manifest.toml")?; + .context("failed to parse ../../package-manifest.toml")?; let maghemite = config .packages .get("maghemite") - .context("missing maghemite package in ../package-manifest.toml")?; + .context("missing maghemite package in ../../package-manifest.toml")?; let local_path = match &maghemite.source { PackageSource::Prebuilt { commit, .. } => { // Report a relatively verbose error if we haven't downloaded the requisite // openapi spec. let local_path = - format!("../out/downloads/ddm-admin-{commit}.json"); + format!("../../out/downloads/ddm-admin-{commit}.json"); if !Path::new(&local_path).exists() { bail!("{local_path} doesn't exist; rerun `tools/ci_download_maghemite_openapi` (after updating `tools/maghemite_openapi_version` if the maghemite commit in package-manifest.toml has changed)"); } @@ -42,7 +42,7 @@ fn main() -> Result<()> { PackageSource::Manual => { let local_path = - "../out/downloads/ddm-admin-manual.json".to_string(); + "../../out/downloads/ddm-admin-manual.json".to_string(); if !Path::new(&local_path).exists() { bail!("{local_path} doesn't exist, please copy manually built ddm-admin.json there!"); } diff --git a/ddm-admin-client/src/lib.rs b/clients/ddm-admin-client/src/lib.rs similarity index 100% rename from ddm-admin-client/src/lib.rs rename to clients/ddm-admin-client/src/lib.rs diff --git a/dns-service-client/Cargo.toml b/clients/dns-service-client/Cargo.toml similarity index 83% rename from dns-service-client/Cargo.toml rename to clients/dns-service-client/Cargo.toml index e351d90da2..681c06672f 100644 --- a/dns-service-client/Cargo.toml +++ b/clients/dns-service-client/Cargo.toml @@ -14,4 +14,4 @@ serde.workspace = true serde_json.workspace = true slog.workspace = true uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/dns-service-client/src/lib.rs b/clients/dns-service-client/src/lib.rs similarity index 98% rename from dns-service-client/src/lib.rs rename to clients/dns-service-client/src/lib.rs index 9b729b1c5c..931e68322f 100644 --- a/dns-service-client/src/lib.rs +++ b/clients/dns-service-client/src/lib.rs @@ -3,7 +3,7 @@ // file, You can obtain one at https://mozilla.org/MPL/2.0/. 
progenitor::generate_api!( - spec = "../openapi/dns-server.json", + spec = "../../openapi/dns-server.json", inner_type = slog::Logger, derives = [schemars::JsonSchema, Eq, PartialEq], pre_hook = (|log: &slog::Logger, request: &reqwest::Request| { diff --git a/dpd-client/Cargo.toml b/clients/dpd-client/Cargo.toml similarity index 90% rename from dpd-client/Cargo.toml rename to clients/dpd-client/Cargo.toml index 26807f7d79..0239c6d9b0 100644 --- a/dpd-client/Cargo.toml +++ b/clients/dpd-client/Cargo.toml @@ -17,7 +17,7 @@ ipnetwork.workspace = true http.workspace = true schemars.workspace = true rand.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [build-dependencies] anyhow.workspace = true diff --git a/dpd-client/build.rs b/clients/dpd-client/build.rs similarity index 87% rename from dpd-client/build.rs rename to clients/dpd-client/build.rs index 2aaa8437e7..6a65ab9495 100644 --- a/dpd-client/build.rs +++ b/clients/dpd-client/build.rs @@ -22,23 +22,23 @@ use std::path::Path; fn main() -> Result<()> { // Find the current dendrite repo commit from our package manifest. - let manifest = fs::read_to_string("../package-manifest.toml") - .context("failed to read ../package-manifest.toml")?; - println!("cargo:rerun-if-changed=../package-manifest.toml"); + let manifest = fs::read_to_string("../../package-manifest.toml") + .context("failed to read ../../package-manifest.toml")?; + println!("cargo:rerun-if-changed=../../package-manifest.toml"); let config: Config = toml::from_str(&manifest) - .context("failed to parse ../package-manifest.toml")?; + .context("failed to parse ../../package-manifest.toml")?; let dendrite = config .packages .get("dendrite-asic") - .context("missing dendrite package in ../package-manifest.toml")?; + .context("missing dendrite package in ../../package-manifest.toml")?; let local_path = match &dendrite.source { PackageSource::Prebuilt { commit, .. } => { - // Report a relatively verbose error if we haven't downloaded the requisite - // openapi spec. - let local_path = format!("../out/downloads/dpd-{commit}.json"); + // Report a relatively verbose error if we haven't downloaded the + // requisite openapi spec. 
+ let local_path = format!("../../out/downloads/dpd-{commit}.json"); if !Path::new(&local_path).exists() { bail!("{local_path} doesn't exist; rerun `tools/ci_download_dendrite_openapi` (after updating `tools/dendrite_openapi_version` if the dendrite commit in package-manifest.toml has changed)"); } @@ -47,7 +47,7 @@ fn main() -> Result<()> { } PackageSource::Manual => { - let local_path = "../out/downloads/dpd-manual.json".to_string(); + let local_path = "../../out/downloads/dpd-manual.json".to_string(); if !Path::new(&local_path).exists() { bail!("{local_path} doesn't exist, please copy manually built dpd.json there!"); } diff --git a/dpd-client/src/lib.rs b/clients/dpd-client/src/lib.rs similarity index 100% rename from dpd-client/src/lib.rs rename to clients/dpd-client/src/lib.rs diff --git a/gateway-client/Cargo.toml b/clients/gateway-client/Cargo.toml similarity index 84% rename from gateway-client/Cargo.toml rename to clients/gateway-client/Cargo.toml index 96a1eb221f..fc33174107 100644 --- a/gateway-client/Cargo.toml +++ b/clients/gateway-client/Cargo.toml @@ -15,4 +15,4 @@ serde_json.workspace = true schemars.workspace = true slog.workspace = true uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/gateway-client/src/lib.rs b/clients/gateway-client/src/lib.rs similarity index 98% rename from gateway-client/src/lib.rs rename to clients/gateway-client/src/lib.rs index 7992eff9e4..800254b197 100644 --- a/gateway-client/src/lib.rs +++ b/clients/gateway-client/src/lib.rs @@ -34,7 +34,7 @@ // it is no longer useful to directly expose the JsonSchema types, we can go // back to reusing `omicron_common`. progenitor::generate_api!( - spec = "../openapi/gateway.json", + spec = "../../openapi/gateway.json", inner_type = slog::Logger, pre_hook = (|log: &slog::Logger, request: &reqwest::Request| { slog::debug!(log, "client request"; diff --git a/installinator-artifact-client/Cargo.toml b/clients/installinator-artifact-client/Cargo.toml similarity index 85% rename from installinator-artifact-client/Cargo.toml rename to clients/installinator-artifact-client/Cargo.toml index 18447b8e83..c3ddc529d9 100644 --- a/installinator-artifact-client/Cargo.toml +++ b/clients/installinator-artifact-client/Cargo.toml @@ -15,4 +15,4 @@ serde_json.workspace = true slog.workspace = true update-engine.workspace = true uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/installinator-artifact-client/src/lib.rs b/clients/installinator-artifact-client/src/lib.rs similarity index 96% rename from installinator-artifact-client/src/lib.rs rename to clients/installinator-artifact-client/src/lib.rs index aa5ceb863a..de3072a34a 100644 --- a/installinator-artifact-client/src/lib.rs +++ b/clients/installinator-artifact-client/src/lib.rs @@ -5,7 +5,7 @@ //! Interface for making API requests to installinator-artifactd. 
progenitor::generate_api!( - spec = "../openapi/installinator-artifactd.json", + spec = "../../openapi/installinator-artifactd.json", inner_type = slog::Logger, pre_hook = (|log: &slog::Logger, request: &reqwest::Request| { slog::debug!(log, "client request"; diff --git a/nexus-client/Cargo.toml b/clients/nexus-client/Cargo.toml similarity index 86% rename from nexus-client/Cargo.toml rename to clients/nexus-client/Cargo.toml index d59c013992..2734142f9f 100644 --- a/nexus-client/Cargo.toml +++ b/clients/nexus-client/Cargo.toml @@ -18,4 +18,4 @@ serde.workspace = true serde_json.workspace = true slog.workspace = true uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/nexus-client/src/lib.rs b/clients/nexus-client/src/lib.rs similarity index 99% rename from nexus-client/src/lib.rs rename to clients/nexus-client/src/lib.rs index e5cec83f39..412ca70497 100644 --- a/nexus-client/src/lib.rs +++ b/clients/nexus-client/src/lib.rs @@ -8,7 +8,7 @@ use std::collections::HashMap; progenitor::generate_api!( - spec = "../openapi/nexus-internal.json", + spec = "../../openapi/nexus-internal.json", derives = [schemars::JsonSchema, PartialEq], inner_type = slog::Logger, pre_hook = (|log: &slog::Logger, request: &reqwest::Request| { diff --git a/oxide-client/Cargo.toml b/clients/oxide-client/Cargo.toml similarity index 88% rename from oxide-client/Cargo.toml rename to clients/oxide-client/Cargo.toml index df34ab9721..3cb411729d 100644 --- a/oxide-client/Cargo.toml +++ b/clients/oxide-client/Cargo.toml @@ -21,4 +21,4 @@ thiserror.workspace = true tokio = { workspace = true, features = [ "net" ] } trust-dns-resolver.workspace = true uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/oxide-client/src/lib.rs b/clients/oxide-client/src/lib.rs similarity index 99% rename from oxide-client/src/lib.rs rename to clients/oxide-client/src/lib.rs index 7d34697002..07a190c38e 100644 --- a/oxide-client/src/lib.rs +++ b/clients/oxide-client/src/lib.rs @@ -16,7 +16,7 @@ use trust_dns_resolver::config::{ use trust_dns_resolver::TokioAsyncResolver; progenitor::generate_api!( - spec = "../openapi/nexus.json", + spec = "../../openapi/nexus.json", interface = Builder, tags = Separate, ); diff --git a/oximeter-client/Cargo.toml b/clients/oximeter-client/Cargo.toml similarity index 82% rename from oximeter-client/Cargo.toml rename to clients/oximeter-client/Cargo.toml index 297dfb6c92..e54b152415 100644 --- a/oximeter-client/Cargo.toml +++ b/clients/oximeter-client/Cargo.toml @@ -6,10 +6,11 @@ license = "MPL-2.0" [dependencies] chrono.workspace = true +futures.workspace = true omicron-common.workspace = true progenitor.workspace = true reqwest = { workspace = true, features = ["json", "rustls-tls", "stream"] } serde.workspace = true slog.workspace = true uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/oximeter-client/src/lib.rs b/clients/oximeter-client/src/lib.rs similarity index 93% rename from oximeter-client/src/lib.rs rename to clients/oximeter-client/src/lib.rs index 9f326fdee8..7bd17d7e76 100644 --- a/oximeter-client/src/lib.rs +++ b/clients/oximeter-client/src/lib.rs @@ -6,7 +6,7 @@ //! 
Interface for API requests to an Oximeter metric collection server -omicron_common::generate_logging_api!("../openapi/oximeter.json"); +omicron_common::generate_logging_api!("../../openapi/oximeter.json"); impl omicron_common::api::external::ClientError for types::Error { fn message(&self) -> String { diff --git a/sled-agent-client/Cargo.toml b/clients/sled-agent-client/Cargo.toml similarity index 85% rename from sled-agent-client/Cargo.toml rename to clients/sled-agent-client/Cargo.toml index 01c1032a51..b2ed07caba 100644 --- a/sled-agent-client/Cargo.toml +++ b/clients/sled-agent-client/Cargo.toml @@ -15,4 +15,4 @@ reqwest = { workspace = true, features = [ "json", "rustls-tls", "stream" ] } serde.workspace = true slog.workspace = true uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/sled-agent-client/src/lib.rs b/clients/sled-agent-client/src/lib.rs similarity index 99% rename from sled-agent-client/src/lib.rs rename to clients/sled-agent-client/src/lib.rs index 98e7f207e3..68e60e8d95 100644 --- a/sled-agent-client/src/lib.rs +++ b/clients/sled-agent-client/src/lib.rs @@ -9,7 +9,7 @@ use omicron_common::generate_logging_api; use std::convert::TryFrom; use uuid::Uuid; -generate_logging_api!("../openapi/sled-agent.json"); +generate_logging_api!("../../openapi/sled-agent.json"); impl omicron_common::api::external::ClientError for types::Error { fn message(&self) -> String { diff --git a/wicketd-client/Cargo.toml b/clients/wicketd-client/Cargo.toml similarity index 87% rename from wicketd-client/Cargo.toml rename to clients/wicketd-client/Cargo.toml index 2d959f1f8d..814309b975 100644 --- a/wicketd-client/Cargo.toml +++ b/clients/wicketd-client/Cargo.toml @@ -18,4 +18,4 @@ slog.workspace = true update-engine.workspace = true uuid.workspace = true wicket-common.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/wicketd-client/src/lib.rs b/clients/wicketd-client/src/lib.rs similarity index 99% rename from wicketd-client/src/lib.rs rename to clients/wicketd-client/src/lib.rs index 3f113ea271..ff45232520 100644 --- a/wicketd-client/src/lib.rs +++ b/clients/wicketd-client/src/lib.rs @@ -5,7 +5,7 @@ //! 
Interface for making API requests to wicketd progenitor::generate_api!( - spec = "../openapi/wicketd.json", + spec = "../../openapi/wicketd.json", inner_type = slog::Logger, pre_hook = (|log: &slog::Logger, request: &reqwest::Request| { slog::debug!(log, "client request"; diff --git a/common/Cargo.toml b/common/Cargo.toml index bda88d0d43..75c1efab55 100644 --- a/common/Cargo.toml +++ b/common/Cargo.toml @@ -40,7 +40,7 @@ toml.workspace = true uuid.workspace = true parse-display.workspace = true progenitor.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] camino-tempfile.workspace = true diff --git a/common/src/api/external/mod.rs b/common/src/api/external/mod.rs index 1d7e6884d1..91ed7e4240 100644 --- a/common/src/api/external/mod.rs +++ b/common/src/api/external/mod.rs @@ -952,7 +952,9 @@ pub struct Disk { #[serde(flatten)] pub identity: IdentityMetadata, pub project_id: Uuid, + /// ID of snapshot from which disk was created, if any pub snapshot_id: Option<Uuid>, + /// ID of image from which disk was created, if any pub image_id: Option<Uuid>, pub size: ByteCount, pub block_size: ByteCount, diff --git a/common/src/api/internal/nexus.rs b/common/src/api/internal/nexus.rs index 018869ce14..983976bbb7 100644 --- a/common/src/api/internal/nexus.rs +++ b/common/src/api/internal/nexus.rs @@ -67,7 +67,7 @@ pub struct InstanceRuntimeState { /// Information announced by a metric server, used so that clients can contact it and collect /// available metric data from it. -#[derive(Debug, Clone, JsonSchema, Serialize, Deserialize)] +#[derive(Clone, Debug, Deserialize, JsonSchema, Serialize, PartialEq)] pub struct ProducerEndpoint { pub id: Uuid, pub address: SocketAddr, diff --git a/common/src/nexus_config.rs b/common/src/nexus_config.rs index 73ccec996c..ad62c34f92 100644 --- a/common/src/nexus_config.rs +++ b/common/src/nexus_config.rs @@ -372,6 +372,8 @@ pub struct PackageConfig { pub dendrite: HashMap<SwitchLocation, DpdConfig>, /// Background task configuration pub background_tasks: BackgroundTaskConfig, + /// Default Crucible region allocation strategy + pub default_region_allocation_strategy: RegionAllocationStrategy, } #[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] @@ -594,6 +596,9 @@ mod test { dns_external.period_secs_propagation = 7 dns_external.max_concurrent_server_updates = 8 external_endpoints.period_secs = 9 + [default_region_allocation_strategy] + type = "random" + seed = 0 "##, ) .unwrap(); @@ -677,6 +682,10 @@ mod test { period_secs: Duration::from_secs(9), } }, + default_region_allocation_strategy: + crate::nexus_config::RegionAllocationStrategy::Random { + seed: Some(0) + } }, } ); @@ -724,6 +733,8 @@ mod test { dns_external.period_secs_propagation = 7 dns_external.max_concurrent_server_updates = 8 external_endpoints.period_secs = 9 + [default_region_allocation_strategy] + type = "random" "##, ) .unwrap(); @@ -864,25 +875,31 @@ mod test { struct DummyConfig { deployment: DeploymentConfig, } - let config_path = "../smf/nexus/config-partial.toml"; - println!( - "checking {:?} with example deployment section added", - config_path - ); - let mut contents = std::fs::read_to_string(config_path) - .expect("failed to read Nexus SMF config file"); - contents.push_str( - "\n\n\n \ - # !!
content below added by test_repo_configs_are_valid()\n\ - \n\n\n", - ); let example_deployment = toml::to_string_pretty(&DummyConfig { deployment: example_config.deployment, }) .unwrap(); - contents.push_str(&example_deployment); - let _: Config = toml::from_str(&contents) - .expect("Nexus SMF config file is not valid"); + + let nexus_config_paths = [ + "../smf/nexus/single-sled/config-partial.toml", + "../smf/nexus/multi-sled/config-partial.toml", + ]; + for config_path in nexus_config_paths { + println!( + "checking {:?} with example deployment section added", + config_path + ); + let mut contents = std::fs::read_to_string(config_path) + .expect("failed to read Nexus SMF config file"); + contents.push_str( + "\n\n\n \ + # !! content below added by test_repo_configs_are_valid()\n\ + \n\n\n", + ); + contents.push_str(&example_deployment); + let _: Config = toml::from_str(&contents) + .expect("Nexus SMF config file is not valid"); + } } #[test] @@ -894,3 +911,30 @@ mod test { ); } } + +/// Defines a strategy for choosing what physical disks to use when allocating +/// new crucible regions. +/// +/// NOTE: More strategies can - and should! - be added. +/// +/// See for a more +/// complete discussion. +/// +/// Longer-term, we should consider: +/// - Storage size + remaining free space +/// - Sled placement of datasets +/// - What sort of loads we'd like to create (even split across all disks +/// may not be preferable, especially if maintenance is expected) +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +#[serde(tag = "type", rename_all = "snake_case")] +pub enum RegionAllocationStrategy { + /// Choose disks pseudo-randomly. An optional seed may be provided to make + /// the ordering deterministic; otherwise the current time in nanoseconds + /// will be used. Ordering is based on sorting the output of `md5(UUID of + /// candidate dataset + seed)`. The seed does not need to come from a + /// cryptographically secure source. + Random { seed: Option<u128> }, + + /// Like Random, but ensures that each region is allocated on its own sled. + RandomWithDistinctSleds { seed: Option<u128> }, +} diff --git a/crdb-seed/Cargo.toml b/crdb-seed/Cargo.toml index fa71fe7e8a..8d6d570d08 100644 --- a/crdb-seed/Cargo.toml +++ b/crdb-seed/Cargo.toml @@ -13,4 +13,4 @@ omicron-test-utils.workspace = true ring.workspace = true slog.workspace = true tokio.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/dev-tools/omdb/Cargo.toml b/dev-tools/omdb/Cargo.toml index 5b2adde1b2..cd4af6e947 100644 --- a/dev-tools/omdb/Cargo.toml +++ b/dev-tools/omdb/Cargo.toml @@ -16,11 +16,13 @@ diesel.workspace = true dropshot.workspace = true humantime.workspace = true internal-dns.workspace = true +futures.workspace = true nexus-client.workspace = true nexus-db-model.workspace = true nexus-db-queries.workspace = true nexus-types.workspace = true omicron-common.workspace = true +oximeter-client.workspace = true # See omicron-rpaths for more about the "pq-sys" dependency.
pq-sys = "*" serde.workspace = true @@ -32,7 +34,8 @@ tabled.workspace = true textwrap.workspace = true tokio = { workspace = true, features = [ "full" ] } uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } +ipnetwork.workspace = true +omicron-workspace-hack.workspace = true [dev-dependencies] expectorate.workspace = true diff --git a/dev-tools/omdb/src/bin/omdb/db.rs b/dev-tools/omdb/src/bin/omdb/db.rs index 93e5ef4301..10e5546b6d 100644 --- a/dev-tools/omdb/src/bin/omdb/db.rs +++ b/dev-tools/omdb/src/bin/omdb/db.rs @@ -12,6 +12,9 @@ //! would be the only consumer -- and in that case it's okay to query the //! database directly. +// NOTE: emanates from Tabled macros +#![allow(clippy::useless_vec)] + use crate::Omdb; use anyhow::anyhow; use anyhow::bail; @@ -30,7 +33,9 @@ use nexus_db_model::DnsGroup; use nexus_db_model::DnsName; use nexus_db_model::DnsVersion; use nexus_db_model::DnsZone; +use nexus_db_model::ExternalIp; use nexus_db_model::Instance; +use nexus_db_model::Project; use nexus_db_model::Region; use nexus_db_model::Sled; use nexus_db_model::Zpool; @@ -86,6 +91,8 @@ enum DbCommands { Sleds, /// Print information about customer instances Instances, + /// Print information about the network + Network(NetworkArgs), } #[derive(Debug, Args)] @@ -170,6 +177,22 @@ enum ServicesCommands { ListBySled, } +#[derive(Debug, Args)] +struct NetworkArgs { + #[command(subcommand)] + command: NetworkCommands, + + /// Print out raw data structures from the data store. + #[clap(long)] + verbose: bool, +} + +#[derive(Debug, Subcommand)] +enum NetworkCommands { + /// List external IPs + ListEips, +} + impl DbArgs { /// Run an `omdb db` subcommand. pub(crate) async fn run_cmd( @@ -269,6 +292,13 @@ impl DbArgs { DbCommands::Instances => { cmd_db_instances(&datastore, self.fetch_limit).await } + DbCommands::Network(NetworkArgs { + command: NetworkCommands::ListEips, + verbose, + }) => { + cmd_db_eips(&opctx, &datastore, self.fetch_limit, *verbose) + .await + } } } } @@ -1098,6 +1128,156 @@ async fn cmd_db_dns_names( Ok(()) } +async fn cmd_db_eips( + opctx: &OpContext, + datastore: &DataStore, + limit: NonZeroU32, + verbose: bool, +) -> Result<(), anyhow::Error> { + use db::schema::external_ip::dsl; + let ips: Vec<ExternalIp> = dsl::external_ip + .filter(dsl::time_deleted.is_null()) + .select(ExternalIp::as_select()) + .get_results_async(&*datastore.pool_connection_for_tests().await?)
+ .await?; + + check_limit(&ips, limit, || String::from("listing external ips")); + + struct PortRange { + first: u16, + last: u16, + } + + impl Display for PortRange { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}/{}", self.first, self.last) + } + } + + #[derive(Tabled)] + enum Owner { + Instance { project: String, name: String }, + Service { kind: String }, + None, + } + + impl Display for Owner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Instance { project, name } => { + write!(f, "Instance {project}/{name}") + } + Self::Service { kind } => write!(f, "Service {kind}"), + Self::None => write!(f, "None"), + } + } + } + + #[derive(Tabled)] + struct IpRow { + ip: ipnetwork::IpNetwork, + ports: PortRange, + kind: String, + owner: Owner, + } + + if verbose { + for ip in &ips { + println!("{ip:#?}"); + } + return Ok(()); + } + + let mut rows = Vec::new(); + + for ip in &ips { + let owner = if let Some(owner_id) = ip.parent_id { + if ip.is_service { + let service = match LookupPath::new(opctx, datastore) + .service_id(owner_id) + .fetch() + .await + { + Ok(service) => service, + Err(e) => { + eprintln!( + "error looking up service with id {owner_id}: {e}" + ); + continue; + } + }; + Owner::Service { kind: format!("{:?}", service.1.kind) } + } else { + use db::schema::instance::dsl as instance_dsl; + let instance = match instance_dsl::instance + .filter(instance_dsl::id.eq(owner_id)) + .limit(1) + .select(Instance::as_select()) + .load_async(&*datastore.pool_connection_for_tests().await?) + .await + .context("loading requested instance")? + .pop() + { + Some(instance) => instance, + None => { + eprintln!("instance with id {owner_id} not found"); + continue; + } + }; + + use db::schema::project::dsl as project_dsl; + let project = match project_dsl::project + .filter(project_dsl::id.eq(instance.project_id)) + .limit(1) + .select(Project::as_select()) + .load_async(&*datastore.pool_connection_for_tests().await?) + .await + .context("loading requested project")?
+ .pop() + { + Some(project) => project, + None => { + eprintln!( + "project with id {} not found", + instance.project_id + ); + continue; + } + }; + + Owner::Instance { + project: project.name().to_string(), + name: instance.name().to_string(), + } + } + } else { + Owner::None + }; + + let row = IpRow { + ip: ip.ip, + ports: PortRange { + first: ip.first_port.into(), + last: ip.last_port.into(), + }, + kind: format!("{:?}", ip.kind), + owner, + }; + rows.push(row); + } + + rows.sort_by(|a, b| a.ip.cmp(&b.ip)); + let table = tabled::Table::new(rows) + .with(tabled::settings::Style::empty()) + .to_string(); + + println!("{}", table); + + Ok(()) +} + fn print_name( prefix: &str, name: &str, diff --git a/dev-tools/omdb/src/bin/omdb/main.rs b/dev-tools/omdb/src/bin/omdb/main.rs index 166ed3043f..d1a56e1d80 100644 --- a/dev-tools/omdb/src/bin/omdb/main.rs +++ b/dev-tools/omdb/src/bin/omdb/main.rs @@ -42,6 +42,7 @@ use std::net::SocketAddrV6; mod db; mod nexus; +mod oximeter; mod sled_agent; #[tokio::main] @@ -57,6 +58,7 @@ async fn main() -> Result<(), anyhow::Error> { match &args.command { OmdbCommands::Db(db) => db.run_cmd(&args, &log).await, OmdbCommands::Nexus(nexus) => nexus.run_cmd(&args, &log).await, + OmdbCommands::Oximeter(oximeter) => oximeter.run_cmd(&log).await, OmdbCommands::SledAgent(sled) => sled.run_cmd(&args, &log).await, } } @@ -155,6 +157,8 @@ enum OmdbCommands { Db(db::DbArgs), /// Debug a specific Nexus instance Nexus(nexus::NexusArgs), + /// Query oximeter collector state + Oximeter(oximeter::OximeterArgs), /// Debug a specific Sled SledAgent(sled_agent::SledAgentArgs), } diff --git a/dev-tools/omdb/src/bin/omdb/oximeter.rs b/dev-tools/omdb/src/bin/omdb/oximeter.rs new file mode 100644 index 0000000000..e0f20556a2 --- /dev/null +++ b/dev-tools/omdb/src/bin/omdb/oximeter.rs @@ -0,0 +1,94 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//!
omdb commands that query oximeter + +use anyhow::Context; +use clap::Args; +use clap::Subcommand; +use futures::TryStreamExt; +use oximeter_client::types::ProducerEndpoint; +use oximeter_client::Client; +use slog::Logger; +use std::net::SocketAddr; +use std::time::Duration; +use tabled::Table; +use tabled::Tabled; +use uuid::Uuid; + +#[derive(Debug, Args)] +pub struct OximeterArgs { + /// URL of the oximeter collector to query + #[arg(long, env("OMDB_OXIMETER_URL"))] + oximeter_url: String, + + #[command(subcommand)] + command: OximeterCommands, +} + +/// Subcommands that query oximeter collector state +#[derive(Debug, Subcommand)] +enum OximeterCommands { + /// List the producers the collector is assigned to poll + ListProducers, +} + +impl OximeterArgs { + fn client(&self, log: &Logger) -> Client { + Client::new( + &self.oximeter_url, + log.new(slog::o!("component" => "oximeter-client")), + ) + } + + pub async fn run_cmd(&self, log: &Logger) -> anyhow::Result<()> { + let client = self.client(log); + match self.command { + OximeterCommands::ListProducers => { + self.list_producers(client).await + } + } + } + + async fn list_producers(&self, client: Client) -> anyhow::Result<()> { + let info = client + .collector_info() + .await + .context("failed to fetch collector info")?; + let producers: Vec<Producer> = client + .producers_list_stream(None) + .map_ok(Producer::from) + .try_collect() + .await + .context("failed to list producers")?; + let table = Table::new(producers) + .with(tabled::settings::Style::empty()) + .with(tabled::settings::Padding::new(0, 1, 0, 0)) + .to_string(); + println!("Collector ID: {}\n", info.id); + println!("{table}"); + Ok(()) + } +} + +#[derive(Tabled)] +#[tabled(rename_all = "SCREAMING_SNAKE_CASE")] +struct Producer { + id: Uuid, + address: SocketAddr, + base_route: String, + interval: String, +} + +impl From<ProducerEndpoint> for Producer { + fn from(p: ProducerEndpoint) -> Self { + let interval = Duration::new(p.interval.secs, p.interval.nanos); + Self { + id: p.id, + address: p.address.parse().unwrap(), + base_route: p.base_route, + interval: humantime::format_duration(interval).to_string(), + } + } +} diff --git a/dev-tools/omdb/tests/test_all_output.rs b/dev-tools/omdb/tests/test_all_output.rs index 0eddcb492c..d757369ead 100644 --- a/dev-tools/omdb/tests/test_all_output.rs +++ b/dev-tools/omdb/tests/test_all_output.rs @@ -41,6 +41,7 @@ async fn test_omdb_usage_errors() { &["db", "dns", "diff"], &["db", "dns", "names"], &["db", "services"], + &["db", "network"], &["nexus"], &["nexus", "background-tasks"], &["sled-agent"], diff --git a/dev-tools/omdb/tests/usage_errors.out b/dev-tools/omdb/tests/usage_errors.out index 136a631e80..dc2a16bc47 100644 --- a/dev-tools/omdb/tests/usage_errors.out +++ b/dev-tools/omdb/tests/usage_errors.out @@ -11,6 +11,7 @@ Usage: omdb [OPTIONS] Commands: db Query the control plane database (CockroachDB) nexus Debug a specific Nexus instance + oximeter Query oximeter collector state sled-agent Debug a specific Sled help Print this message or the help of the given subcommand(s) @@ -33,6 +34,7 @@ Usage: omdb [OPTIONS] Commands: db Query the control plane database (CockroachDB) nexus Debug a specific Nexus instance + oximeter Query oximeter collector state sled-agent Debug a specific Sled help Print this message or the help of the given subcommand(s) @@ -91,6 +93,7 @@ Commands: services Print information about control plane services sleds Print information about sleds instances Print information about customer instances + network Print information about the
network help Print this message or the help of the given subcommand(s) Options: @@ -112,6 +115,7 @@ Commands: services Print information about control plane services sleds Print information about sleds instances Print information about customer instances + network Print information about the network help Print this message or the help of the given subcommand(s) Options: @@ -186,6 +190,24 @@ Commands: Options: -h, --help Print help ============================================= +EXECUTING COMMAND: omdb ["db", "network"] +termination: Exited(2) +--------------------------------------------- +stdout: +--------------------------------------------- +stderr: +Print information about the network + +Usage: omdb db network [OPTIONS] + +Commands: + list-eips List external IPs + help Print this message or the help of the given subcommand(s) + +Options: + --verbose Print out raw data structures from the data store + -h, --help Print help +============================================= EXECUTING COMMAND: omdb ["nexus"] termination: Exited(2) --------------------------------------------- diff --git a/dev-tools/omicron-dev/Cargo.toml b/dev-tools/omicron-dev/Cargo.toml index 95da4d42ef..5439b69c76 100644 --- a/dev-tools/omicron-dev/Cargo.toml +++ b/dev-tools/omicron-dev/Cargo.toml @@ -28,7 +28,7 @@ signal-hook-tokio.workspace = true tokio = { workspace = true, features = [ "full" ] } tokio-postgres.workspace = true toml.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] camino-tempfile.workspace = true diff --git a/deploy/.gitignore b/dev-tools/thing-flinger/.gitignore similarity index 100% rename from deploy/.gitignore rename to dev-tools/thing-flinger/.gitignore diff --git a/deploy/Cargo.toml b/dev-tools/thing-flinger/Cargo.toml similarity index 85% rename from deploy/Cargo.toml rename to dev-tools/thing-flinger/Cargo.toml index 17bacd6354..1a6c05a546 100644 --- a/deploy/Cargo.toml +++ b/dev-tools/thing-flinger/Cargo.toml @@ -14,7 +14,7 @@ serde.workspace = true serde_derive.workspace = true thiserror.workspace = true toml.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [[bin]] name = "thing-flinger" diff --git a/deploy/README.adoc b/dev-tools/thing-flinger/README.adoc similarity index 100% rename from deploy/README.adoc rename to dev-tools/thing-flinger/README.adoc diff --git a/deploy/src/bin/deployment-example.toml b/dev-tools/thing-flinger/src/bin/deployment-example.toml similarity index 100% rename from deploy/src/bin/deployment-example.toml rename to dev-tools/thing-flinger/src/bin/deployment-example.toml diff --git a/deploy/src/bin/thing-flinger.rs b/dev-tools/thing-flinger/src/bin/thing-flinger.rs similarity index 100% rename from deploy/src/bin/thing-flinger.rs rename to dev-tools/thing-flinger/src/bin/thing-flinger.rs diff --git a/dev-tools/xtask/src/main.rs b/dev-tools/xtask/src/main.rs index 3e52d742f5..93d91799bc 100644 --- a/dev-tools/xtask/src/main.rs +++ b/dev-tools/xtask/src/main.rs @@ -133,12 +133,6 @@ fn cmd_check_workspace_deps() -> Result<()> { } } - if name == WORKSPACE_HACK_PACKAGE_NAME { - // Skip over workspace-hack because hakari doesn't yet support - // workspace deps: https://github.com/guppy-rs/guppy/issues/7 - continue; - } - non_workspace_dependencies .entry(name.to_owned()) .or_insert_with(Vec::new) diff --git a/dns-server/Cargo.toml b/dns-server/Cargo.toml index d7606dcff5..f91cbfafdb 
100644 --- a/dns-server/Cargo.toml +++ b/dns-server/Cargo.toml @@ -30,7 +30,7 @@ trust-dns-proto.workspace = true trust-dns-resolver.workspace = true trust-dns-server.workspace = true uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] expectorate.workspace = true diff --git a/docs/how-to-run.adoc b/docs/how-to-run.adoc index 1988a42669..04d274da8b 100644 --- a/docs/how-to-run.adoc +++ b/docs/how-to-run.adoc @@ -143,7 +143,10 @@ $ svcadm enable ipfilter Other network configurations are possible but beyond the scope of this doc. -When making this choice, note that **in order to use the system once it's set up, you will need to be able to access it from a web browser.** If you go with option 2 here, you may need to use an ssh tunnel or the like to do this. +When making this choice, note that **in order to use the system once it's set +up, you will need to be able to access it from a web browser.** If you go with +option 2 here, you may need to use an SSH tunnel (see: +<>) or the like to do this. === Picking a "machine" type @@ -321,10 +324,32 @@ Error: Creates a new build target, and sets it as "active" Usage: omicron-package target create [OPTIONS] Options: - -i, --image [default: standard] [possible values: standard, trampoline] - -m, --machine [possible values: gimlet, gimlet-standalone, non-gimlet] - -s, --switch [possible values: asic, stub, softnpu] - -h, --help Print help (see more with '--help') + -i, --image + [default: standard] + + Possible values: + - standard: A typical host OS image + - trampoline: A recovery host OS image, intended to bootstrap a Standard image + + -m, --machine + Possible values: + - gimlet: Use sled agent configuration for a Gimlet + - gimlet-standalone: Use sled agent configuration for a Gimlet running in isolation + - non-gimlet: Use sled agent configuration for a device emulating a Gimlet + + -s, --switch + Possible values: + - asic: Use the "real" Dendrite, that attempts to interact with the Tofino + - stub: Use a "stub" Dendrite that does not require any real hardware + - softnpu: Use a "softnpu" Dendrite that uses the SoftNPU asic emulator + + -r, --rack-topology + Possible values: + - multi-sled: Use configurations suitable for a multi-sled deployment, such as dogfood and production racks + - single-sled: Use configurations suitable for a single-sled deployment, such as CI and dev machines + + -h, --help + Print help (see a summary with '-h') ---- @@ -332,9 +357,9 @@ To set up a build target for a non-Gimlet machine with simulated (but fully func [source,console] ---- -$ cargo run --release --bin omicron-package -- -t default target create -i standard -m non-gimlet -s softnpu +$ cargo run --release --bin omicron-package -- -t default target create -i standard -m non-gimlet -s softnpu -r single-sled Finished release [optimized] target(s) in 0.66s - Running `target/release/omicron-package -t default target create -i standard -m non-gimlet -s softnpu` + Running `target/release/omicron-package -t default target create -i standard -m non-gimlet -s softnpu -r single-sled` Created new build target 'default' and set it as active ---- @@ -411,7 +436,32 @@ Where did 192.168.1.20 come from? That's the external address of the external DNS server. We knew that because it's listed in the `external_dns_ips` entry of the `config-rss.toml` file we're using. 
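+If you want to sanity-check that lookup yourself, you can query the external
+DNS server directly. A sketch with `dig`, reusing the example addresses above
+(the DNS name shown is an illustration; use whatever name your deployment
+actually serves):
+
+[source,console]
+----
+$ dig +short @192.168.1.20 recovery.sys.oxide-dev.test
+192.168.1.21
+----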
-Having looked this up, the easiest thing will be to use `http://192.168.1.21` for your URL (replacing with `https` if you used a certificate, and replacing that IP if needed). If you've set up networking right, you should be able to reach this from your web browser. You may have to instruct the browser to accept a self-signed TLS certificate. See also <<_connecting_securely_with_tls_using_the_cli>>. +Having looked this up, the easiest thing will be to use `http://192.168.1.21` for your URL (replacing with `https` if you used a certificate, and replacing that IP if needed). If you've set up networking right, you should be able to reach this from your web browser. You may have to instruct the browser to accept a self-signed TLS certificate. See also <>. + +=== Setting up an SSH tunnel for console access + +If you set up a fake external network (method 2 in <>), one +way to be able to access the console of your deployment is by setting up an SSH +tunnel. Console access is required to use the CLI for device authentication. +The following is an example of how to access the console with an SSH tunnel. + +Nexus serves the console, so first get a nexus IP from the instructions above. + +In this example, Omicron is running on the lab machine `dunkin`. Usually, you'll +want to set up the tunnel from the machine where you run a browser, to the +machine running Omicron. In this example, one would run this on the machine +running the browser: + +``` +$ ssh -L 1234:192.168.1.22:80 dunkin.eng.oxide.computer +``` + +The above command configures `ssh` to bind to the TCP port `1234` on the machine +running the browser, forward packets through the ssh connection, and redirect +them to 192.168.1.22 port 80 *as seen from the other side of the connection*. + +Now you should be able to access the console from the browser on this machine, +via something like: `127.0.0.1:1234`, using the port from the `ssh` command. === Using the CLI @@ -675,3 +725,37 @@ To build a recovery host image: ---- $ ./tools/build-host-image.sh -R $HELIOS_PATH /work/trampoline-global-zone-packages.tar.gz ---- + + +== Running `oximeter` in standalone mode + +`oximeter` is the program used to collect metrics from producers in the control +plane. Normally, the producers register themselves with `nexus`, which creates a +durable assignment between the producer and an `oximeter` collector in the +database. That allows components to survive restarts, while still producing +metrics. + +To ease development, `oximeter` can be run in "standalone" mode. In this case, a +mock `nexus` server is started, with only the minimal subset of the internal API +needed to register producers and collectors. Neither CockroachDB nor ClickHouse +is required, although ClickHouse _can_ be used, if one wants to see how data is +inserted into the database. + +To run `oximeter` in standalone, use: + +[source,console] +---- +$ cargo run --bin oximeter -- standalone +---- + +The producer should still register with `nexus` as normal, which is usually done +with an explicit IP address and port. This defaults to `[::1]:12221`. 
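+
+Once standalone `oximeter` is running, the new `omdb oximeter` subcommand can
+be pointed at the collector to list what it has been assigned to poll. A
+sketch (the collector URL below is a placeholder; use the address `oximeter`
+prints in its startup logs, or set `OMDB_OXIMETER_URL` instead of the flag):
+
+[source,console]
+----
+$ cargo run --bin omdb -- oximeter --oximeter-url http://[::1]:12223 list-producers
+----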
+ +When run this way, `oximeter` will print the samples it collects from the +producers to its logs, like so: + +[source,console] +---- +Sep 26 17:48:56.006 INFO sample: Sample { measurement: Measurement { timestamp: 2023-09-26T17:48:56.004565890Z, datum: CumulativeF64(Cumulative { start_time: 2023-09-26T17:48:45.997404777Z, value: 10.007154703 }) }, timeseries_name: "virtual_machine:cpu_busy", target: FieldSet { name: "virtual_machine", fields: {"instance_id": Field { name: "instance_id", value: Uuid(564ef6df-d5f6-4204-88f7-5c615859cfa7) }, "project_id": Field { name: "project_id", value: Uuid(2dc7e1c9-f8ac-49d7-8292-46e9e2b1a61d) }} }, metric: FieldSet { name: "cpu_busy", fields: {"cpu_id": Field { name: "cpu_id", value: I64(0) }} } }, component: results-sink, collector_id: 78c7c9a5-1569-460a-8899-aada9ad5db6c, component: oximeter-standalone, component: nexus-standalone, file: oximeter/collector/src/lib.rs:280 +Sep 26 17:48:56.006 INFO sample: Sample { measurement: Measurement { timestamp: 2023-09-26T17:48:56.004700841Z, datum: CumulativeF64(Cumulative { start_time: 2023-09-26T17:48:45.997405187Z, value: 10.007154703 }) }, timeseries_name: "virtual_machine:cpu_busy", target: FieldSet { name: "virtual_machine", fields: {"instance_id": Field { name: "instance_id", value: Uuid(564ef6df-d5f6-4204-88f7-5c615859cfa7) }, "project_id": Field { name: "project_id", value: Uuid(2dc7e1c9-f8ac-49d7-8292-46e9e2b1a61d) }} }, metric: FieldSet { name: "cpu_busy", fields: {"cpu_id": Field { name: "cpu_id", value: I64(1) }} } }, component: results-sink, collector_id: 78c7c9a5-1569-460a-8899-aada9ad5db6c, component: oximeter-standalone, component: nexus-standalone, file: oximeter/collector/src/lib.rs:280 +---- diff --git a/end-to-end-tests/Cargo.toml b/end-to-end-tests/Cargo.toml index 5ff0f9b377..732a4a2091 100644 --- a/end-to-end-tests/Cargo.toml +++ b/end-to-end-tests/Cargo.toml @@ -24,4 +24,4 @@ tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } toml.workspace = true trust-dns-resolver.workspace = true uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/gateway-cli/Cargo.toml b/gateway-cli/Cargo.toml index 0d179750ea..ba66fa4c4f 100644 --- a/gateway-cli/Cargo.toml +++ b/gateway-cli/Cargo.toml @@ -24,4 +24,4 @@ uuid.workspace = true gateway-client.workspace = true gateway-messages.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/gateway-test-utils/Cargo.toml b/gateway-test-utils/Cargo.toml index 9d80e63f05..81b7686eb2 100644 --- a/gateway-test-utils/Cargo.toml +++ b/gateway-test-utils/Cargo.toml @@ -14,4 +14,4 @@ slog.workspace = true sp-sim.workspace = true tokio.workspace = true uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/gateway/Cargo.toml b/gateway/Cargo.toml index f5abce88e9..07934a6ad3 100644 --- a/gateway/Cargo.toml +++ b/gateway/Cargo.toml @@ -34,7 +34,7 @@ tokio-tungstenite.workspace = true tokio-util.workspace = true toml.workspace = true uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] expectorate.workspace = true diff --git a/illumos-utils/Cargo.toml b/illumos-utils/Cargo.toml index e292097bc5..e521b54d02 100644 --- a/illumos-utils/Cargo.toml +++ b/illumos-utils/Cargo.toml 
@@ -29,7 +29,7 @@ zone.workspace = true # only enabled via the `testing` feature mockall = { workspace = true, optional = true } -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [target.'cfg(target_os = "illumos")'.dependencies] opte-ioctl.workspace = true diff --git a/illumos-utils/src/running_zone.rs b/illumos-utils/src/running_zone.rs index 4d3481b6c3..734f22bd30 100644 --- a/illumos-utils/src/running_zone.rs +++ b/illumos-utils/src/running_zone.rs @@ -987,7 +987,7 @@ impl RunningZone { let output = self.run_cmd(&["svcs", "-H", "-o", "fmri"])?; Ok(output .lines() - .filter(|line| is_oxide_smf_log_file(line)) + .filter(|line| is_oxide_smf_service(line)) .map(|line| line.trim().to_string()) .collect()) } @@ -1267,10 +1267,51 @@ impl InstalledZone { } } -/// Return true if the named file appears to be a log file for an Oxide SMF -/// service. -pub fn is_oxide_smf_log_file(name: impl AsRef<str>) -> bool { - const SMF_SERVICE_PREFIXES: [&str; 2] = ["/oxide", "/system/illumos"]; - let name = name.as_ref(); - SMF_SERVICE_PREFIXES.iter().any(|needle| name.contains(needle)) +/// Return true if the service with the given FMRI appears to be an +/// Oxide-managed service. +pub fn is_oxide_smf_service(fmri: impl AsRef<str>) -> bool { + const SMF_SERVICE_PREFIXES: [&str; 2] = + ["svc:/oxide/", "svc:/system/illumos/"]; + let fmri = fmri.as_ref(); + SMF_SERVICE_PREFIXES.iter().any(|prefix| fmri.starts_with(prefix)) +} + +/// Return true if the provided file name appears to be a valid log file for an +/// Oxide-managed SMF service. +/// +/// Note that this operates on the _file name_. Any leading path components will +/// cause this check to return `false`. +pub fn is_oxide_smf_log_file(filename: impl AsRef<str>) -> bool { + // Log files are named by the SMF services, with the `/` in the FMRI + // translated to a `-`.
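+    // For example, the service instance `svc:/oxide/blah:default` logs to
+    // `oxide-blah:default.log` (plus rotated siblings like `.log.0`), which
+    // is exactly the shape exercised by the tests below.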
+ const PREFIXES: [&str; 2] = ["oxide-", "system-illumos-"]; + let filename = filename.as_ref(); + PREFIXES + .iter() + .any(|prefix| filename.starts_with(prefix) && filename.contains(".log")) +} + +#[cfg(test)] +mod tests { + use super::is_oxide_smf_log_file; + use super::is_oxide_smf_service; + + #[test] + fn test_is_oxide_smf_service() { + assert!(is_oxide_smf_service("svc:/oxide/blah:default")); + assert!(is_oxide_smf_service("svc:/system/illumos/blah:default")); + assert!(!is_oxide_smf_service("svc:/system/blah:default")); + assert!(!is_oxide_smf_service("svc:/not/oxide/blah:default")); + } + + #[test] + fn test_is_oxide_smf_log_file() { + assert!(is_oxide_smf_log_file("oxide-blah:default.log")); + assert!(is_oxide_smf_log_file("oxide-blah:default.log.0")); + assert!(is_oxide_smf_log_file("oxide-blah:default.log.1111")); + assert!(is_oxide_smf_log_file("system-illumos-blah:default.log")); + assert!(is_oxide_smf_log_file("system-illumos-blah:default.log.0")); + assert!(!is_oxide_smf_log_file("not-oxide-blah:default.log")); + assert!(!is_oxide_smf_log_file("not-system-illumos-blah:default.log")); + } } diff --git a/installinator-artifactd/Cargo.toml b/installinator-artifactd/Cargo.toml index 9318b725db..b14ca4002f 100644 --- a/installinator-artifactd/Cargo.toml +++ b/installinator-artifactd/Cargo.toml @@ -20,7 +20,7 @@ uuid.workspace = true installinator-common.workspace = true omicron-common.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] expectorate.workspace = true diff --git a/installinator-common/Cargo.toml b/installinator-common/Cargo.toml index 0f1bf86901..8fea234e20 100644 --- a/installinator-common/Cargo.toml +++ b/installinator-common/Cargo.toml @@ -15,4 +15,4 @@ serde_json.workspace = true serde_with.workspace = true thiserror.workspace = true update-engine.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/installinator/Cargo.toml b/installinator/Cargo.toml index 3b2f04c38f..a4f170ddba 100644 --- a/installinator/Cargo.toml +++ b/installinator/Cargo.toml @@ -42,7 +42,7 @@ toml.workspace = true tufaceous-lib.workspace = true update-engine.workspace = true uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] omicron-test-utils.workspace = true @@ -57,3 +57,4 @@ tokio-stream.workspace = true [features] image-standard = [] image-trampoline = [] +rack-topology-single-sled = [] diff --git a/internal-dns-cli/Cargo.toml b/internal-dns-cli/Cargo.toml index fb5780d22a..dab92c6d7c 100644 --- a/internal-dns-cli/Cargo.toml +++ b/internal-dns-cli/Cargo.toml @@ -13,4 +13,4 @@ omicron-common.workspace = true slog.workspace = true tokio.workspace = true trust-dns-resolver.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/internal-dns/Cargo.toml b/internal-dns/Cargo.toml index d680ab3ce1..ecb2d48bda 100644 --- a/internal-dns/Cargo.toml +++ b/internal-dns/Cargo.toml @@ -17,7 +17,7 @@ thiserror.workspace = true trust-dns-proto.workspace = true trust-dns-resolver.workspace = true uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] assert_matches.workspace = true diff --git a/ipcc-key-value/Cargo.toml 
b/ipcc-key-value/Cargo.toml index 128fde9a01..04aea9f939 100644 --- a/ipcc-key-value/Cargo.toml +++ b/ipcc-key-value/Cargo.toml @@ -11,7 +11,7 @@ omicron-common.workspace = true serde.workspace = true thiserror.workspace = true uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] omicron-common = { workspace = true, features = ["testing"] } diff --git a/key-manager/Cargo.toml b/key-manager/Cargo.toml index 69ae3b25bd..c44ec61ea4 100644 --- a/key-manager/Cargo.toml +++ b/key-manager/Cargo.toml @@ -14,5 +14,5 @@ slog.workspace = true thiserror.workspace = true tokio.workspace = true zeroize.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/nexus/Cargo.toml b/nexus/Cargo.toml index 91872e2c32..3de6dac7c0 100644 --- a/nexus/Cargo.toml +++ b/nexus/Cargo.toml @@ -90,7 +90,7 @@ oximeter.workspace = true oximeter-instruments = { workspace = true, features = ["http-instruments"] } oximeter-producer.workspace = true rustls = { workspace = true } -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] async-bb8-diesel.workspace = true diff --git a/nexus/authz-macros/Cargo.toml b/nexus/authz-macros/Cargo.toml index 3d55afa477..15f18cb9c8 100644 --- a/nexus/authz-macros/Cargo.toml +++ b/nexus/authz-macros/Cargo.toml @@ -14,4 +14,4 @@ quote.workspace = true serde.workspace = true serde_tokenstream.workspace = true syn.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/nexus/db-macros/Cargo.toml b/nexus/db-macros/Cargo.toml index ce206bb56e..053c381ac9 100644 --- a/nexus/db-macros/Cargo.toml +++ b/nexus/db-macros/Cargo.toml @@ -15,7 +15,7 @@ quote.workspace = true serde.workspace = true serde_tokenstream.workspace = true syn = { workspace = true, features = ["extra-traits"] } -omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] rustfmt-wrapper.workspace = true diff --git a/nexus/db-model/Cargo.toml b/nexus/db-model/Cargo.toml index aedbb9168b..a5cb9a06be 100644 --- a/nexus/db-model/Cargo.toml +++ b/nexus/db-model/Cargo.toml @@ -36,7 +36,7 @@ nexus-defaults.workspace = true nexus-types.workspace = true omicron-passwords.workspace = true sled-agent-client.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] expectorate.workspace = true diff --git a/nexus/db-model/src/queries/region_allocation.rs b/nexus/db-model/src/queries/region_allocation.rs index 43fac3c9a6..2025e79fb8 100644 --- a/nexus/db-model/src/queries/region_allocation.rs +++ b/nexus/db-model/src/queries/region_allocation.rs @@ -47,6 +47,13 @@ table! { } } +table! { + shuffled_candidate_datasets { + id -> Uuid, + pool_id -> Uuid, + } +} + table! { candidate_regions { id -> Uuid, @@ -89,6 +96,19 @@ table! { } } +table! { + one_zpool_per_sled (pool_id) { + pool_id -> Uuid + } +} + +table! { + one_dataset_per_zpool { + id -> Uuid, + pool_id -> Uuid + } +} + table! 
{ inserted_regions { id -> Uuid, @@ -141,6 +161,7 @@ diesel::allow_tables_to_appear_in_same_query!( ); diesel::allow_tables_to_appear_in_same_query!(old_regions, dataset,); +diesel::allow_tables_to_appear_in_same_query!(old_regions, zpool,); diesel::allow_tables_to_appear_in_same_query!( inserted_regions, @@ -149,6 +170,7 @@ diesel::allow_tables_to_appear_in_same_query!( diesel::allow_tables_to_appear_in_same_query!(candidate_zpools, dataset,); diesel::allow_tables_to_appear_in_same_query!(candidate_zpools, zpool,); +diesel::allow_tables_to_appear_in_same_query!(candidate_datasets, dataset); // == Needed for random region allocation == diff --git a/nexus/db-queries/Cargo.toml b/nexus/db-queries/Cargo.toml index af01c1732b..eaf3dc1295 100644 --- a/nexus/db-queries/Cargo.toml +++ b/nexus/db-queries/Cargo.toml @@ -63,7 +63,7 @@ nexus-types.workspace = true omicron-common.workspace = true omicron-passwords.workspace = true oximeter.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] assert_matches.workspace = true diff --git a/nexus/db-queries/src/db/datastore/mod.rs b/nexus/db-queries/src/db/datastore/mod.rs index ff1df710bb..b1f3203c60 100644 --- a/nexus/db-queries/src/db/datastore/mod.rs +++ b/nexus/db-queries/src/db/datastore/mod.rs @@ -307,43 +307,6 @@ pub enum UpdatePrecondition { Value(T), } -/// Defines a strategy for choosing what physical disks to use when allocating -/// new crucible regions. -/// -/// NOTE: More strategies can - and should! - be added. -/// -/// See <https://github.com/oxidecomputer/omicron/issues/3416> for a more -/// complete discussion. -/// -/// Longer-term, we should consider: -/// - Storage size + remaining free space -/// - Sled placement of datasets -/// - What sort of loads we'd like to create (even split across all disks -/// may not be preferable, especially if maintenance is expected) -#[derive(Debug, Clone)] -pub enum RegionAllocationStrategy { - /// Choose disks that have the least data usage in the rack. This strategy - /// can lead to bad failure states wherein the disks with the least usage - /// have the least usage because regions on them are actually failing in - /// some way. Further retried allocations will then continue to try to - /// allocate onto the disk, perpetuating the problem. Currently this - /// strategy only exists so we can test that using different allocation - /// strategies actually results in different allocation patterns, hence the - /// `#[cfg(test)]`. - /// - /// See https://github.com/oxidecomputer/omicron/issues/3416 for more on the - /// failure-states associated with this strategy - #[cfg(test)] - LeastUsedDisk, - - /// Choose disks pseudo-randomly. An optional seed may be provided to make - /// the ordering deterministic, otherwise the current time in nanoseconds - /// will be used. Ordering is based on sorting the output of `md5(UUID of - /// candidate dataset + seed)`. The seed does not need to come from a - /// cryptographically secure source.
- Random(Option<u128>), -} - /// Constructs a DataStore for use in test suites that has preloaded the /// built-in users, roles, and role assignments that are needed for basic /// operation @@ -421,7 +384,9 @@ mod test { use omicron_common::api::external::{ self, ByteCount, Error, IdentityMetadataCreateParams, LookupType, Name, }; + use omicron_common::nexus_config::RegionAllocationStrategy; use omicron_test_utils::dev; + use std::collections::HashMap; use std::collections::HashSet; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddrV6}; use std::num::NonZeroU32; @@ -704,12 +669,18 @@ mod test { } } + struct TestDataset { + sled_id: Uuid, + dataset_id: Uuid, + } + async fn create_test_datasets_for_region_allocation( opctx: &OpContext, datastore: Arc<DataStore>, - ) -> Vec<Uuid> { + number_of_sleds: usize, + ) -> Vec<TestDataset> { // Create sleds... - let sled_ids: Vec<Uuid> = stream::iter(0..REGION_REDUNDANCY_THRESHOLD) + let sled_ids: Vec<Uuid> = stream::iter(0..number_of_sleds) .then(|_| create_test_sled(&datastore)) .collect() .await; @@ -740,48 +711,69 @@ mod test { .collect() .await; + #[derive(Copy, Clone)] + struct Zpool { + sled_id: Uuid, + pool_id: Uuid, + } + // 1 pool per disk - let zpool_ids: Vec<Uuid> = stream::iter(physical_disks) + let zpools: Vec<Zpool> = stream::iter(physical_disks) .then(|disk| { - create_test_zpool(&datastore, disk.sled_id, disk.disk_id) + let pool_id_future = + create_test_zpool(&datastore, disk.sled_id, disk.disk_id); + async move { + let pool_id = pool_id_future.await; + Zpool { sled_id: disk.sled_id, pool_id } + } }) .collect() .await; let bogus_addr = SocketAddrV6::new(Ipv6Addr::LOCALHOST, 8080, 0, 0); - // 1 dataset per zpool - let dataset_ids: Vec<Uuid> = stream::iter(zpool_ids) - .then(|zpool_id| { - let id = Uuid::new_v4(); - let dataset = Dataset::new( - id, - zpool_id, - bogus_addr, - DatasetKind::Crucible, - ); - let datastore = datastore.clone(); - async move { - datastore.dataset_upsert(dataset).await.unwrap(); - id - } + let datasets: Vec<TestDataset> = stream::iter(zpools) + .map(|zpool| { + // 3 datasets per zpool, to test that pools are distinct + let zpool_iter: Vec<Zpool> = (0..3).map(|_| zpool).collect(); + stream::iter(zpool_iter).then(|zpool| { + let id = Uuid::new_v4(); + let dataset = Dataset::new( + id, + zpool.pool_id, + bogus_addr, + DatasetKind::Crucible, + ); + + let datastore = datastore.clone(); + async move { + datastore.dataset_upsert(dataset).await.unwrap(); + + TestDataset { sled_id: zpool.sled_id, dataset_id: id } + } + }) }) + .flatten() .collect() .await; - dataset_ids + datasets } #[tokio::test] /// Note that this test is currently non-deterministic. It can be made /// deterministic by generating deterministic *dataset* Uuids. The sled and /// pool IDs should not matter. - async fn test_region_allocation() { - let logctx = dev::test_setup_log("test_region_allocation"); + async fn test_region_allocation_strat_random() { + let logctx = dev::test_setup_log("test_region_allocation_strat_random"); let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; - create_test_datasets_for_region_allocation(&opctx, datastore.clone()) - .await; + create_test_datasets_for_region_allocation( + &opctx, + datastore.clone(), + REGION_REDUNDANCY_THRESHOLD, + ) + .await; // Allocate regions from the datasets for this disk. Do it a few times // for good measure.
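        // Each iteration seeds the strategy with `alloc_seed`, so a failing
        // shuffle can be reproduced by rerunning with the same seed value.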
@@ -799,7 +791,9 @@ mod test { volume_id, &params.disk_source, params.size, - &RegionAllocationStrategy::Random(Some(alloc_seed as u128)), + &RegionAllocationStrategy::Random { + seed: Some(alloc_seed), + }, ) .await .unwrap(); @@ -809,8 +803,81 @@ mod test { let mut disk_datasets = HashSet::new(); let mut disk_zpools = HashSet::new(); - // TODO: When allocation chooses 3 distinct sleds, uncomment this. - // let mut disk1_sleds = HashSet::new(); + for (dataset, region) in dataset_and_regions { + // Must be 3 unique datasets + assert!(disk_datasets.insert(dataset.id())); + + // Must be 3 unique zpools + assert!(disk_zpools.insert(dataset.pool_id)); + + assert_eq!(volume_id, region.volume_id()); + assert_eq!(ByteCount::from(4096), region.block_size()); + let (_, extent_count) = DataStore::get_crucible_allocation( + &BlockSize::AdvancedFormat, + params.size, + ); + assert_eq!(extent_count, region.extent_count()); + } + } + + let _ = db.cleanup().await; + logctx.cleanup_successful(); + } + + #[tokio::test] + /// Test the [`RegionAllocationStrategy::RandomWithDistinctSleds`] strategy. + /// It should always pick datasets where no two datasets are on the same + /// zpool and no two zpools are on the same sled. + async fn test_region_allocation_strat_random_with_distinct_sleds() { + let logctx = dev::test_setup_log( + "test_region_allocation_strat_random_with_distinct_sleds", + ); + let mut db = test_setup_database(&logctx.log).await; + let (opctx, datastore) = datastore_test(&logctx, &db).await; + + // Create a rack with enough sleds for a successful allocation when + // we require 3 distinct sleds. + let test_datasets = create_test_datasets_for_region_allocation( + &opctx, + datastore.clone(), + REGION_REDUNDANCY_THRESHOLD, + ) + .await; + + // We need to check that our datasets end up on 3 distinct sleds, but the + // query doesn't return the sled ID, so we reverse map from dataset ID to + // sled ID. + let sled_id_map: HashMap<Uuid, Uuid> = test_datasets + .into_iter() + .map(|test_dataset| (test_dataset.dataset_id, test_dataset.sled_id)) + .collect(); + + // Allocate regions from the datasets for this disk. Do it a few times + // for good measure. + for alloc_seed in 0..10 { + let params = create_test_disk_create_params( + &format!("disk{}", alloc_seed), + ByteCount::from_mebibytes_u32(1), + ); + let volume_id = Uuid::new_v4(); + + let expected_region_count = REGION_REDUNDANCY_THRESHOLD; + let dataset_and_regions = datastore + .region_allocate( + &opctx, + volume_id, + &params.disk_source, + params.size, + &RegionAllocationStrategy::RandomWithDistinctSleds { + seed: Some(alloc_seed), + }, + ) + .await + .unwrap(); + + // Verify the allocation. + assert_eq!(expected_region_count, dataset_and_regions.len()); + let mut disk_datasets = HashSet::new(); + let mut disk_zpools = HashSet::new(); + let mut disk_sleds = HashSet::new(); for (dataset, region) in dataset_and_regions { // Must be 3 unique datasets assert!(disk_datasets.insert(dataset.id())); @@ -819,8 +886,8 @@ mod test { // Must be 3 unique zpools assert!(disk_zpools.insert(dataset.pool_id)); // Must be 3 unique sleds - // TODO: When allocation chooses 3 distinct sleds, uncomment this.
- // assert!(disk1_sleds.insert(Err(dataset))); + let sled_id = sled_id_map.get(&dataset.id()).unwrap(); + assert!(disk_sleds.insert(*sled_id)); assert_eq!(volume_id, region.volume_id()); assert_eq!(ByteCount::from(4096), region.block_size()); @@ -836,14 +903,72 @@ mod test { logctx.cleanup_successful(); } + #[tokio::test] + /// Ensure the [`RegionAllocationStrategy::RandomWithDistinctSleds`] + /// strategy fails when there aren't enough distinct sleds. + async fn test_region_allocation_strat_random_with_distinct_sleds_fails() { + let logctx = dev::test_setup_log( + "test_region_allocation_strat_random_with_distinct_sleds_fails", + ); + let mut db = test_setup_database(&logctx.log).await; + let (opctx, datastore) = datastore_test(&logctx, &db).await; + + // Create a rack without enough sleds for a successful allocation when + // we require 3 distinct sleds. + create_test_datasets_for_region_allocation( + &opctx, + datastore.clone(), + REGION_REDUNDANCY_THRESHOLD - 1, + ) + .await; + + // Allocate regions from the datasets for this disk. Do it a few times + // for good measure. + for alloc_seed in 0..10 { + let params = create_test_disk_create_params( + &format!("disk{}", alloc_seed), + ByteCount::from_mebibytes_u32(1), + ); + let volume_id = Uuid::new_v4(); + + let err = datastore + .region_allocate( + &opctx, + volume_id, + &params.disk_source, + params.size, + &RegionAllocationStrategy::RandomWithDistinctSleds { + seed: Some(alloc_seed), + }, + ) + .await + .unwrap_err(); + + let expected = "Not enough zpool space to allocate disks"; + assert!( + err.to_string().contains(expected), + "Saw error: \'{err}\', but expected \'{expected}\'" + ); + + assert!(matches!(err, Error::ServiceUnavailable { .. })); + } + + let _ = db.cleanup().await; + logctx.cleanup_successful(); + } + #[tokio::test] async fn test_region_allocation_is_idempotent() { let logctx = dev::test_setup_log("test_region_allocation_is_idempotent"); let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; - create_test_datasets_for_region_allocation(&opctx, datastore.clone()) - .await; + create_test_datasets_for_region_allocation( + &opctx, + datastore.clone(), + REGION_REDUNDANCY_THRESHOLD, + ) + .await; // Allocate regions from the datasets for this volume.
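        // (The two calls below intentionally use different seeds: once the
        // regions exist, a repeated allocation must hand back the same regions
        // no matter how the candidates are shuffled.)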
let params = create_test_disk_create_params( @@ -857,7 +982,7 @@ mod test { volume_id, &params.disk_source, params.size, - &RegionAllocationStrategy::Random(Some(0)), + &RegionAllocationStrategy::Random { seed: Some(0) }, ) .await .unwrap(); @@ -870,7 +995,7 @@ volume_id, &params.disk_source, params.size, - &RegionAllocationStrategy::Random(Some(1)), + &RegionAllocationStrategy::Random { seed: Some(1) }, ) .await .unwrap(); @@ -959,7 +1084,7 @@ volume1_id, &params.disk_source, params.size, - &RegionAllocationStrategy::Random(Some(0)), + &RegionAllocationStrategy::Random { seed: Some(0) }, ) .await .unwrap_err(); @@ -983,8 +1108,12 @@ let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; - create_test_datasets_for_region_allocation(&opctx, datastore.clone()) - .await; + create_test_datasets_for_region_allocation( + &opctx, + datastore.clone(), + REGION_REDUNDANCY_THRESHOLD, + ) + .await; let disk_size = test_zpool_size(); let alloc_size = ByteCount::try_from(disk_size.to_bytes() * 2).unwrap(); @@ -997,7 +1126,7 @@ volume1_id, &params.disk_source, params.size, - &RegionAllocationStrategy::Random(Some(0)), + &RegionAllocationStrategy::Random { seed: Some(0) }, ) .await .is_err()); diff --git a/nexus/db-queries/src/db/datastore/region.rs b/nexus/db-queries/src/db/datastore/region.rs index 5bc79b9481..9465fe2792 100644 --- a/nexus/db-queries/src/db/datastore/region.rs +++ b/nexus/db-queries/src/db/datastore/region.rs @@ -5,7 +5,6 @@ //! [`DataStore`] methods on [`Region`]s. use super::DataStore; -use super::RegionAllocationStrategy; use super::RunnableQuery; use crate::context::OpContext; use crate::db; @@ -23,6 +22,7 @@ use omicron_common::api::external; use omicron_common::api::external::DeleteResult; use omicron_common::api::external::Error; use omicron_common::backoff::{self, BackoffError}; +use omicron_common::nexus_config::RegionAllocationStrategy; use slog::Logger; use uuid::Uuid; diff --git a/nexus/db-queries/src/db/queries/region_allocation.rs b/nexus/db-queries/src/db/queries/region_allocation.rs index b071ee3f44..7f7b2ea9bf 100644 --- a/nexus/db-queries/src/db/queries/region_allocation.rs +++ b/nexus/db-queries/src/db/queries/region_allocation.rs @@ -6,7 +6,6 @@ use crate::db::alias::ExpressionAlias; use crate::db::cast_uuid_as_bytea::CastUuidToBytea; -use crate::db::datastore::RegionAllocationStrategy; use crate::db::datastore::REGION_REDUNDANCY_THRESHOLD; use crate::db::model::{Dataset, DatasetKind, Region}; use crate::db::pool::DbConnection; @@ -24,10 +23,11 @@ use diesel::{ use nexus_db_model::queries::region_allocation::{ candidate_datasets, candidate_regions, candidate_zpools, cockroach_md5, do_insert, inserted_regions, old_regions, old_zpool_usage, - proposed_dataset_changes, updated_datasets, + proposed_dataset_changes, shuffled_candidate_datasets, updated_datasets, }; use nexus_db_model::schema; use omicron_common::api::external; +use omicron_common::nexus_config::RegionAllocationStrategy; const NOT_ENOUGH_DATASETS_SENTINEL: &'static str = "Not enough datasets"; const NOT_ENOUGH_ZPOOL_SPACE_SENTINEL: &'static str = "Not enough space"; @@ -53,7 +53,7 @@ pub fn from_diesel(e: async_bb8_diesel::ConnectionError) -> external::Error { } NOT_ENOUGH_ZPOOL_SPACE_SENTINEL => { return external::Error::unavail( - "Not enough zpool space to allocate disks", + "Not enough zpool space to allocate disks. There may not be enough disks with space for the requested region.
You may also see this if your rack is in a degraded state, or you're running the default multi-sled topology configuration in a 1-sled development environment.", ); } NOT_ENOUGH_UNIQUE_ZPOOLS_SENTINEL => { @@ -91,6 +91,8 @@ impl OldRegions { /// This implicitly distinguishes between "M.2s" and "U.2s" -- Nexus needs to /// determine during dataset provisioning which devices should be considered for /// usage as Crucible storage. +/// +/// We select only one dataset from each zpool. #[derive(Subquery, QueryId)] #[subquery(name = candidate_datasets)] struct CandidateDatasets { @@ -98,71 +100,65 @@ struct CandidateDatasets { } impl CandidateDatasets { - fn new( - allocation_strategy: &RegionAllocationStrategy, - candidate_zpools: &CandidateZpools, - ) -> Self { + fn new(candidate_zpools: &CandidateZpools, seed: u128) -> Self { use crate::db::schema::dataset::dsl as dataset_dsl; use candidate_zpools::dsl as candidate_zpool_dsl; - let query = match allocation_strategy { - #[cfg(test)] - RegionAllocationStrategy::LeastUsedDisk => { - let query: Box< - dyn CteQuery, - > = Box::new( - dataset_dsl::dataset - .inner_join( - candidate_zpools - .query_source() - .on(dataset_dsl::pool_id - .eq(candidate_zpool_dsl::pool_id)), - ) - .filter(dataset_dsl::time_deleted.is_null()) - .filter(dataset_dsl::size_used.is_not_null()) - .filter(dataset_dsl::kind.eq(DatasetKind::Crucible)) - .order(dataset_dsl::size_used.asc()) - .limit(REGION_REDUNDANCY_THRESHOLD.try_into().unwrap()) - .select((dataset_dsl::id, dataset_dsl::pool_id)), - ); - query - } - RegionAllocationStrategy::Random(seed) => { - let seed = seed.unwrap_or_else(|| { - std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .unwrap() - .as_nanos() - }); - - let seed_bytes = seed.to_le_bytes(); - - let query: Box< - dyn CteQuery, - > = Box::new( - dataset_dsl::dataset - .inner_join( - candidate_zpools - .query_source() - .on(dataset_dsl::pool_id - .eq(candidate_zpool_dsl::pool_id)), - ) - .filter(dataset_dsl::time_deleted.is_null()) - .filter(dataset_dsl::size_used.is_not_null()) - .filter(dataset_dsl::kind.eq(DatasetKind::Crucible)) - // We order by md5 to shuffle the ordering of the datasets. - // md5 has a uniform output distribution so it does the job. - .order(cockroach_md5::dsl::md5( + let seed_bytes = seed.to_le_bytes(); + + let query: Box> = + Box::new( + dataset_dsl::dataset + .inner_join(candidate_zpools.query_source().on( + dataset_dsl::pool_id.eq(candidate_zpool_dsl::pool_id), + )) + .filter(dataset_dsl::time_deleted.is_null()) + .filter(dataset_dsl::size_used.is_not_null()) + .filter(dataset_dsl::kind.eq(DatasetKind::Crucible)) + .distinct_on(dataset_dsl::pool_id) + .order_by(( + dataset_dsl::pool_id, + cockroach_md5::dsl::md5( CastUuidToBytea::new(dataset_dsl::id) .concat(seed_bytes.to_vec()), - )) - .select((dataset_dsl::id, dataset_dsl::pool_id)) - .limit(REGION_REDUNDANCY_THRESHOLD.try_into().unwrap()), - ); - query - } - }; + ), + )) + .select((dataset_dsl::id, dataset_dsl::pool_id)), + ); Self { query } } } + +/// Shuffle the candidate datasets, and select REGION_REDUNDANCY_THRESHOLD +/// datasets from it.
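+///
+/// The shuffle reuses the same md5-over-(dataset id ++ seed) ordering
+/// described on [`RegionAllocationStrategy::Random`], so a fixed seed yields
+/// a stable, reproducible selection.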
+#[derive(Subquery, QueryId)] +#[subquery(name = shuffled_candidate_datasets)] +struct ShuffledCandidateDatasets { + query: Box>, +} +impl ShuffledCandidateDatasets { + fn new(candidate_datasets: &CandidateDatasets, seed: u128) -> Self { + use candidate_datasets::dsl as candidate_datasets_dsl; + + let seed_bytes = seed.to_le_bytes(); + + let query: Box> = + Box::new( + candidate_datasets + .query_source() + // We order by md5 to shuffle the ordering of the datasets. + // md5 has a uniform output distribution so it does the job. + .order(cockroach_md5::dsl::md5( + CastUuidToBytea::new(candidate_datasets_dsl::id) + .concat(seed_bytes.to_vec()), + )) + .select(( + candidate_datasets_dsl::id, + candidate_datasets_dsl::pool_id, + )) + .limit(REGION_REDUNDANCY_THRESHOLD.try_into().unwrap()), + ); Self { query } } } @@ -179,14 +175,14 @@ diesel::sql_function!(fn now() -> Timestamptz); impl CandidateRegions { fn new( - candidate_datasets: &CandidateDatasets, + shuffled_candidate_datasets: &ShuffledCandidateDatasets, volume_id: uuid::Uuid, block_size: u64, blocks_per_extent: u64, extent_count: u64, ) -> Self { - use candidate_datasets::dsl as candidate_datasets_dsl; use schema::region; + use shuffled_candidate_datasets::dsl as shuffled_candidate_datasets_dsl; let volume_id = volume_id.into_sql::(); let block_size = (block_size as i64).into_sql::(); @@ -195,20 +191,22 @@ impl CandidateRegions { let extent_count = (extent_count as i64).into_sql::(); Self { - query: Box::new(candidate_datasets.query_source().select(( - ExpressionAlias::new::(gen_random_uuid()), - ExpressionAlias::new::(now()), - ExpressionAlias::new::(now()), - ExpressionAlias::new::( - candidate_datasets_dsl::id, + query: Box::new(shuffled_candidate_datasets.query_source().select( + ( + ExpressionAlias::new::(gen_random_uuid()), + ExpressionAlias::new::(now()), + ExpressionAlias::new::(now()), + ExpressionAlias::new::( + shuffled_candidate_datasets_dsl::id, + ), + ExpressionAlias::new::(volume_id), + ExpressionAlias::new::(block_size), + ExpressionAlias::new::( + blocks_per_extent, + ), + ExpressionAlias::new::(extent_count), ), - ExpressionAlias::new::(volume_id), - ExpressionAlias::new::(block_size), - ExpressionAlias::new::( - blocks_per_extent, - ), - ExpressionAlias::new::(extent_count), - ))), + )), } } } @@ -285,12 +283,14 @@ struct CandidateZpools { } impl CandidateZpools { - fn new(old_zpool_usage: &OldPoolUsage, zpool_size_delta: u64) -> Self { + fn new( + old_zpool_usage: &OldPoolUsage, + zpool_size_delta: u64, + seed: u128, + distinct_sleds: bool, + ) -> Self { use schema::zpool::dsl as zpool_dsl; - let with_zpool = zpool_dsl::zpool - .on(zpool_dsl::id.eq(old_zpool_usage::dsl::pool_id)); - // Why are we using raw `diesel::dsl::sql` here? 
// // When SQL performs the "SUM" operation on "bigint" type, the result @@ -309,15 +309,40 @@ impl CandidateZpools { + diesel::dsl::sql(&zpool_size_delta.to_string())) .le(diesel::dsl::sql(zpool_dsl::total_size::NAME)); - Self { - query: Box::new( - old_zpool_usage - .query_source() - .inner_join(with_zpool) - .filter(it_will_fit) - .select((old_zpool_usage::dsl::pool_id,)), - ), - } + let with_zpool = zpool_dsl::zpool + .on(zpool_dsl::id.eq(old_zpool_usage::dsl::pool_id)); + + let base_query = old_zpool_usage + .query_source() + .inner_join(with_zpool) + .filter(it_will_fit) + .select((old_zpool_usage::dsl::pool_id,)); + + let query = if distinct_sleds { + let seed_bytes = seed.to_le_bytes(); + + let query: Box> = + Box::new( + base_query + .order_by(( + zpool_dsl::sled_id, + cockroach_md5::dsl::md5( + CastUuidToBytea::new(zpool_dsl::id) + .concat(seed_bytes.to_vec()), + ), + )) + .distinct_on(zpool_dsl::sled_id), + ); + + query + } else { + let query: Box> = + Box::new(base_query); + + query + }; + + Self { query } } } @@ -508,19 +533,47 @@ impl RegionAllocate { extent_count: u64, allocation_strategy: &RegionAllocationStrategy, ) -> Self { + let (seed, distinct_sleds) = { + let (input_seed, distinct_sleds) = match allocation_strategy { + RegionAllocationStrategy::Random { seed } => (seed, false), + RegionAllocationStrategy::RandomWithDistinctSleds { seed } => { + (seed, true) + } + }; + ( + input_seed.map_or_else( + || { + std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_nanos() + }, + |seed| seed as u128, + ), + distinct_sleds, + ) + }; + let size_delta = block_size * blocks_per_extent * extent_count; let old_regions = OldRegions::new(volume_id); let old_pool_usage = OldPoolUsage::new(); - let candidate_zpools = - CandidateZpools::new(&old_pool_usage, size_delta); + let candidate_zpools = CandidateZpools::new( + &old_pool_usage, + size_delta, + seed, + distinct_sleds, + ); let candidate_datasets = - CandidateDatasets::new(&allocation_strategy, &candidate_zpools); + CandidateDatasets::new(&candidate_zpools, seed); + + let shuffled_candidate_datasets = + ShuffledCandidateDatasets::new(&candidate_datasets, seed); let candidate_regions = CandidateRegions::new( - &candidate_datasets, + &shuffled_candidate_datasets, volume_id, block_size, blocks_per_extent, @@ -577,6 +630,7 @@ impl RegionAllocate { .add_subquery(old_pool_usage) .add_subquery(candidate_zpools) .add_subquery(candidate_datasets) + .add_subquery(shuffled_candidate_datasets) .add_subquery(candidate_regions) .add_subquery(proposed_changes) .add_subquery(do_insert) diff --git a/nexus/defaults/Cargo.toml b/nexus/defaults/Cargo.toml index 09a95fa839..0724b5bf4d 100644 --- a/nexus/defaults/Cargo.toml +++ b/nexus/defaults/Cargo.toml @@ -11,4 +11,4 @@ rand.workspace = true serde_json.workspace = true omicron-common.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/nexus/examples/config.toml b/nexus/examples/config.toml index f1b20c32a1..1a9afbc6bd 100644 --- a/nexus/examples/config.toml +++ b/nexus/examples/config.toml @@ -92,3 +92,14 @@ dns_external.max_concurrent_server_updates = 5 # certificates it will take _other_ Nexus instances to notice and stop serving # them (on a sunny day). external_endpoints.period_secs = 60 + +[default_region_allocation_strategy] +# allocate region on 3 random distinct zpools, on 3 random distinct sleds. 
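+# NOTE: this strategy needs at least 3 sleds with usable zpools; on a
+# single-sled deployment (the `-r single-sled` target) allocation would fail,
+# so use `type = "random"` there instead.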
+type = "random_with_distinct_sleds" + +# the same as random_with_distinct_sleds, but without requiring distinct sleds +# type = "random" + +# setting `seed` to a fixed value will make dataset selection ordering use the +# same shuffling order for every region allocation. +# seed = 0 diff --git a/nexus/src/app/mod.rs b/nexus/src/app/mod.rs index 5bab5e2820..354df0ead3 100644 --- a/nexus/src/app/mod.rs +++ b/nexus/src/app/mod.rs @@ -23,6 +23,7 @@ use omicron_common::address::DENDRITE_PORT; use omicron_common::address::MGS_PORT; use omicron_common::api::external::Error; use omicron_common::api::internal::shared::SwitchLocation; +use omicron_common::nexus_config::RegionAllocationStrategy; use slog::Logger; use std::collections::HashMap; use std::net::Ipv6Addr; @@ -153,6 +154,9 @@ pub struct Nexus { /// Background tasks background_tasks: background::BackgroundTasks, + + /// Default Crucible region allocation strategy + default_region_allocation_strategy: RegionAllocationStrategy, } impl Nexus { @@ -325,6 +329,10 @@ impl Nexus { external_resolver, dpd_clients, background_tasks, + default_region_allocation_strategy: config + .pkg + .default_region_allocation_strategy + .clone(), }; // TODO-cleanup all the extra Arcs here seems wrong diff --git a/nexus/src/app/sagas/disk_create.rs b/nexus/src/app/sagas/disk_create.rs index cca36cefa7..275c8738cc 100644 --- a/nexus/src/app/sagas/disk_create.rs +++ b/nexus/src/app/sagas/disk_create.rs @@ -12,11 +12,10 @@ use super::{ ACTION_GENERATE_ID, }; use crate::app::sagas::declare_saga_actions; +use crate::app::{authn, authz, db}; use crate::external_api::params; -use nexus_db_queries::db::datastore::RegionAllocationStrategy; use nexus_db_queries::db::identity::{Asset, Resource}; use nexus_db_queries::db::lookup::LookupPath; -use nexus_db_queries::{authn, authz, db}; use omicron_common::api::external::DiskState; use omicron_common::api::external::Error; use rand::{rngs::StdRng, RngCore, SeedableRng}; @@ -255,6 +254,9 @@ async fn sdc_alloc_regions( &sagactx, ¶ms.serialized_authn, ); + + let strategy = &osagactx.nexus().default_region_allocation_strategy; + let datasets_and_regions = osagactx .datastore() .region_allocate( @@ -262,7 +264,7 @@ async fn sdc_alloc_regions( volume_id, ¶ms.create_params.disk_source, params.create_params.size, - &RegionAllocationStrategy::Random(None), + &strategy, ) .await .map_err(ActionError::action_failed)?; diff --git a/nexus/src/app/sagas/snapshot_create.rs b/nexus/src/app/sagas/snapshot_create.rs index b27f4a3a9b..eeabf64894 100644 --- a/nexus/src/app/sagas/snapshot_create.rs +++ b/nexus/src/app/sagas/snapshot_create.rs @@ -100,14 +100,13 @@ use super::{ }; use crate::app::sagas::declare_saga_actions; use crate::app::sagas::retry_until_known_result; +use crate::app::{authn, authz, db}; use crate::external_api::params; use anyhow::anyhow; use crucible_agent_client::{types::RegionId, Client as CrucibleAgentClient}; use nexus_db_model::Generation; -use nexus_db_queries::db::datastore::RegionAllocationStrategy; use nexus_db_queries::db::identity::{Asset, Resource}; use nexus_db_queries::db::lookup::LookupPath; -use nexus_db_queries::{authn, authz, db}; use omicron_common::api::external; use omicron_common::api::external::Error; use rand::{rngs::StdRng, RngCore, SeedableRng}; @@ -332,6 +331,8 @@ async fn ssc_alloc_regions( .await .map_err(ActionError::action_failed)?; + let strategy = &osagactx.nexus().default_region_allocation_strategy; + let datasets_and_regions = osagactx .datastore() .region_allocate( @@ -344,7 +345,7 @@ async 
fn ssc_alloc_regions( .map_err(|e| ActionError::action_failed(e.to_string()))?, }, external::ByteCount::from(disk.size), - &RegionAllocationStrategy::Random(None), + &strategy, ) .await .map_err(ActionError::action_failed)?; diff --git a/nexus/test-interface/Cargo.toml b/nexus/test-interface/Cargo.toml index e0743e84bc..0071ffaa28 100644 --- a/nexus/test-interface/Cargo.toml +++ b/nexus/test-interface/Cargo.toml @@ -12,4 +12,4 @@ nexus-types.workspace = true omicron-common.workspace = true slog.workspace = true uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/nexus/test-utils-macros/Cargo.toml b/nexus/test-utils-macros/Cargo.toml index 1bfa25017a..d3d28a7640 100644 --- a/nexus/test-utils-macros/Cargo.toml +++ b/nexus/test-utils-macros/Cargo.toml @@ -11,4 +11,4 @@ proc-macro = true proc-macro2.workspace = true quote.workspace = true syn = { workspace = true, features = [ "fold", "parsing" ] } -omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/nexus/test-utils/Cargo.toml b/nexus/test-utils/Cargo.toml index a2e7600e93..8eb8df4a5b 100644 --- a/nexus/test-utils/Cargo.toml +++ b/nexus/test-utils/Cargo.toml @@ -38,4 +38,4 @@ tempfile.workspace = true trust-dns-proto.workspace = true trust-dns-resolver.workspace = true uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/nexus/tests/config.test.toml b/nexus/tests/config.test.toml index 6eeacceaed..1b1ae2c912 100644 --- a/nexus/tests/config.test.toml +++ b/nexus/tests/config.test.toml @@ -89,3 +89,8 @@ dns_external.max_concurrent_server_updates = 5 # certificates it will take _other_ Nexus instances to notice and stop serving # them (on a sunny day). external_endpoints.period_secs = 60 + +[default_region_allocation_strategy] +# we only have one sled in the test environment, so we need to use the +# `Random` strategy, instead of `RandomWithDistinctSleds` +type = "random" \ No newline at end of file diff --git a/nexus/tests/integration_tests/schema.rs b/nexus/tests/integration_tests/schema.rs index 67dfa6c255..1d4556e8ed 100644 --- a/nexus/tests/integration_tests/schema.rs +++ b/nexus/tests/integration_tests/schema.rs @@ -15,7 +15,7 @@ use omicron_common::api::internal::shared::SwitchLocation; use omicron_common::nexus_config::Config; use omicron_common::nexus_config::SchemaConfig; use omicron_test_utils::dev::db::CockroachInstance; -use pretty_assertions::assert_eq; +use pretty_assertions::{assert_eq, assert_ne}; use similar_asserts; use slog::Logger; use std::collections::{BTreeMap, BTreeSet}; @@ -62,6 +62,47 @@ async fn test_setup<'a>( builder } +// Attempts to apply an update as a transaction. +// +// Only returns an error if the transaction failed to commit. +async fn apply_update_as_transaction_inner( + client: &omicron_test_utils::dev::db::Client, + sql: &str, +) -> Result<(), tokio_postgres::Error> { + client.batch_execute("BEGIN;").await.expect("Failed to BEGIN transaction"); + client.batch_execute(&sql).await.expect("Failed to execute update"); + client.batch_execute("COMMIT;").await?; + Ok(()) +} + +// Applies an update as a transaction. +// +// Automatically retries transactions that can be retried client-side. 
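+//
+// As an assumption worth stating explicitly: CockroachDB reports transactions
+// that the client may safely retry with SQLSTATE 40001, which tokio-postgres
+// surfaces as `SqlState::T_R_SERIALIZATION_FAILURE`. The retry loop below
+// spins only on that code and treats every other error as fatal.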
+async fn apply_update_as_transaction( + log: &Logger, + client: &omicron_test_utils::dev::db::Client, + sql: &str, +) { + loop { + match apply_update_as_transaction_inner(client, sql).await { + Ok(()) => break, + Err(err) => { + client + .batch_execute("ROLLBACK;") + .await + .expect("Failed to ROLLBACK failed transaction"); + if let Some(code) = err.code() { + if code == &tokio_postgres::error::SqlState::T_R_SERIALIZATION_FAILURE { + warn!(log, "Transaction retrying"); + continue; + } + } + panic!("Failed to apply update: {err}"); + } + } + } +} + async fn apply_update( log: &Logger, crdb: &CockroachInstance, @@ -87,15 +128,7 @@ async fn apply_update( for _ in 0..times_to_apply { for sql in sqls.iter() { - client - .batch_execute("BEGIN;") - .await - .expect("Failed to BEGIN update"); - client.batch_execute(&sql).await.expect("Failed to execute update"); - client - .batch_execute("COMMIT;") - .await - .expect("Failed to COMMIT update"); + apply_update_as_transaction(log, &client, sql).await; } } @@ -132,6 +165,7 @@ enum AnySqlType { String(String), Bool(bool), Uuid(Uuid), + Int8(i64), // TODO: This isn't exhaustive, feel free to add more. // // These should only be necessary for rows where the database schema changes also choose to @@ -167,6 +201,9 @@ impl<'a> tokio_postgres::types::FromSql<'a> for AnySqlType { if Uuid::accepts(ty) { return Ok(AnySqlType::Uuid(Uuid::from_sql(ty, raw)?)); } + if i64::accepts(ty) { + return Ok(AnySqlType::Int8(i64::from_sql(ty, raw)?)); + } Err(anyhow::anyhow!( "Cannot parse type {ty}. If you're trying to use this type in a table which is populated \ during a schema migration, consider adding it to `AnySqlType`." @@ -432,6 +469,16 @@ const CHECK_CONSTRAINTS: [&'static str; 4] = [ "check_clause", ]; +const CONSTRAINT_COLUMN_USAGE: [&'static str; 7] = [ + "table_catalog", + "table_schema", + "table_name", + "column_name", + "constraint_catalog", + "constraint_schema", + "constraint_name", +]; + const KEY_COLUMN_USAGE: [&'static str; 7] = [ "constraint_catalog", "constraint_schema", @@ -456,29 +503,66 @@ const REFERENTIAL_CONSTRAINTS: [&'static str; 8] = [ const VIEWS: [&'static str; 4] = ["table_catalog", "table_schema", "table_name", "view_definition"]; -const STATISTICS: [&'static str; 8] = [ +const STATISTICS: [&'static str; 11] = [ "table_catalog", "table_schema", "table_name", "non_unique", "index_schema", "index_name", + "seq_in_index", "column_name", "direction", + "storing", + "implicit", +]; + +const SEQUENCES: [&'static str; 12] = [ + "sequence_catalog", + "sequence_schema", + "sequence_name", + "data_type", + "numeric_precision", + "numeric_precision_radix", + "numeric_scale", + "start_value", + "minimum_value", + "maximum_value", + "increment", + "cycle_option", ]; +const PG_INDEXES: [&'static str; 5] = + ["schemaname", "tablename", "indexname", "tablespace", "indexdef"]; + const TABLES: [&'static str; 4] = ["table_catalog", "table_schema", "table_name", "table_type"]; +const TABLE_CONSTRAINTS: [&'static str; 9] = [ + "constraint_catalog", + "constraint_schema", + "constraint_name", + "table_catalog", + "table_schema", + "table_name", + "constraint_type", + "is_deferrable", + "initially_deferred", +]; + #[derive(Eq, PartialEq, Debug)] struct InformationSchema { columns: Vec, check_constraints: Vec, + constraint_column_usage: Vec, key_column_usage: Vec, referential_constraints: Vec, views: Vec, statistics: Vec, + sequences: Vec, + pg_indexes: Vec, tables: Vec, + table_constraints: Vec, } impl InformationSchema { @@ -490,6 +574,10 @@ impl 
InformationSchema { self.check_constraints, other.check_constraints ); + similar_asserts::assert_eq!( + self.constraint_column_usage, + other.constraint_column_usage + ); similar_asserts::assert_eq!( self.key_column_usage, other.key_column_usage @@ -500,7 +588,13 @@ impl InformationSchema { ); similar_asserts::assert_eq!(self.views, other.views); similar_asserts::assert_eq!(self.statistics, other.statistics); + similar_asserts::assert_eq!(self.sequences, other.sequences); + similar_asserts::assert_eq!(self.pg_indexes, other.pg_indexes); similar_asserts::assert_eq!(self.tables, other.tables); + similar_asserts::assert_eq!( + self.table_constraints, + other.table_constraints + ); } async fn new(crdb: &CockroachInstance) -> Self { @@ -524,6 +618,14 @@ impl InformationSchema { ) .await; + let constraint_column_usage = query_crdb_for_rows_of_strings( + crdb, + CONSTRAINT_COLUMN_USAGE.as_slice().into(), + "information_schema.constraint_column_usage", + None, + ) + .await; + let key_column_usage = query_crdb_for_rows_of_strings( crdb, KEY_COLUMN_USAGE.as_slice().into(), @@ -556,6 +658,22 @@ impl InformationSchema { ) .await; + let sequences = query_crdb_for_rows_of_strings( + crdb, + SEQUENCES.as_slice().into(), + "information_schema.sequences", + None, + ) + .await; + + let pg_indexes = query_crdb_for_rows_of_strings( + crdb, + PG_INDEXES.as_slice().into(), + "pg_indexes", + Some("schemaname = 'public'"), + ) + .await; + let tables = query_crdb_for_rows_of_strings( crdb, TABLES.as_slice().into(), @@ -564,14 +682,26 @@ impl InformationSchema { ) .await; + let table_constraints = query_crdb_for_rows_of_strings( + crdb, + TABLE_CONSTRAINTS.as_slice().into(), + "information_schema.table_constraints", + Some("table_schema = 'public'"), + ) + .await; + Self { columns, check_constraints, + constraint_column_usage, key_column_usage, referential_constraints, views, statistics, + sequences, + pg_indexes, tables, + table_constraints, } } @@ -659,3 +789,229 @@ async fn dbinit_equals_sum_of_all_up() { crdb.cleanup().await.unwrap(); logctx.cleanup_successful(); } + +// Returns the InformationSchema object for a database populated via `sql`. 
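+//
+// Each call stands up its own transient, unpopulated CockroachDB instance and
+// tears it down before returning, so the schemas compared by the tests below
+// always come from fully independent databases.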
+async fn get_information_schema(log: &Logger, sql: &str) -> InformationSchema { + let populate = false; + let mut crdb = test_setup_just_crdb(&log, populate).await; + + let client = crdb.connect().await.expect("failed to connect"); + client.batch_execute(sql).await.expect("failed to apply SQL"); + + let observed_schema = InformationSchema::new(&crdb).await; + crdb.cleanup().await.unwrap(); + observed_schema +} + +// Reproduction case for https://github.com/oxidecomputer/omicron/issues/4143 +#[tokio::test] +async fn compare_index_creation_differing_where_clause() { + let config = load_test_config(); + let logctx = LogContext::new( + "compare_index_creation_differing_where_clause", + &config.pkg.log, + ); + let log = &logctx.log; + + let schema1 = get_information_schema(log, " + CREATE DATABASE omicron; + CREATE TABLE omicron.public.animal ( + id UUID PRIMARY KEY, + name TEXT, + time_deleted TIMESTAMPTZ + ); + + CREATE INDEX IF NOT EXISTS lookup_animal_by_name ON omicron.public.animal ( + name, id + ) WHERE name IS NOT NULL AND time_deleted IS NULL; + ").await; + + let schema2 = get_information_schema(log, " + CREATE DATABASE omicron; + CREATE TABLE omicron.public.animal ( + id UUID PRIMARY KEY, + name TEXT, + time_deleted TIMESTAMPTZ + ); + + CREATE INDEX IF NOT EXISTS lookup_animal_by_name ON omicron.public.animal ( + name, id + ) WHERE time_deleted IS NULL; + ").await; + + // pg_indexes includes a column "indexdef" that compares partial indexes. + // This should catch the differing "WHERE" clause. + assert_ne!(schema1.pg_indexes, schema2.pg_indexes); + + logctx.cleanup_successful(); +} + +// Reproduction case for https://github.com/oxidecomputer/omicron/issues/4143 +#[tokio::test] +async fn compare_index_creation_differing_columns() { + let config = load_test_config(); + let logctx = LogContext::new( + "compare_index_creation_differing_columns", + &config.pkg.log, + ); + let log = &logctx.log; + + let schema1 = get_information_schema(log, " + CREATE DATABASE omicron; + CREATE TABLE omicron.public.animal ( + id UUID PRIMARY KEY, + name TEXT, + time_deleted TIMESTAMPTZ + ); + + CREATE INDEX IF NOT EXISTS lookup_animal_by_name ON omicron.public.animal ( + name + ) WHERE name IS NOT NULL AND time_deleted IS NULL; + ").await; + + let schema2 = get_information_schema(log, " + CREATE DATABASE omicron; + CREATE TABLE omicron.public.animal ( + id UUID PRIMARY KEY, + name TEXT, + time_deleted TIMESTAMPTZ + ); + + CREATE INDEX IF NOT EXISTS lookup_animal_by_name ON omicron.public.animal ( + name, id + ) WHERE name IS NOT NULL AND time_deleted IS NULL; + ").await; + + // "statistics" identifies table indices. + // These tables should differ in the "implicit" column. 
+ assert_ne!(schema1.statistics, schema2.statistics); + + logctx.cleanup_successful(); +} + +#[tokio::test] +async fn compare_view_differing_where_clause() { + let config = load_test_config(); + let logctx = + LogContext::new("compare_view_differing_where_clause", &config.pkg.log); + let log = &logctx.log; + + let schema1 = get_information_schema( + log, + " + CREATE DATABASE omicron; + CREATE TABLE omicron.public.animal ( + id UUID PRIMARY KEY, + name TEXT, + time_deleted TIMESTAMPTZ + ); + + CREATE VIEW live_view AS + SELECT animal.id, animal.name + FROM omicron.public.animal + WHERE animal.time_deleted IS NOT NULL; + ", + ) + .await; + + let schema2 = get_information_schema( + log, + " + CREATE DATABASE omicron; + CREATE TABLE omicron.public.animal ( + id UUID PRIMARY KEY, + name TEXT, + time_deleted TIMESTAMPTZ + ); + + CREATE VIEW live_view AS + SELECT animal.id, animal.name + FROM omicron.public.animal + WHERE animal.time_deleted IS NOT NULL AND animal.name = 'Thomas'; + ", + ) + .await; + + assert_ne!(schema1.views, schema2.views); + + logctx.cleanup_successful(); +} + +#[tokio::test] +async fn compare_sequence_differing_increment() { + let config = load_test_config(); + let logctx = LogContext::new( + "compare_sequence_differing_increment", + &config.pkg.log, + ); + let log = &logctx.log; + + let schema1 = get_information_schema( + log, + " + CREATE DATABASE omicron; + CREATE SEQUENCE omicron.public.myseq START 1 INCREMENT 1; + ", + ) + .await; + + let schema2 = get_information_schema( + log, + " + CREATE DATABASE omicron; + CREATE SEQUENCE omicron.public.myseq START 1 INCREMENT 2; + ", + ) + .await; + + assert_ne!(schema1.sequences, schema2.sequences); + + logctx.cleanup_successful(); +} + +#[tokio::test] +async fn compare_table_differing_constraint() { + let config = load_test_config(); + let logctx = + LogContext::new("compare_table_differing_constraint", &config.pkg.log); + let log = &logctx.log; + + let schema1 = get_information_schema( + log, + " + CREATE DATABASE omicron; + CREATE TABLE omicron.public.animal ( + id UUID PRIMARY KEY, + name TEXT, + time_deleted TIMESTAMPTZ, + + CONSTRAINT dead_animals_have_names CHECK ( + (time_deleted IS NULL) OR + (name IS NOT NULL) + ) + ); + ", + ) + .await; + + let schema2 = get_information_schema( + log, + " + CREATE DATABASE omicron; + CREATE TABLE omicron.public.animal ( + id UUID PRIMARY KEY, + name TEXT, + time_deleted TIMESTAMPTZ, + + CONSTRAINT dead_animals_have_names CHECK ( + (time_deleted IS NULL) OR + (name IS NULL) + ) + ); + ", + ) + .await; + + assert_ne!(schema1.check_constraints, schema2.check_constraints); + logctx.cleanup_successful(); +} diff --git a/nexus/types/Cargo.toml b/nexus/types/Cargo.toml index f7ffafec52..c499714c31 100644 --- a/nexus/types/Cargo.toml +++ b/nexus/types/Cargo.toml @@ -25,4 +25,4 @@ api_identity.workspace = true dns-service-client.workspace = true omicron-common.workspace = true omicron-passwords.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/openapi/nexus.json b/openapi/nexus.json index 779b1f556c..9330b0ef47 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -9615,6 +9615,7 @@ }, "image_id": { "nullable": true, + "description": "ID of image from which disk was created, if any", "type": "string", "format": "uuid" }, @@ -9635,6 +9636,7 @@ }, "snapshot_id": { "nullable": true, + "description": "ID of snapshot from which disk was created, if any", "type": "string", "format": "uuid" }, 
diff --git a/openapi/oximeter.json b/openapi/oximeter.json index 6781b77892..ebc7957c2e 100644 --- a/openapi/oximeter.json +++ b/openapi/oximeter.json @@ -10,7 +10,76 @@ "version": "0.0.1" }, "paths": { + "/info": { + "get": { + "operationId": "collector_info", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CollectorInfo" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, "/producers": { + "get": { + "operationId": "producers_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ProducerEndpointResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + }, "post": { "operationId": "producers_post", "requestBody": { @@ -35,6 +104,33 @@ } } } + }, + "/producers/{producer_id}": { + "delete": { + "operationId": "producer_delete", + "parameters": [ + { + "in": "path", + "name": "producer_id", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } } }, "components": { @@ -51,6 +147,19 @@ } }, "schemas": { + "CollectorInfo": { + "type": "object", + "properties": { + "id": { + "description": "The collector's UUID.", + "type": "string", + "format": "uuid" + } + }, + "required": [ + "id" + ] + }, "Duration": { "type": "object", "properties": { @@ -113,6 +222,27 @@ "id", "interval" ] + }, + "ProducerEndpointResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/ProducerEndpoint" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] } } } diff --git a/oximeter/collector/Cargo.toml b/oximeter/collector/Cargo.toml index c8c4030dba..470d9db312 100644 --- a/oximeter/collector/Cargo.toml +++ b/oximeter/collector/Cargo.toml @@ -6,23 +6,30 @@ description = "The oximeter metric collection server" license = "MPL-2.0" [dependencies] +anyhow.workspace = true clap.workspace = true dropshot.workspace = true futures.workspace = true internal-dns.workspace = true nexus-client.workspace = true +nexus-types.workspace = true omicron-common.workspace = true oximeter.workspace = true +oximeter-client.workspace = true oximeter-db.workspace = true +rand.workspace = true reqwest = { workspace = true, features = [ "json" ] } +schemars.workspace = true serde.workspace = true slog.workspace = true +slog-async.workspace = true slog-dtrace.workspace = true 
+slog-term.workspace = true
 thiserror.workspace = true
 tokio.workspace = true
 toml.workspace = true
 uuid.workspace = true
-omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" }
+omicron-workspace-hack.workspace = true

 [dev-dependencies]
 expectorate.workspace = true
diff --git a/oximeter/collector/src/bin/oximeter.rs b/oximeter/collector/src/bin/oximeter.rs
index bf54cf33fa..8c4bf0e27c 100644
--- a/oximeter/collector/src/bin/oximeter.rs
+++ b/oximeter/collector/src/bin/oximeter.rs
@@ -3,12 +3,21 @@
 // file, You can obtain one at https://mozilla.org/MPL/2.0/.

 //! Main entry point to run an `oximeter` server in the control plane.
-// Copyright 2021 Oxide Computer Company
+
+// Copyright 2023 Oxide Computer Company

 use clap::Parser;
 use omicron_common::cmd::fatal;
 use omicron_common::cmd::CmdError;
-use oximeter_collector::{oximeter_api, Config, Oximeter, OximeterArguments};
+use oximeter_collector::oximeter_api;
+use oximeter_collector::standalone_nexus_api;
+use oximeter_collector::Config;
+use oximeter_collector::Oximeter;
+use oximeter_collector::OximeterArguments;
+use oximeter_collector::StandaloneNexus;
+use slog::Level;
+use std::net::Ipv6Addr;
+use std::net::SocketAddr;
 use std::net::SocketAddrV6;
 use std::path::PathBuf;
 use uuid::Uuid;
@@ -23,6 +32,16 @@ pub fn run_openapi() -> Result<(), String> {
         .map_err(|e| e.to_string())
 }

+pub fn run_standalone_openapi() -> Result<(), String> {
+    standalone_nexus_api()
+        .openapi("Oxide Nexus API", "0.0.1")
+        .description("API for interacting with Nexus")
+        .contact_url("https://oxide.computer")
+        .contact_email("api@oxide.computer")
+        .write(&mut std::io::stdout())
+        .map_err(|e| e.to_string())
+}
+
 /// Run an oximeter metric collection server in the Oxide Control Plane.
 #[derive(Parser)]
 #[clap(name = "oximeter", about = "See README.adoc for more information")]
@@ -36,12 +55,71 @@ enum Args {
         #[clap(name = "CONFIG_FILE", action)]
         config_file: PathBuf,

+        /// The UUID for this instance of the `oximeter` collector.
         #[clap(short, long, action)]
         id: Uuid,

+        /// The socket address at which `oximeter`'s HTTP server runs.
         #[clap(short, long, action)]
         address: SocketAddrV6,
     },
+
+    /// Run `oximeter` in standalone mode for development.
+    ///
+    /// In this mode, `oximeter` can be used to test the collection of metrics
+    /// from producers, without requiring all the normal machinery of the
+    /// control plane. The collector is run as usual, but additionally starts
+    /// an API server to stand in for Nexus. Registration of the producers and
+    /// collectors occurs through the normal code path, but uses this mock
+    /// Nexus instead of the real thing.
+    Standalone {
+        /// The ID for the collector.
+        ///
+        /// Default is to generate a new, random UUID.
+        #[arg(long, default_value_t = Uuid::new_v4())]
+        id: Uuid,
+
+        /// Address at which `oximeter` itself listens.
+        ///
+        /// This address can be used to register new producers, after the
+        /// program has already started.
+        #[arg(
+            long,
+            default_value_t = SocketAddrV6::new(Ipv6Addr::LOCALHOST, 12223, 0, 0)
+        )]
+        address: SocketAddrV6,
+
+        /// The address for the mock Nexus server used to register.
+        ///
+        /// This program starts a mock version of Nexus, which is used only to
+        /// register the producers and collectors. This allows them to operate
+        /// as they usually would, registering each other with Nexus so that an
+        /// assignment between them can be made.
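+        ///
+        /// As a concrete, hypothetical invocation (the addresses here are the
+        /// defaults above, assumptions rather than requirements), standalone
+        /// mode with ClickHouse insertion enabled might be run as:
+        ///
+        ///   oximeter standalone --nexus [::1]:12221 --clickhouse [::1]:8123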
+ #[arg( + long, + default_value_t = SocketAddrV6::new(Ipv6Addr::LOCALHOST, 12221, 0, 0) + )] + nexus: SocketAddrV6, + + /// The address for ClickHouse. + /// + /// If not provided, `oximeter` will not attempt to insert records into + /// the database at all. In this mode, the program will print the + /// collected samples, instead of inserting them into the database. + #[arg(long)] + clickhouse: Option, + + /// The log-level. + #[arg(long, default_value_t = Level::Info, value_parser = parse_log_level)] + log_level: Level, + }, + + /// Print the fake Nexus's standalone API. + StandaloneOpenapi, +} + +fn parse_log_level(s: &str) -> Result { + s.parse().map_err(|_| "Invalid log level".to_string()) } #[tokio::main] @@ -65,5 +143,26 @@ async fn do_run() -> Result<(), CmdError> { .await .map_err(|e| CmdError::Failure(e.to_string())) } + Args::Standalone { id, address, nexus, clickhouse, log_level } => { + // Start the standalone Nexus server, for registration of both the + // collector and producers. + let nexus_server = StandaloneNexus::new(nexus.into(), log_level) + .map_err(|e| CmdError::Failure(e.to_string()))?; + let args = OximeterArguments { id, address }; + Oximeter::new_standalone( + nexus_server.log(), + &args, + nexus_server.local_addr(), + clickhouse, + ) + .await + .unwrap() + .serve_forever() + .await + .map_err(|e| CmdError::Failure(e.to_string())) + } + Args::StandaloneOpenapi => { + run_standalone_openapi().map_err(CmdError::Failure) + } } } diff --git a/oximeter/collector/src/lib.rs b/oximeter/collector/src/lib.rs index bf75b567ea..6674d65ecd 100644 --- a/oximeter/collector/src/lib.rs +++ b/oximeter/collector/src/lib.rs @@ -4,35 +4,71 @@ //! Implementation of the `oximeter` metric collection server. -// Copyright 2021 Oxide Computer Company - -use dropshot::{ - endpoint, ApiDescription, ConfigDropshot, ConfigLogging, HttpError, - HttpResponseUpdatedNoContent, HttpServer, HttpServerStarter, - RequestContext, TypedBody, -}; -use internal_dns::resolver::{ResolveError, Resolver}; +// Copyright 2023 Oxide Computer Company + +use anyhow::anyhow; +use anyhow::Context; +use dropshot::endpoint; +use dropshot::ApiDescription; +use dropshot::ConfigDropshot; +use dropshot::ConfigLogging; +use dropshot::EmptyScanParams; +use dropshot::HttpError; +use dropshot::HttpResponseDeleted; +use dropshot::HttpResponseOk; +use dropshot::HttpResponseUpdatedNoContent; +use dropshot::HttpServer; +use dropshot::HttpServerStarter; +use dropshot::PaginationParams; +use dropshot::Query; +use dropshot::RequestContext; +use dropshot::ResultsPage; +use dropshot::TypedBody; +use dropshot::WhichPage; +use internal_dns::resolver::ResolveError; +use internal_dns::resolver::Resolver; use internal_dns::ServiceName; -use omicron_common::address::{CLICKHOUSE_PORT, NEXUS_INTERNAL_PORT}; +use omicron_common::address::CLICKHOUSE_PORT; +use omicron_common::address::NEXUS_INTERNAL_PORT; use omicron_common::api::internal::nexus::ProducerEndpoint; -use omicron_common::{backoff, FileKv}; -use oximeter::types::{ProducerResults, ProducerResultsItem}; -use oximeter_db::{Client, DbWrite}; -use serde::{Deserialize, Serialize}; -use slog::{debug, error, info, o, trace, warn, Drain, Logger}; -use std::collections::{btree_map::Entry, BTreeMap}; -use std::net::{SocketAddr, SocketAddrV6}; +use omicron_common::backoff; +use omicron_common::FileKv; +use oximeter::types::ProducerResults; +use oximeter::types::ProducerResultsItem; +use oximeter_db::Client; +use oximeter_db::DbWrite; +use serde::Deserialize; +use serde::Serialize; +use 
slog::debug;
+use slog::error;
+use slog::info;
+use slog::o;
+use slog::trace;
+use slog::warn;
+use slog::Drain;
+use slog::Logger;
+use std::collections::btree_map::Entry;
+use std::collections::BTreeMap;
+use std::net::SocketAddr;
+use std::net::SocketAddrV6;
+use std::ops::Bound;
 use std::path::Path;
 use std::sync::Arc;
 use std::time::Duration;
 use thiserror::Error;
-use tokio::{
-    sync::mpsc, sync::oneshot, sync::Mutex, task::JoinHandle, time::interval,
-};
+use tokio::sync::mpsc;
+use tokio::sync::oneshot;
+use tokio::sync::Mutex;
+use tokio::task::JoinHandle;
+use tokio::time::interval;
 use uuid::Uuid;

+mod standalone;
+pub use standalone::standalone_nexus_api;
+pub use standalone::Server as StandaloneNexus;
+
 /// Errors collecting metric data
-#[derive(Debug, Clone, Error)]
+#[derive(Debug, Error)]
 pub enum Error {
     #[error("Error running Oximeter collector server: {0}")]
     Server(String),
@@ -45,6 +81,48 @@ pub enum Error {

     #[error(transparent)]
     ResolveError(#[from] ResolveError),
+
+    #[error("No producer is registered with ID {0}")]
+    NoSuchProducer(Uuid),
+
+    #[error("Error running standalone")]
+    Standalone(#[from] anyhow::Error),
+}
+
+impl From<Error> for HttpError {
+    fn from(e: Error) -> Self {
+        match e {
+            Error::NoSuchProducer(id) => HttpError::for_not_found(
+                None,
+                format!("No such producer: {id}"),
+            ),
+            _ => HttpError::for_internal_error(e.to_string()),
+        }
+    }
+}
+
+/// A simple representation of a producer, used mostly for standalone mode.
+///
+/// These are usually specified as a structured string, formatted like:
+/// `"<id>@<address>"`.
+#[derive(Copy, Clone, Debug)]
+pub struct ProducerInfo {
+    /// The ID of the producer.
+    pub id: Uuid,
+    /// The address on which the producer listens.
+    pub address: SocketAddr,
+}
+
+impl std::str::FromStr for ProducerInfo {
+    type Err = anyhow::Error;
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        let (id, addr) = s
+            .split_once('@')
+            .context("Producer info should be written as <id>@<address>
")?; + let id = id.parse().context("Invalid UUID")?; + let address = addr.parse().context("Invalid address")?; + Ok(Self { id, address }) + } } type CollectionToken = oneshot::Sender<()>; @@ -61,7 +139,6 @@ enum CollectionMessage { // from its producer. Update(ProducerEndpoint), // Request that the task exit - #[allow(dead_code)] Shutdown, } @@ -72,7 +149,7 @@ async fn perform_collection( outbox: &mpsc::Sender<(Option, ProducerResults)>, token: Option, ) { - info!(log, "collecting from producer"); + debug!(log, "collecting from producer"); let res = client .get(format!( "http://{}{}", @@ -187,6 +264,44 @@ struct CollectionTask { pub task: JoinHandle<()>, } +// A task run by `oximeter` in standalone mode, which simply prints results as +// they're received. +async fn results_printer( + log: Logger, + mut rx: mpsc::Receiver<(Option, ProducerResults)>, +) { + loop { + match rx.recv().await { + Some((_, results)) => { + for res in results.into_iter() { + match res { + ProducerResultsItem::Ok(samples) => { + for sample in samples.into_iter() { + info!( + log, + ""; + "sample" => ?sample, + ); + } + } + ProducerResultsItem::Err(e) => { + error!( + log, + "received error from a producer"; + "err" => ?e, + ); + } + } + } + } + None => { + debug!(log, "result queue closed, exiting"); + return; + } + } + } +} + // Aggregation point for all results, from all collection tasks. async fn results_sink( log: Logger, @@ -286,6 +401,20 @@ pub struct DbConfig { pub batch_interval: u64, } +impl DbConfig { + pub const DEFAULT_BATCH_SIZE: usize = 1000; + pub const DEFAULT_BATCH_INTERVAL: u64 = 5; + + // Construct config with an address, using the defaults for other fields + fn with_address(address: SocketAddr) -> Self { + Self { + address: Some(address), + batch_size: Self::DEFAULT_BATCH_SIZE, + batch_interval: Self::DEFAULT_BATCH_INTERVAL, + } + } +} + /// The internal agent the oximeter server uses to collect metrics from producers. #[derive(Debug)] pub struct OximeterAgent { @@ -295,7 +424,8 @@ pub struct OximeterAgent { // Handle to the TX-side of a channel for collecting results from the collection tasks result_sender: mpsc::Sender<(Option, ProducerResults)>, // The actual tokio tasks running the collection on a timer. - collection_tasks: Arc>>, + collection_tasks: + Arc>>, } impl OximeterAgent { @@ -307,7 +437,10 @@ impl OximeterAgent { log: &Logger, ) -> Result { let (result_sender, result_receiver) = mpsc::channel(8); - let log = log.new(o!("component" => "oximeter-agent", "collector_id" => id.to_string())); + let log = log.new(o!( + "component" => "oximeter-agent", + "collector_id" => id.to_string(), + )); let insertion_log = log.new(o!("component" => "results-sink")); // Construct the ClickHouse client first, propagate an error if we can't reach the @@ -347,6 +480,61 @@ impl OximeterAgent { }) } + /// Construct a new standalone `oximeter` collector. + pub async fn new_standalone( + id: Uuid, + db_config: Option, + log: &Logger, + ) -> Result { + let (result_sender, result_receiver) = mpsc::channel(8); + let log = log.new(o!( + "component" => "oximeter-standalone", + "collector_id" => id.to_string(), + )); + + // If we have configuration for ClickHouse, we'll spawn the results + // sink task as usual. If not, we'll spawn a dummy task that simply + // prints the results as they're received. 
+ let insertion_log = log.new(o!("component" => "results-sink")); + if let Some(db_config) = db_config { + let Some(address) = db_config.address else { + return Err(Error::Standalone(anyhow!( + "Must provide explicit IP address in standalone mode" + ))); + }; + let client = Client::new(address, &log); + let replicated = client.is_oximeter_cluster().await?; + if !replicated { + client.init_single_node_db().await?; + } else { + client.init_replicated_db().await?; + } + + // Spawn the task for aggregating and inserting all metrics + tokio::spawn(async move { + results_sink( + insertion_log, + client, + db_config.batch_size, + Duration::from_secs(db_config.batch_interval), + result_receiver, + ) + .await + }); + } else { + tokio::spawn(results_printer(insertion_log, result_receiver)); + } + + // Construct the ClickHouse client first, propagate an error if we can't reach the + // database. + Ok(Self { + id, + log, + result_sender, + collection_tasks: Arc::new(Mutex::new(BTreeMap::new())), + }) + } + /// Register a new producer with this oximeter instance. pub async fn register_producer( &self, @@ -355,30 +543,36 @@ impl OximeterAgent { let id = info.id; match self.collection_tasks.lock().await.entry(id) { Entry::Vacant(value) => { - info!(self.log, "registered new metric producer"; - "producer_id" => id.to_string(), - "address" => info.address, + debug!( + self.log, + "registered new metric producer"; + "producer_id" => id.to_string(), + "address" => info.address, ); // Build channel to control the task and receive results. let (tx, rx) = mpsc::channel(4); let q = self.result_sender.clone(); let log = self.log.new(o!("component" => "collection-task", "producer_id" => id.to_string())); + let info_clone = info.clone(); let task = tokio::spawn(async move { - collection_task(log, info, rx, q).await; + collection_task(log, info_clone, rx, q).await; }); - value.insert(CollectionTask { inbox: tx, task }); + value.insert((info, CollectionTask { inbox: tx, task })); } - Entry::Occupied(value) => { - info!( + Entry::Occupied(mut value) => { + debug!( self.log, - "received request to register existing metric producer, updating collection information"; + "received request to register existing metric \ + producer, updating collection information"; "producer_id" => id.to_string(), "interval" => ?info.interval, "address" => info.address, ); + value.get_mut().0 = info.clone(); value .get() + .1 .inbox .send(CollectionMessage::Update(info)) .await @@ -395,10 +589,10 @@ impl OximeterAgent { pub async fn force_collection(&self) { let mut collection_oneshots = vec![]; let collection_tasks = self.collection_tasks.lock().await; - for task in collection_tasks.iter() { + for (_id, (_endpoint, task)) in collection_tasks.iter() { let (tx, rx) = oneshot::channel(); // Scrape from each producer, into oximeter... - task.1.inbox.send(CollectionMessage::Collect(tx)).await.unwrap(); + task.inbox.send(CollectionMessage::Collect(tx)).await.unwrap(); // ... and keep track of the token that indicates once the metric // has made it into Clickhouse. collection_oneshots.push(rx); @@ -412,6 +606,55 @@ impl OximeterAgent { // successfully, or an error occurred in the collection pathway. futures::future::join_all(collection_oneshots).await; } + + /// List existing producers. 
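+    ///
+    /// `start_id`, when provided, is an exclusive lower bound on the returned
+    /// producer IDs, mirroring the page-token semantics of the HTTP endpoint;
+    /// `limit` caps the number of entries in the page.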
+ pub async fn list_producers( + &self, + start_id: Option, + limit: usize, + ) -> Vec { + let start = if let Some(id) = start_id { + Bound::Excluded(id) + } else { + Bound::Unbounded + }; + self.collection_tasks + .lock() + .await + .range((start, Bound::Unbounded)) + .take(limit) + .map(|(_id, (info, _t))| info.clone()) + .collect() + } + + /// Delete a producer by ID, stopping its collection task. + pub async fn delete_producer(&self, id: Uuid) -> Result<(), Error> { + let (_info, task) = self + .collection_tasks + .lock() + .await + .remove(&id) + .ok_or_else(|| Error::NoSuchProducer(id))?; + debug!( + self.log, + "removed collection task from set"; + "producer_id" => %id, + ); + match task.inbox.send(CollectionMessage::Shutdown).await { + Ok(_) => debug!( + self.log, + "shut down collection task"; + "producer_id" => %id, + ), + Err(e) => error!( + self.log, + "failed to shut down collection task"; + "producer_id" => %id, + "error" => ?e, + ), + } + Ok(()) + } } /// Configuration used to initialize an oximeter server @@ -440,6 +683,7 @@ impl Config { } } +/// Arguments for running the `oximeter` collector. pub struct OximeterArguments { pub id: Uuid, pub address: SocketAddrV6, @@ -447,7 +691,7 @@ pub struct OximeterArguments { /// A server used to collect metrics from components in the control plane. pub struct Oximeter { - _agent: Arc, + agent: Arc, server: HttpServer>, } @@ -572,7 +816,67 @@ impl Oximeter { .expect("Expected an infinite retry loop contacting Nexus"); info!(log, "oximeter registered with nexus"; "id" => ?agent.id); - Ok(Self { _agent: agent, server }) + Ok(Self { agent, server }) + } + + /// Create a new `oximeter` collector running in standalone mode. + pub async fn new_standalone( + log: &Logger, + args: &OximeterArguments, + nexus: SocketAddr, + clickhouse: Option, + ) -> Result { + let db_config = clickhouse.map(DbConfig::with_address); + let agent = Arc::new( + OximeterAgent::new_standalone(args.id, db_config, &log).await?, + ); + + let dropshot_log = log.new(o!("component" => "dropshot")); + let server = HttpServerStarter::new( + &ConfigDropshot { + bind_address: SocketAddr::V6(args.address), + ..Default::default() + }, + oximeter_api(), + Arc::clone(&agent), + &dropshot_log, + ) + .map_err(|e| Error::Server(e.to_string()))? + .start(); + info!(log, "started oximeter standalone server"); + + // Notify the standalone nexus. + let client = reqwest::Client::new(); + let notify_nexus = || async { + debug!(log, "contacting nexus"); + client + .post(format!("http://{}/metrics/collectors", nexus)) + .json(&nexus_client::types::OximeterInfo { + address: server.local_addr().to_string(), + collector_id: agent.id, + }) + .send() + .await + .map_err(|e| backoff::BackoffError::transient(e.to_string()))? + .error_for_status() + .map_err(|e| backoff::BackoffError::transient(e.to_string())) + }; + let log_notification_failure = |error, delay| { + warn!( + log, + "failed to contact nexus, will retry in {:?}", delay; + "error" => ?error + ); + }; + backoff::retry_notify( + backoff::retry_policy_internal_service(), + notify_nexus, + log_notification_failure, + ) + .await + .expect("Expected an infinite retry loop contacting Nexus"); + + Ok(Self { agent, server }) } /// Serve requests forever, consuming the server. @@ -592,6 +896,20 @@ impl Oximeter { pub async fn force_collect(&self) { self.server.app_private().force_collection().await } + + /// List producers. 
+ pub async fn list_producers( + &self, + start: Option, + limit: usize, + ) -> Vec { + self.agent.list_producers(start, limit).await + } + + /// Delete a producer by ID, stopping its collection task. + pub async fn delete_producer(&self, id: Uuid) -> Result<(), Error> { + self.agent.delete_producer(id).await + } } // Build the HTTP API internal to the control plane @@ -599,6 +917,12 @@ pub fn oximeter_api() -> ApiDescription> { let mut api = ApiDescription::new(); api.register(producers_post) .expect("Could not register producers_post API handler"); + api.register(producers_list) + .expect("Could not register producers_list API handler"); + api.register(producer_delete) + .expect("Could not register producers_delete API handler"); + api.register(collector_info) + .expect("Could not register collector_info API handler"); api } @@ -616,6 +940,79 @@ async fn producers_post( agent .register_producer(producer_info) .await - .map_err(|e| HttpError::for_internal_error(e.to_string()))?; - Ok(HttpResponseUpdatedNoContent()) + .map_err(HttpError::from) + .map(|_| HttpResponseUpdatedNoContent()) +} + +// Parameters for paginating the list of producers. +#[derive(Clone, Copy, Debug, Deserialize, schemars::JsonSchema, Serialize)] +struct ProducerPage { + id: Uuid, +} + +// List all producers +#[endpoint { + method = GET, + path = "/producers", +}] +async fn producers_list( + request_context: RequestContext>, + query: Query>, +) -> Result>, HttpError> { + let agent = request_context.context(); + let pagination = query.into_inner(); + let limit = request_context.page_limit(&pagination)?.get() as usize; + let start = match &pagination.page { + WhichPage::First(..) => None, + WhichPage::Next(ProducerPage { id }) => Some(*id), + }; + let producers = agent.list_producers(start, limit).await; + ResultsPage::new( + producers, + &EmptyScanParams {}, + |info: &ProducerEndpoint, _| ProducerPage { id: info.id }, + ) + .map(HttpResponseOk) +} + +#[derive(Clone, Copy, Debug, Deserialize, schemars::JsonSchema, Serialize)] +struct ProducerIdPathParams { + producer_id: Uuid, +} + +// Delete a producer by ID. +#[endpoint { + method = DELETE, + path = "/producers/{producer_id}", +}] +async fn producer_delete( + request_context: RequestContext>, + path: dropshot::Path, +) -> Result { + let agent = request_context.context(); + let producer_id = path.into_inner().producer_id; + agent + .delete_producer(producer_id) + .await + .map_err(HttpError::from) + .map(|_| HttpResponseDeleted()) +} + +#[derive(Clone, Copy, Debug, Deserialize, schemars::JsonSchema, Serialize)] +pub struct CollectorInfo { + /// The collector's UUID. + pub id: Uuid, +} + +// Return identifying information about this collector +#[endpoint { + method = GET, + path = "/info", +}] +async fn collector_info( + request_context: RequestContext>, +) -> Result, HttpError> { + let agent = request_context.context(); + let info = CollectorInfo { id: agent.id }; + Ok(HttpResponseOk(info)) } diff --git a/oximeter/collector/src/standalone.rs b/oximeter/collector/src/standalone.rs new file mode 100644 index 0000000000..826a5f4663 --- /dev/null +++ b/oximeter/collector/src/standalone.rs @@ -0,0 +1,263 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Implementation of a standalone fake Nexus, simply for registering producers +//! and collectors with one another. 
+ +// Copyright 2023 Oxide Computer Company + +use crate::Error; +use dropshot::endpoint; +use dropshot::ApiDescription; +use dropshot::ConfigDropshot; +use dropshot::HttpError; +use dropshot::HttpResponseUpdatedNoContent; +use dropshot::HttpServer; +use dropshot::HttpServerStarter; +use dropshot::RequestContext; +use dropshot::TypedBody; +use nexus_types::internal_api::params::OximeterInfo; +use omicron_common::api::internal::nexus::ProducerEndpoint; +use omicron_common::FileKv; +use oximeter_client::Client; +use rand::seq::IteratorRandom; +use slog::debug; +use slog::error; +use slog::info; +use slog::o; +use slog::Drain; +use slog::Level; +use slog::Logger; +use std::collections::HashMap; +use std::net::SocketAddr; +use std::sync::Arc; +use tokio::sync::Mutex; +use uuid::Uuid; + +// An assignment of a producer to an oximeter collector. +#[derive(Debug)] +struct ProducerAssignment { + producer: ProducerEndpoint, + collector_id: Uuid, +} + +#[derive(Debug)] +struct Inner { + // Map of producers by ID to their information and assigned oximeter + // collector. + producers: HashMap, + // Map of available oximeter collectors. + collectors: HashMap, +} + +impl Inner { + fn random_collector(&self) -> Option<(Uuid, OximeterInfo)> { + self.collectors + .iter() + .choose(&mut rand::thread_rng()) + .map(|(id, info)| (*id, *info)) + } +} + +// A stripped-down Nexus server, with only the APIs for registering metric +// producers and collectors. +#[derive(Debug)] +pub struct StandaloneNexus { + pub log: Logger, + inner: Mutex, +} + +impl StandaloneNexus { + fn new(log: Logger) -> Self { + Self { + log, + inner: Mutex::new(Inner { + producers: HashMap::new(), + collectors: HashMap::new(), + }), + } + } + + async fn register_producer( + &self, + info: &ProducerEndpoint, + ) -> Result<(), HttpError> { + let mut inner = self.inner.lock().await; + let assignment = match inner.producers.get_mut(&info.id) { + None => { + // There is no record for this producer. + // + // Select a random collector, and assign it to the producer. + // We'll return the assignment from this match block. + let Some((collector_id, collector_info)) = + inner.random_collector() + else { + return Err(HttpError::for_unavail( + None, + String::from("No collectors available"), + )); + }; + let client = Client::new( + format!("http://{}", collector_info.address).as_str(), + self.log.clone(), + ); + client.producers_post(&info.into()).await.map_err(|e| { + HttpError::for_internal_error(e.to_string()) + })?; + let assignment = + ProducerAssignment { producer: info.clone(), collector_id }; + assignment + } + Some(existing_assignment) => { + // We have a record, first check if it matches the assignment we + // have. + if &existing_assignment.producer == info { + return Ok(()); + } + + // This appears to be a re-registration, e.g., the producer + // changed its IP address. Re-register it with the collector to + // which it's already assigned. 
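+                // (The unwrap that follows leans on an invariant of this
+                // mock: collectors are registered but never removed, so an
+                // existing assignment always points at a known collector.)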
+ let collector_id = existing_assignment.collector_id; + let collector_info = + inner.collectors.get(&collector_id).unwrap(); + let client = Client::new( + format!("http://{}", collector_info.address).as_str(), + self.log.clone(), + ); + client.producers_post(&info.into()).await.map_err(|e| { + HttpError::for_internal_error(e.to_string()) + })?; + ProducerAssignment { producer: info.clone(), collector_id } + } + }; + inner.producers.insert(info.id, assignment); + Ok(()) + } + + async fn register_collector( + &self, + info: OximeterInfo, + ) -> Result<(), HttpError> { + // If this is being registered again, send all its assignments again. + let mut inner = self.inner.lock().await; + if inner.collectors.insert(info.collector_id, info).is_some() { + let client = Client::new( + format!("http://{}", info.address).as_str(), + self.log.clone(), + ); + for producer_info in + inner.producers.values().filter_map(|assignment| { + if assignment.collector_id == info.collector_id { + Some(&assignment.producer) + } else { + None + } + }) + { + client.producers_post(&producer_info.into()).await.map_err( + |e| HttpError::for_internal_error(e.to_string()), + )?; + } + } + Ok(()) + } +} + +// Build the HTTP API of the fake Nexus for registration. +pub fn standalone_nexus_api() -> ApiDescription> { + let mut api = ApiDescription::new(); + api.register(cpapi_producers_post) + .expect("Could not register cpapi_producers_post API handler"); + api.register(cpapi_collectors_post) + .expect("Could not register cpapi_collectors_post API handler"); + api +} + +/// Accept a registration from a new metric producer +#[endpoint { + method = POST, + path = "/metrics/producers", + }] +async fn cpapi_producers_post( + request_context: RequestContext>, + producer_info: TypedBody, +) -> Result { + let context = request_context.context(); + let producer_info = producer_info.into_inner(); + context + .register_producer(&producer_info) + .await + .map(|_| HttpResponseUpdatedNoContent()) + .map_err(|e| HttpError::for_internal_error(e.to_string())) +} + +/// Accept a notification of a new oximeter collection server. +#[endpoint { + method = POST, + path = "/metrics/collectors", + }] +async fn cpapi_collectors_post( + request_context: RequestContext>, + oximeter_info: TypedBody, +) -> Result { + let context = request_context.context(); + let oximeter_info = oximeter_info.into_inner(); + context + .register_collector(oximeter_info) + .await + .map(|_| HttpResponseUpdatedNoContent()) + .map_err(|e| HttpError::for_internal_error(e.to_string())) +} + +/// A standalone Nexus server, with APIs only for registering metric collectors +/// and producers. +pub struct Server { + server: HttpServer>, +} + +impl Server { + /// Create a new server listening on the provided address. 
+ pub fn new(address: SocketAddr, log_level: Level) -> Result { + let decorator = slog_term::TermDecorator::new().build(); + let drain = slog_term::FullFormat::new(decorator).build().fuse(); + let drain = slog_async::Async::new(drain).build().fuse(); + let drain = slog::LevelFilter::new(drain, log_level).fuse(); + let (drain, registration) = slog_dtrace::with_drain(drain); + let log = slog::Logger::root(drain.fuse(), o!(FileKv)); + if let slog_dtrace::ProbeRegistration::Failed(e) = registration { + let msg = format!("failed to register DTrace probes: {}", e); + error!(log, "{}", msg); + return Err(Error::Server(msg)); + } else { + debug!(log, "registered DTrace probes"); + } + + let nexus = Arc::new(StandaloneNexus::new( + log.new(slog::o!("component" => "nexus-standalone")), + )); + let server = HttpServerStarter::new( + &ConfigDropshot { bind_address: address, ..Default::default() }, + standalone_nexus_api(), + Arc::clone(&nexus), + &log, + ) + .map_err(|e| Error::Server(e.to_string()))? + .start(); + info!( + log, + "created standalone nexus server for metric collections"; + "address" => %address, + ); + Ok(Self { server }) + } + + pub fn log(&self) -> &Logger { + &self.server.app_private().log + } + + pub fn local_addr(&self) -> SocketAddr { + self.server.local_addr() + } +} diff --git a/oximeter/collector/tests/output/cmd-oximeter-noargs-stderr b/oximeter/collector/tests/output/cmd-oximeter-noargs-stderr index 7b736fe8a1..3f0fd4726d 100644 --- a/oximeter/collector/tests/output/cmd-oximeter-noargs-stderr +++ b/oximeter/collector/tests/output/cmd-oximeter-noargs-stderr @@ -3,9 +3,11 @@ See README.adoc for more information Usage: oximeter Commands: - openapi Print the external OpenAPI Spec document and exit - run Start an Oximeter server - help Print this message or the help of the given subcommand(s) + openapi Print the external OpenAPI Spec document and exit + run Start an Oximeter server + standalone Run `oximeter` in standalone mode for development + standalone-openapi Print the fake Nexus's standalone API + help Print this message or the help of the given subcommand(s) Options: -h, --help Print help diff --git a/oximeter/db/Cargo.toml b/oximeter/db/Cargo.toml index 77bce09db9..ad6d584b1b 100644 --- a/oximeter/db/Cargo.toml +++ b/oximeter/db/Cargo.toml @@ -25,7 +25,7 @@ thiserror.workspace = true tokio = { workspace = true, features = [ "rt-multi-thread", "macros" ] } usdt.workspace = true uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] itertools.workspace = true diff --git a/oximeter/instruments/Cargo.toml b/oximeter/instruments/Cargo.toml index 4adff0463a..3653ab8011 100644 --- a/oximeter/instruments/Cargo.toml +++ b/oximeter/instruments/Cargo.toml @@ -12,7 +12,7 @@ oximeter.workspace = true tokio.workspace = true http = { workspace = true, optional = true } uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } +omicron-workspace-hack.workspace = true [features] default = ["http-instruments"] diff --git a/oximeter/oximeter-macro-impl/Cargo.toml b/oximeter/oximeter-macro-impl/Cargo.toml index ff116e1c9d..df9ed547ed 100644 --- a/oximeter/oximeter-macro-impl/Cargo.toml +++ b/oximeter/oximeter-macro-impl/Cargo.toml @@ -12,4 +12,4 @@ proc-macro = true proc-macro2.workspace = true quote.workspace = true syn = { workspace = true, features = [ "full", "extra-traits" ] } -omicron-workspace-hack = { version = "0.1", path = 
"../../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/oximeter/oximeter/Cargo.toml b/oximeter/oximeter/Cargo.toml index b2aa15f85e..7d01b8f8be 100644 --- a/oximeter/oximeter/Cargo.toml +++ b/oximeter/oximeter/Cargo.toml @@ -15,7 +15,7 @@ schemars = { workspace = true, features = [ "uuid1", "bytes", "chrono" ] } serde.workspace = true thiserror.workspace = true uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] approx.workspace = true diff --git a/oximeter/producer/Cargo.toml b/oximeter/producer/Cargo.toml index f171f57e8a..ef2f16c8ad 100644 --- a/oximeter/producer/Cargo.toml +++ b/oximeter/producer/Cargo.toml @@ -19,4 +19,8 @@ slog-dtrace.workspace = true tokio.workspace = true thiserror.workspace = true uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } +omicron-workspace-hack.workspace = true + +[dev-dependencies] +anyhow.workspace = true +clap.workspace = true diff --git a/oximeter/producer/examples/producer.rs b/oximeter/producer/examples/producer.rs index 9ff30032ca..dd9722c80a 100644 --- a/oximeter/producer/examples/producer.rs +++ b/oximeter/producer/examples/producer.rs @@ -6,14 +6,17 @@ // Copyright 2023 Oxide Computer Company +use anyhow::Context; use chrono::DateTime; use chrono::Utc; +use clap::Parser; use dropshot::ConfigDropshot; use dropshot::ConfigLogging; use dropshot::ConfigLoggingLevel; use dropshot::HandlerTaskMode; use omicron_common::api::internal::nexus::ProducerEndpoint; use oximeter::types::Cumulative; +use oximeter::types::ProducerRegistry; use oximeter::types::Sample; use oximeter::Metric; use oximeter::MetricsError; @@ -22,9 +25,22 @@ use oximeter::Target; use oximeter_producer::Config; use oximeter_producer::LogConfig; use oximeter_producer::Server; +use std::net::SocketAddr; use std::time::Duration; use uuid::Uuid; +/// Run an example oximeter metric producer. +#[derive(Parser)] +struct Args { + /// The address to use for the producer server. + #[arg(long, default_value = "[::1]:0")] + address: SocketAddr, + + /// The address of nexus at which to register. + #[arg(long, default_value = "[::1]:12221")] + nexus: SocketAddr, +} + /// Example target describing a virtual machine. 
#[derive(Debug, Clone, Target)] pub struct VirtualMachine { @@ -93,30 +109,29 @@ impl Producer for CpuBusyProducer { } #[tokio::main] -async fn main() { - let address = "[::1]:0".parse().unwrap(); +async fn main() -> anyhow::Result<()> { + let args = Args::parse(); let dropshot = ConfigDropshot { - bind_address: address, + bind_address: args.address, request_body_max_bytes: 2048, default_handler_task_mode: HandlerTaskMode::Detached, }; let log = LogConfig::Config(ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Debug, }); + let registry = ProducerRegistry::new(); + let producer = CpuBusyProducer::new(4); + registry.register_producer(producer).unwrap(); let server_info = ProducerEndpoint { - id: Uuid::new_v4(), - address, + id: registry.producer_id(), + address: args.address, base_route: "/collect".to_string(), interval: Duration::from_secs(10), }; - let config = Config { - server_info, - registration_address: "[::1]:12221".parse().unwrap(), - dropshot, - log, - }; - let server = Server::start(&config).await.unwrap(); - let producer = CpuBusyProducer::new(4); - server.registry().register_producer(producer).unwrap(); - server.serve_forever().await.unwrap(); + let config = + Config { server_info, registration_address: args.nexus, dropshot, log }; + let server = Server::with_registry(registry, &config) + .await + .context("failed to create producer")?; + server.serve_forever().await.context("server failed") } diff --git a/oximeter/producer/src/lib.rs b/oximeter/producer/src/lib.rs index 01910af8e8..2354f9c217 100644 --- a/oximeter/producer/src/lib.rs +++ b/oximeter/producer/src/lib.rs @@ -40,6 +40,9 @@ pub enum Error { #[error("Error registering as metric producer: {0}")] RegistrationError(String), + + #[error("Producer registry and config UUIDs do not match")] + UuidMismatch, } /// Either configuration for building a logger, or an actual logger already @@ -82,14 +85,59 @@ impl Server { /// Start a new metric server, registering it with the chosen endpoint, and listening for /// requests on the associated address and route. pub async fn start(config: &Config) -> Result { - // Clone mutably, as we may update the address after the server starts, see below. - let mut config = config.clone(); + Self::with_registry( + ProducerRegistry::with_id(config.server_info.id), + &config, + ) + .await + } + + /// Create a new metric producer server, with an existing registry. + pub async fn with_registry( + registry: ProducerRegistry, + config: &Config, + ) -> Result { + Self::new_impl( + registry, + config.server_info.clone(), + &config.registration_address, + &config.dropshot, + &config.log, + ) + .await + } + + /// Serve requests for metrics. + pub async fn serve_forever(self) -> Result<(), Error> { + self.server.await.map_err(Error::Server) + } + + /// Close the server + pub async fn close(self) -> Result<(), Error> { + self.server.close().await.map_err(Error::Server) + } + + /// Return the [`ProducerRegistry`] managed by this server. + /// + /// The registry is thread-safe and clonable, so the returned reference can be used throughout + /// an application to register types implementing the [`Producer`](oximeter::traits::Producer) + /// trait. The samples generated by the registered producers will be included in response to a + /// request on the collection endpoint. 
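+    ///
+    /// A sketch of typical use, where `MyProducer` stands in for any type
+    /// implementing the `Producer` trait:
+    ///
+    /// ```ignore
+    /// server.registry().register_producer(MyProducer::default()).unwrap();
+    /// ```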
+ pub fn registry(&self) -> &ProducerRegistry { + &self.registry + } + + /// Return the server's local listening address + pub fn address(&self) -> std::net::SocketAddr { + self.server.local_addr() + } + fn build_logger(log: &LogConfig) -> Result<Logger, Error> { // Build a logger, either using the configuration or actual logger // provided. First build the base logger from the configuration or a // clone of the provided logger, and then add the DTrace and Dropshot // loggers on top of it. - let base_logger = match config.log { + let base_logger = match log { LogConfig::Config(conf) => conf .to_logger("metric-server") .map_err(|msg| Error::Server(msg.to_string()))?, @@ -104,74 +152,64 @@ impl Server { } else { debug!(log, "registered DTrace probes"); } - let dropshot_log = log.new(o!("component" => "dropshot")); + Ok(log) + } - // Build the producer registry and server that uses it as its context. - let registry = ProducerRegistry::with_id(config.server_info.id); - let server = HttpServerStarter::new( - &config.dropshot, + fn build_dropshot_server( + log: &Logger, + registry: &ProducerRegistry, + dropshot: &ConfigDropshot, + ) -> Result<HttpServer<ProducerRegistry>, Error> { + let dropshot_log = log.new(o!("component" => "dropshot")); + HttpServerStarter::new( + dropshot, metric_server_api(), registry.clone(), &dropshot_log, ) - .map_err(|e| Error::Server(e.to_string()))? - .start(); - - // Client code may decide to assign a specific address and/or port, or to listen on any - // available address and port, assigned by the OS. For example, `[::1]:0` would assign any - // port on localhost. If needed, update the address in the `ProducerEndpoint` with the - // actual address the server has bound. - // - // TODO-robustness: Is there a better way to do this? We'd like to support users picking an - // exact address or using whatever's available. The latter is useful during tests or other - // situations in which we don't know which ports are available. - if config.server_info.address != server.local_addr() { - assert_eq!(config.server_info.address.port(), 0); + .map_err(|e| Error::Server(e.to_string())) + .map(HttpServerStarter::start) + } + + // Create a new server registering with Nexus. + async fn new_impl( + registry: ProducerRegistry, + mut server_info: ProducerEndpoint, + registration_address: &SocketAddr, + dropshot: &ConfigDropshot, + log: &LogConfig, + ) -> Result<Self, Error> { + if registry.producer_id() != server_info.id { + return Err(Error::UuidMismatch); + } + let log = Self::build_logger(log)?; + let server = Self::build_dropshot_server(&log, &registry, dropshot)?; + + // Update the producer endpoint address with the actual server's + // address, to handle cases where the client listens on any available + // address.
+ if server_info.address != server.local_addr() { + assert_eq!(server_info.address.port(), 0); debug!( log, "Requested any available port, Dropshot server has been bound to {}", server.local_addr(), ); - config.server_info.address = server.local_addr(); + server_info.address = server.local_addr(); } debug!(log, "registering metric server as a producer"); - register(config.registration_address, &log, &config.server_info) - .await?; + register(*registration_address, &log, &server_info).await?; info!( log, - "starting oximeter metric server"; - "route" => config.server_info.collection_route(), + "starting oximeter metric producer server"; + "route" => server_info.collection_route(), "producer_id" => ?registry.producer_id(), - "address" => config.server_info.address, + "address" => server.local_addr(), + "interval" => ?server_info.interval, ); Ok(Self { registry, server }) } - - /// Serve requests for metrics. - pub async fn serve_forever(self) -> Result<(), Error> { - self.server.await.map_err(Error::Server) - } - - /// Close the server - pub async fn close(self) -> Result<(), Error> { - self.server.close().await.map_err(Error::Server) - } - - /// Return the [`ProducerRegistry`] managed by this server. - /// - /// The registry is thread-safe and clonable, so the returned reference can be used throughout - /// an application to register types implementing the [`Producer`](oximeter::traits::Producer) - /// trait. The samples generated by the registered producers will be included in response to a - /// request on the collection endpoint. - pub fn registry(&self) -> &ProducerRegistry { - &self.registry - } - - /// Return the server's local listening address - pub fn address(&self) -> std::net::SocketAddr { - self.server.local_addr() - } } // Register API endpoints of the `Server`. diff --git a/package-manifest.toml b/package-manifest.toml index c73af5ccf7..a7f8683eee 100644 --- a/package-manifest.toml +++ b/package-manifest.toml @@ -92,7 +92,8 @@ source.rust.binary_names = ["nexus", "schema-updater"] source.rust.release = true source.paths = [ { from = "/opt/ooce/pgsql-13/lib/amd64", to = "/opt/ooce/pgsql-13/lib/amd64" }, - { from = "smf/nexus", to = "/var/svc/manifest/site/nexus" }, + { from = "smf/nexus/manifest.xml", to = "/var/svc/manifest/site/nexus/manifest.xml" }, + { from = "smf/nexus/{{rack-topology}}", to = "/var/svc/manifest/site/nexus" }, { from = "out/console-assets", to = "/var/nexus/static" }, { from = "schema/crdb", to = "/var/nexus/schema/crdb" }, ] @@ -380,10 +381,10 @@ only_for_targets.image = "standard" # 3. 
Use source.type = "manual" instead of "prebuilt" source.type = "prebuilt" source.repo = "crucible" -source.commit = "aeb69dda26c7e1a8b6eada425670cd4b83f91c07" +source.commit = "20273bcca1fd5834ebc3e67dfa7020f0e99ad681" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/crucible/image//crucible.sha256.txt -source.sha256 = "3845327bde9df585ee8771c85eefc3e63a48981f14298d5fca62f4f6fe25c917" +source.sha256 = "0671570dfed8bff8e64c42a41269d961426bdd07e72b9ca8c2e3f28e7ead3c1c" output.type = "zone" [package.crucible-pantry] @@ -391,10 +392,10 @@ service_name = "crucible_pantry" only_for_targets.image = "standard" source.type = "prebuilt" source.repo = "crucible" -source.commit = "aeb69dda26c7e1a8b6eada425670cd4b83f91c07" +source.commit = "20273bcca1fd5834ebc3e67dfa7020f0e99ad681" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/crucible/image//crucible-pantry.sha256.txt -source.sha256 = "a3f2fc92d9ae184a66c402dfe33b1d1c128f356d6be70671de421be600d4064a" +source.sha256 = "c35cc24945d047f8d77e438ee606e6a83be64f0f97356fdc3308be716dcf3718" output.type = "zone" # Refer to @@ -405,10 +406,10 @@ service_name = "propolis-server" only_for_targets.image = "standard" source.type = "prebuilt" source.repo = "propolis" -source.commit = "de6369aa45a255f896da0a3ddd2b7152c036a4e9" +source.commit = "42c878b71a58d430dfc306126af5d40ca816d70f" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/propolis/image//propolis-server.sha256.txt -source.sha256 = "182597a153793096826992f499a94be54c746e346a3566802e1fe7e78b2ccf2f" +source.sha256 = "dce4d82bb936e990262abcaa279eee7e33a19930880b23f49fa3851cded18567" output.type = "zone" [package.maghemite] diff --git a/package/Cargo.toml b/package/Cargo.toml index 9fc4610020..b840938db0 100644 --- a/package/Cargo.toml +++ b/package/Cargo.toml @@ -34,7 +34,7 @@ tokio = { workspace = true, features = [ "full" ] } toml.workspace = true topological-sort.workspace = true walkdir.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] expectorate.workspace = true diff --git a/package/src/bin/omicron-package.rs b/package/src/bin/omicron-package.rs index a0146eee50..bc07b61234 100644 --- a/package/src/bin/omicron-package.rs +++ b/package/src/bin/omicron-package.rs @@ -41,6 +41,11 @@ enum SubCommand { Deploy(DeployCommand), } +fn parse_duration_ms(arg: &str) -> Result<std::time::Duration> { + let ms = arg.parse()?; + Ok(std::time::Duration::from_millis(ms)) +} + #[derive(Debug, Parser)] #[clap(name = "packaging tool")] struct Args { @@ -77,6 +82,23 @@ struct Args { )] force: bool, + #[clap( + long, + help = "Number of retries to use when re-attempting failed package downloads", + action, + default_value_t = 10 + )] + retry_count: usize, + + #[clap( + long, + help = "Duration, in ms, to wait before re-attempting failed package downloads", + action, + value_parser = parse_duration_ms, + default_value = "1000", + )] + retry_duration: std::time::Duration, + #[clap(subcommand)] subcommand: SubCommand, } @@ -189,11 +211,12 @@ async fn do_target( format!("failed to create directory {}", target_dir.display()) })?; match subcommand { - TargetCommand::Create { image, machine, switch } => { + TargetCommand::Create { image, machine, switch, rack_topology } => { + let target = KnownTarget::new( image.clone(), machine.clone(), switch.clone(), +
rack_topology.clone(), )?; let path = get_single_target(&target_dir, name).await?; @@ -303,8 +326,63 @@ async fn get_sha256_digest(path: &PathBuf) -> Result<Digest> { Ok(context.finish()) } +async fn download_prebuilt( + progress: &PackageProgress, + package_name: &str, + repo: &str, + commit: &str, + expected_digest: &Vec<u8>, + path: &Path, +) -> Result<()> { + progress.set_message("downloading prebuilt".into()); + let url = format!( + "https://buildomat.eng.oxide.computer/public/file/oxidecomputer/{}/image/{}/{}", + repo, + commit, + path.file_name().unwrap().to_string_lossy(), + ); + let response = reqwest::Client::new() + .get(&url) + .send() + .await + .with_context(|| format!("failed to get {url}"))?; + progress.set_length( + response + .content_length() + .ok_or_else(|| anyhow!("Missing Content Length"))?, + ); + let mut file = tokio::fs::File::create(&path) + .await + .with_context(|| format!("failed to create {path:?}"))?; + let mut stream = response.bytes_stream(); + let mut context = DigestContext::new(&SHA256); + while let Some(chunk) = stream.next().await { + let chunk = chunk + .with_context(|| format!("failed reading response from {url}"))?; + // Update the running SHA digest + context.update(&chunk); + // Update the downloaded file + file.write_all(&chunk) + .await + .with_context(|| format!("failed writing {path:?}"))?; + // Record progress in the UI + progress.increment(chunk.len().try_into().unwrap()); + } + + let digest = context.finish(); + if digest.as_ref() != expected_digest { + bail!( + "Digest mismatch downloading {package_name}: Saw {}, expected {}", + hex::encode(digest.as_ref()), + hex::encode(expected_digest) + ); + } + Ok(()) +} + // Ensures a package exists, either by creating it or downloading it. async fn get_package( + config: &Config, target: &Target, ui: &Arc<ProgressUI>, package_name: &String, @@ -328,45 +406,30 @@ }; if should_download { - progress.set_message("downloading prebuilt".into()); - let url = format!( - "https://buildomat.eng.oxide.computer/public/file/oxidecomputer/{}/image/{}/{}", - repo, - commit, - path.as_path().file_name().unwrap().to_string_lossy(), - ); - let response = reqwest::Client::new() - .get(&url) - .send() - .await - .with_context(|| format!("failed to get {url}"))?; - progress.set_length( - response - .content_length() - .ok_or_else(|| anyhow!("Missing Content Length"))?, - ); - let mut file = tokio::fs::File::create(&path) + let mut attempts_left = config.retry_count + 1; + loop { + match download_prebuilt( + &progress, + package_name, + repo, + commit, + &expected_digest, + path.as_path(), + ) .await - .with_context(|| format!("failed to create {path:?}"))?; - let mut stream = response.bytes_stream(); - let mut context = DigestContext::new(&SHA256); - while let Some(chunk) = stream.next().await { - let chunk = chunk.with_context(|| { - format!("failed reading response from {url}") - })?; - // Update the running SHA digest - context.update(&chunk); - // Update the downloaded file - file.write_all(&chunk) - .await - .with_context(|| format!("failed writing {path:?}"))?; - // Record progress in the UI - progress.increment(chunk.len().try_into().unwrap()); - } - - let digest = context.finish(); - if digest.as_ref() != expected_digest { - bail!("Digest mismatch downloading {package_name}: Saw {}, expected {}", hex::encode(digest.as_ref()), hex::encode(expected_digest)); + { + Ok(()) => break, + Err(err) => { + attempts_left -= 1; + let msg = format!("Failed to download prebuilt ({attempts_left} attempts remaining)"); +
progress.set_error_message(msg.into()); + if attempts_left == 0 { + bail!("Failed to download package: {err}"); + } + tokio::time::sleep(config.retry_duration).await; + progress.reset(); + } + } } } } @@ -463,6 +526,7 @@ async fn do_package(config: &Config, output_directory: &Path) -> Result<()> { None, |((package_name, package), ui)| async move { get_package( + &config, &config.target, &ui, package_name, @@ -761,6 +825,13 @@ fn completed_progress_style() -> ProgressStyle { .progress_chars("#>.") } +fn error_progress_style() -> ProgressStyle { + ProgressStyle::default_bar() + .template("[{elapsed_precise}] {bar:40.cyan/blue} {pos:>7}/{len:7} {msg:.red}") + .expect("Invalid template") + .progress_chars("#>.") +} + // Struct managing display of progress to UI. struct ProgressUI { multi: MultiProgress, @@ -782,10 +853,21 @@ impl PackageProgress { fn set_length(&self, total: u64) { self.pb.set_length(total); } + + fn set_error_message(&self, message: std::borrow::Cow<'static, str>) { + self.pb.set_style(error_progress_style()); + self.pb.set_message(format!("{}: {}", self.service_name, message)); + self.pb.tick(); + } + + fn reset(&self) { + self.pb.reset(); + } } impl Progress for PackageProgress { fn set_message(&self, message: std::borrow::Cow<'static, str>) { + self.pb.set_style(in_progress_style()); self.pb.set_message(format!("{}: {}", self.service_name, message)); self.pb.tick(); } @@ -820,6 +902,10 @@ struct Config { target: Target, // True if we should skip confirmations for destructive operations. force: bool, + // Number of times to retry failed downloads. + retry_count: usize, + // Duration to wait before retrying failed downloads. + retry_duration: std::time::Duration, } impl Config { @@ -886,6 +972,8 @@ async fn main() -> Result<()> { package_config, target, force: args.force, + retry_count: args.retry_count, + retry_duration: args.retry_duration, }) }; diff --git a/package/src/lib.rs b/package/src/lib.rs index b0cc04970a..395f3ed472 100644 --- a/package/src/lib.rs +++ b/package/src/lib.rs @@ -46,6 +46,29 @@ pub enum TargetCommand { #[clap(short, long, default_value_if("image", "standard", "stub"))] switch: Option<Switch>, + + #[clap( + short, + long, + default_value_if("image", "trampoline", Some("single-sled")), + + // This opt is required, and clap will enforce that even with + // `required = false`, since it's not an Option. But the + // default_value_if only works if we set `required` to false. It's + // jank, but it is what it is. + // https://github.com/clap-rs/clap/issues/4086 + required = false + )] + /// Specify whether Nexus will run in a single-sled or multi-sled + /// environment. + /// + /// Set single-sled for dev purposes when you're running a single + /// sled-agent. Set multi-sled if you're running with multiple sleds. + /// Currently this only affects the crucible disk allocation strategy: + /// VM disks will require 3 distinct sleds with `multi-sled`, which will + /// fail in a single-sled environment. `single-sled` relaxes this + /// requirement. + rack_topology: crate::target::RackTopology, }, /// List all existing targets List, diff --git a/package/src/target.rs b/package/src/target.rs index a7b2dd4539..d5d5e92c46 100644 --- a/package/src/target.rs +++ b/package/src/target.rs @@ -48,12 +48,27 @@ pub enum Switch { SoftNpu, } +/// Topology of the sleds within the rack.
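The `required = false` plus `default_value_if` interplay described in the comment above is subtle: because the field is not an `Option`, clap still treats the argument as effectively required, and the conditional default only participates once `required` is explicitly set to `false`. A self-contained sketch of that pattern, assuming clap 4 with the `derive` feature (`Opts` and `Topology` are illustrative names, not types from this diff):

```rust
use clap::{Parser, ValueEnum};

#[derive(Clone, Debug, ValueEnum)]
enum Topology {
    SingleSled,
    MultiSled,
}

#[derive(Debug, Parser)]
struct Opts {
    #[clap(long, default_value = "standard")]
    image: String,

    // No unconditional default here: the value is only defaulted when
    // `--image trampoline` is given. `required = false` is what lets
    // `default_value_if` participate at all (clap-rs/clap#4086).
    #[clap(
        long,
        required = false,
        default_value_if("image", "trampoline", Some("single-sled"))
    )]
    topology: Topology,
}

fn main() {
    // The conditional default fires, so `--topology` may be omitted here.
    let opts = Opts::parse_from(["demo", "--image", "trampoline"]);
    println!("{:?}", opts.topology); // SingleSled
}
```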
+#[derive(Clone, Debug, strum::EnumString, strum::Display, ValueEnum)] +#[strum(serialize_all = "kebab-case")] +#[clap(rename_all = "kebab-case")] +pub enum RackTopology { + /// Use configurations suitable for a multi-sled deployment, such as dogfood + /// and production racks. + MultiSled, + + /// Use configurations suitable for a single-sled deployment, such as CI and + /// dev machines. + SingleSled, +} + /// A strongly-typed variant of [Target]. #[derive(Clone, Debug)] pub struct KnownTarget { image: Image, machine: Option<Machine>, switch: Option<Switch>, + rack_topology: RackTopology, } impl KnownTarget { @@ -61,6 +76,7 @@ image: Image, machine: Option<Machine>, switch: Option<Switch>, + rack_topology: RackTopology, ) -> Result<Self> { if matches!(image, Image::Trampoline) { if machine.is_some() { @@ -77,7 +93,7 @@ bail!("'switch=asic' is only valid with 'machine=gimlet'"); } - Ok(Self { image, machine, switch }) + Ok(Self { image, machine, switch, rack_topology }) } } @@ -87,6 +103,7 @@ impl Default for KnownTarget { image: Image::Standard, machine: Some(Machine::NonGimlet), switch: Some(Switch::Stub), + rack_topology: RackTopology::MultiSled, } } } @@ -101,6 +118,7 @@ impl From<KnownTarget> for Target { if let Some(switch) = kt.switch { map.insert("switch".to_string(), switch.to_string()); } + map.insert("rack-topology".to_string(), kt.rack_topology.to_string()); Target(map) } } @@ -121,6 +139,7 @@ let mut image = Self::default().image; let mut machine = None; let mut switch = None; + let mut rack_topology = None; for (k, v) in target.0.into_iter() { match k.as_str() { @@ -133,6 +152,9 @@ "switch" => { switch = Some(v.parse()?); } + "rack-topology" => { + rack_topology = Some(v.parse()?); + } _ => { bail!( "Unknown target key {k}\nValid keys include: [{}]", @@ -146,6 +168,11 @@ } } } - KnownTarget::new(image, machine, switch) + KnownTarget::new( + image, + machine, + switch, + rack_topology.unwrap_or(RackTopology::MultiSled), + ) } } diff --git a/passwords/Cargo.toml b/passwords/Cargo.toml index cbd569ef4c..8adcf75a2e 100644 --- a/passwords/Cargo.toml +++ b/passwords/Cargo.toml @@ -11,7 +11,7 @@ thiserror.workspace = true schemars.workspace = true serde.workspace = true serde_with.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] argon2alt = { package = "rust-argon2", version = "1.0" } diff --git a/rpaths/Cargo.toml b/rpaths/Cargo.toml index 7671be4968..45e6c9b925 100644 --- a/rpaths/Cargo.toml +++ b/rpaths/Cargo.toml @@ -5,4 +5,4 @@ edition = "2021" license = "MPL-2.0" [dependencies] -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/sled-agent/Cargo.toml b/sled-agent/Cargo.toml index b131698395..82d7411d1a 100644 --- a/sled-agent/Cargo.toml +++ b/sled-agent/Cargo.toml @@ -76,7 +76,7 @@ uuid.workspace = true zeroize.workspace = true zone.workspace = true static_assertions.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [target.'cfg(target_os = "illumos")'.dependencies] opte-ioctl.workspace = true @@ -95,6 +95,7 @@ serial_test.workspace = true subprocess.workspace = true slog-async.workspace = true slog-term.workspace = true +tempfile.workspace = true illumos-utils = { workspace = true, features = ["testing"] } @@
-119,3 +120,5 @@ machine-non-gimlet = [] switch-asic = [] switch-stub = [] switch-softnpu = [] +rack-topology-single-sled = [] +rack-topology-multi-sled = [] diff --git a/sled-agent/src/services.rs b/sled-agent/src/services.rs index 96cdf8222b..60f0965612 100644 --- a/sled-agent/src/services.rs +++ b/sled-agent/src/services.rs @@ -1513,6 +1513,9 @@ impl ServiceManager { .open(&config_path) .await .map_err(|err| Error::io_path(&config_path, err))?; + file.write_all(b"\n\n") + .await + .map_err(|err| Error::io_path(&config_path, err))?; file.write_all(config_str.as_bytes()) .await .map_err(|err| Error::io_path(&config_path, err))?; diff --git a/sled-agent/src/sim/sled_agent.rs b/sled-agent/src/sim/sled_agent.rs index e53295f823..42fff355a5 100644 --- a/sled-agent/src/sim/sled_agent.rs +++ b/sled-agent/src/sim/sled_agent.rs @@ -617,14 +617,8 @@ impl SledAgent { ..Default::default() }; let propolis_log = log.new(o!("component" => "propolis-server-mock")); - let config = propolis_server::config::Config { - bootrom: Default::default(), - pci_bridges: Default::default(), - chipset: Default::default(), - devices: Default::default(), - block_devs: Default::default(), - }; - let private = Arc::new(PropolisContext::new(config, propolis_log)); + let private = + Arc::new(PropolisContext::new(Default::default(), propolis_log)); info!(log, "Starting mock propolis-server..."); let dropshot_log = log.new(o!("component" => "dropshot")); let mock_api = propolis_server::mock_server::api(); diff --git a/sled-agent/src/zone_bundle.rs b/sled-agent/src/zone_bundle.rs index 2eeb8ebe7d..4c2d6a4113 100644 --- a/sled-agent/src/zone_bundle.rs +++ b/sled-agent/src/zone_bundle.rs @@ -899,9 +899,9 @@ async fn find_archived_log_files( continue; }; let fname = path.file_name().unwrap(); - if is_oxide_smf_log_file(fname) - && fname.contains(svc_name) - { + let is_oxide = is_oxide_smf_log_file(fname); + let contains = fname.contains(svc_name); + if is_oxide && contains { debug!( log, "found archived log file"; @@ -910,6 +910,14 @@ async fn find_archived_log_files( "path" => ?path, ); files.push(path); + } else { + debug!( + log, + "skipping non-matching log file"; + "filename" => fname, + "is_oxide_smf_log_file" => is_oxide, + "contains_svc_name" => contains, + ); } } Err(e) => { @@ -1764,6 +1772,7 @@ mod tests { #[cfg(all(target_os = "illumos", test))] mod illumos_tests { + use super::find_archived_log_files; use super::zfs_quota; use super::CleanupContext; use super::CleanupPeriod; @@ -1852,12 +1861,17 @@ mod illumos_tests { } } - async fn setup_fake_cleanup_task() -> anyhow::Result { + fn test_logger() -> Logger { let dec = slog_term::PlainSyncDecorator::new(slog_term::TestStdoutWriter); let drain = slog_term::FullFormat::new(dec).build().fuse(); let log = Logger::root(drain, slog::o!("component" => "fake-cleanup-task")); + log + } + + async fn setup_fake_cleanup_task() -> anyhow::Result { + let log = test_logger(); let context = CleanupContext::default(); let resource_wrapper = ResourceWrapper::new().await; let bundler = @@ -2279,4 +2293,49 @@ mod illumos_tests { let bytes = tokio::fs::metadata(&path).await?.len(); Ok(ZoneBundleInfo { metadata, path, bytes }) } + + #[tokio::test] + async fn test_find_archived_log_files() { + let log = test_logger(); + let tmpdir = tempfile::tempdir().expect("Failed to make tempdir"); + + let mut should_match = [ + "oxide-foo:default.log", + "oxide-foo:default.log.1000", + "system-illumos-foo:default.log", + "system-illumos-foo:default.log.100", + ]; + let should_not_match = [ + 
"oxide-foo:default", + "not-oxide-foo:default.log.1000", + "system-illumos-foo", + "not-system-illumos-foo:default.log.100", + ]; + for name in should_match.iter().chain(should_not_match.iter()) { + let path = tmpdir.path().join(name); + tokio::fs::File::create(path) + .await + .expect("failed to create dummy file"); + } + + let path = + Utf8PathBuf::try_from(tmpdir.path().as_os_str().to_str().unwrap()) + .unwrap(); + let mut files = find_archived_log_files( + &log, + "zone-name", // unused here, for logging only + "foo", + &[path], + ) + .await; + + // Sort everything to compare correctly. + should_match.sort(); + files.sort(); + assert_eq!(files.len(), should_match.len()); + assert!(files + .iter() + .zip(should_match.iter()) + .all(|(file, name)| { file.file_name().unwrap() == *name })); + } } diff --git a/sled-hardware/Cargo.toml b/sled-hardware/Cargo.toml index 880f93441c..14ae15996b 100644 --- a/sled-hardware/Cargo.toml +++ b/sled-hardware/Cargo.toml @@ -24,7 +24,7 @@ thiserror.workspace = true tofino.workspace = true tokio.workspace = true uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [target.'cfg(target_os = "illumos")'.dependencies] illumos-devinfo = { git = "https://github.com/oxidecomputer/illumos-devinfo", branch = "main" } diff --git a/smf/nexus/multi-sled/config-partial.toml b/smf/nexus/multi-sled/config-partial.toml new file mode 100644 index 0000000000..2dfee81d02 --- /dev/null +++ b/smf/nexus/multi-sled/config-partial.toml @@ -0,0 +1,45 @@ +# +# Oxide API: partial configuration file +# + +[console] +# Directory for static assets. Absolute path or relative to CWD. +static_dir = "/var/nexus/static" +session_idle_timeout_minutes = 60 +session_absolute_timeout_minutes = 480 + +[authn] +schemes_external = ["session_cookie", "access_token"] + +[log] +# Show log messages of this level and more severe +level = "debug" +mode = "file" +path = "/dev/stdout" +if_exists = "append" + +# TODO: Uncomment the following lines to enable automatic schema +# migration on boot. +# +# [schema] +# schema_dir = "/var/nexus/schema/crdb" + +[background_tasks] +dns_internal.period_secs_config = 60 +dns_internal.period_secs_servers = 60 +dns_internal.period_secs_propagation = 60 +dns_internal.max_concurrent_server_updates = 5 +dns_external.period_secs_config = 60 +dns_external.period_secs_servers = 60 +dns_external.period_secs_propagation = 60 +dns_external.max_concurrent_server_updates = 5 +# How frequently we check the list of stored TLS certificates. This is +# approximately an upper bound on how soon after updating the list of +# certificates it will take _other_ Nexus instances to notice and stop serving +# them (on a sunny day). +external_endpoints.period_secs = 60 + +[default_region_allocation_strategy] +# by default, allocate across 3 distinct sleds +# seed is omitted so a new seed will be chosen with every allocation. +type = "random_with_distinct_sleds" \ No newline at end of file diff --git a/smf/nexus/config-partial.toml b/smf/nexus/single-sled/config-partial.toml similarity index 86% rename from smf/nexus/config-partial.toml rename to smf/nexus/single-sled/config-partial.toml index b29727c4aa..aff0a8a25f 100644 --- a/smf/nexus/config-partial.toml +++ b/smf/nexus/single-sled/config-partial.toml @@ -38,3 +38,8 @@ dns_external.max_concurrent_server_updates = 5 # certificates it will take _other_ Nexus instances to notice and stop serving # them (on a sunny day). 
external_endpoints.period_secs = 60 + +[default_region_allocation_strategy] +# by default, allocate without requirement for distinct sleds. +# seed is omitted so a new seed will be chosen with every allocation. +type = "random" \ No newline at end of file diff --git a/sp-sim/Cargo.toml b/sp-sim/Cargo.toml index 2a1ae19468..07d956e41e 100644 --- a/sp-sim/Cargo.toml +++ b/sp-sim/Cargo.toml @@ -21,7 +21,7 @@ sprockets-rot.workspace = true thiserror.workspace = true tokio = { workspace = true, features = [ "full" ] } toml.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [[bin]] name = "sp-sim" diff --git a/test-utils/Cargo.toml b/test-utils/Cargo.toml index a0227a4de2..9e21f3ca12 100644 --- a/test-utils/Cargo.toml +++ b/test-utils/Cargo.toml @@ -25,7 +25,7 @@ usdt.workspace = true rcgen.workspace = true regex.workspace = true reqwest.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] expectorate.workspace = true diff --git a/tools/console_version b/tools/console_version index dba32c3e94..0c30c707e1 100644 --- a/tools/console_version +++ b/tools/console_version @@ -1,2 +1,2 @@ -COMMIT="af6536d587a17a65398407ca03d364345aa24342" -SHA2="00701652eb1e495fd22409dcdf74ebae2ba081529f65fb41c5ac3a2fef50a149" +COMMIT="0cc1e03a24b3f5da275d15b969978a385d6b3b27" +SHA2="46a186fc3bf919a3aa2871aeab8441e4a13ed134f912b5d76c7ff891fed66cee" diff --git a/tools/dvt_dock_version b/tools/dvt_dock_version index d7c2d31948..e2151b846f 100644 --- a/tools/dvt_dock_version +++ b/tools/dvt_dock_version @@ -1 +1 @@ -COMMIT=3cc151e62af190062780389eeae78937c3041021 +COMMIT=65f1979c1d3f4d0874a64144941cc41b46a70c80 diff --git a/tools/install_opte.sh b/tools/install_opte.sh index f670adf163..20a33b05a5 100755 --- a/tools/install_opte.sh +++ b/tools/install_opte.sh @@ -51,6 +51,26 @@ fi # Grab the version of the opte package to install OPTE_VERSION="$(cat "$OMICRON_TOP/tools/opte_version")" +OMICRON_FROZEN_PKG_COMMENT="OMICRON-PINNED-PACKAGE" + +# Once we install, we mark the package as frozen at that particular version. +# This makes sure that a `pkg update` won't automatically move us forward +# (and hence defeat the whole point of pinning). +# But this also prevents us from installing the next version, so we must +# unfreeze first. +if PKG_FROZEN=$(pkg freeze | grep driver/network/opte); then + FROZEN_COMMENT=$(echo "$PKG_FROZEN" | awk '{ print $(NF) }') + + # Compare the comment to make sure this freeze was indeed our doing + if [ "$FROZEN_COMMENT" != "$OMICRON_FROZEN_PKG_COMMENT" ]; then + echo "Found driver/network/opte previously frozen but not by us:" + echo "$PKG_FROZEN" + exit 1 + fi + + pfexec pkg unfreeze driver/network/opte +fi + # Actually install the xde kernel module and opteadm tool RC=0 pfexec pkg install -v pkg://helios-dev/driver/network/opte@"$OPTE_VERSION" || RC=$? @@ -63,6 +83,13 @@ else exit "$RC" fi +RC=0 +pfexec pkg freeze -c "$OMICRON_FROZEN_PKG_COMMENT" driver/network/opte@"$OPTE_VERSION" || RC=$? +if [[ "$RC" -ne 0 ]]; then + echo "Failed to pin opte package to $OPTE_VERSION" + exit $RC +fi + # Check the user's path RC=0 which opteadm > /dev/null || RC=$?
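The single-sled `config-partial.toml` variant above differs from the multi-sled one only in the `[default_region_allocation_strategy]` table (`random` versus `random_with_distinct_sleds`). A minimal sketch of how such an internally tagged table could be modeled with serde; `RegionAllocationStrategy` and its `seed` field are assumptions for illustration, not Nexus's actual type:

```rust
use serde::Deserialize;

// Hypothetical mirror of the `[default_region_allocation_strategy]` tables
// in the two config-partial.toml variants above.
#[derive(Debug, Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
enum RegionAllocationStrategy {
    // Single-sled: region replicas may all land on one sled.
    Random { seed: Option<u64> },
    // Multi-sled: each of the 3 region replicas goes on a distinct sled.
    RandomWithDistinctSleds { seed: Option<u64> },
}

fn main() {
    let single: RegionAllocationStrategy =
        toml::from_str(r#"type = "random""#).unwrap();
    let multi: RegionAllocationStrategy =
        toml::from_str(r#"type = "random_with_distinct_sleds""#).unwrap();
    println!("{single:?} / {multi:?}");
}
```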
diff --git a/tools/uninstall_opte.sh b/tools/uninstall_opte.sh index a833d029aa..c8ee0f5b28 100755 --- a/tools/uninstall_opte.sh +++ b/tools/uninstall_opte.sh @@ -165,6 +165,19 @@ function restore_xde_and_opte { fi } +function unfreeze_opte_pkg { + OMICRON_FROZEN_PKG_COMMENT="OMICRON-PINNED-PACKAGE" + + # If we've frozen a particular version, let's be good citizens + # and clear that as well. + if PKG_FROZEN=$(pkg freeze | grep driver/network/opte); then + FROZEN_COMMENT=$(echo "$PKG_FROZEN" | awk '{ print $(NF) }') + if [ "$FROZEN_COMMENT" == "$OMICRON_FROZEN_PKG_COMMENT" ]; then + pkg unfreeze driver/network/opte + fi + fi +} + function ensure_not_already_on_helios { local RC=0 pkg list "$STOCK_CONSOLIDATION"* || RC=$? @@ -179,5 +192,6 @@ uninstall_xde_and_opte for PUBLISHER in "${PUBLISHERS[@]}"; do remove_publisher "$PUBLISHER" done +unfreeze_opte_pkg ensure_not_already_on_helios to_stock_helios "$CONSOLIDATION" diff --git a/tufaceous-lib/Cargo.toml b/tufaceous-lib/Cargo.toml index 8b5c4fa7ca..bcfcee6b9c 100644 --- a/tufaceous-lib/Cargo.toml +++ b/tufaceous-lib/Cargo.toml @@ -32,7 +32,7 @@ toml.workspace = true tough.workspace = true url = "2.4.1" zip.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] omicron-test-utils.workspace = true diff --git a/tufaceous/Cargo.toml b/tufaceous/Cargo.toml index f3e3b815d2..e48513e24c 100644 --- a/tufaceous/Cargo.toml +++ b/tufaceous/Cargo.toml @@ -18,7 +18,7 @@ slog-async.workspace = true slog-envlogger.workspace = true slog-term.workspace = true tufaceous-lib.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] assert_cmd.workspace = true diff --git a/update-engine/Cargo.toml b/update-engine/Cargo.toml index 25ade83f34..af988bf091 100644 --- a/update-engine/Cargo.toml +++ b/update-engine/Cargo.toml @@ -21,7 +21,7 @@ schemars = { workspace = true, features = ["uuid1"] } slog.workspace = true tokio = { workspace = true, features = ["macros", "sync", "time", "rt-multi-thread"] } uuid.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] buf-list.workspace = true diff --git a/wicket-common/Cargo.toml b/wicket-common/Cargo.toml index 229561cd38..b87e742133 100644 --- a/wicket-common/Cargo.toml +++ b/wicket-common/Cargo.toml @@ -13,4 +13,4 @@ serde.workspace = true serde_json.workspace = true thiserror.workspace = true update-engine.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true diff --git a/wicket-dbg/Cargo.toml b/wicket-dbg/Cargo.toml index bc22424c69..e7e8a58468 100644 --- a/wicket-dbg/Cargo.toml +++ b/wicket-dbg/Cargo.toml @@ -22,7 +22,7 @@ wicket.workspace = true # used only by wicket-dbg binary reedline = "0.23.0" -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [[bin]] name = "wicket-dbg" diff --git a/wicket/Cargo.toml b/wicket/Cargo.toml index 58605c8037..5392e72e9f 100644 --- a/wicket/Cargo.toml +++ b/wicket/Cargo.toml @@ -46,7 +46,7 @@ omicron-passwords.workspace = true update-engine.workspace = true wicket-common.workspace = true wicketd-client.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [dev-dependencies] 
assert_cmd.workspace = true diff --git a/wicketd/Cargo.toml b/wicketd/Cargo.toml index 6df5e0e4e5..1044e1ff51 100644 --- a/wicketd/Cargo.toml +++ b/wicketd/Cargo.toml @@ -54,7 +54,7 @@ sled-hardware.workspace = true tufaceous-lib.workspace = true update-engine.workspace = true wicket-common.workspace = true -omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } +omicron-workspace-hack.workspace = true [[bin]] name = "wicketd" diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index d3e00b1831..8854ef27bc 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -14,248 +14,260 @@ publish = false ### BEGIN HAKARI SECTION [dependencies] -anyhow = { version = "1", features = ["backtrace"] } -bit-set = { version = "0.5" } -bit-vec = { version = "0.6" } -bitflags-dff4ba8e3ae991db = { package = "bitflags", version = "1" } -bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2", default-features = false, features = ["serde"] } -bitvec = { version = "1" } -bstr-6f8ce4dd05d13bba = { package = "bstr", version = "0.2" } -bstr-dff4ba8e3ae991db = { package = "bstr", version = "1" } -bytes = { version = "1", features = ["serde"] } -chrono = { version = "0.4", features = ["alloc", "serde"] } -cipher = { version = "0.4", default-features = false, features = ["block-padding", "zeroize"] } -clap = { version = "4", features = ["derive", "env", "wrap_help"] } -clap_builder = { version = "4", default-features = false, features = ["color", "env", "std", "suggestions", "usage", "wrap_help"] } -console = { version = "0.15" } -const-oid = { version = "0.9", default-features = false, features = ["db", "std"] } -crossbeam-epoch = { version = "0.9" } -crossbeam-utils = { version = "0.8" } -crypto-common = { version = "0.1", default-features = false, features = ["getrandom", "std"] } -diesel = { version = "2", features = ["chrono", "i-implement-a-third-party-backend-and-opt-into-breaking-changes", "network-address", "postgres", "r2d2", "serde_json", "uuid"] } -digest = { version = "0.10", features = ["mac", "oid", "std"] } -either = { version = "1" } -flate2 = { version = "1" } -futures = { version = "0.3" } -futures-channel = { version = "0.3", features = ["sink"] } -futures-core = { version = "0.3" } -futures-io = { version = "0.3", default-features = false, features = ["std"] } -futures-sink = { version = "0.3" } -futures-task = { version = "0.3", default-features = false, features = ["std"] } -futures-util = { version = "0.3", features = ["channel", "io", "sink"] } +anyhow = { version = "1.0.75", features = ["backtrace"] } +bit-set = { version = "0.5.3" } +bit-vec = { version = "0.6.3" } +bitflags-dff4ba8e3ae991db = { package = "bitflags", version = "1.3.2" } +bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["serde"] } +bitvec = { version = "1.0.1" } +bstr-6f8ce4dd05d13bba = { package = "bstr", version = "0.2.17" } +bstr-dff4ba8e3ae991db = { package = "bstr", version = "1.6.0" } +bytes = { version = "1.5.0", features = ["serde"] } +chrono = { version = "0.4.31", features = ["alloc", "serde"] } +cipher = { version = "0.4.4", default-features = false, features = ["block-padding", "zeroize"] } +clap = { version = "4.4.3", features = ["derive", "env", "wrap_help"] } +clap_builder = { version = "4.4.2", default-features = false, features = ["color", "env", "std", "suggestions", "usage", "wrap_help"] } +console = { version = "0.15.7" } +const-oid = { version = "0.9.5", default-features = false, 
features = ["db", "std"] } +crossbeam-epoch = { version = "0.9.15" } +crossbeam-utils = { version = "0.8.16" } +crypto-common = { version = "0.1.6", default-features = false, features = ["getrandom", "std"] } +diesel = { version = "2.1.1", features = ["chrono", "i-implement-a-third-party-backend-and-opt-into-breaking-changes", "network-address", "postgres", "r2d2", "serde_json", "uuid"] } +digest = { version = "0.10.7", features = ["mac", "oid", "std"] } +either = { version = "1.9.0" } +flate2 = { version = "1.0.27" } +futures = { version = "0.3.28" } +futures-channel = { version = "0.3.28", features = ["sink"] } +futures-core = { version = "0.3.28" } +futures-executor = { version = "0.3.28" } +futures-io = { version = "0.3.28", default-features = false, features = ["std"] } +futures-sink = { version = "0.3.28" } +futures-task = { version = "0.3.28", default-features = false, features = ["std"] } +futures-util = { version = "0.3.28", features = ["channel", "io", "sink"] } gateway-messages = { git = "https://github.com/oxidecomputer/management-gateway-service", rev = "1e180ae55e56bd17af35cb868ffbd18ce487351d", features = ["std"] } -generic-array = { version = "0.14", default-features = false, features = ["more_lengths", "zeroize"] } -getrandom = { version = "0.2", default-features = false, features = ["js", "rdrand", "std"] } -hashbrown-582f2526e08bb6a0 = { package = "hashbrown", version = "0.14", features = ["raw"] } -hashbrown-594e8ee84c453af0 = { package = "hashbrown", version = "0.13" } -hex = { version = "0.4", features = ["serde"] } -hyper = { version = "0.14", features = ["full"] } -indexmap = { version = "2", features = ["serde"] } -inout = { version = "0.1", default-features = false, features = ["std"] } -ipnetwork = { version = "0.20", features = ["schemars"] } -itertools = { version = "0.10" } -lalrpop-util = { version = "0.19" } -lazy_static = { version = "1", default-features = false, features = ["spin_no_std"] } -libc = { version = "0.2", features = ["extra_traits"] } -log = { version = "0.4", default-features = false, features = ["std"] } -managed = { version = "0.8", default-features = false, features = ["alloc", "map"] } -memchr = { version = "2" } -num-bigint = { version = "0.4", features = ["rand"] } -num-integer = { version = "0.1", features = ["i128"] } -num-iter = { version = "0.1", default-features = false, features = ["i128"] } -num-traits = { version = "0.2", features = ["i128", "libm"] } -openapiv3 = { version = "1", default-features = false, features = ["skip_serializing_defaults"] } -petgraph = { version = "0.6", features = ["serde-1"] } -postgres-types = { version = "0.2", default-features = false, features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } -ppv-lite86 = { version = "0.2", default-features = false, features = ["simd", "std"] } -predicates = { version = "3" } -rand = { version = "0.8", features = ["min_const_gen"] } -rand_chacha = { version = "0.3" } -regex = { version = "1" } -regex-automata = { version = "0.3", default-features = false, features = ["dfa-onepass", "dfa-search", "hybrid", "meta", "nfa-backtrack", "perf-inline", "perf-literal", "unicode"] } -regex-syntax = { version = "0.7" } -reqwest = { version = "0.11", features = ["blocking", "json", "rustls-tls", "stream"] } -ring = { version = "0.16", features = ["std"] } -schemars = { version = "0.8", features = ["bytes", "chrono", "uuid1"] } -semver = { version = "1", features = ["serde"] } -serde = { version = "1", features = ["alloc", "derive", "rc"] } -sha2 = { version = 
"0.10", features = ["oid"] } -signature = { version = "2", default-features = false, features = ["digest", "rand_core", "std"] } -similar = { version = "2", features = ["inline", "unicode"] } -slog = { version = "2", features = ["dynamic-keys", "max_level_trace", "release_max_level_debug", "release_max_level_trace"] } -spin = { version = "0.9" } -string_cache = { version = "0.8" } -subtle = { version = "2" } -syn-dff4ba8e3ae991db = { package = "syn", version = "1", features = ["extra-traits", "fold", "full", "visit"] } -syn-f595c2ba2a3f28df = { package = "syn", version = "2", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } -textwrap = { version = "0.16" } -time = { version = "0.3", features = ["formatting", "local-offset", "macros", "parsing"] } -tokio = { version = "1", features = ["full", "test-util"] } -tokio-postgres = { version = "0.7", features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } -tokio-stream = { version = "0.1", features = ["net"] } -toml = { version = "0.7" } -toml_datetime = { version = "0.6", default-features = false, features = ["serde"] } -toml_edit = { version = "0.19", features = ["serde"] } -tracing = { version = "0.1", features = ["log"] } -trust-dns-proto = { version = "0.22" } -unicode-bidi = { version = "0.3" } -unicode-normalization = { version = "0.1" } -usdt = { version = "0.3" } -uuid = { version = "1", features = ["serde", "v4"] } -yasna = { version = "0.5", features = ["bit-vec", "num-bigint", "std", "time"] } -zeroize = { version = "1", features = ["std", "zeroize_derive"] } -zip = { version = "0.6", default-features = false, features = ["bzip2", "deflate"] } +generic-array = { version = "0.14.7", default-features = false, features = ["more_lengths", "zeroize"] } +getrandom = { version = "0.2.10", default-features = false, features = ["js", "rdrand", "std"] } +hashbrown-582f2526e08bb6a0 = { package = "hashbrown", version = "0.14.0", features = ["raw"] } +hashbrown-594e8ee84c453af0 = { package = "hashbrown", version = "0.13.2" } +hashbrown-5ef9efb8ec2df382 = { package = "hashbrown", version = "0.12.3", features = ["raw"] } +hex = { version = "0.4.3", features = ["serde"] } +hyper = { version = "0.14.27", features = ["full"] } +indexmap-dff4ba8e3ae991db = { package = "indexmap", version = "1.9.3", default-features = false, features = ["serde-1", "std"] } +indexmap-f595c2ba2a3f28df = { package = "indexmap", version = "2.0.0", features = ["serde"] } +inout = { version = "0.1.3", default-features = false, features = ["std"] } +ipnetwork = { version = "0.20.0", features = ["schemars"] } +itertools = { version = "0.10.5" } +lalrpop-util = { version = "0.19.12" } +lazy_static = { version = "1.4.0", default-features = false, features = ["spin_no_std"] } +libc = { version = "0.2.148", features = ["extra_traits"] } +log = { version = "0.4.20", default-features = false, features = ["std"] } +managed = { version = "0.8.0", default-features = false, features = ["alloc", "map"] } +memchr = { version = "2.6.3" } +num-bigint = { version = "0.4.4", features = ["rand"] } +num-integer = { version = "0.1.45", features = ["i128"] } +num-iter = { version = "0.1.43", default-features = false, features = ["i128"] } +num-traits = { version = "0.2.16", features = ["i128", "libm"] } +openapiv3 = { version = "1.0.3", default-features = false, features = ["skip_serializing_defaults"] } +parking_lot = { version = "0.12.1", features = ["send_guard"] } +petgraph = { version = "0.6.4", features = ["serde-1"] } +phf_shared = { version = "0.11.2" } 
+postgres-types = { version = "0.2.6", default-features = false, features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } +ppv-lite86 = { version = "0.2.17", default-features = false, features = ["simd", "std"] } +predicates = { version = "3.0.3" } +rand = { version = "0.8.5", features = ["min_const_gen", "small_rng"] } +rand_chacha = { version = "0.3.1" } +regex = { version = "1.9.5" } +regex-automata = { version = "0.3.8", default-features = false, features = ["dfa-onepass", "dfa-search", "hybrid", "meta", "nfa-backtrack", "perf-inline", "perf-literal", "unicode"] } +regex-syntax = { version = "0.7.5" } +reqwest = { version = "0.11.20", features = ["blocking", "json", "rustls-tls", "stream"] } +ring = { version = "0.16.20", features = ["std"] } +schemars = { version = "0.8.13", features = ["bytes", "chrono", "uuid1"] } +semver = { version = "1.0.18", features = ["serde"] } +serde = { version = "1.0.188", features = ["alloc", "derive", "rc"] } +sha2 = { version = "0.10.7", features = ["oid"] } +signature = { version = "2.1.0", default-features = false, features = ["digest", "rand_core", "std"] } +similar = { version = "2.2.1", features = ["inline", "unicode"] } +slog = { version = "2.7.0", features = ["dynamic-keys", "max_level_trace", "release_max_level_debug", "release_max_level_trace"] } +spin = { version = "0.9.8" } +string_cache = { version = "0.8.7" } +subtle = { version = "2.5.0" } +syn-dff4ba8e3ae991db = { package = "syn", version = "1.0.109", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } +syn-f595c2ba2a3f28df = { package = "syn", version = "2.0.32", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } +textwrap = { version = "0.16.0" } +time = { version = "0.3.27", features = ["formatting", "local-offset", "macros", "parsing"] } +tokio = { version = "1.32.0", features = ["full", "test-util"] } +tokio-postgres = { version = "0.7.10", features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } +tokio-stream = { version = "0.1.14", features = ["net"] } +toml = { version = "0.7.8" } +tracing = { version = "0.1.37", features = ["log"] } +tracing-core = { version = "0.1.31" } +trust-dns-proto = { version = "0.22.0" } +unicode-bidi = { version = "0.3.13" } +unicode-normalization = { version = "0.1.22" } +usdt = { version = "0.3.5" } +uuid = { version = "1.4.1", features = ["serde", "v4"] } +yasna = { version = "0.5.2", features = ["bit-vec", "num-bigint", "std", "time"] } +zeroize = { version = "1.6.0", features = ["std", "zeroize_derive"] } +zip = { version = "0.6.6", default-features = false, features = ["bzip2", "deflate"] } [build-dependencies] -anyhow = { version = "1", features = ["backtrace"] } -bit-set = { version = "0.5" } -bit-vec = { version = "0.6" } -bitflags-dff4ba8e3ae991db = { package = "bitflags", version = "1" } -bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2", default-features = false, features = ["serde"] } -bitvec = { version = "1" } -bstr-6f8ce4dd05d13bba = { package = "bstr", version = "0.2" } -bstr-dff4ba8e3ae991db = { package = "bstr", version = "1" } -bytes = { version = "1", features = ["serde"] } -cc = { version = "1", default-features = false, features = ["parallel"] } -chrono = { version = "0.4", features = ["alloc", "serde"] } -cipher = { version = "0.4", default-features = false, features = ["block-padding", "zeroize"] } -clap = { version = "4", features = ["derive", "env", "wrap_help"] } -clap_builder = { version = "4", default-features = false, features = ["color", "env", "std", 
"suggestions", "usage", "wrap_help"] } -console = { version = "0.15" } -const-oid = { version = "0.9", default-features = false, features = ["db", "std"] } -crossbeam-epoch = { version = "0.9" } -crossbeam-utils = { version = "0.8" } -crypto-common = { version = "0.1", default-features = false, features = ["getrandom", "std"] } -diesel = { version = "2", features = ["chrono", "i-implement-a-third-party-backend-and-opt-into-breaking-changes", "network-address", "postgres", "r2d2", "serde_json", "uuid"] } -digest = { version = "0.10", features = ["mac", "oid", "std"] } -either = { version = "1" } -flate2 = { version = "1" } -futures = { version = "0.3" } -futures-channel = { version = "0.3", features = ["sink"] } -futures-core = { version = "0.3" } -futures-io = { version = "0.3", default-features = false, features = ["std"] } -futures-sink = { version = "0.3" } -futures-task = { version = "0.3", default-features = false, features = ["std"] } -futures-util = { version = "0.3", features = ["channel", "io", "sink"] } +anyhow = { version = "1.0.75", features = ["backtrace"] } +bit-set = { version = "0.5.3" } +bit-vec = { version = "0.6.3" } +bitflags-dff4ba8e3ae991db = { package = "bitflags", version = "1.3.2" } +bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["serde"] } +bitvec = { version = "1.0.1" } +bstr-6f8ce4dd05d13bba = { package = "bstr", version = "0.2.17" } +bstr-dff4ba8e3ae991db = { package = "bstr", version = "1.6.0" } +bytes = { version = "1.5.0", features = ["serde"] } +cc = { version = "1.0.83", default-features = false, features = ["parallel"] } +chrono = { version = "0.4.31", features = ["alloc", "serde"] } +cipher = { version = "0.4.4", default-features = false, features = ["block-padding", "zeroize"] } +clap = { version = "4.4.3", features = ["derive", "env", "wrap_help"] } +clap_builder = { version = "4.4.2", default-features = false, features = ["color", "env", "std", "suggestions", "usage", "wrap_help"] } +console = { version = "0.15.7" } +const-oid = { version = "0.9.5", default-features = false, features = ["db", "std"] } +crossbeam-epoch = { version = "0.9.15" } +crossbeam-utils = { version = "0.8.16" } +crypto-common = { version = "0.1.6", default-features = false, features = ["getrandom", "std"] } +diesel = { version = "2.1.1", features = ["chrono", "i-implement-a-third-party-backend-and-opt-into-breaking-changes", "network-address", "postgres", "r2d2", "serde_json", "uuid"] } +digest = { version = "0.10.7", features = ["mac", "oid", "std"] } +either = { version = "1.9.0" } +flate2 = { version = "1.0.27" } +futures = { version = "0.3.28" } +futures-channel = { version = "0.3.28", features = ["sink"] } +futures-core = { version = "0.3.28" } +futures-executor = { version = "0.3.28" } +futures-io = { version = "0.3.28", default-features = false, features = ["std"] } +futures-sink = { version = "0.3.28" } +futures-task = { version = "0.3.28", default-features = false, features = ["std"] } +futures-util = { version = "0.3.28", features = ["channel", "io", "sink"] } gateway-messages = { git = "https://github.com/oxidecomputer/management-gateway-service", rev = "1e180ae55e56bd17af35cb868ffbd18ce487351d", features = ["std"] } -generic-array = { version = "0.14", default-features = false, features = ["more_lengths", "zeroize"] } -getrandom = { version = "0.2", default-features = false, features = ["js", "rdrand", "std"] } -hashbrown-582f2526e08bb6a0 = { package = "hashbrown", version = "0.14", features = ["raw"] } 
-hashbrown-594e8ee84c453af0 = { package = "hashbrown", version = "0.13" }
-hex = { version = "0.4", features = ["serde"] }
-hyper = { version = "0.14", features = ["full"] }
-indexmap = { version = "2", features = ["serde"] }
-inout = { version = "0.1", default-features = false, features = ["std"] }
-ipnetwork = { version = "0.20", features = ["schemars"] }
-itertools = { version = "0.10" }
-lalrpop-util = { version = "0.19" }
-lazy_static = { version = "1", default-features = false, features = ["spin_no_std"] }
-libc = { version = "0.2", features = ["extra_traits"] }
-log = { version = "0.4", default-features = false, features = ["std"] }
-managed = { version = "0.8", default-features = false, features = ["alloc", "map"] }
-memchr = { version = "2" }
-num-bigint = { version = "0.4", features = ["rand"] }
-num-integer = { version = "0.1", features = ["i128"] }
-num-iter = { version = "0.1", default-features = false, features = ["i128"] }
-num-traits = { version = "0.2", features = ["i128", "libm"] }
-openapiv3 = { version = "1", default-features = false, features = ["skip_serializing_defaults"] }
-petgraph = { version = "0.6", features = ["serde-1"] }
-postgres-types = { version = "0.2", default-features = false, features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] }
-ppv-lite86 = { version = "0.2", default-features = false, features = ["simd", "std"] }
-predicates = { version = "3" }
-rand = { version = "0.8", features = ["min_const_gen"] }
-rand_chacha = { version = "0.3" }
-regex = { version = "1" }
-regex-automata = { version = "0.3", default-features = false, features = ["dfa-onepass", "dfa-search", "hybrid", "meta", "nfa-backtrack", "perf-inline", "perf-literal", "unicode"] }
-regex-syntax = { version = "0.7" }
-reqwest = { version = "0.11", features = ["blocking", "json", "rustls-tls", "stream"] }
-ring = { version = "0.16", features = ["std"] }
-schemars = { version = "0.8", features = ["bytes", "chrono", "uuid1"] }
-semver = { version = "1", features = ["serde"] }
-serde = { version = "1", features = ["alloc", "derive", "rc"] }
-sha2 = { version = "0.10", features = ["oid"] }
-signature = { version = "2", default-features = false, features = ["digest", "rand_core", "std"] }
-similar = { version = "2", features = ["inline", "unicode"] }
-slog = { version = "2", features = ["dynamic-keys", "max_level_trace", "release_max_level_debug", "release_max_level_trace"] }
-spin = { version = "0.9" }
-string_cache = { version = "0.8" }
-subtle = { version = "2" }
-syn-dff4ba8e3ae991db = { package = "syn", version = "1", features = ["extra-traits", "fold", "full", "visit"] }
-syn-f595c2ba2a3f28df = { package = "syn", version = "2", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] }
-textwrap = { version = "0.16" }
-time = { version = "0.3", features = ["formatting", "local-offset", "macros", "parsing"] }
-time-macros = { version = "0.2", default-features = false, features = ["formatting", "parsing"] }
-tokio = { version = "1", features = ["full", "test-util"] }
-tokio-postgres = { version = "0.7", features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] }
-tokio-stream = { version = "0.1", features = ["net"] }
-toml = { version = "0.7" }
-toml_datetime = { version = "0.6", default-features = false, features = ["serde"] }
-toml_edit = { version = "0.19", features = ["serde"] }
-tracing = { version = "0.1", features = ["log"] }
-trust-dns-proto = { version = "0.22" }
-unicode-bidi = { version = "0.3" }
-unicode-normalization = { version = "0.1" }
-unicode-xid = { version = "0.2" }
-usdt = { version = "0.3" }
-uuid = { version = "1", features = ["serde", "v4"] }
-yasna = { version = "0.5", features = ["bit-vec", "num-bigint", "std", "time"] }
-zeroize = { version = "1", features = ["std", "zeroize_derive"] }
-zip = { version = "0.6", default-features = false, features = ["bzip2", "deflate"] }
+generic-array = { version = "0.14.7", default-features = false, features = ["more_lengths", "zeroize"] }
+getrandom = { version = "0.2.10", default-features = false, features = ["js", "rdrand", "std"] }
+hashbrown-582f2526e08bb6a0 = { package = "hashbrown", version = "0.14.0", features = ["raw"] }
+hashbrown-594e8ee84c453af0 = { package = "hashbrown", version = "0.13.2" }
+hashbrown-5ef9efb8ec2df382 = { package = "hashbrown", version = "0.12.3", features = ["raw"] }
+hex = { version = "0.4.3", features = ["serde"] }
+hyper = { version = "0.14.27", features = ["full"] }
+indexmap-dff4ba8e3ae991db = { package = "indexmap", version = "1.9.3", default-features = false, features = ["serde-1", "std"] }
+indexmap-f595c2ba2a3f28df = { package = "indexmap", version = "2.0.0", features = ["serde"] }
+inout = { version = "0.1.3", default-features = false, features = ["std"] }
+ipnetwork = { version = "0.20.0", features = ["schemars"] }
+itertools = { version = "0.10.5" }
+lalrpop-util = { version = "0.19.12" }
+lazy_static = { version = "1.4.0", default-features = false, features = ["spin_no_std"] }
+libc = { version = "0.2.148", features = ["extra_traits"] }
+log = { version = "0.4.20", default-features = false, features = ["std"] }
+managed = { version = "0.8.0", default-features = false, features = ["alloc", "map"] }
+memchr = { version = "2.6.3" }
+num-bigint = { version = "0.4.4", features = ["rand"] }
+num-integer = { version = "0.1.45", features = ["i128"] }
+num-iter = { version = "0.1.43", default-features = false, features = ["i128"] }
+num-traits = { version = "0.2.16", features = ["i128", "libm"] }
+openapiv3 = { version = "1.0.3", default-features = false, features = ["skip_serializing_defaults"] }
+parking_lot = { version = "0.12.1", features = ["send_guard"] }
+petgraph = { version = "0.6.4", features = ["serde-1"] }
+phf_shared = { version = "0.11.2" }
+postgres-types = { version = "0.2.6", default-features = false, features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] }
+ppv-lite86 = { version = "0.2.17", default-features = false, features = ["simd", "std"] }
+predicates = { version = "3.0.3" }
+rand = { version = "0.8.5", features = ["min_const_gen", "small_rng"] }
+rand_chacha = { version = "0.3.1" }
+regex = { version = "1.9.5" }
+regex-automata = { version = "0.3.8", default-features = false, features = ["dfa-onepass", "dfa-search", "hybrid", "meta", "nfa-backtrack", "perf-inline", "perf-literal", "unicode"] }
+regex-syntax = { version = "0.7.5" }
+reqwest = { version = "0.11.20", features = ["blocking", "json", "rustls-tls", "stream"] }
+ring = { version = "0.16.20", features = ["std"] }
+schemars = { version = "0.8.13", features = ["bytes", "chrono", "uuid1"] }
+semver = { version = "1.0.18", features = ["serde"] }
+serde = { version = "1.0.188", features = ["alloc", "derive", "rc"] }
+sha2 = { version = "0.10.7", features = ["oid"] }
+signature = { version = "2.1.0", default-features = false, features = ["digest", "rand_core", "std"] }
+similar = { version = "2.2.1", features = ["inline", "unicode"] }
+slog = { version = "2.7.0", features = ["dynamic-keys", "max_level_trace", "release_max_level_debug", "release_max_level_trace"] }
+spin = { version = "0.9.8" }
+string_cache = { version = "0.8.7" }
+subtle = { version = "2.5.0" }
+syn-dff4ba8e3ae991db = { package = "syn", version = "1.0.109", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] }
+syn-f595c2ba2a3f28df = { package = "syn", version = "2.0.32", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] }
+textwrap = { version = "0.16.0" }
+time = { version = "0.3.27", features = ["formatting", "local-offset", "macros", "parsing"] }
+time-macros = { version = "0.2.13", default-features = false, features = ["formatting", "parsing"] }
+tokio = { version = "1.32.0", features = ["full", "test-util"] }
+tokio-postgres = { version = "0.7.10", features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] }
+tokio-stream = { version = "0.1.14", features = ["net"] }
+toml = { version = "0.7.8" }
+tracing = { version = "0.1.37", features = ["log"] }
+tracing-core = { version = "0.1.31" }
+trust-dns-proto = { version = "0.22.0" }
+unicode-bidi = { version = "0.3.13" }
+unicode-normalization = { version = "0.1.22" }
+unicode-xid = { version = "0.2.4" }
+usdt = { version = "0.3.5" }
+uuid = { version = "1.4.1", features = ["serde", "v4"] }
+yasna = { version = "0.5.2", features = ["bit-vec", "num-bigint", "std", "time"] }
+zeroize = { version = "1.6.0", features = ["std", "zeroize_derive"] }
+zip = { version = "0.6.6", default-features = false, features = ["bzip2", "deflate"] }

[target.x86_64-unknown-linux-gnu.dependencies]
-bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2", default-features = false, features = ["std"] }
-hyper-rustls = { version = "0.24" }
-mio = { version = "0.8", features = ["net", "os-ext"] }
-once_cell = { version = "1", features = ["unstable"] }
-rustix = { version = "0.38", features = ["fs", "termios"] }
+bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] }
+hyper-rustls = { version = "0.24.1" }
+mio = { version = "0.8.8", features = ["net", "os-ext"] }
+once_cell = { version = "1.18.0", features = ["unstable"] }
+rustix = { version = "0.38.9", features = ["fs", "termios"] }

[target.x86_64-unknown-linux-gnu.build-dependencies]
-bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2", default-features = false, features = ["std"] }
-hyper-rustls = { version = "0.24" }
-mio = { version = "0.8", features = ["net", "os-ext"] }
-once_cell = { version = "1", features = ["unstable"] }
-rustix = { version = "0.38", features = ["fs", "termios"] }
+bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] }
+hyper-rustls = { version = "0.24.1" }
+mio = { version = "0.8.8", features = ["net", "os-ext"] }
+once_cell = { version = "1.18.0", features = ["unstable"] }
+rustix = { version = "0.38.9", features = ["fs", "termios"] }

[target.x86_64-apple-darwin.dependencies]
-bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2", default-features = false, features = ["std"] }
-hyper-rustls = { version = "0.24" }
-mio = { version = "0.8", features = ["net", "os-ext"] }
-once_cell = { version = "1", features = ["unstable"] }
-rustix = { version = "0.38", features = ["fs", "termios"] }
+bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] }
+hyper-rustls = { version = "0.24.1" }
+mio = { version = "0.8.8", features = ["net", "os-ext"] }
+once_cell = { version = "1.18.0", features = ["unstable"] }
+rustix = { version = "0.38.9", features = ["fs", "termios"] }

[target.x86_64-apple-darwin.build-dependencies]
-bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2", default-features = false, features = ["std"] }
-hyper-rustls = { version = "0.24" }
-mio = { version = "0.8", features = ["net", "os-ext"] }
-once_cell = { version = "1", features = ["unstable"] }
-rustix = { version = "0.38", features = ["fs", "termios"] }
+bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] }
+hyper-rustls = { version = "0.24.1" }
+mio = { version = "0.8.8", features = ["net", "os-ext"] }
+once_cell = { version = "1.18.0", features = ["unstable"] }
+rustix = { version = "0.38.9", features = ["fs", "termios"] }

[target.aarch64-apple-darwin.dependencies]
-bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2", default-features = false, features = ["std"] }
-hyper-rustls = { version = "0.24" }
-mio = { version = "0.8", features = ["net", "os-ext"] }
-once_cell = { version = "1", features = ["unstable"] }
-rustix = { version = "0.38", features = ["fs", "termios"] }
+bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] }
+hyper-rustls = { version = "0.24.1" }
+mio = { version = "0.8.8", features = ["net", "os-ext"] }
+once_cell = { version = "1.18.0", features = ["unstable"] }
+rustix = { version = "0.38.9", features = ["fs", "termios"] }

[target.aarch64-apple-darwin.build-dependencies]
-bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2", default-features = false, features = ["std"] }
-hyper-rustls = { version = "0.24" }
-mio = { version = "0.8", features = ["net", "os-ext"] }
-once_cell = { version = "1", features = ["unstable"] }
-rustix = { version = "0.38", features = ["fs", "termios"] }
+bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] }
+hyper-rustls = { version = "0.24.1" }
+mio = { version = "0.8.8", features = ["net", "os-ext"] }
+once_cell = { version = "1.18.0", features = ["unstable"] }
+rustix = { version = "0.38.9", features = ["fs", "termios"] }

[target.x86_64-unknown-illumos.dependencies]
-bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2", default-features = false, features = ["std"] }
-hyper-rustls = { version = "0.24" }
-mio = { version = "0.8", features = ["net", "os-ext"] }
-once_cell = { version = "1", features = ["unstable"] }
-rustix = { version = "0.38", features = ["fs", "termios"] }
+bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] }
+hyper-rustls = { version = "0.24.1" }
+mio = { version = "0.8.8", features = ["net", "os-ext"] }
+once_cell = { version = "1.18.0", features = ["unstable"] }
+rustix = { version = "0.38.9", features = ["fs", "termios"] }
+toml_datetime = { version = "0.6.3", default-features = false, features = ["serde"] }
+toml_edit = { version = "0.19.15", features = ["serde"] }

[target.x86_64-unknown-illumos.build-dependencies]
-bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2", default-features = false, features = ["std"] }
-hyper-rustls = { version = "0.24" }
-mio = { version = "0.8", features = ["net", "os-ext"] }
-once_cell = { version = "1", features = ["unstable"] }
-rustix = { version = "0.38", features = ["fs", "termios"] }
+bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] }
+hyper-rustls = { version = "0.24.1" }
+mio = { version = "0.8.8", features = ["net", "os-ext"] }
+once_cell = { version = "1.18.0", features = ["unstable"] }
+rustix = { version = "0.38.9", features = ["fs", "termios"] }
+toml_datetime = { version = "0.6.3", default-features = false, features = ["serde"] }
+toml_edit = { version = "0.19.15", features = ["serde"] }

### END HAKARI SECTION