From 9ef3f328a1331970492070d87a385f9f246ee251 Mon Sep 17 00:00:00 2001 From: Alan Hanson Date: Thu, 16 Nov 2023 07:47:35 -0800 Subject: [PATCH 01/56] Update Crucible (51a3121) and Propolis (8ad2d4f) (#4499) Crucible changes: test-crudd can collect more info, test_up can be gentle (#997) Decrypt without holding the downstairs lock (#1021) Add raw file backend (#991) Don't hold the Downstairs lock while doing encryption (#1019) Antagonize the Crucible Agent (#1011) The Pantry should reject non-block sized writes (#1013) Propolis changes: make headroom for linux virtio/9p client impl (#565) Guarantee Tokio access for Entity methods --------- Co-authored-by: Alan Hanson --- Cargo.lock | 16 ++++++++-------- Cargo.toml | 12 ++++++------ package-manifest.toml | 12 ++++++------ 3 files changed, 20 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f98f7c06ba..58d0653728 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -447,7 +447,7 @@ dependencies = [ [[package]] name = "bhyve_api" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=5ed82315541271e2734746a9ca79e39f35c12283#5ed82315541271e2734746a9ca79e39f35c12283" +source = "git+https://github.com/oxidecomputer/propolis?rev=54398875a2125227d13827d4236dce943c019b1c#54398875a2125227d13827d4236dce943c019b1c" dependencies = [ "bhyve_api_sys", "libc", @@ -457,7 +457,7 @@ dependencies = [ [[package]] name = "bhyve_api_sys" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=5ed82315541271e2734746a9ca79e39f35c12283#5ed82315541271e2734746a9ca79e39f35c12283" +source = "git+https://github.com/oxidecomputer/propolis?rev=54398875a2125227d13827d4236dce943c019b1c#54398875a2125227d13827d4236dce943c019b1c" dependencies = [ "libc", "strum", @@ -1270,7 +1270,7 @@ dependencies = [ [[package]] name = "crucible-agent-client" version = "0.0.1" -source = "git+https://github.com/oxidecomputer/crucible?rev=da534e73380f3cc53ca0de073e1ea862ae32109b#da534e73380f3cc53ca0de073e1ea862ae32109b" +source = "git+https://github.com/oxidecomputer/crucible?rev=51a3121c8318fc7ac97d74f917ce1d37962e785f#51a3121c8318fc7ac97d74f917ce1d37962e785f" dependencies = [ "anyhow", "chrono", @@ -1286,7 +1286,7 @@ dependencies = [ [[package]] name = "crucible-pantry-client" version = "0.0.1" -source = "git+https://github.com/oxidecomputer/crucible?rev=da534e73380f3cc53ca0de073e1ea862ae32109b#da534e73380f3cc53ca0de073e1ea862ae32109b" +source = "git+https://github.com/oxidecomputer/crucible?rev=51a3121c8318fc7ac97d74f917ce1d37962e785f#51a3121c8318fc7ac97d74f917ce1d37962e785f" dependencies = [ "anyhow", "chrono", @@ -1303,7 +1303,7 @@ dependencies = [ [[package]] name = "crucible-smf" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/crucible?rev=da534e73380f3cc53ca0de073e1ea862ae32109b#da534e73380f3cc53ca0de073e1ea862ae32109b" +source = "git+https://github.com/oxidecomputer/crucible?rev=51a3121c8318fc7ac97d74f917ce1d37962e785f#51a3121c8318fc7ac97d74f917ce1d37962e785f" dependencies = [ "crucible-workspace-hack", "libc", @@ -6095,7 +6095,7 @@ dependencies = [ [[package]] name = "propolis-client" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=5ed82315541271e2734746a9ca79e39f35c12283#5ed82315541271e2734746a9ca79e39f35c12283" +source = "git+https://github.com/oxidecomputer/propolis?rev=54398875a2125227d13827d4236dce943c019b1c#54398875a2125227d13827d4236dce943c019b1c" dependencies = [ "async-trait", "base64 0.21.5", @@ -6116,7 +6116,7 @@ dependencies = [ [[package]] name = 
"propolis-mock-server" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=5ed82315541271e2734746a9ca79e39f35c12283#5ed82315541271e2734746a9ca79e39f35c12283" +source = "git+https://github.com/oxidecomputer/propolis?rev=54398875a2125227d13827d4236dce943c019b1c#54398875a2125227d13827d4236dce943c019b1c" dependencies = [ "anyhow", "atty", @@ -6146,7 +6146,7 @@ dependencies = [ [[package]] name = "propolis_types" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=5ed82315541271e2734746a9ca79e39f35c12283#5ed82315541271e2734746a9ca79e39f35c12283" +source = "git+https://github.com/oxidecomputer/propolis?rev=54398875a2125227d13827d4236dce943c019b1c#54398875a2125227d13827d4236dce943c019b1c" dependencies = [ "schemars", "serde", diff --git a/Cargo.toml b/Cargo.toml index c51ac069a9..82bca496a5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -169,9 +169,9 @@ cookie = "0.16" criterion = { version = "0.5.1", features = [ "async_tokio" ] } crossbeam = "0.8" crossterm = { version = "0.27.0", features = ["event-stream"] } -crucible-agent-client = { git = "https://github.com/oxidecomputer/crucible", rev = "da534e73380f3cc53ca0de073e1ea862ae32109b" } -crucible-pantry-client = { git = "https://github.com/oxidecomputer/crucible", rev = "da534e73380f3cc53ca0de073e1ea862ae32109b" } -crucible-smf = { git = "https://github.com/oxidecomputer/crucible", rev = "da534e73380f3cc53ca0de073e1ea862ae32109b" } +crucible-agent-client = { git = "https://github.com/oxidecomputer/crucible", rev = "51a3121c8318fc7ac97d74f917ce1d37962e785f" } +crucible-pantry-client = { git = "https://github.com/oxidecomputer/crucible", rev = "51a3121c8318fc7ac97d74f917ce1d37962e785f" } +crucible-smf = { git = "https://github.com/oxidecomputer/crucible", rev = "51a3121c8318fc7ac97d74f917ce1d37962e785f" } curve25519-dalek = "4" datatest-stable = "0.2.3" display-error-chain = "0.2.0" @@ -290,9 +290,9 @@ pretty-hex = "0.3.0" proc-macro2 = "1.0" progenitor = { git = "https://github.com/oxidecomputer/progenitor", branch = "main" } progenitor-client = { git = "https://github.com/oxidecomputer/progenitor", branch = "main" } -bhyve_api = { git = "https://github.com/oxidecomputer/propolis", rev = "5ed82315541271e2734746a9ca79e39f35c12283" } -propolis-client = { git = "https://github.com/oxidecomputer/propolis", rev = "5ed82315541271e2734746a9ca79e39f35c12283" } -propolis-mock-server = { git = "https://github.com/oxidecomputer/propolis", rev = "5ed82315541271e2734746a9ca79e39f35c12283" } +bhyve_api = { git = "https://github.com/oxidecomputer/propolis", rev = "54398875a2125227d13827d4236dce943c019b1c" } +propolis-client = { git = "https://github.com/oxidecomputer/propolis", rev = "54398875a2125227d13827d4236dce943c019b1c" } +propolis-mock-server = { git = "https://github.com/oxidecomputer/propolis", rev = "54398875a2125227d13827d4236dce943c019b1c" } proptest = "1.3.1" quote = "1.0" rand = "0.8.5" diff --git a/package-manifest.toml b/package-manifest.toml index 61c90a3e75..c65c5b6933 100644 --- a/package-manifest.toml +++ b/package-manifest.toml @@ -384,10 +384,10 @@ only_for_targets.image = "standard" # 3. 
Use source.type = "manual" instead of "prebuilt" source.type = "prebuilt" source.repo = "crucible" -source.commit = "da534e73380f3cc53ca0de073e1ea862ae32109b" +source.commit = "51a3121c8318fc7ac97d74f917ce1d37962e785f" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/crucible/image//crucible.sha256.txt -source.sha256 = "572ac3b19e51b4e476266a62c2b7e06eff81c386cb48247c4b9f9b1e2ee81895" +source.sha256 = "897d0fd6c0b82db42256a63a13c228152e1117434afa2681f649b291e3c6f46d" output.type = "zone" [package.crucible-pantry] @@ -395,10 +395,10 @@ service_name = "crucible_pantry" only_for_targets.image = "standard" source.type = "prebuilt" source.repo = "crucible" -source.commit = "da534e73380f3cc53ca0de073e1ea862ae32109b" +source.commit = "51a3121c8318fc7ac97d74f917ce1d37962e785f" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/crucible/image//crucible-pantry.sha256.txt -source.sha256 = "812269958e18f54d72bc10bb4fb81f26c084cf762da7fd98e63d58c689be9ad1" +source.sha256 = "fe545de7ac4f15454d7827927149c5f0fc68ce9545b4f1ef96aac9ac8039805a" output.type = "zone" # Refer to @@ -409,10 +409,10 @@ service_name = "propolis-server" only_for_targets.image = "standard" source.type = "prebuilt" source.repo = "propolis" -source.commit = "4019eb10fc2f4ba9bf210d0461dc6292b68309c2" +source.commit = "54398875a2125227d13827d4236dce943c019b1c" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/propolis/image//propolis-server.sha256.txt -source.sha256 = "aa1d9dc5c9117c100f9636901e8eec6679d7dfbf869c46b7f2873585f94a1b89" +source.sha256 = "01b8563db6626f90ee3fb6d97e7921b0a680373d843c1bea7ebf46fcea4f7b28" output.type = "zone" [package.mg-ddm-gz] From ca9d90a79c2a7a9ffdca12a705d007d7f28ce61f Mon Sep 17 00:00:00 2001 From: "oxide-reflector-bot[bot]" <130185838+oxide-reflector-bot[bot]@users.noreply.github.com> Date: Thu, 16 Nov 2023 17:02:11 +0000 Subject: [PATCH 02/56] Update maghemite to `12b392b` (#4505) --- package-manifest.toml | 12 ++++++------ tools/maghemite_ddm_openapi_version | 2 +- tools/maghemite_mg_openapi_version | 4 ++-- tools/maghemite_mgd_checksums | 4 ++-- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/package-manifest.toml b/package-manifest.toml index c65c5b6933..f320215a13 100644 --- a/package-manifest.toml +++ b/package-manifest.toml @@ -425,10 +425,10 @@ source.repo = "maghemite" # `tools/maghemite_openapi_version`. Failing to do so will cause a failure when # building `ddm-admin-client` (which will instruct you to update # `tools/maghemite_openapi_version`). -source.commit = "aefdfd3a57e5ca1949d4a913b8e35ce8cd7dfa8b" +source.commit = "12b392be94ff93abc3017bf2610a3b18e2174a2d" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/maghemite/image//maghemite.sha256.txt -source.sha256 = "d871406ed926571efebdab248de08d4f1ca6c31d4f9a691ce47b186474165c57" +source.sha256 = "38851c79c85d53e997db748520fb27c82299ce7e58a550e35646a548498f1271" output.type = "tarball" [package.mg-ddm] @@ -441,10 +441,10 @@ source.repo = "maghemite" # `tools/maghemite_openapi_version`. Failing to do so will cause a failure when # building `ddm-admin-client` (which will instruct you to update # `tools/maghemite_openapi_version`). 
-source.commit = "aefdfd3a57e5ca1949d4a913b8e35ce8cd7dfa8b" +source.commit = "12b392be94ff93abc3017bf2610a3b18e2174a2d" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/maghemite/image//mg-ddm.sha256.txt -source.sha256 = "85ec05a8726989b5cb0a567de6b0855f6f84b6f3409ac99ccaf372be5821e45d" +source.sha256 = "8cd94e9a6f6175081ce78f0281085a08a5306cde453d8e21deb28050945b1d88" output.type = "zone" output.intermediate_only = true @@ -456,10 +456,10 @@ source.repo = "maghemite" # `tools/maghemite_openapi_version`. Failing to do so will cause a failure when # building `ddm-admin-client` (which will instruct you to update # `tools/maghemite_openapi_version`). -source.commit = "aefdfd3a57e5ca1949d4a913b8e35ce8cd7dfa8b" +source.commit = "12b392be94ff93abc3017bf2610a3b18e2174a2d" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/maghemite/image//mg-ddm.sha256.txt -source.sha256 = "aa7241cd35976f28f25aaf3ce2ce2af14dae1da9d67585c7de3b724dbcc55e60" +source.sha256 = "c4a7a626c84a28de3d2c6bfd85592bda2abad8cf5b41b2ce90b9c03904ccd3df" output.type = "zone" output.intermediate_only = true diff --git a/tools/maghemite_ddm_openapi_version b/tools/maghemite_ddm_openapi_version index 40db886f69..76bdb9ca92 100644 --- a/tools/maghemite_ddm_openapi_version +++ b/tools/maghemite_ddm_openapi_version @@ -1,2 +1,2 @@ -COMMIT="aefdfd3a57e5ca1949d4a913b8e35ce8cd7dfa8b" +COMMIT="12b392be94ff93abc3017bf2610a3b18e2174a2d" SHA2="9737906555a60911636532f00f1dc2866dc7cd6553beb106e9e57beabad41cdf" diff --git a/tools/maghemite_mg_openapi_version b/tools/maghemite_mg_openapi_version index ad88fef13e..d6d1788cbc 100644 --- a/tools/maghemite_mg_openapi_version +++ b/tools/maghemite_mg_openapi_version @@ -1,2 +1,2 @@ -COMMIT="aefdfd3a57e5ca1949d4a913b8e35ce8cd7dfa8b" -SHA2="b3f55fe24e54530fdf96c22a033f9edc0bad9c0a5e3344763a23e52b251d5113" +COMMIT="12b392be94ff93abc3017bf2610a3b18e2174a2d" +SHA2="6c1fab8d5028b52a161d8bf02aae47844699cdc5f7b28e1ac519fc4ec1ab3971" diff --git a/tools/maghemite_mgd_checksums b/tools/maghemite_mgd_checksums index 7c1644b031..9657147159 100644 --- a/tools/maghemite_mgd_checksums +++ b/tools/maghemite_mgd_checksums @@ -1,2 +1,2 @@ -CIDL_SHA256="aa7241cd35976f28f25aaf3ce2ce2af14dae1da9d67585c7de3b724dbcc55e60" -MGD_LINUX_SHA256="a39387c361ff2c2d0701d66c00b10e43c72fb5ddd1a5900b59ecccb832c80731" \ No newline at end of file +CIDL_SHA256="c4a7a626c84a28de3d2c6bfd85592bda2abad8cf5b41b2ce90b9c03904ccd3df" +MGD_LINUX_SHA256="81231b30872fa1c581aa22c101f32d11f33f335758ac1fd2653436fbc7aab93f" \ No newline at end of file From 2585b88d3b74976792d188fea6e79f4badd20077 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 17 Nov 2023 09:06:58 -0800 Subject: [PATCH 03/56] [db-queries] Avoid interactive transaction for all authz checks (#4506) While working on https://github.com/oxidecomputer/customer-support/issues/46 , I noticed that we perform an interactive transaction for every authz check perfomed by the database. This PR re-writes this code to avoid using an interactive transaction altogether. 
--- nexus/db-queries/src/db/datastore/role.rs | 69 +++++++++++------------ 1 file changed, 34 insertions(+), 35 deletions(-) diff --git a/nexus/db-queries/src/db/datastore/role.rs b/nexus/db-queries/src/db/datastore/role.rs index b2ad441475..3a57ffc44c 100644 --- a/nexus/db-queries/src/db/datastore/role.rs +++ b/nexus/db-queries/src/db/datastore/role.rs @@ -127,7 +127,8 @@ impl DataStore { resource_type: ResourceType, resource_id: Uuid, ) -> Result, Error> { - use db::schema::role_assignment::dsl; + use db::schema::role_assignment::dsl as role_dsl; + use db::schema::silo_group_membership::dsl as group_dsl; // There is no resource-specific authorization check because all // authenticated users need to be able to list their own roles -- @@ -140,41 +141,39 @@ impl DataStore { // into some hurt by assigning loads of roles to someone and having that // person attempt to access anything. - self.pool_connection_authorized(opctx).await? - .transaction_async(|conn| async move { - let mut role_assignments = dsl::role_assignment - .filter(dsl::identity_type.eq(identity_type.clone())) - .filter(dsl::identity_id.eq(identity_id)) - .filter(dsl::resource_type.eq(resource_type.to_string())) - .filter(dsl::resource_id.eq(resource_id)) - .select(RoleAssignment::as_select()) - .load_async::(&conn) - .await?; - - // Return the roles that a silo user has from their group memberships - if identity_type == IdentityType::SiloUser { - use db::schema::silo_group_membership; - - let mut group_role_assignments = dsl::role_assignment - .filter(dsl::identity_type.eq(IdentityType::SiloGroup)) - .filter(dsl::identity_id.eq_any( - silo_group_membership::dsl::silo_group_membership - .filter(silo_group_membership::dsl::silo_user_id.eq(identity_id)) - .select(silo_group_membership::dsl::silo_group_id) - )) - .filter(dsl::resource_type.eq(resource_type.to_string())) - .filter(dsl::resource_id.eq(resource_id)) - .select(RoleAssignment::as_select()) - .load_async::(&conn) - .await?; - - role_assignments.append(&mut group_role_assignments); - } + let direct_roles_query = role_dsl::role_assignment + .filter(role_dsl::identity_type.eq(identity_type.clone())) + .filter(role_dsl::identity_id.eq(identity_id)) + .filter(role_dsl::resource_type.eq(resource_type.to_string())) + .filter(role_dsl::resource_id.eq(resource_id)) + .select(RoleAssignment::as_select()); + + let roles_from_groups_query = role_dsl::role_assignment + .filter(role_dsl::identity_type.eq(IdentityType::SiloGroup)) + .filter( + role_dsl::identity_id.eq_any( + group_dsl::silo_group_membership + .filter(group_dsl::silo_user_id.eq(identity_id)) + .select(group_dsl::silo_group_id), + ), + ) + .filter(role_dsl::resource_type.eq(resource_type.to_string())) + .filter(role_dsl::resource_id.eq(resource_id)) + .select(RoleAssignment::as_select()); - Ok(role_assignments) - }) - .await - .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + let conn = self.pool_connection_authorized(opctx).await?; + if identity_type == IdentityType::SiloUser { + direct_roles_query + .union(roles_from_groups_query) + .load_async::(&*conn) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } else { + direct_roles_query + .load_async::(&*conn) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } } /// Fetches all of the externally-visible role assignments for the specified From 781ed1236489bb536506fb03ba1be39b34ed81a5 Mon Sep 17 00:00:00 2001 From: Kyle Simpson Date: Fri, 17 Nov 2023 17:30:02 +0000 Subject: [PATCH 04/56] Update 
`how-to-run.adoc` post-BGP (#4404) Small edits needed to bring this in line with how routes are configured now. `gateway_ip` is no longer explicitly named as such, so I've added an explicit comment to signpost how $GATEWAY_IP should be used w.r.t. the default route. --- docs/how-to-run.adoc | 40 +++++++++++++++++++++++++++++++++------- 1 file changed, 33 insertions(+), 7 deletions(-) diff --git a/docs/how-to-run.adoc b/docs/how-to-run.adoc index 04d274da8b..f6d780ad72 100644 --- a/docs/how-to-run.adoc +++ b/docs/how-to-run.adoc @@ -266,26 +266,52 @@ last = "192.168.1.29" This is a range of IP addresses on your external network that Omicron can assign to externally-facing services (like DNS and the API). You'll need to change these if you've picked different addresses for your external network. See <<_external_networking>> above for more on this. +You will also need to update route information if your `$GATEWAY_IP` differs from the default. +The below example demonstrates a single static gateway route; in-depth explanations for testing with BGP can be found https://docs.oxide.computer/guides/system/network-preparations#_rack_switch_configuration_with_bgp[in the Network Preparations guide] and https://docs.oxide.computer/guides/operator/configuring-bgp[the Configuring BGP guide]: + [source,toml] ---- # Configuration to bring up boundary services and make Nexus reachable from the # outside. This block assumes that you're following option (2) above: putting # your Oxide system on an existing network that you control. [rack_network_config] -# The gateway for the external network -gateway_ip = "192.168.1.199" +# An internal-only IPv6 address block which contains AZ-wide services. +# This does not need to be changed. +rack_subnet = "fd00:1122:3344:01::/56" # A range of IP addresses used by Boundary Services on the network. In a real # system, these would be addresses of the uplink ports on the Sidecar. With # softnpu, only one address is used. infra_ip_first = "192.168.1.30" infra_ip_last = "192.168.1.30" -# Name of the port. This should always be "qsfp0" when using softnpu. -uplink_port = "qsfp0" -uplink_port_speed = "40G" -uplink_port_fec="none" + +# Configurations for BGP routers to run on the scrimlets. +# This array can typically be safely left empty for home/local use, +# otherwise this is a list of { asn: u32, originate: [""] } +# structs which will be inserted when Nexus is started by sled-agent. +# See the 'Network Preparations' guide linked above. +bgp = [] + +[[rack_network_config.ports]] +# Routes associated with this port. +# NOTE: The below `nexthop` should be set to $GATEWAY_IP for your configuration +routes = [{nexthop = "192.168.1.199", destination = "0.0.0.0/0"}] +# Addresses associated with this port. # For softnpu, an address within the "infra" block above that will be used for # the softnpu uplink port. You can just pick the first address in that pool. -uplink_ip = "192.168.1.30" +addresses = ["192.168.1.30/32"] +# Name of the uplink port. This should always be "qsfp0" when using softnpu. +port = "qsfp0" +# The speed of this port. +uplink_port_speed = "40G" +# The forward error correction mode for this port. +uplink_port_fec="none" +# Switch to use for the uplink. For single-rack deployments this can be +# "switch0" (upper slot) or "switch1" (lower slot). For single-node softnpu +# and dendrite stub environments, use "switch0" +switch = "switch0" +# Neighbors we expect to peer with over BGP on this port. 
+# see: common/src/api/internal/shared.rs – BgpPeerConfig +bgp_peers = [] ---- In some configurations (not the one described here), it may be necessary to update `smf/sled-agent/$MACHINE/config.toml`: From 1860621199e1af8fd1e47a42a898d8ea9def5737 Mon Sep 17 00:00:00 2001 From: Rain Date: Fri, 17 Nov 2023 15:48:28 -0800 Subject: [PATCH 05/56] [ci] switch GHA to using the PR tip by default (#4462) Fixes #4461 (see that issue for more). --- .github/workflows/check-opte-ver.yml | 2 ++ .github/workflows/check-workspace-deps.yml | 2 ++ .github/workflows/hakari.yml | 2 ++ .github/workflows/rust.yml | 8 ++++++++ .github/workflows/validate-openapi-spec.yml | 2 ++ 5 files changed, 16 insertions(+) diff --git a/.github/workflows/check-opte-ver.yml b/.github/workflows/check-opte-ver.yml index 9fc390277b..42ef1dda11 100644 --- a/.github/workflows/check-opte-ver.yml +++ b/.github/workflows/check-opte-ver.yml @@ -10,6 +10,8 @@ jobs: runs-on: ubuntu-22.04 steps: - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + with: + ref: ${{ github.event.pull_request.head.sha }} # see omicron#4461 - name: Install jq run: sudo apt-get install -y jq - name: Install toml-cli diff --git a/.github/workflows/check-workspace-deps.yml b/.github/workflows/check-workspace-deps.yml index 7ba0c66566..f94ed32fde 100644 --- a/.github/workflows/check-workspace-deps.yml +++ b/.github/workflows/check-workspace-deps.yml @@ -11,5 +11,7 @@ jobs: runs-on: ubuntu-22.04 steps: - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + with: + ref: ${{ github.event.pull_request.head.sha }} # see omicron#4461 - name: Check Workspace Dependencies run: cargo xtask check-workspace-deps diff --git a/.github/workflows/hakari.yml b/.github/workflows/hakari.yml index 6f2dc04b91..07b7124f73 100644 --- a/.github/workflows/hakari.yml +++ b/.github/workflows/hakari.yml @@ -18,6 +18,8 @@ jobs: RUSTFLAGS: -D warnings steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 + with: + ref: ${{ github.event.pull_request.head.sha }} # see omicron#4461 - uses: actions-rs/toolchain@16499b5e05bf2e26879000db0c1d13f7e13fa3af # v1 with: toolchain: stable diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index f2581845d9..6239add88f 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -10,6 +10,8 @@ jobs: runs-on: ubuntu-22.04 steps: - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + with: + ref: ${{ github.event.pull_request.head.sha }} # see omicron#4461 - name: Report cargo version run: cargo --version - name: Report rustfmt version @@ -30,6 +32,8 @@ jobs: - name: Disable packages.microsoft.com repo run: sudo rm -f /etc/apt/sources.list.d/microsoft-prod.list - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + with: + ref: ${{ github.event.pull_request.head.sha }} # see omicron#4461 - uses: Swatinem/rust-cache@3cf7f8cc28d1b4e7d01e3783be10a97d55d483c8 # v2.7.1 if: ${{ github.ref != 'refs/heads/main' }} - name: Report cargo version @@ -58,6 +62,8 @@ jobs: - name: Disable packages.microsoft.com repo run: sudo rm -f /etc/apt/sources.list.d/microsoft-prod.list - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + with: + ref: ${{ github.event.pull_request.head.sha }} # see omicron#4461 - uses: Swatinem/rust-cache@3cf7f8cc28d1b4e7d01e3783be10a97d55d483c8 # v2.7.1 if: ${{ github.ref != 'refs/heads/main' }} - name: Report cargo version @@ -86,6 +92,8 @@ jobs: - name: Disable 
packages.microsoft.com repo run: sudo rm -f /etc/apt/sources.list.d/microsoft-prod.list - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + with: + ref: ${{ github.event.pull_request.head.sha }} # see omicron#4461 - uses: Swatinem/rust-cache@3cf7f8cc28d1b4e7d01e3783be10a97d55d483c8 # v2.7.1 if: ${{ github.ref != 'refs/heads/main' }} - name: Report cargo version diff --git a/.github/workflows/validate-openapi-spec.yml b/.github/workflows/validate-openapi-spec.yml index 2716c0571f..ea77ed9497 100644 --- a/.github/workflows/validate-openapi-spec.yml +++ b/.github/workflows/validate-openapi-spec.yml @@ -11,6 +11,8 @@ jobs: runs-on: ubuntu-22.04 steps: - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + with: + ref: ${{ github.event.pull_request.head.sha }} # see omicron#4461 - uses: actions/setup-node@1a4442cacd436585916779262731d5b162bc6ec7 # v3.8.2 with: node-version: '18' From d13a0dc8f8609daf57be6725bc1c5467ea7e1fe4 Mon Sep 17 00:00:00 2001 From: Rain Date: Fri, 17 Nov 2023 17:02:02 -0800 Subject: [PATCH 06/56] [wicket] produce a better error message if MGS isn't available (#4510) Currently, we just time out in this case. Produce a better error message by waiting for 80% of `WICKETD_TIMEOUT`. --- wicket-common/src/lib.rs | 7 ++++++ wicket/src/cli/rack_update.rs | 3 ++- wicket/src/wicketd.rs | 5 +---- wicketd/src/http_entrypoints.rs | 40 +++++++++++++++++++++++++-------- 4 files changed, 41 insertions(+), 14 deletions(-) diff --git a/wicket-common/src/lib.rs b/wicket-common/src/lib.rs index 9e92d20c0a..aea0634ce7 100644 --- a/wicket-common/src/lib.rs +++ b/wicket-common/src/lib.rs @@ -4,6 +4,13 @@ // Copyright 2023 Oxide Computer Company +use std::time::Duration; + pub mod rack_setup; pub mod rack_update; pub mod update_events; + +// WICKETD_TIMEOUT used to be 1 second, but that might be too short (and in +// particular might be responsible for +// https://github.com/oxidecomputer/omicron/issues/3103). 
+pub const WICKETD_TIMEOUT: Duration = Duration::from_secs(5); diff --git a/wicket/src/cli/rack_update.rs b/wicket/src/cli/rack_update.rs index f539c22c35..fa41fa7b8c 100644 --- a/wicket/src/cli/rack_update.rs +++ b/wicket/src/cli/rack_update.rs @@ -22,6 +22,7 @@ use update_engine::{ }; use wicket_common::{ rack_update::ClearUpdateStateResponse, update_events::EventReport, + WICKETD_TIMEOUT, }; use wicketd_client::types::{ClearUpdateStateParams, StartUpdateParams}; @@ -31,7 +32,7 @@ use crate::{ parse_event_report_map, ComponentId, CreateClearUpdateStateOptions, CreateStartUpdateOptions, }, - wicketd::{create_wicketd_client, WICKETD_TIMEOUT}, + wicketd::create_wicketd_client, }; use super::command::CommandOutput; diff --git a/wicket/src/wicketd.rs b/wicket/src/wicketd.rs index ec1130a594..a951bf428b 100644 --- a/wicket/src/wicketd.rs +++ b/wicket/src/wicketd.rs @@ -10,6 +10,7 @@ use std::net::SocketAddrV6; use tokio::sync::mpsc::{self, Sender, UnboundedSender}; use tokio::time::{interval, Duration, MissedTickBehavior}; use wicket_common::rack_update::{SpIdentifier, SpType}; +use wicket_common::WICKETD_TIMEOUT; use wicketd_client::types::{ AbortUpdateOptions, ClearUpdateStateOptions, ClearUpdateStateParams, GetInventoryParams, GetInventoryResponse, GetLocationResponse, @@ -38,10 +39,6 @@ impl From for SpIdentifier { } const WICKETD_POLL_INTERVAL: Duration = Duration::from_millis(500); -// WICKETD_TIMEOUT used to be 1 second, but that might be too short (and in -// particular might be responsible for -// https://github.com/oxidecomputer/omicron/issues/3103). -pub(crate) const WICKETD_TIMEOUT: Duration = Duration::from_secs(5); // Assume that these requests are periodic on the order of seconds or the // result of human interaction. In either case, this buffer should be plenty diff --git a/wicketd/src/http_entrypoints.rs b/wicketd/src/http_entrypoints.rs index d6cb6ebd6d..dbd3e31072 100644 --- a/wicketd/src/http_entrypoints.rs +++ b/wicketd/src/http_entrypoints.rs @@ -51,6 +51,7 @@ use std::time::Duration; use tokio::io::AsyncWriteExt; use wicket_common::rack_setup::PutRssUserConfigInsensitive; use wicket_common::update_events::EventReport; +use wicket_common::WICKETD_TIMEOUT; use crate::ServerContext; @@ -896,21 +897,42 @@ async fn post_start_update( // 1. We haven't pulled its state in our inventory (most likely cause: the // cubby is empty; less likely cause: the SP is misbehaving, which will // make updating it very unlikely to work anyway) - // 2. We have pulled its state but our hardware manager says we can't update - // it (most likely cause: the target is the sled we're currently running - // on; less likely cause: our hardware manager failed to get our local - // identifying information, and it refuses to update this target out of - // an abundance of caution). + // 2. We have pulled its state but our hardware manager says we can't + // update it (most likely cause: the target is the sled we're currently + // running on; less likely cause: our hardware manager failed to get our + // local identifying information, and it refuses to update this target + // out of an abundance of caution). // - // First, get our most-recently-cached inventory view. - let inventory = match rqctx.mgs_handle.get_cached_inventory().await { - Ok(inventory) => inventory, - Err(ShutdownInProgress) => { + // First, get our most-recently-cached inventory view. (Only wait 80% of + // WICKETD_TIMEOUT for this: if even a cached inventory isn't available, + // it's because we've never established contact with MGS. 
In that case, we + // should produce a useful error message rather than timing out on the + // client.) + let inventory = match tokio::time::timeout( + WICKETD_TIMEOUT.mul_f32(0.8), + rqctx.mgs_handle.get_cached_inventory(), + ) + .await + { + Ok(Ok(inventory)) => inventory, + Ok(Err(ShutdownInProgress)) => { return Err(HttpError::for_unavail( None, "Server is shutting down".into(), )); } + Err(_) => { + // Have to construct an HttpError manually because + // HttpError::for_unavail doesn't accept an external message. + let message = + "Rack inventory not yet available (is MGS alive?)".to_owned(); + return Err(HttpError { + status_code: http::StatusCode::SERVICE_UNAVAILABLE, + error_code: None, + external_message: message.clone(), + internal_message: message, + }); + } }; // Error cases. From 14f8f3159d40c7d044d12f1424d209cbc100a9cb Mon Sep 17 00:00:00 2001 From: Levon Tarver <11586085+internet-diglett@users.noreply.github.com> Date: Fri, 17 Nov 2023 22:22:17 -0600 Subject: [PATCH 07/56] NAT RPW (#3804) * Add db table for tracking nat entries * Add endpoint for retrieving changesets * Update instance sagas to update table and trigger RPW * Periodically cleanup soft-deleted entries that no longer need to be sync'd by dendrite. The other half of the RPW lives in Dendrite. It will periodically check for a changeset, or check for a changeset when the trigger endpoint is called by the relevant saga / nexus operation. --- common/src/api/external/mod.rs | 1 + common/src/nexus_config.rs | 17 +- dev-tools/omdb/src/bin/omdb/nexus.rs | 1 + dev-tools/omdb/tests/env.out | 15 + dev-tools/omdb/tests/successes.out | 12 + nexus/db-model/src/ipv4_nat_entry.rs | 81 ++++ nexus/db-model/src/lib.rs | 2 + nexus/db-model/src/schema.rs | 28 +- .../src/db/datastore/ipv4_nat_entry.rs | 440 ++++++++++++++++++ nexus/db-queries/src/db/datastore/mod.rs | 1 + nexus/examples/config.toml | 1 + nexus/src/app/background/init.rs | 24 + nexus/src/app/background/mod.rs | 1 + nexus/src/app/background/nat_cleanup.rs | 111 +++++ nexus/src/app/instance_network.rs | 239 ++++++---- nexus/src/app/mod.rs | 1 + nexus/src/internal_api/http_entrypoints.rs | 51 ++ nexus/tests/config.test.toml | 1 + openapi/nexus-internal.json | 104 +++++ package-manifest.toml | 12 +- schema/crdb/{10.0.0 => 11.0.0}/README.md | 0 schema/crdb/{10.0.0 => 11.0.0}/up01.sql | 0 schema/crdb/{10.0.0 => 11.0.0}/up02.sql | 0 schema/crdb/{10.0.0 => 11.0.0}/up03.sql | 0 schema/crdb/{10.0.0 => 11.0.0}/up04.sql | 0 schema/crdb/{10.0.0 => 11.0.0}/up05.sql | 0 schema/crdb/{10.0.0 => 11.0.0}/up06.sql | 0 schema/crdb/{10.0.0 => 11.0.0}/up07.sql | 0 schema/crdb/{10.0.0 => 11.0.0}/up08.sql | 0 schema/crdb/{10.0.0 => 11.0.0}/up09.sql | 0 schema/crdb/11.0.0/up1.sql | 1 + schema/crdb/{10.0.0 => 11.0.0}/up10.sql | 0 schema/crdb/{10.0.0 => 11.0.0}/up11.sql | 0 schema/crdb/{10.0.0 => 11.0.0}/up12.sql | 0 schema/crdb/11.0.0/up2.sql | 13 + schema/crdb/11.0.0/up3.sql | 13 + schema/crdb/11.0.0/up4.sql | 5 + schema/crdb/11.0.0/up5.sql | 1 + schema/crdb/11.0.0/up6.sql | 13 + schema/crdb/dbinit.sql | 80 +++- smf/nexus/multi-sled/config-partial.toml | 1 + smf/nexus/single-sled/config-partial.toml | 1 + tools/dendrite_openapi_version | 4 +- tools/dendrite_stub_checksums | 6 +- 44 files changed, 1168 insertions(+), 113 deletions(-) create mode 100644 nexus/db-model/src/ipv4_nat_entry.rs create mode 100644 nexus/db-queries/src/db/datastore/ipv4_nat_entry.rs create mode 100644 nexus/src/app/background/nat_cleanup.rs rename schema/crdb/{10.0.0 => 11.0.0}/README.md (100%) rename 
schema/crdb/{10.0.0 => 11.0.0}/up01.sql (100%) rename schema/crdb/{10.0.0 => 11.0.0}/up02.sql (100%) rename schema/crdb/{10.0.0 => 11.0.0}/up03.sql (100%) rename schema/crdb/{10.0.0 => 11.0.0}/up04.sql (100%) rename schema/crdb/{10.0.0 => 11.0.0}/up05.sql (100%) rename schema/crdb/{10.0.0 => 11.0.0}/up06.sql (100%) rename schema/crdb/{10.0.0 => 11.0.0}/up07.sql (100%) rename schema/crdb/{10.0.0 => 11.0.0}/up08.sql (100%) rename schema/crdb/{10.0.0 => 11.0.0}/up09.sql (100%) create mode 100644 schema/crdb/11.0.0/up1.sql rename schema/crdb/{10.0.0 => 11.0.0}/up10.sql (100%) rename schema/crdb/{10.0.0 => 11.0.0}/up11.sql (100%) rename schema/crdb/{10.0.0 => 11.0.0}/up12.sql (100%) create mode 100644 schema/crdb/11.0.0/up2.sql create mode 100644 schema/crdb/11.0.0/up3.sql create mode 100644 schema/crdb/11.0.0/up4.sql create mode 100644 schema/crdb/11.0.0/up5.sql create mode 100644 schema/crdb/11.0.0/up6.sql diff --git a/common/src/api/external/mod.rs b/common/src/api/external/mod.rs index fcea57220d..adf661516a 100644 --- a/common/src/api/external/mod.rs +++ b/common/src/api/external/mod.rs @@ -750,6 +750,7 @@ pub enum ResourceType { UserBuiltin, Zpool, Vmm, + Ipv4NatEntry, } // IDENTITY METADATA diff --git a/common/src/nexus_config.rs b/common/src/nexus_config.rs index 4e821e2676..94c39b4436 100644 --- a/common/src/nexus_config.rs +++ b/common/src/nexus_config.rs @@ -335,6 +335,8 @@ pub struct BackgroundTaskConfig { pub dns_external: DnsTasksConfig, /// configuration for external endpoint list watcher pub external_endpoints: ExternalEndpointsConfig, + /// configuration for nat table garbage collector + pub nat_cleanup: NatCleanupConfig, /// configuration for inventory tasks pub inventory: InventoryConfig, } @@ -371,6 +373,14 @@ pub struct ExternalEndpointsConfig { // allow/disallow wildcard certs, don't serve expired certs, etc.) 
} +#[serde_as] +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +pub struct NatCleanupConfig { + /// period (in seconds) for periodic activations of this background task + #[serde_as(as = "DurationSeconds")] + pub period_secs: Duration, +} + #[serde_as] #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] pub struct InventoryConfig { @@ -498,7 +508,7 @@ mod test { BackgroundTaskConfig, Config, ConfigDropshotWithTls, ConsoleConfig, Database, DeploymentConfig, DnsTasksConfig, DpdConfig, ExternalEndpointsConfig, InternalDns, InventoryConfig, LoadError, - LoadErrorKind, MgdConfig, PackageConfig, SchemeName, + LoadErrorKind, MgdConfig, NatCleanupConfig, PackageConfig, SchemeName, TimeseriesDbConfig, Tunables, UpdatesConfig, }; use crate::address::{Ipv6Subnet, RACK_PREFIX}; @@ -649,6 +659,7 @@ mod test { dns_external.period_secs_propagation = 7 dns_external.max_concurrent_server_updates = 8 external_endpoints.period_secs = 9 + nat_cleanup.period_secs = 30 inventory.period_secs = 10 inventory.nkeep = 11 inventory.disable = false @@ -746,6 +757,9 @@ mod test { external_endpoints: ExternalEndpointsConfig { period_secs: Duration::from_secs(9), }, + nat_cleanup: NatCleanupConfig { + period_secs: Duration::from_secs(30), + }, inventory: InventoryConfig { period_secs: Duration::from_secs(10), nkeep: 11, @@ -804,6 +818,7 @@ mod test { dns_external.period_secs_propagation = 7 dns_external.max_concurrent_server_updates = 8 external_endpoints.period_secs = 9 + nat_cleanup.period_secs = 30 inventory.period_secs = 10 inventory.nkeep = 3 inventory.disable = false diff --git a/dev-tools/omdb/src/bin/omdb/nexus.rs b/dev-tools/omdb/src/bin/omdb/nexus.rs index 128d4315f2..9f91d38504 100644 --- a/dev-tools/omdb/src/bin/omdb/nexus.rs +++ b/dev-tools/omdb/src/bin/omdb/nexus.rs @@ -159,6 +159,7 @@ async fn cmd_nexus_background_tasks_show( "dns_config_external", "dns_servers_external", "dns_propagation_external", + "nat_v4_garbage_collector", ] { if let Some(bgtask) = tasks.remove(name) { print_task(&bgtask); diff --git a/dev-tools/omdb/tests/env.out b/dev-tools/omdb/tests/env.out index 7949c1eb61..fd50d80c81 100644 --- a/dev-tools/omdb/tests/env.out +++ b/dev-tools/omdb/tests/env.out @@ -61,6 +61,11 @@ task: "inventory_collection" collects hardware and software inventory data from the whole system +task: "nat_v4_garbage_collector" + prunes soft-deleted IPV4 NAT entries from ipv4_nat_entry table based on a + predetermined retention policy + + --------------------------------------------- stderr: note: using Nexus URL http://127.0.0.1:REDACTED_PORT @@ -121,6 +126,11 @@ task: "inventory_collection" collects hardware and software inventory data from the whole system +task: "nat_v4_garbage_collector" + prunes soft-deleted IPV4 NAT entries from ipv4_nat_entry table based on a + predetermined retention policy + + --------------------------------------------- stderr: note: Nexus URL not specified. Will pick one from DNS. @@ -168,6 +178,11 @@ task: "inventory_collection" collects hardware and software inventory data from the whole system +task: "nat_v4_garbage_collector" + prunes soft-deleted IPV4 NAT entries from ipv4_nat_entry table based on a + predetermined retention policy + + --------------------------------------------- stderr: note: Nexus URL not specified. Will pick one from DNS. 
diff --git a/dev-tools/omdb/tests/successes.out b/dev-tools/omdb/tests/successes.out index 8162b6d9de..6bc3a85e8a 100644 --- a/dev-tools/omdb/tests/successes.out +++ b/dev-tools/omdb/tests/successes.out @@ -255,6 +255,11 @@ task: "inventory_collection" collects hardware and software inventory data from the whole system +task: "nat_v4_garbage_collector" + prunes soft-deleted IPV4 NAT entries from ipv4_nat_entry table based on a + predetermined retention policy + + --------------------------------------------- stderr: note: using Nexus URL http://127.0.0.1:REDACTED_PORT/ @@ -319,6 +324,13 @@ task: "dns_propagation_external" [::1]:REDACTED_PORT success +task: "nat_v4_garbage_collector" + configured period: every 30s + currently executing: no + last completed activation: iter 2, triggered by an explicit signal + started at (s ago) and ran for ms +warning: unknown background task: "nat_v4_garbage_collector" (don't know how to interpret details: Null) + task: "external_endpoints" configured period: every 1m currently executing: no diff --git a/nexus/db-model/src/ipv4_nat_entry.rs b/nexus/db-model/src/ipv4_nat_entry.rs new file mode 100644 index 0000000000..570a46b5e9 --- /dev/null +++ b/nexus/db-model/src/ipv4_nat_entry.rs @@ -0,0 +1,81 @@ +use std::net::{Ipv4Addr, Ipv6Addr}; + +use super::MacAddr; +use crate::{schema::ipv4_nat_entry, Ipv4Net, Ipv6Net, SqlU16, Vni}; +use chrono::{DateTime, Utc}; +use omicron_common::api::external; +use schemars::JsonSchema; +use serde::Serialize; +use uuid::Uuid; + +/// Values used to create an Ipv4NatEntry +#[derive(Insertable, Debug, Clone)] +#[diesel(table_name = ipv4_nat_entry)] +pub struct Ipv4NatValues { + pub external_address: Ipv4Net, + pub first_port: SqlU16, + pub last_port: SqlU16, + pub sled_address: Ipv6Net, + pub vni: Vni, + pub mac: MacAddr, +} + +/// Database representation of an Ipv4 NAT Entry. +#[derive(Queryable, Debug, Clone, Selectable)] +#[diesel(table_name = ipv4_nat_entry)] +pub struct Ipv4NatEntry { + pub id: Uuid, + pub external_address: Ipv4Net, + pub first_port: SqlU16, + pub last_port: SqlU16, + pub sled_address: Ipv6Net, + pub vni: Vni, + pub mac: MacAddr, + pub version_added: i64, + pub version_removed: Option, + pub time_created: DateTime, + pub time_deleted: Option>, +} + +impl Ipv4NatEntry { + pub fn first_port(&self) -> u16 { + self.first_port.into() + } + + pub fn last_port(&self) -> u16 { + self.last_port.into() + } +} + +/// NAT Record +#[derive(Clone, Debug, Serialize, JsonSchema)] +pub struct Ipv4NatEntryView { + pub external_address: Ipv4Addr, + pub first_port: u16, + pub last_port: u16, + pub sled_address: Ipv6Addr, + pub vni: external::Vni, + pub mac: external::MacAddr, + pub gen: i64, + pub deleted: bool, +} + +impl From for Ipv4NatEntryView { + fn from(value: Ipv4NatEntry) -> Self { + let (gen, deleted) = match value.version_removed { + Some(gen) => (gen, true), + None => (value.version_added, false), + }; + + Self { + external_address: value.external_address.ip(), + first_port: value.first_port(), + last_port: value.last_port(), + sled_address: value.sled_address.ip(), + vni: value.vni.0, + mac: *value.mac, + gen, + deleted, + } + } +} diff --git a/nexus/db-model/src/lib.rs b/nexus/db-model/src/lib.rs index 7aa8a6b076..6b65eb87ec 100644 --- a/nexus/db-model/src/lib.rs +++ b/nexus/db-model/src/lib.rs @@ -53,6 +53,7 @@ mod system_update; // These actually represent subqueries, not real table. // However, they must be defined in the same crate as our tables // for join-based marker trait generation. 
+mod ipv4_nat_entry; pub mod queries; mod rack; mod region; @@ -124,6 +125,7 @@ pub use instance_cpu_count::*; pub use instance_state::*; pub use inventory::*; pub use ip_pool::*; +pub use ipv4_nat_entry::*; pub use ipv4net::*; pub use ipv6::*; pub use ipv6net::*; diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 7c6b8bbd0a..4844f2a33f 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -489,6 +489,32 @@ table! { } } +table! { + ipv4_nat_entry (id) { + id -> Uuid, + external_address -> Inet, + first_port -> Int4, + last_port -> Int4, + sled_address -> Inet, + vni -> Int4, + mac -> Int8, + version_added -> Int8, + version_removed -> Nullable, + time_created -> Timestamptz, + time_deleted -> Nullable, + } +} + +// This is the sequence used for the version number +// in ipv4_nat_entry. +table! { + ipv4_nat_version (last_value) { + last_value -> Int8, + log_cnt -> Int8, + is_called -> Bool, + } +} + table! { external_ip (id) { id -> Uuid, @@ -1243,7 +1269,7 @@ table! { /// /// This should be updated whenever the schema is changed. For more details, /// refer to: schema/crdb/README.adoc -pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(10, 0, 0); +pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(11, 0, 0); allow_tables_to_appear_in_same_query!( system_update, diff --git a/nexus/db-queries/src/db/datastore/ipv4_nat_entry.rs b/nexus/db-queries/src/db/datastore/ipv4_nat_entry.rs new file mode 100644 index 0000000000..274937b299 --- /dev/null +++ b/nexus/db-queries/src/db/datastore/ipv4_nat_entry.rs @@ -0,0 +1,440 @@ +use super::DataStore; +use crate::context::OpContext; +use crate::db; +use crate::db::error::public_error_from_diesel; +use crate::db::error::ErrorHandler; +use crate::db::model::{Ipv4NatEntry, Ipv4NatValues}; +use async_bb8_diesel::AsyncRunQueryDsl; +use chrono::{DateTime, Utc}; +use diesel::prelude::*; +use diesel::sql_types::BigInt; +use nexus_db_model::ExternalIp; +use nexus_db_model::Ipv4NatEntryView; +use omicron_common::api::external::CreateResult; +use omicron_common::api::external::DeleteResult; +use omicron_common::api::external::Error; +use omicron_common::api::external::ListResultVec; +use omicron_common::api::external::LookupResult; +use omicron_common::api::external::LookupType; +use omicron_common::api::external::ResourceType; + +impl DataStore { + pub async fn ensure_ipv4_nat_entry( + &self, + opctx: &OpContext, + nat_entry: Ipv4NatValues, + ) -> CreateResult<()> { + use db::schema::ipv4_nat_entry::dsl; + use diesel::sql_types; + + // Look up any NAT entries that already have the exact parameters + // we're trying to INSERT. + let matching_entry_subquery = dsl::ipv4_nat_entry + .filter(dsl::external_address.eq(nat_entry.external_address)) + .filter(dsl::first_port.eq(nat_entry.first_port)) + .filter(dsl::last_port.eq(nat_entry.last_port)) + .filter(dsl::sled_address.eq(nat_entry.sled_address)) + .filter(dsl::vni.eq(nat_entry.vni)) + .filter(dsl::mac.eq(nat_entry.mac)) + .select(( + dsl::external_address, + dsl::first_port, + dsl::last_port, + dsl::sled_address, + dsl::vni, + dsl::mac, + )); + + // SELECT exactly the values we're trying to INSERT, but only + // if it does not already exist. 
+ let new_entry_subquery = diesel::dsl::select(( + nat_entry.external_address.into_sql::(), + nat_entry.first_port.into_sql::(), + nat_entry.last_port.into_sql::(), + nat_entry.sled_address.into_sql::(), + nat_entry.vni.into_sql::(), + nat_entry.mac.into_sql::(), + )) + .filter(diesel::dsl::not(diesel::dsl::exists(matching_entry_subquery))); + + diesel::insert_into(dsl::ipv4_nat_entry) + .values(new_entry_subquery) + .into_columns(( + dsl::external_address, + dsl::first_port, + dsl::last_port, + dsl::sled_address, + dsl::vni, + dsl::mac, + )) + .execute_async(&*self.pool_connection_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + Ok(()) + } + + pub async fn ipv4_nat_delete( + &self, + opctx: &OpContext, + nat_entry: &Ipv4NatEntry, + ) -> DeleteResult { + use db::schema::ipv4_nat_entry::dsl; + + let updated_rows = diesel::update(dsl::ipv4_nat_entry) + .set(( + dsl::version_removed.eq(ipv4_nat_next_version().nullable()), + dsl::time_deleted.eq(Utc::now()), + )) + .filter(dsl::time_deleted.is_null()) + .filter(dsl::version_removed.is_null()) + .filter(dsl::id.eq(nat_entry.id)) + .filter(dsl::version_added.eq(nat_entry.version_added)) + .execute_async(&*self.pool_connection_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + if updated_rows == 0 { + return Err(Error::ObjectNotFound { + type_name: ResourceType::Ipv4NatEntry, + lookup_type: LookupType::ByCompositeId( + "id, version_added".to_string(), + ), + }); + } + Ok(()) + } + + pub async fn ipv4_nat_find_by_id( + &self, + opctx: &OpContext, + id: uuid::Uuid, + ) -> LookupResult { + use db::schema::ipv4_nat_entry::dsl; + + let result = dsl::ipv4_nat_entry + .filter(dsl::id.eq(id)) + .select(Ipv4NatEntry::as_select()) + .limit(1) + .load_async(&*self.pool_connection_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + if let Some(nat_entry) = result.first() { + Ok(nat_entry.clone()) + } else { + Err(Error::InvalidRequest { + message: "no matching records".to_string(), + }) + } + } + + pub async fn ipv4_nat_delete_by_external_ip( + &self, + opctx: &OpContext, + external_ip: &ExternalIp, + ) -> DeleteResult { + use db::schema::ipv4_nat_entry::dsl; + + let updated_rows = diesel::update(dsl::ipv4_nat_entry) + .set(( + dsl::version_removed.eq(ipv4_nat_next_version().nullable()), + dsl::time_deleted.eq(Utc::now()), + )) + .filter(dsl::time_deleted.is_null()) + .filter(dsl::version_removed.is_null()) + .filter(dsl::external_address.eq(external_ip.ip)) + .filter(dsl::first_port.eq(external_ip.first_port)) + .filter(dsl::last_port.eq(external_ip.last_port)) + .execute_async(&*self.pool_connection_authorized(opctx).await?) 
+ .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + if updated_rows == 0 { + return Err(Error::ObjectNotFound { + type_name: ResourceType::Ipv4NatEntry, + lookup_type: LookupType::ByCompositeId( + "external_ip, first_port, last_port".to_string(), + ), + }); + } + Ok(()) + } + + pub async fn ipv4_nat_find_by_values( + &self, + opctx: &OpContext, + values: Ipv4NatValues, + ) -> LookupResult { + use db::schema::ipv4_nat_entry::dsl; + let result = dsl::ipv4_nat_entry + .filter(dsl::external_address.eq(values.external_address)) + .filter(dsl::first_port.eq(values.first_port)) + .filter(dsl::last_port.eq(values.last_port)) + .filter(dsl::mac.eq(values.mac)) + .filter(dsl::sled_address.eq(values.sled_address)) + .filter(dsl::vni.eq(values.vni)) + .filter(dsl::time_deleted.is_null()) + .select(Ipv4NatEntry::as_select()) + .limit(1) + .load_async(&*self.pool_connection_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + if let Some(nat_entry) = result.first() { + Ok(nat_entry.clone()) + } else { + Err(Error::InvalidRequest { + message: "no matching records".to_string(), + }) + } + } + + pub async fn ipv4_nat_list_since_version( + &self, + opctx: &OpContext, + version: i64, + limit: u32, + ) -> ListResultVec { + use db::schema::ipv4_nat_entry::dsl; + + let list = dsl::ipv4_nat_entry + .filter( + dsl::version_added + .gt(version) + .or(dsl::version_removed.gt(version)), + ) + .limit(limit as i64) + .select(Ipv4NatEntry::as_select()) + .load_async(&*self.pool_connection_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + Ok(list) + } + + pub async fn ipv4_nat_changeset( + &self, + opctx: &OpContext, + version: i64, + limit: u32, + ) -> ListResultVec { + let nat_entries = + self.ipv4_nat_list_since_version(opctx, version, limit).await?; + let nat_entries: Vec = + nat_entries.iter().map(|e| e.clone().into()).collect(); + Ok(nat_entries) + } + + pub async fn ipv4_nat_current_version( + &self, + opctx: &OpContext, + ) -> LookupResult { + use db::schema::ipv4_nat_version::dsl; + + let latest: Option = dsl::ipv4_nat_version + .select(diesel::dsl::max(dsl::last_value)) + .first_async(&*self.pool_connection_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + match latest { + Some(value) => Ok(value), + None => Err(Error::InvalidRequest { + message: "sequence table is empty!".to_string(), + }), + } + } + + pub async fn ipv4_nat_cleanup( + &self, + opctx: &OpContext, + version: i64, + before_timestamp: DateTime, + ) -> DeleteResult { + use db::schema::ipv4_nat_entry::dsl; + + diesel::delete(dsl::ipv4_nat_entry) + .filter(dsl::version_removed.lt(version)) + .filter(dsl::time_deleted.lt(before_timestamp)) + .execute_async(&*self.pool_connection_authorized(opctx).await?) 
+ .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + Ok(()) + } +} + +fn ipv4_nat_next_version() -> diesel::expression::SqlLiteral { + diesel::dsl::sql::("nextval('omicron.public.ipv4_nat_version')") +} + +#[cfg(test)] +mod test { + use std::str::FromStr; + + use crate::db::datastore::datastore_test; + use chrono::Utc; + use nexus_db_model::{Ipv4NatValues, MacAddr, Vni}; + use nexus_test_utils::db::test_setup_database; + use omicron_common::api::external; + use omicron_test_utils::dev; + + // Test our ability to track additions and deletions since a given version number + #[tokio::test] + async fn nat_version_tracking() { + let logctx = dev::test_setup_log("test_nat_version_tracking"); + let mut db = test_setup_database(&logctx.log).await; + let (opctx, datastore) = datastore_test(&logctx, &db).await; + + // We should not have any NAT entries at this moment + let initial_state = + datastore.ipv4_nat_list_since_version(&opctx, 0, 10).await.unwrap(); + + assert!(initial_state.is_empty()); + assert_eq!( + datastore.ipv4_nat_current_version(&opctx).await.unwrap(), + 0 + ); + + // Each change (creation / deletion) to the NAT table should increment the + // version number of the row in the NAT table + let external_address = external::Ipv4Net( + ipnetwork::Ipv4Network::try_from("10.0.0.100").unwrap(), + ); + + let sled_address = external::Ipv6Net( + ipnetwork::Ipv6Network::try_from("fd00:1122:3344:104::1").unwrap(), + ); + + // Add a nat entry. + let nat1 = Ipv4NatValues { + external_address: external_address.into(), + first_port: 0.into(), + last_port: 999.into(), + sled_address: sled_address.into(), + vni: Vni(external::Vni::random()), + mac: MacAddr( + external::MacAddr::from_str("A8:40:25:F5:EB:2A").unwrap(), + ), + }; + + datastore.ensure_ipv4_nat_entry(&opctx, nat1.clone()).await.unwrap(); + let first_entry = + datastore.ipv4_nat_find_by_values(&opctx, nat1).await.unwrap(); + + let nat_entries = + datastore.ipv4_nat_list_since_version(&opctx, 0, 10).await.unwrap(); + + // The NAT table has undergone one change. One entry has been added, + // none deleted, so we should be at version 1. + assert_eq!(nat_entries.len(), 1); + assert_eq!(nat_entries.last().unwrap().version_added, 1); + assert_eq!( + datastore.ipv4_nat_current_version(&opctx).await.unwrap(), + 1 + ); + + // Add another nat entry. + let nat2 = Ipv4NatValues { + external_address: external_address.into(), + first_port: 1000.into(), + last_port: 1999.into(), + sled_address: sled_address.into(), + vni: Vni(external::Vni::random()), + mac: MacAddr( + external::MacAddr::from_str("A8:40:25:F5:EB:2B").unwrap(), + ), + }; + + datastore.ensure_ipv4_nat_entry(&opctx, nat2).await.unwrap(); + + let nat_entries = + datastore.ipv4_nat_list_since_version(&opctx, 0, 10).await.unwrap(); + + // The NAT table has undergone two changes. Two entries have been + // added, none deleted, so we should be at version 2. + let nat_entry = + nat_entries.iter().find(|e| e.version_added == 2).unwrap(); + assert_eq!(nat_entries.len(), 2); + assert_eq!(nat_entry.version_added, 2); + assert_eq!( + datastore.ipv4_nat_current_version(&opctx).await.unwrap(), + 2 + ); + + // Test Cleanup logic + // Cleanup should only perma-delete entries that are older than a + // specified version number and whose `time_deleted` field is + // older than a specified age. 
+ let time_cutoff = Utc::now(); + datastore.ipv4_nat_cleanup(&opctx, 2, time_cutoff).await.unwrap(); + + // Nothing should have changed (no records currently marked for deletion) + let nat_entries = + datastore.ipv4_nat_list_since_version(&opctx, 0, 10).await.unwrap(); + + assert_eq!(nat_entries.len(), 2); + assert_eq!( + datastore.ipv4_nat_current_version(&opctx).await.unwrap(), + 2 + ); + + // Delete the first nat entry. It should show up as a later version number. + datastore.ipv4_nat_delete(&opctx, &first_entry).await.unwrap(); + let nat_entries = + datastore.ipv4_nat_list_since_version(&opctx, 0, 10).await.unwrap(); + + // The NAT table has undergone three changes. Two entries have been + // added, one deleted, so we should be at version 3. Since the + // first entry was marked for deletion (and it was the third change), + // the first entry's version number should now be 3. + let nat_entry = + nat_entries.iter().find(|e| e.version_removed.is_some()).unwrap(); + assert_eq!(nat_entries.len(), 2); + assert_eq!(nat_entry.version_removed, Some(3)); + assert_eq!(nat_entry.id, first_entry.id); + assert_eq!( + datastore.ipv4_nat_current_version(&opctx).await.unwrap(), + 3 + ); + + // Try cleaning up with the old version and time cutoff values + datastore.ipv4_nat_cleanup(&opctx, 2, time_cutoff).await.unwrap(); + + // Try cleaning up with a greater version and old time cutoff values + datastore.ipv4_nat_cleanup(&opctx, 6, time_cutoff).await.unwrap(); + + // Try cleaning up with a older version and newer time cutoff values + datastore.ipv4_nat_cleanup(&opctx, 2, Utc::now()).await.unwrap(); + + // Both records should still exist (soft deleted record is newer than cutoff + // values ) + let nat_entries = + datastore.ipv4_nat_list_since_version(&opctx, 0, 10).await.unwrap(); + + assert_eq!(nat_entries.len(), 2); + assert_eq!( + datastore.ipv4_nat_current_version(&opctx).await.unwrap(), + 3 + ); + + // Try cleaning up with a both cutoff values increased + datastore.ipv4_nat_cleanup(&opctx, 4, Utc::now()).await.unwrap(); + + // Soft deleted NAT entry should be removed from the table + let nat_entries = + datastore.ipv4_nat_list_since_version(&opctx, 0, 10).await.unwrap(); + + assert_eq!(nat_entries.len(), 1); + + // version should be unchanged + assert_eq!( + datastore.ipv4_nat_current_version(&opctx).await.unwrap(), + 3 + ); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } +} diff --git a/nexus/db-queries/src/db/datastore/mod.rs b/nexus/db-queries/src/db/datastore/mod.rs index 91373f6875..7385970fb1 100644 --- a/nexus/db-queries/src/db/datastore/mod.rs +++ b/nexus/db-queries/src/db/datastore/mod.rs @@ -63,6 +63,7 @@ mod image; mod instance; mod inventory; mod ip_pool; +mod ipv4_nat_entry; mod network_interface; mod oximeter; mod physical_disk; diff --git a/nexus/examples/config.toml b/nexus/examples/config.toml index efc9aa9c27..3679fa8196 100644 --- a/nexus/examples/config.toml +++ b/nexus/examples/config.toml @@ -92,6 +92,7 @@ dns_external.max_concurrent_server_updates = 5 # certificates it will take _other_ Nexus instances to notice and stop serving # them (on a sunny day). external_endpoints.period_secs = 60 +nat_cleanup.period_secs = 30 # How frequently to collect hardware/software inventory from the whole system # (even if we don't have reason to believe anything has changed). 
inventory.period_secs = 600 diff --git a/nexus/src/app/background/init.rs b/nexus/src/app/background/init.rs index b000dd9bda..d27248ffdc 100644 --- a/nexus/src/app/background/init.rs +++ b/nexus/src/app/background/init.rs @@ -10,12 +10,15 @@ use super::dns_propagation; use super::dns_servers; use super::external_endpoints; use super::inventory_collection; +use super::nat_cleanup; use nexus_db_model::DnsGroup; use nexus_db_queries::context::OpContext; use nexus_db_queries::db::DataStore; +use omicron_common::api::internal::shared::SwitchLocation; use omicron_common::nexus_config::BackgroundTaskConfig; use omicron_common::nexus_config::DnsTasksConfig; use std::collections::BTreeMap; +use std::collections::HashMap; use std::sync::Arc; use uuid::Uuid; @@ -44,6 +47,8 @@ pub struct BackgroundTasks { pub external_endpoints: tokio::sync::watch::Receiver< Option, >, + /// task handle for the ipv4 nat entry garbage collector + pub nat_cleanup: common::TaskHandle, /// task handle for the task that collects inventory pub task_inventory_collection: common::TaskHandle, @@ -55,6 +60,7 @@ impl BackgroundTasks { opctx: &OpContext, datastore: Arc, config: &BackgroundTaskConfig, + dpd_clients: &HashMap>, nexus_id: Uuid, resolver: internal_dns::resolver::Resolver, ) -> BackgroundTasks { @@ -96,6 +102,23 @@ impl BackgroundTasks { (task, watcher_channel) }; + let nat_cleanup = { + driver.register( + "nat_v4_garbage_collector".to_string(), + String::from( + "prunes soft-deleted IPV4 NAT entries from ipv4_nat_entry table \ + based on a predetermined retention policy", + ), + config.nat_cleanup.period_secs, + Box::new(nat_cleanup::Ipv4NatGarbageCollector::new( + datastore.clone(), + dpd_clients.values().map(|client| client.clone()).collect(), + )), + opctx.child(BTreeMap::new()), + vec![], + ) + }; + // Background task: inventory collector let task_inventory_collection = { let collector = inventory_collection::InventoryCollector::new( @@ -128,6 +151,7 @@ impl BackgroundTasks { task_external_dns_servers, task_external_endpoints, external_endpoints, + nat_cleanup, task_inventory_collection, } } diff --git a/nexus/src/app/background/mod.rs b/nexus/src/app/background/mod.rs index e1f474b41a..954207cb3c 100644 --- a/nexus/src/app/background/mod.rs +++ b/nexus/src/app/background/mod.rs @@ -11,6 +11,7 @@ mod dns_servers; mod external_endpoints; mod init; mod inventory_collection; +mod nat_cleanup; mod status; pub use common::Driver; diff --git a/nexus/src/app/background/nat_cleanup.rs b/nexus/src/app/background/nat_cleanup.rs new file mode 100644 index 0000000000..1691d96a4b --- /dev/null +++ b/nexus/src/app/background/nat_cleanup.rs @@ -0,0 +1,111 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Background task for garbage collecting ipv4_nat_entry table. +//! Responsible for cleaning up soft deleted entries once they +//! have been propagated to running dpd instances. 
+ +use super::common::BackgroundTask; +use chrono::{Duration, Utc}; +use futures::future::BoxFuture; +use futures::FutureExt; +use nexus_db_queries::context::OpContext; +use nexus_db_queries::db::DataStore; +use serde_json::json; +use std::sync::Arc; + +/// Background task that periodically prunes soft-deleted entries +/// from ipv4_nat_entry table +pub struct Ipv4NatGarbageCollector { + datastore: Arc, + dpd_clients: Vec>, +} + +impl Ipv4NatGarbageCollector { + pub fn new( + datastore: Arc, + dpd_clients: Vec>, + ) -> Ipv4NatGarbageCollector { + Ipv4NatGarbageCollector { datastore, dpd_clients } + } +} + +impl BackgroundTask for Ipv4NatGarbageCollector { + fn activate<'a, 'b, 'c>( + &'a mut self, + opctx: &'b OpContext, + ) -> BoxFuture<'c, serde_json::Value> + where + 'a: 'c, + 'b: 'c, + { + async { + let log = &opctx.log; + + let result = self.datastore.ipv4_nat_current_version(opctx).await; + + let mut min_gen = match result { + Ok(gen) => gen, + Err(error) => { + warn!( + &log, + "failed to read generation of database"; + "error" => format!("{:#}", error) + ); + return json!({ + "error": + format!( + "failed to read generation of database: \ + {:#}", + error + ) + }); + } + }; + + for client in &self.dpd_clients { + let response = client.ipv4_nat_generation().await; + match response { + Ok(gen) => min_gen = std::cmp::min(min_gen, *gen), + Err(error) => { + warn!( + &log, + "failed to read generation of dpd"; + "error" => format!("{:#}", error) + ); + return json!({ + "error": + format!( + "failed to read generation of dpd: \ + {:#}", + error + ) + }); + } + } + } + + let retention_threshold = Utc::now() - Duration::weeks(2); + + let result = self + .datastore + .ipv4_nat_cleanup(opctx, min_gen, retention_threshold) + .await + .unwrap(); + + let rv = serde_json::to_value(&result).unwrap_or_else(|error| { + json!({ + "error": + format!( + "failed to serialize final value: {:#}", + error + ) + }) + }); + + rv + } + .boxed() + } +} diff --git a/nexus/src/app/instance_network.rs b/nexus/src/app/instance_network.rs index 0f52cbd260..abb8c744e1 100644 --- a/nexus/src/app/instance_network.rs +++ b/nexus/src/app/instance_network.rs @@ -5,6 +5,10 @@ //! Routines that manage instance-related networking state. 
use crate::app::sagas::retry_until_known_result; +use ipnetwork::IpNetwork; +use ipnetwork::Ipv6Network; +use nexus_db_model::Ipv4NatValues; +use nexus_db_model::Vni as DbVni; use nexus_db_queries::authz; use nexus_db_queries::context::OpContext; use nexus_db_queries::db; @@ -12,6 +16,8 @@ use nexus_db_queries::db::identity::Asset; use nexus_db_queries::db::lookup::LookupPath; use omicron_common::api::external::DataPageParams; use omicron_common::api::external::Error; +use omicron_common::api::external::Ipv4Net; +use omicron_common::api::external::Ipv6Net; use omicron_common::api::internal::nexus; use omicron_common::api::internal::shared::SwitchLocation; use sled_agent_client::types::DeleteVirtualNetworkInterfaceHost; @@ -330,8 +336,6 @@ impl super::Nexus { )) })?; - let vni: u32 = network_interface.vni.into(); - info!(log, "looking up instance's external IPs"; "instance_id" => %instance_id); @@ -349,6 +353,9 @@ impl super::Nexus { } } + let sled_address = + Ipv6Net(Ipv6Network::new(*sled_ip_address.ip(), 128).unwrap()); + for target_ip in ips .iter() .enumerate() @@ -361,29 +368,58 @@ impl super::Nexus { }) .map(|(_, ip)| ip) { - retry_until_known_result(log, || async { - dpd_client - .ensure_nat_entry( - &log, - target_ip.ip, - dpd_client::types::MacAddr { - a: mac_address.into_array(), - }, - *target_ip.first_port, - *target_ip.last_port, - vni, - sled_ip_address.ip(), - ) - .await - }) - .await - .map_err(|e| { - Error::internal_error(&format!( - "failed to ensure dpd entry: {e}" - )) - })?; + // For each external ip, add a nat entry to the database + self.ensure_nat_entry( + target_ip, + sled_address, + &network_interface, + mac_address, + opctx, + ) + .await?; } + // Notify dendrite that there are changes for it to reconcile. + // In the event of a failure to notify dendrite, we'll log an error + // and rely on dendrite's RPW timer to catch it up. + if let Err(e) = dpd_client.ipv4_nat_trigger_update().await { + error!(self.log, "failed to notify dendrite of nat updates"; "error" => ?e); + }; + + Ok(()) + } + + async fn ensure_nat_entry( + &self, + target_ip: &nexus_db_model::ExternalIp, + sled_address: Ipv6Net, + network_interface: &sled_agent_client::types::NetworkInterface, + mac_address: macaddr::MacAddr6, + opctx: &OpContext, + ) -> Result<(), Error> { + match target_ip.ip { + IpNetwork::V4(v4net) => { + let nat_entry = Ipv4NatValues { + external_address: Ipv4Net(v4net).into(), + first_port: target_ip.first_port, + last_port: target_ip.last_port, + sled_address: sled_address.into(), + vni: DbVni(network_interface.vni.clone().into()), + mac: nexus_db_model::MacAddr( + omicron_common::api::external::MacAddr(mac_address), + ), + }; + self.db_datastore + .ensure_ipv4_nat_entry(opctx, nat_entry) + .await?; + } + IpNetwork::V6(_v6net) => { + // TODO: implement handling of v6 nat. 
+ return Err(Error::InternalError { + internal_message: "ipv6 nat is not yet implemented".into(), + }); + } + }; Ok(()) } @@ -419,55 +455,54 @@ impl super::Nexus { let mut errors = vec![]; for entry in external_ips { - for switch in &boundary_switches { - debug!(log, "deleting instance nat mapping"; - "instance_id" => %instance_id, - "switch" => switch.to_string(), - "entry" => #?entry); - - let client_result = - self.dpd_clients.get(switch).ok_or_else(|| { - Error::internal_error(&format!( - "unable to find dendrite client for {switch}" - )) - }); - - let dpd_client = match client_result { - Ok(client) => client, - Err(new_error) => { - errors.push(new_error); - continue; + // Soft delete the NAT entry + match self + .db_datastore + .ipv4_nat_delete_by_external_ip(&opctx, &entry) + .await + { + Ok(_) => Ok(()), + Err(err) => match err { + Error::ObjectNotFound { .. } => { + warn!(log, "no matching nat entries to soft delete"); + Ok(()) } - }; + _ => { + let message = format!( + "failed to delete nat entry due to error: {err:?}" + ); + error!(log, "{}", message); + Err(Error::internal_error(&message)) + } + }, + }?; + } - let result = retry_until_known_result(log, || async { - dpd_client - .ensure_nat_entry_deleted( - log, - entry.ip, - *entry.first_port, - ) - .await - }) - .await; - - if let Err(e) = result { - let e = Error::internal_error(&format!( - "failed to delete nat entry via dpd: {e}" - )); - - error!(log, "error deleting nat mapping: {e:#?}"; - "instance_id" => %instance_id, - "switch" => switch.to_string(), - "entry" => #?entry); - errors.push(e); - } else { - debug!(log, "deleting nat mapping successful"; - "instance_id" => %instance_id, - "switch" => switch.to_string(), - "entry" => #?entry); + for switch in &boundary_switches { + debug!(&self.log, "notifying dendrite of updates"; + "instance_id" => %authz_instance.id(), + "switch" => switch.to_string()); + + let client_result = self.dpd_clients.get(switch).ok_or_else(|| { + Error::internal_error(&format!( + "unable to find dendrite client for {switch}" + )) + }); + + let dpd_client = match client_result { + Ok(client) => client, + Err(new_error) => { + errors.push(new_error); + continue; } - } + }; + + // Notify dendrite that there are changes for it to reconcile. + // In the event of a failure to notify dendrite, we'll log an error + // and rely on dendrite's RPW timer to catch it up. + if let Err(e) = dpd_client.ipv4_nat_trigger_update().await { + error!(self.log, "failed to notify dendrite of nat updates"; "error" => ?e); + }; } if let Some(e) = errors.into_iter().nth(0) { @@ -496,32 +531,48 @@ impl super::Nexus { let boundary_switches = self.boundary_switches(opctx).await?; for external_ip in external_ips { - for switch in &boundary_switches { - debug!(&self.log, "deleting instance nat mapping"; + match self + .db_datastore + .ipv4_nat_delete_by_external_ip(&opctx, &external_ip) + .await + { + Ok(_) => Ok(()), + Err(err) => match err { + Error::ObjectNotFound { .. 
} => { + warn!( + self.log, + "no matching nat entries to soft delete" + ); + Ok(()) + } + _ => { + let message = format!( + "failed to delete nat entry due to error: {err:?}" + ); + error!(self.log, "{}", message); + Err(Error::internal_error(&message)) + } + }, + }?; + } + + for switch in &boundary_switches { + debug!(&self.log, "notifying dendrite of updates"; "instance_id" => %authz_instance.id(), - "switch" => switch.to_string(), - "entry" => #?external_ip); - - let dpd_client = - self.dpd_clients.get(switch).ok_or_else(|| { - Error::internal_error(&format!( - "unable to find dendrite client for {switch}" - )) - })?; - - dpd_client - .ensure_nat_entry_deleted( - &self.log, - external_ip.ip, - *external_ip.first_port, - ) - .await - .map_err(|e| { - Error::internal_error(&format!( - "failed to delete nat entry via dpd: {e}" - )) - })?; - } + "switch" => switch.to_string()); + + let dpd_client = self.dpd_clients.get(switch).ok_or_else(|| { + Error::internal_error(&format!( + "unable to find dendrite client for {switch}" + )) + })?; + + // Notify dendrite that there are changes for it to reconcile. + // In the event of a failure to notify dendrite, we'll log an error + // and rely on dendrite's RPW timer to catch it up. + if let Err(e) = dpd_client.ipv4_nat_trigger_update().await { + error!(self.log, "failed to notify dendrite of nat updates"; "error" => ?e); + }; } Ok(()) diff --git a/nexus/src/app/mod.rs b/nexus/src/app/mod.rs index ef8132451a..18c9dae841 100644 --- a/nexus/src/app/mod.rs +++ b/nexus/src/app/mod.rs @@ -349,6 +349,7 @@ impl Nexus { &background_ctx, Arc::clone(&db_datastore), &config.pkg.background_tasks, + &dpd_clients, config.deployment.id, resolver.clone(), ); diff --git a/nexus/src/internal_api/http_entrypoints.rs b/nexus/src/internal_api/http_entrypoints.rs index ebb21feb40..9a20911893 100644 --- a/nexus/src/internal_api/http_entrypoints.rs +++ b/nexus/src/internal_api/http_entrypoints.rs @@ -24,6 +24,7 @@ use dropshot::RequestContext; use dropshot::ResultsPage; use dropshot::TypedBody; use hyper::Body; +use nexus_db_model::Ipv4NatEntryView; use nexus_types::internal_api::params::SwitchPutRequest; use nexus_types::internal_api::params::SwitchPutResponse; use nexus_types::internal_api::views::to_list; @@ -68,6 +69,8 @@ pub(crate) fn internal_api() -> NexusApiDescription { api.register(saga_list)?; api.register(saga_view)?; + api.register(ipv4_nat_changeset)?; + api.register(bgtask_list)?; api.register(bgtask_view)?; @@ -540,3 +543,51 @@ async fn bgtask_view( }; apictx.internal_latencies.instrument_dropshot_handler(&rqctx, handler).await } + +// NAT RPW internal APIs + +/// Path parameters for NAT ChangeSet +#[derive(Deserialize, JsonSchema)] +struct RpwNatPathParam { + /// which change number to start generating + /// the change set from + from_gen: i64, +} + +/// Query parameters for NAT ChangeSet +#[derive(Deserialize, JsonSchema)] +struct RpwNatQueryParam { + limit: u32, +} + +/// Fetch NAT ChangeSet +/// +/// Caller provides their generation as `from_gen`, along with a query +/// parameter for the page size (`limit`). Endpoint will return changes +/// that have occured since the caller's generation number up to the latest +/// change or until the `limit` is reached. If there are no changes, an +/// empty vec is returned. 
+#[endpoint { + method = GET, + path = "/nat/ipv4/changeset/{from_gen}" +}] +async fn ipv4_nat_changeset( + rqctx: RequestContext>, + path_params: Path, + query_params: Query, +) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = crate::context::op_context_for_internal_api(&rqctx).await; + let nexus = &apictx.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let mut changeset = nexus + .datastore() + .ipv4_nat_changeset(&opctx, path.from_gen, query.limit) + .await?; + changeset.sort_by_key(|e| e.gen); + Ok(HttpResponseOk(changeset)) + }; + apictx.internal_latencies.instrument_dropshot_handler(&rqctx, handler).await +} diff --git a/nexus/tests/config.test.toml b/nexus/tests/config.test.toml index 54f7e03eef..fbed9aed8e 100644 --- a/nexus/tests/config.test.toml +++ b/nexus/tests/config.test.toml @@ -90,6 +90,7 @@ dns_external.max_concurrent_server_updates = 5 # certificates it will take _other_ Nexus instances to notice and stop serving # them (on a sunny day). external_endpoints.period_secs = 60 +nat_cleanup.period_secs = 30 # How frequently to collect hardware/software inventory from the whole system # (even if we don't have reason to believe anything has changed). inventory.period_secs = 600 diff --git a/openapi/nexus-internal.json b/openapi/nexus-internal.json index f83cf68a8a..fcb285d9eb 100644 --- a/openapi/nexus-internal.json +++ b/openapi/nexus-internal.json @@ -323,6 +323,57 @@ } } }, + "/nat/ipv4/changeset/{from_gen}": { + "get": { + "summary": "Fetch NAT ChangeSet", + "description": "Caller provides their generation as `from_gen`, along with a query parameter for the page size (`limit`). Endpoint will return changes that have occured since the caller's generation number up to the latest change or until the `limit` is reached. 
If there are no changes, an empty vec is returned.", + "operationId": "ipv4_nat_changeset", + "parameters": [ + { + "in": "path", + "name": "from_gen", + "description": "which change number to start generating the change set from", + "required": true, + "schema": { + "type": "integer", + "format": "int64" + } + }, + { + "in": "query", + "name": "limit", + "required": true, + "schema": { + "type": "integer", + "format": "uint32", + "minimum": 0 + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "title": "Array_of_Ipv4NatEntryView", + "type": "array", + "items": { + "$ref": "#/components/schemas/Ipv4NatEntryView" + } + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, "/physical-disk": { "put": { "summary": "Report that a physical disk for the specified sled has come online.", @@ -3763,6 +3814,53 @@ } ] }, + "Ipv4NatEntryView": { + "description": "NAT Record", + "type": "object", + "properties": { + "deleted": { + "type": "boolean" + }, + "external_address": { + "type": "string", + "format": "ipv4" + }, + "first_port": { + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "gen": { + "type": "integer", + "format": "int64" + }, + "last_port": { + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "mac": { + "$ref": "#/components/schemas/MacAddr" + }, + "sled_address": { + "type": "string", + "format": "ipv6" + }, + "vni": { + "$ref": "#/components/schemas/Vni" + } + }, + "required": [ + "deleted", + "external_address", + "first_port", + "gen", + "last_port", + "mac", + "sled_address", + "vni" + ] + }, "Ipv4Network": { "type": "string", "pattern": "^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\/(3[0-2]|[0-2]?[0-9])$" @@ -5335,6 +5433,12 @@ "time_updated" ] }, + "Vni": { + "description": "A Geneve Virtual Network Identifier", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, "ZpoolPutRequest": { "description": "Sent by a sled agent on startup to Nexus to request further instruction", "type": "object", diff --git a/package-manifest.toml b/package-manifest.toml index f320215a13..ca96341f2a 100644 --- a/package-manifest.toml +++ b/package-manifest.toml @@ -476,8 +476,8 @@ only_for_targets.image = "standard" # 2. Copy dendrite.tar.gz from dendrite/out to omicron/out source.type = "prebuilt" source.repo = "dendrite" -source.commit = "147b03901aa8305b5271e0133a09f628b8140949" -source.sha256 = "14fe7f904f963b50188d6e060106b63df6d061ca64238f7b21623c432b5944e3" +source.commit = "8ff834e7d0a6adb263240edd40537f2c0768f1a4" +source.sha256 = "c00e79f55e0bdf048069b2d18a4d009ddfef46e7e5d846887cf96e843a8884bd" output.type = "zone" output.intermediate_only = true @@ -501,8 +501,8 @@ only_for_targets.image = "standard" # 2. Copy the output zone image from dendrite/out to omicron/out source.type = "prebuilt" source.repo = "dendrite" -source.commit = "147b03901aa8305b5271e0133a09f628b8140949" -source.sha256 = "f3aa685e4096f8f6e2ea6c169f391dbb88707abcbf1d2bde29163d81736e8ec6" +source.commit = "8ff834e7d0a6adb263240edd40537f2c0768f1a4" +source.sha256 = "428cce1e9aa399b1b49c04e7fd0bc1cb0e3f3fae6fda96055892a42e010c9d6f" output.type = "zone" output.intermediate_only = true @@ -519,8 +519,8 @@ only_for_targets.image = "standard" # 2. 
Copy dendrite.tar.gz from dendrite/out to omicron/out/dendrite-softnpu.tar.gz source.type = "prebuilt" source.repo = "dendrite" -source.commit = "147b03901aa8305b5271e0133a09f628b8140949" -source.sha256 = "dece729ce4127216fba48e9cfed90ec2e5a57ee4ca6c4afc5fa770de6ea636bf" +source.commit = "8ff834e7d0a6adb263240edd40537f2c0768f1a4" +source.sha256 = "5dd3534bec5eb4f857d0bf3994b26650288f650d409eec6aaa29860a2f481c37" output.type = "zone" output.intermediate_only = true diff --git a/schema/crdb/10.0.0/README.md b/schema/crdb/11.0.0/README.md similarity index 100% rename from schema/crdb/10.0.0/README.md rename to schema/crdb/11.0.0/README.md diff --git a/schema/crdb/10.0.0/up01.sql b/schema/crdb/11.0.0/up01.sql similarity index 100% rename from schema/crdb/10.0.0/up01.sql rename to schema/crdb/11.0.0/up01.sql diff --git a/schema/crdb/10.0.0/up02.sql b/schema/crdb/11.0.0/up02.sql similarity index 100% rename from schema/crdb/10.0.0/up02.sql rename to schema/crdb/11.0.0/up02.sql diff --git a/schema/crdb/10.0.0/up03.sql b/schema/crdb/11.0.0/up03.sql similarity index 100% rename from schema/crdb/10.0.0/up03.sql rename to schema/crdb/11.0.0/up03.sql diff --git a/schema/crdb/10.0.0/up04.sql b/schema/crdb/11.0.0/up04.sql similarity index 100% rename from schema/crdb/10.0.0/up04.sql rename to schema/crdb/11.0.0/up04.sql diff --git a/schema/crdb/10.0.0/up05.sql b/schema/crdb/11.0.0/up05.sql similarity index 100% rename from schema/crdb/10.0.0/up05.sql rename to schema/crdb/11.0.0/up05.sql diff --git a/schema/crdb/10.0.0/up06.sql b/schema/crdb/11.0.0/up06.sql similarity index 100% rename from schema/crdb/10.0.0/up06.sql rename to schema/crdb/11.0.0/up06.sql diff --git a/schema/crdb/10.0.0/up07.sql b/schema/crdb/11.0.0/up07.sql similarity index 100% rename from schema/crdb/10.0.0/up07.sql rename to schema/crdb/11.0.0/up07.sql diff --git a/schema/crdb/10.0.0/up08.sql b/schema/crdb/11.0.0/up08.sql similarity index 100% rename from schema/crdb/10.0.0/up08.sql rename to schema/crdb/11.0.0/up08.sql diff --git a/schema/crdb/10.0.0/up09.sql b/schema/crdb/11.0.0/up09.sql similarity index 100% rename from schema/crdb/10.0.0/up09.sql rename to schema/crdb/11.0.0/up09.sql diff --git a/schema/crdb/11.0.0/up1.sql b/schema/crdb/11.0.0/up1.sql new file mode 100644 index 0000000000..a4d31edd71 --- /dev/null +++ b/schema/crdb/11.0.0/up1.sql @@ -0,0 +1 @@ +CREATE SEQUENCE IF NOT EXISTS omicron.public.ipv4_nat_version START 1 INCREMENT 1; diff --git a/schema/crdb/10.0.0/up10.sql b/schema/crdb/11.0.0/up10.sql similarity index 100% rename from schema/crdb/10.0.0/up10.sql rename to schema/crdb/11.0.0/up10.sql diff --git a/schema/crdb/10.0.0/up11.sql b/schema/crdb/11.0.0/up11.sql similarity index 100% rename from schema/crdb/10.0.0/up11.sql rename to schema/crdb/11.0.0/up11.sql diff --git a/schema/crdb/10.0.0/up12.sql b/schema/crdb/11.0.0/up12.sql similarity index 100% rename from schema/crdb/10.0.0/up12.sql rename to schema/crdb/11.0.0/up12.sql diff --git a/schema/crdb/11.0.0/up2.sql b/schema/crdb/11.0.0/up2.sql new file mode 100644 index 0000000000..b92d4c73d3 --- /dev/null +++ b/schema/crdb/11.0.0/up2.sql @@ -0,0 +1,13 @@ +CREATE TABLE IF NOT EXISTS omicron.public.ipv4_nat_entry ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + external_address INET NOT NULL, + first_port INT4 NOT NULL, + last_port INT4 NOT NULL, + sled_address INET NOT NULL, + vni INT4 NOT NULL, + mac INT8 NOT NULL, + version_added INT8 NOT NULL DEFAULT nextval('omicron.public.ipv4_nat_version'), + version_removed INT8, + time_created TIMESTAMPTZ NOT NULL 
DEFAULT now(), + time_deleted TIMESTAMPTZ +); diff --git a/schema/crdb/11.0.0/up3.sql b/schema/crdb/11.0.0/up3.sql new file mode 100644 index 0000000000..1247aad693 --- /dev/null +++ b/schema/crdb/11.0.0/up3.sql @@ -0,0 +1,13 @@ +CREATE UNIQUE INDEX IF NOT EXISTS ipv4_nat_version_added ON omicron.public.ipv4_nat_entry ( + version_added +) +STORING ( + external_address, + first_port, + last_port, + sled_address, + vni, + mac, + time_created, + time_deleted +); diff --git a/schema/crdb/11.0.0/up4.sql b/schema/crdb/11.0.0/up4.sql new file mode 100644 index 0000000000..b9cfe305d2 --- /dev/null +++ b/schema/crdb/11.0.0/up4.sql @@ -0,0 +1,5 @@ +CREATE UNIQUE INDEX IF NOT EXISTS overlapping_ipv4_nat_entry ON omicron.public.ipv4_nat_entry ( + external_address, + first_port, + last_port +) WHERE time_deleted IS NULL; diff --git a/schema/crdb/11.0.0/up5.sql b/schema/crdb/11.0.0/up5.sql new file mode 100644 index 0000000000..dce2211eae --- /dev/null +++ b/schema/crdb/11.0.0/up5.sql @@ -0,0 +1 @@ +CREATE INDEX IF NOT EXISTS ipv4_nat_lookup ON omicron.public.ipv4_nat_entry (external_address, first_port, last_port, sled_address, vni, mac); diff --git a/schema/crdb/11.0.0/up6.sql b/schema/crdb/11.0.0/up6.sql new file mode 100644 index 0000000000..e4958eb352 --- /dev/null +++ b/schema/crdb/11.0.0/up6.sql @@ -0,0 +1,13 @@ +CREATE UNIQUE INDEX IF NOT EXISTS ipv4_nat_version_removed ON omicron.public.ipv4_nat_entry ( + version_removed +) +STORING ( + external_address, + first_port, + last_port, + sled_address, + vni, + mac, + time_created, + time_deleted +); diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index 875877ee96..a74cabfe6e 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -2738,12 +2738,24 @@ CREATE TABLE IF NOT EXISTS omicron.public.inv_caboose ( COMMIT; BEGIN; -/*******************************************************************/ +CREATE TABLE IF NOT EXISTS omicron.public.db_metadata ( + -- There should only be one row of this table for the whole DB. + -- It's a little goofy, but filter on "singleton = true" before querying + -- or applying updates, and you'll access the singleton row. + -- + -- We also add a constraint on this table to ensure it's not possible to + -- access the version of this table with "singleton = false". + singleton BOOL NOT NULL PRIMARY KEY, + time_created TIMESTAMPTZ NOT NULL, + time_modified TIMESTAMPTZ NOT NULL, + -- Semver representation of the DB version + version STRING(64) NOT NULL, -/* - * Metadata for the schema itself. This version number isn't great, as there's - * nothing to ensure it gets bumped when it should be, but it's a start. - */ + -- (Optional) Semver representation of the DB version to which we're upgrading + target_version STRING(64), + + CHECK (singleton = true) +); -- Per-VMM state. 
CREATE TABLE IF NOT EXISTS omicron.public.vmm ( @@ -2812,6 +2824,62 @@ CREATE TYPE IF NOT EXISTS omicron.public.switch_link_speed AS ENUM ( ALTER TABLE omicron.public.switch_port_settings_link_config ADD COLUMN IF NOT EXISTS fec omicron.public.switch_link_fec; ALTER TABLE omicron.public.switch_port_settings_link_config ADD COLUMN IF NOT EXISTS speed omicron.public.switch_link_speed; +CREATE SEQUENCE IF NOT EXISTS omicron.public.ipv4_nat_version START 1 INCREMENT 1; + +CREATE TABLE IF NOT EXISTS omicron.public.ipv4_nat_entry ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + external_address INET NOT NULL, + first_port INT4 NOT NULL, + last_port INT4 NOT NULL, + sled_address INET NOT NULL, + vni INT4 NOT NULL, + mac INT8 NOT NULL, + version_added INT8 NOT NULL DEFAULT nextval('omicron.public.ipv4_nat_version'), + version_removed INT8, + time_created TIMESTAMPTZ NOT NULL DEFAULT now(), + time_deleted TIMESTAMPTZ +); + +CREATE UNIQUE INDEX IF NOT EXISTS ipv4_nat_version_added ON omicron.public.ipv4_nat_entry ( + version_added +) +STORING ( + external_address, + first_port, + last_port, + sled_address, + vni, + mac, + time_created, + time_deleted +); + +CREATE UNIQUE INDEX IF NOT EXISTS overlapping_ipv4_nat_entry ON omicron.public.ipv4_nat_entry ( + external_address, + first_port, + last_port +) WHERE time_deleted IS NULL; + +CREATE INDEX IF NOT EXISTS ipv4_nat_lookup ON omicron.public.ipv4_nat_entry (external_address, first_port, last_port, sled_address, vni, mac); + +CREATE UNIQUE INDEX IF NOT EXISTS ipv4_nat_version_removed ON omicron.public.ipv4_nat_entry ( + version_removed +) +STORING ( + external_address, + first_port, + last_port, + sled_address, + vni, + mac, + time_created, + time_deleted +); + +/* + * Metadata for the schema itself. This version number isn't great, as there's + * nothing to ensure it gets bumped when it should be, but it's a start. + */ CREATE TABLE IF NOT EXISTS omicron.public.db_metadata ( -- There should only be one row of this table for the whole DB. -- It's a little goofy, but filter on "singleton = true" before querying @@ -2838,7 +2906,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - ( TRUE, NOW(), NOW(), '10.0.0', NULL) + ( TRUE, NOW(), NOW(), '11.0.0', NULL) ON CONFLICT DO NOTHING; COMMIT; diff --git a/smf/nexus/multi-sled/config-partial.toml b/smf/nexus/multi-sled/config-partial.toml index cae1f650c9..94c8f5572e 100644 --- a/smf/nexus/multi-sled/config-partial.toml +++ b/smf/nexus/multi-sled/config-partial.toml @@ -38,6 +38,7 @@ dns_external.max_concurrent_server_updates = 5 # certificates it will take _other_ Nexus instances to notice and stop serving # them (on a sunny day). external_endpoints.period_secs = 60 +nat_cleanup.period_secs = 30 # How frequently to collect hardware/software inventory from the whole system # (even if we don't have reason to believe anything has changed). inventory.period_secs = 600 diff --git a/smf/nexus/single-sled/config-partial.toml b/smf/nexus/single-sled/config-partial.toml index be8683be54..fcaa6176a8 100644 --- a/smf/nexus/single-sled/config-partial.toml +++ b/smf/nexus/single-sled/config-partial.toml @@ -38,6 +38,7 @@ dns_external.max_concurrent_server_updates = 5 # certificates it will take _other_ Nexus instances to notice and stop serving # them (on a sunny day). external_endpoints.period_secs = 60 +nat_cleanup.period_secs = 30 # How frequently to collect hardware/software inventory from the whole system # (even if we don't have reason to believe anything has changed). 
inventory.period_secs = 600 diff --git a/tools/dendrite_openapi_version b/tools/dendrite_openapi_version index aadf68da1b..ba4b5a5722 100644 --- a/tools/dendrite_openapi_version +++ b/tools/dendrite_openapi_version @@ -1,2 +1,2 @@ -COMMIT="147b03901aa8305b5271e0133a09f628b8140949" -SHA2="82437c74afd4894aa5b9ea800d5777793e8777fe87471321dd22ad1a1c9c9ef3" +COMMIT="8ff834e7d0a6adb263240edd40537f2c0768f1a4" +SHA2="07d115bfa8498a8015ca2a8447efeeac32e24aeb25baf3d5e2313216e11293c0" diff --git a/tools/dendrite_stub_checksums b/tools/dendrite_stub_checksums index 81a957323c..619a6bf287 100644 --- a/tools/dendrite_stub_checksums +++ b/tools/dendrite_stub_checksums @@ -1,3 +1,3 @@ -CIDL_SHA256_ILLUMOS="14fe7f904f963b50188d6e060106b63df6d061ca64238f7b21623c432b5944e3" -CIDL_SHA256_LINUX_DPD="fff6c7484bbb06aa644e3fe41b200e4f7f8d7f65d067cbecd851c834c15fe2ec" -CIDL_SHA256_LINUX_SWADM="0449383a57468aec3b5a4ad26962cfc9e9a121bd13e777329e8a70767e6d9aae" +CIDL_SHA256_ILLUMOS="c00e79f55e0bdf048069b2d18a4d009ddfef46e7e5d846887cf96e843a8884bd" +CIDL_SHA256_LINUX_DPD="b5d829b4628759ac374106f3c56c29074b29577fd0ff72f61c3b8289fea430fe" +CIDL_SHA256_LINUX_SWADM="afc68828f54dc57b32dc1556fc588baeab12341c30e96cc0fadb49f401b4b48f" From 7adc3c0184b27328a3949b4e7a9809bde19ab834 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Sat, 18 Nov 2023 16:33:55 -0800 Subject: [PATCH 08/56] Update Rust crate vsss-rs to 3.3.1 (#4478) --- bootstore/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bootstore/Cargo.toml b/bootstore/Cargo.toml index 18e3e3876b..93eb6a3c48 100644 --- a/bootstore/Cargo.toml +++ b/bootstore/Cargo.toml @@ -27,7 +27,7 @@ slog.workspace = true thiserror.workspace = true tokio.workspace = true uuid.workspace = true -vsss-rs = { version = "3.2.0", features = ["std", "curve25519"] } +vsss-rs = { version = "3.3.1", features = ["std", "curve25519"] } zeroize.workspace = true # See omicron-rpaths for more about the "pq-sys" dependency. 
From 612cdafa6d065c22cb0a12731e0568588623cc7c Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Sat, 18 Nov 2023 17:01:35 -0800 Subject: [PATCH 09/56] Update Rust crate cookie to 0.18 (#4347) --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 58d0653728..b9d70c50ba 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1048,9 +1048,9 @@ checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" [[package]] name = "cookie" -version = "0.16.2" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e859cd57d0710d9e06c381b550c06e76992472a8c6d527aecd2fc673dcc231fb" +checksum = "3cd91cf61412820176e137621345ee43b3f4423e589e7ae4e50d601d93e35ef8" dependencies = [ "time", "version_check", diff --git a/Cargo.toml b/Cargo.toml index 82bca496a5..7d904cb2ea 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -165,7 +165,7 @@ ciborium = "0.2.1" cfg-if = "1.0" chrono = { version = "0.4", features = [ "serde" ] } clap = { version = "4.4", features = ["derive", "env", "wrap_help"] } -cookie = "0.16" +cookie = "0.18" criterion = { version = "0.5.1", features = [ "async_tokio" ] } crossbeam = "0.8" crossterm = { version = "0.27.0", features = ["event-stream"] } From d25aabca8828033987208346ae5a623265ba1397 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Sat, 18 Nov 2023 17:01:58 -0800 Subject: [PATCH 10/56] Update actions/checkout action to v4 (#4479) --- .github/workflows/check-opte-ver.yml | 2 +- .github/workflows/check-workspace-deps.yml | 2 +- .github/workflows/rust.yml | 8 ++++---- .github/workflows/update-dendrite.yml | 2 +- .github/workflows/update-maghemite.yml | 2 +- .github/workflows/validate-openapi-spec.yml | 2 +- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/check-opte-ver.yml b/.github/workflows/check-opte-ver.yml index 42ef1dda11..a8c0febc2d 100644 --- a/.github/workflows/check-opte-ver.yml +++ b/.github/workflows/check-opte-ver.yml @@ -9,7 +9,7 @@ jobs: check-opte-ver: runs-on: ubuntu-22.04 steps: - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ github.event.pull_request.head.sha }} # see omicron#4461 - name: Install jq diff --git a/.github/workflows/check-workspace-deps.yml b/.github/workflows/check-workspace-deps.yml index f94ed32fde..ec2bcc3537 100644 --- a/.github/workflows/check-workspace-deps.yml +++ b/.github/workflows/check-workspace-deps.yml @@ -10,7 +10,7 @@ jobs: check-workspace-deps: runs-on: ubuntu-22.04 steps: - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ github.event.pull_request.head.sha }} # see omicron#4461 - name: Check Workspace Dependencies diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 6239add88f..23ccc7e61f 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -9,7 +9,7 @@ jobs: check-style: runs-on: ubuntu-22.04 steps: - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ github.event.pull_request.head.sha }} # see omicron#4461 - name: Report cargo version @@ -31,7 +31,7 @@ jobs: 
# This repo is unstable and unnecessary: https://github.com/microsoft/linux-package-repositories/issues/34 - name: Disable packages.microsoft.com repo run: sudo rm -f /etc/apt/sources.list.d/microsoft-prod.list - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ github.event.pull_request.head.sha }} # see omicron#4461 - uses: Swatinem/rust-cache@3cf7f8cc28d1b4e7d01e3783be10a97d55d483c8 # v2.7.1 @@ -61,7 +61,7 @@ jobs: # This repo is unstable and unnecessary: https://github.com/microsoft/linux-package-repositories/issues/34 - name: Disable packages.microsoft.com repo run: sudo rm -f /etc/apt/sources.list.d/microsoft-prod.list - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ github.event.pull_request.head.sha }} # see omicron#4461 - uses: Swatinem/rust-cache@3cf7f8cc28d1b4e7d01e3783be10a97d55d483c8 # v2.7.1 @@ -91,7 +91,7 @@ jobs: # This repo is unstable and unnecessary: https://github.com/microsoft/linux-package-repositories/issues/34 - name: Disable packages.microsoft.com repo run: sudo rm -f /etc/apt/sources.list.d/microsoft-prod.list - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ github.event.pull_request.head.sha }} # see omicron#4461 - uses: Swatinem/rust-cache@3cf7f8cc28d1b4e7d01e3783be10a97d55d483c8 # v2.7.1 diff --git a/.github/workflows/update-dendrite.yml b/.github/workflows/update-dendrite.yml index 9d79dfc8f9..919c97f586 100644 --- a/.github/workflows/update-dendrite.yml +++ b/.github/workflows/update-dendrite.yml @@ -29,7 +29,7 @@ jobs: steps: # Checkout both the target and integration branches - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: token: ${{ inputs.reflector_access_token }} fetch-depth: 0 diff --git a/.github/workflows/update-maghemite.yml b/.github/workflows/update-maghemite.yml index e2512dc6ce..04023bc5b0 100644 --- a/.github/workflows/update-maghemite.yml +++ b/.github/workflows/update-maghemite.yml @@ -29,7 +29,7 @@ jobs: steps: # Checkout both the target and integration branches - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: token: ${{ inputs.reflector_access_token }} fetch-depth: 0 diff --git a/.github/workflows/validate-openapi-spec.yml b/.github/workflows/validate-openapi-spec.yml index ea77ed9497..39c6c1debb 100644 --- a/.github/workflows/validate-openapi-spec.yml +++ b/.github/workflows/validate-openapi-spec.yml @@ -10,7 +10,7 @@ jobs: format: runs-on: ubuntu-22.04 steps: - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ github.event.pull_request.head.sha }} # see omicron#4461 - uses: actions/setup-node@1a4442cacd436585916779262731d5b162bc6ec7 # v3.8.2 From bc91ea487348d7a75841075699313d72278b525a Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Sat, 18 Nov 2023 19:57:12 -0800 Subject: [PATCH 11/56] Update Rust crate omicron-zone-package to 0.9.1 (#4365) Co-authored-by: Rain --- Cargo.lock | 6 ++++-- 
Cargo.toml | 2 +- package/src/dot.rs | 2 +- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b9d70c50ba..dd126fab31 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4988,9 +4988,9 @@ dependencies = [ [[package]] name = "omicron-zone-package" -version = "0.8.3" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dc0973625837d1c4e31d4aa60e72008f3af3aa9b0d0ebfd5b5dc67d2e721a48" +checksum = "620c53207d39a385f298444337d575690e0d9e793561d471ba7a614dc213e372" dependencies = [ "anyhow", "async-trait", @@ -4998,7 +4998,9 @@ dependencies = [ "filetime", "flate2", "futures-util", + "hex", "reqwest", + "ring 0.16.20", "semver 1.0.20", "serde", "serde_derive", diff --git a/Cargo.toml b/Cargo.toml index 7d904cb2ea..e008827405 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -256,7 +256,7 @@ omicron-package = { path = "package" } omicron-rpaths = { path = "rpaths" } omicron-sled-agent = { path = "sled-agent" } omicron-test-utils = { path = "test-utils" } -omicron-zone-package = "0.8.3" +omicron-zone-package = "0.9.1" oxide-client = { path = "clients/oxide-client" } oxide-vpc = { git = "https://github.com/oxidecomputer/opte", rev = "258a8b59902dd36fc7ee5425e6b1fb5fc80d4649", features = [ "api", "std" ] } once_cell = "1.18.0" diff --git a/package/src/dot.rs b/package/src/dot.rs index f6ac32aa3a..133d5c0f00 100644 --- a/package/src/dot.rs +++ b/package/src/dot.rs @@ -184,7 +184,7 @@ pub fn do_dot( } } - PackageSource::Local { blobs, rust, paths } => { + PackageSource::Local { blobs, rust, paths, .. } => { // Regardless of the type of local package (e.g., files-only or // Rust package or whatever), create nodes showing any S3 blobs // on which it depends. From 2055962e578bb6643072882ff317aa3e4ad62b9e Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Sun, 19 Nov 2023 05:23:11 +0000 Subject: [PATCH 12/56] Update taiki-e/install-action digest to dd2be0d (#4515) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [taiki-e/install-action](https://togithub.com/taiki-e/install-action) | action | digest | [`7c4edf1` -> `dd2be0d`](https://togithub.com/taiki-e/install-action/compare/7c4edf1...dd2be0d) | --- ### Configuration 📅 **Schedule**: Branch creation - "after 8pm,before 6am" in timezone America/Los_Angeles, Automerge - "after 8pm,before 6am" in timezone America/Los_Angeles. 🚦 **Automerge**: Enabled. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [Renovate Bot](https://togithub.com/renovatebot/renovate). 
Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- .github/workflows/hakari.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/hakari.yml b/.github/workflows/hakari.yml index 07b7124f73..d169caede3 100644 --- a/.github/workflows/hakari.yml +++ b/.github/workflows/hakari.yml @@ -24,7 +24,7 @@ jobs: with: toolchain: stable - name: Install cargo-hakari - uses: taiki-e/install-action@7c4edf14345f90e1199544e41cb94c3ef67bd237 # v2 + uses: taiki-e/install-action@dd2be0d4c91c3f760b208b1d954dd582cdd782fe # v2 with: tool: cargo-hakari - name: Check workspace-hack Cargo.toml is up-to-date From 16aa92afa83daf2b13b28450420c24e0627b6229 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Sun, 19 Nov 2023 13:59:54 -0800 Subject: [PATCH 13/56] Update Rust crate http to 0.2.11 (#4517) Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index dd126fab31..ccc6fd3e8b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2787,9 +2787,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.9" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" +checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" dependencies = [ "bytes", "fnv", diff --git a/Cargo.toml b/Cargo.toml index e008827405..7f8a335285 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -206,7 +206,7 @@ hex = "0.4.3" hex-literal = "0.4.1" highway = "1.1.0" hkdf = "0.12.3" -http = "0.2.9" +http = "0.2.11" httptest = "0.15.5" hubtools = { git = "https://github.com/oxidecomputer/hubtools.git", branch = "main" } humantime = "2.1.0" From f36d3b25ed9b4fb9473c5bbbdec7da8c9cbb9f0a Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Sun, 19 Nov 2023 14:00:52 -0800 Subject: [PATCH 14/56] Update Rust crate rustls to 0.21.9 (#4518) Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ccc6fd3e8b..efb3e5b27f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6874,9 +6874,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.8" +version = "0.21.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "446e14c5cda4f3f30fe71863c34ec70f5ac79d6087097ad0bb433e1be5edf04c" +checksum = "629648aced5775d558af50b2b4c7b02983a04b312126d45eeead26e7caa498b9" dependencies = [ "log", "ring 0.17.5", diff --git a/Cargo.toml b/Cargo.toml index 7f8a335285..0fd4c2662f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -307,7 +307,7 @@ ring = "0.16" rpassword = "7.2.0" rstest = "0.18.2" rustfmt-wrapper = "0.2" -rustls = "0.21.8" +rustls = "0.21.9" samael = { git = "https://github.com/njaremko/samael", features = ["xmlsec"], branch = "master" } schemars = "0.8.12" secrecy = "0.8.0" From 4951c339a8987c757596207c81afdfada8bbf7c7 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Sun, 19 Nov 2023 14:01:35 -0800 Subject: [PATCH 15/56] Update Rust crate diesel to 2.1.4 (#4516) Co-authored-by: oxide-renovate[bot] 
<146848827+oxide-renovate[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- workspace-hack/Cargo.toml | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index efb3e5b27f..a45895676b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1629,9 +1629,9 @@ dependencies = [ [[package]] name = "diesel" -version = "2.1.3" +version = "2.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2268a214a6f118fce1838edba3d1561cf0e78d8de785475957a580a7f8c69d33" +checksum = "62c6fcf842f17f8c78ecf7c81d75c5ce84436b41ee07e03f490fbb5f5a8731d8" dependencies = [ "bitflags 2.4.0", "byteorder", diff --git a/Cargo.toml b/Cargo.toml index 0fd4c2662f..be0e8f6d01 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -180,7 +180,7 @@ db-macros = { path = "nexus/db-macros" } debug-ignore = "1.0.5" derive_more = "0.99.17" derive-where = "1.2.5" -diesel = { version = "2.1.3", features = ["postgres", "r2d2", "chrono", "serde_json", "network-address", "uuid"] } +diesel = { version = "2.1.4", features = ["postgres", "r2d2", "chrono", "serde_json", "network-address", "uuid"] } diesel-dtrace = { git = "https://github.com/oxidecomputer/diesel-dtrace", branch = "main" } dns-server = { path = "dns-server" } dns-service-client = { path = "clients/dns-service-client" } diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index c95226b960..4d416eca02 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -33,7 +33,7 @@ crossbeam-epoch = { version = "0.9.15" } crossbeam-utils = { version = "0.8.16" } crossterm = { version = "0.27.0", features = ["event-stream", "serde"] } crypto-common = { version = "0.1.6", default-features = false, features = ["getrandom", "std"] } -diesel = { version = "2.1.3", features = ["chrono", "i-implement-a-third-party-backend-and-opt-into-breaking-changes", "network-address", "postgres", "r2d2", "serde_json", "uuid"] } +diesel = { version = "2.1.4", features = ["chrono", "i-implement-a-third-party-backend-and-opt-into-breaking-changes", "network-address", "postgres", "r2d2", "serde_json", "uuid"] } digest = { version = "0.10.7", features = ["mac", "oid", "std"] } either = { version = "1.9.0" } flate2 = { version = "1.0.28" } @@ -126,7 +126,7 @@ crossbeam-epoch = { version = "0.9.15" } crossbeam-utils = { version = "0.8.16" } crossterm = { version = "0.27.0", features = ["event-stream", "serde"] } crypto-common = { version = "0.1.6", default-features = false, features = ["getrandom", "std"] } -diesel = { version = "2.1.3", features = ["chrono", "i-implement-a-third-party-backend-and-opt-into-breaking-changes", "network-address", "postgres", "r2d2", "serde_json", "uuid"] } +diesel = { version = "2.1.4", features = ["chrono", "i-implement-a-third-party-backend-and-opt-into-breaking-changes", "network-address", "postgres", "r2d2", "serde_json", "uuid"] } digest = { version = "0.10.7", features = ["mac", "oid", "std"] } either = { version = "1.9.0" } flate2 = { version = "1.0.28" } From 8406356c3a07a7473ef4289677bc4fb2d5970c6a Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Mon, 20 Nov 2023 05:23:28 +0000 Subject: [PATCH 16/56] Update taiki-e/install-action digest to ccc14bd (#4521) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [taiki-e/install-action](https://togithub.com/taiki-e/install-action) 
| action | digest | [`dd2be0d` -> `ccc14bd`](https://togithub.com/taiki-e/install-action/compare/dd2be0d...ccc14bd) | --- ### Configuration 📅 **Schedule**: Branch creation - "after 8pm,before 6am" in timezone America/Los_Angeles, Automerge - "after 8pm,before 6am" in timezone America/Los_Angeles. 🚦 **Automerge**: Enabled. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [Renovate Bot](https://togithub.com/renovatebot/renovate). Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- .github/workflows/hakari.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/hakari.yml b/.github/workflows/hakari.yml index d169caede3..cc67b91fce 100644 --- a/.github/workflows/hakari.yml +++ b/.github/workflows/hakari.yml @@ -24,7 +24,7 @@ jobs: with: toolchain: stable - name: Install cargo-hakari - uses: taiki-e/install-action@dd2be0d4c91c3f760b208b1d954dd582cdd782fe # v2 + uses: taiki-e/install-action@ccc14bdc8d34cddf54e4f9fb2da0c208427207a3 # v2 with: tool: cargo-hakari - name: Check workspace-hack Cargo.toml is up-to-date From f5db42d68ec7aac74f1e6cf7e7ed7d78d0e2aff5 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Sun, 19 Nov 2023 22:44:13 -0800 Subject: [PATCH 17/56] Update Rust crate fs-err to 2.10.0 (#4522) Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- Cargo.lock | 7 +++++-- Cargo.toml | 2 +- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a45895676b..a75c341405 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2280,9 +2280,12 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" [[package]] name = "fs-err" -version = "2.9.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0845fa252299212f0389d64ba26f34fa32cfe41588355f21ed507c59a0f64541" +checksum = "fb5fd9bcbe8b1087cbd395b51498c01bc997cef73e778a80b77a811af5e2d29f" +dependencies = [ + "autocfg", +] [[package]] name = "fs2" diff --git a/Cargo.toml b/Cargo.toml index be0e8f6d01..a30e6909f1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -193,7 +193,7 @@ filetime = "0.2.22" flate2 = "1.0.28" flume = "0.11.0" foreign-types = "0.3.2" -fs-err = "2.9.0" +fs-err = "2.10.0" futures = "0.3.29" gateway-client = { path = "clients/gateway-client" } gateway-messages = { git = "https://github.com/oxidecomputer/management-gateway-service", rev = "2739c18e80697aa6bc235c935176d14b4d757ee9", default-features = false, features = ["std"] } From ba1d4f6bbbf63650e6c067cb9d3d27dfc486fdae Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Mon, 20 Nov 2023 06:54:07 +0000 Subject: [PATCH 18/56] Update Rust crate itertools to 0.12.0 (#4524) Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- Cargo.lock | 25 +++++++++++++++++-------- Cargo.toml | 2 +- 2 files changed, 18 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a75c341405..3409e5eead 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3175,7 +3175,7 @@ dependencies = [ "installinator-artifact-client", "installinator-common", "ipcc-key-value", - "itertools 0.11.0", + "itertools 0.12.0", 
"libc", "omicron-common", "omicron-test-utils", @@ -3400,6 +3400,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25db6b064527c5d482d0423354fcd07a89a2dfe07b67892e62411946db7f07b0" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.9" @@ -4002,7 +4011,7 @@ dependencies = [ "hyper-rustls", "internal-dns", "ipnetwork", - "itertools 0.11.0", + "itertools 0.12.0", "lazy_static", "macaddr", "newtype_derive", @@ -4595,7 +4604,7 @@ dependencies = [ "hyper-rustls", "internal-dns", "ipnetwork", - "itertools 0.11.0", + "itertools 0.12.0", "lazy_static", "macaddr", "mg-admin-client", @@ -4802,7 +4811,7 @@ dependencies = [ "illumos-utils", "internal-dns", "ipnetwork", - "itertools 0.11.0", + "itertools 0.12.0", "key-manager", "libc", "macaddr", @@ -5317,7 +5326,7 @@ dependencies = [ "dropshot", "expectorate", "highway", - "itertools 0.11.0", + "itertools 0.12.0", "omicron-common", "omicron-test-utils", "omicron-workspace-hack", @@ -8852,7 +8861,7 @@ dependencies = [ "fs-err", "hex", "hubtools", - "itertools 0.11.0", + "itertools 0.12.0", "omicron-common", "omicron-test-utils", "omicron-workspace-hack", @@ -9425,7 +9434,7 @@ dependencies = [ "humantime", "indexmap 2.1.0", "indicatif", - "itertools 0.11.0", + "itertools 0.12.0", "omicron-common", "omicron-passwords", "omicron-workspace-hack", @@ -9530,7 +9539,7 @@ dependencies = [ "installinator-common", "internal-dns", "ipnetwork", - "itertools 0.11.0", + "itertools 0.12.0", "maplit", "omicron-certificates", "omicron-common", diff --git a/Cargo.toml b/Cargo.toml index a30e6909f1..7fff336fd0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -223,7 +223,7 @@ installinator-common = { path = "installinator-common" } internal-dns = { path = "internal-dns" } ipcc-key-value = { path = "ipcc-key-value" } ipnetwork = { version = "0.20", features = ["schemars"] } -itertools = "0.11.0" +itertools = "0.12.0" key-manager = { path = "key-manager" } kstat-rs = "0.2.3" lazy_static = "1.4.0" From 711a18e88d7a3616d1c53e4311a687c3c6e336d0 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Mon, 20 Nov 2023 08:07:39 +0000 Subject: [PATCH 19/56] Update Rust crate proptest to 1.4.0 (#4525) Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- Cargo.lock | 6 +++--- Cargo.toml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3409e5eead..609e1699cf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6168,9 +6168,9 @@ dependencies = [ [[package]] name = "proptest" -version = "1.3.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c003ac8c77cb07bb74f5f198bce836a689bcd5a42574612bf14d17bfd08c20e" +checksum = "31b476131c3c86cb68032fdc5cb6d5a1045e3e42d96b69fa599fd77701e1f5bf" dependencies = [ "bit-set", "bit-vec", @@ -6180,7 +6180,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rand_xorshift", - "regex-syntax 0.7.5", + "regex-syntax 0.8.2", "rusty-fork", "tempfile", "unarray", diff --git a/Cargo.toml b/Cargo.toml index 7fff336fd0..b55c4fca6a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -293,7 +293,7 @@ progenitor-client = { git = "https://github.com/oxidecomputer/progenitor", branc bhyve_api = { git = "https://github.com/oxidecomputer/propolis", rev = "54398875a2125227d13827d4236dce943c019b1c" } propolis-client = { git = 
"https://github.com/oxidecomputer/propolis", rev = "54398875a2125227d13827d4236dce943c019b1c" } propolis-mock-server = { git = "https://github.com/oxidecomputer/propolis", rev = "54398875a2125227d13827d4236dce943c019b1c" } -proptest = "1.3.1" +proptest = "1.4.0" quote = "1.0" rand = "0.8.5" ratatui = "0.23.0" From b3378547b95517a7cace05050429015c20625dde Mon Sep 17 00:00:00 2001 From: Augustus Mayo Date: Mon, 20 Nov 2023 08:46:51 -0600 Subject: [PATCH 20/56] Fix reflector-bot merge conflict handling (#4469) The update workflows for dendrite and maghemite are currently failing to merge main into their respective integration branches. The intended behavior is that the workflows start by merging main to pick up any new changes, and overwriting any changes to the `*_openapi_version` files that had been made on the integration branches with the versions from main (in the event of a conflict). This gets the integration branch into a clean spot which it can then apply the update scripts to. The workflows as is though are not correctly passing the list of paths for which the the main branch should be preferred. This leaves the integration branch with a conflict causing the action to fail. These changes should fix that and allow merges to complete cleanly. Co-authored-by: reflector[bot] <123+reflector[bot]@users.noreply.github.com> --- .github/workflows/update-dendrite.yml | 2 +- .github/workflows/update-maghemite.yml | 2 +- tools/reflector/helpers.sh | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/update-dendrite.yml b/.github/workflows/update-dendrite.yml index 919c97f586..4a2e6e95cc 100644 --- a/.github/workflows/update-dendrite.yml +++ b/.github/workflows/update-dendrite.yml @@ -39,7 +39,7 @@ jobs: . ./tools/reflector/helpers.sh PATHS=("tools") - merge $TARGET_BRANCH $INT_BRANCH ${{ inputs.reflector_user_id }} $PATHS + merge $TARGET_BRANCH $INT_BRANCH ${{ inputs.reflector_user_id }} "${PATHS[@]}" - name: Update dendrite versions run: | diff --git a/.github/workflows/update-maghemite.yml b/.github/workflows/update-maghemite.yml index 04023bc5b0..b3611f9987 100644 --- a/.github/workflows/update-maghemite.yml +++ b/.github/workflows/update-maghemite.yml @@ -39,7 +39,7 @@ jobs: . ./tools/reflector/helpers.sh PATHS=("tools") - merge $TARGET_BRANCH $INT_BRANCH ${{ inputs.reflector_user_id }} $PATHS + merge $TARGET_BRANCH $INT_BRANCH ${{ inputs.reflector_user_id }} "${PATHS[@]}" - name: Update maghemite versions run: | diff --git a/tools/reflector/helpers.sh b/tools/reflector/helpers.sh index 92d132faae..3d4f693da2 100644 --- a/tools/reflector/helpers.sh +++ b/tools/reflector/helpers.sh @@ -19,7 +19,7 @@ function merge { local TARGET_BRANCH="$1" local INTEGRATION_BRANCH="$2" local BOT_ID="$3" - local -n CHECKOUT_PATHS=$4 + local CHECKOUT_PATHS=$4 set_reflector_bot "$BOT_ID" From 8238581760c0eaa4e096a97e7e999b6b5cdcb62a Mon Sep 17 00:00:00 2001 From: "Andrew J. Stone" Date: Mon, 20 Nov 2023 12:02:39 -0500 Subject: [PATCH 21/56] [nexus] List all uninitialized sleds (#4504) As part of adding a sled to an already initialized rack, we need a way for operators to be able to list sleds that are not part of a rack. This PR adds an external nexus endpoint for doing just that. Like the `sleds` endpoint, this endpoint is not tied to a rack. The way this works is by looking at the SPs in the latest inventory collection and finding all the Baseboards that are not in the `sled` table in CRDB. 
A follow up commit will allow an operator to add uninitialized sleds to a rack with a new external nexus endpoint. --- dev-tools/omdb/src/bin/omdb/db.rs | 18 +- nexus/db-model/src/rack.rs | 19 +- .../db-queries/src/db/datastore/inventory.rs | 624 +++++++++--------- nexus/src/app/rack.rs | 71 +- nexus/src/external_api/http_entrypoints.rs | 24 +- nexus/tests/integration_tests/endpoints.rs | 9 + nexus/tests/output/nexus_tags.txt | 1 + nexus/types/src/external_api/views.rs | 30 +- openapi/nexus.json | 54 ++ 9 files changed, 530 insertions(+), 320 deletions(-) diff --git a/dev-tools/omdb/src/bin/omdb/db.rs b/dev-tools/omdb/src/bin/omdb/db.rs index efcefdea43..d009c05f86 100644 --- a/dev-tools/omdb/src/bin/omdb/db.rs +++ b/dev-tools/omdb/src/bin/omdb/db.rs @@ -53,7 +53,6 @@ use nexus_db_model::Zpool; use nexus_db_queries::context::OpContext; use nexus_db_queries::db; use nexus_db_queries::db::datastore::DataStoreConnection; -use nexus_db_queries::db::datastore::DataStoreInventoryTest; use nexus_db_queries::db::datastore::InstanceAndActiveVmm; use nexus_db_queries::db::identity::Asset; use nexus_db_queries::db::lookup::LookupPath; @@ -383,8 +382,13 @@ impl DbArgs { .await } DbCommands::Inventory(inventory_args) => { - cmd_db_inventory(&datastore, self.fetch_limit, inventory_args) - .await + cmd_db_inventory( + &opctx, + &datastore, + self.fetch_limit, + inventory_args, + ) + .await } DbCommands::Services(ServicesArgs { command: ServicesCommands::ListInstances, @@ -1751,6 +1755,7 @@ fn format_record(record: &DnsRecord) -> impl Display { // Inventory async fn cmd_db_inventory( + opctx: &OpContext, datastore: &DataStore, limit: NonZeroU32, inventory_args: &InventoryArgs, @@ -1768,7 +1773,9 @@ async fn cmd_db_inventory( }) => cmd_db_inventory_collections_list(&conn, limit).await, InventoryCommands::Collections(CollectionsArgs { command: CollectionsCommands::Show(CollectionsShowArgs { id }), - }) => cmd_db_inventory_collections_show(datastore, id, limit).await, + }) => { + cmd_db_inventory_collections_show(opctx, datastore, id, limit).await + } } } @@ -1928,12 +1935,13 @@ async fn cmd_db_inventory_collections_list( } async fn cmd_db_inventory_collections_show( + opctx: &OpContext, datastore: &DataStore, id: Uuid, limit: NonZeroU32, ) -> Result<(), anyhow::Error> { let (collection, incomplete) = datastore - .inventory_collection_read_best_effort(id, limit) + .inventory_collection_read_best_effort(opctx, id, limit) .await .context("reading collection")?; if incomplete { diff --git a/nexus/db-model/src/rack.rs b/nexus/db-model/src/rack.rs index 580ec155b4..f2bc7528d2 100644 --- a/nexus/db-model/src/rack.rs +++ b/nexus/db-model/src/rack.rs @@ -4,8 +4,9 @@ use crate::schema::rack; use db_macros::Asset; -use ipnetwork::IpNetwork; +use ipnetwork::{IpNetwork, Ipv6Network}; use nexus_types::{external_api::views, identity::Asset}; +use omicron_common::api; use uuid::Uuid; /// Information about a local rack. 
@@ -28,6 +29,22 @@ impl Rack { rack_subnet: None, } } + + pub fn subnet(&self) -> Result { + match self.rack_subnet { + Some(IpNetwork::V6(subnet)) => Ok(subnet), + Some(IpNetwork::V4(_)) => { + return Err(api::external::Error::InternalError { + internal_message: "rack subnet not IPv6".into(), + }) + } + None => { + return Err(api::external::Error::InternalError { + internal_message: "rack subnet not set".into(), + }) + } + } + } } impl From for views::Rack { diff --git a/nexus/db-queries/src/db/datastore/inventory.rs b/nexus/db-queries/src/db/datastore/inventory.rs index 114b9dbe31..b743d28ee8 100644 --- a/nexus/db-queries/src/db/datastore/inventory.rs +++ b/nexus/db-queries/src/db/datastore/inventory.rs @@ -10,8 +10,6 @@ use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::queries::ALLOW_FULL_TABLE_SCAN_SQL; use crate::db::TransactionError; -use anyhow::anyhow; -use anyhow::bail; use anyhow::Context; use async_bb8_diesel::AsyncConnection; use async_bb8_diesel::AsyncRunQueryDsl; @@ -44,6 +42,7 @@ use nexus_db_model::SwCaboose; use nexus_types::inventory::Collection; use omicron_common::api::external::Error; use omicron_common::api::external::InternalContext; +use omicron_common::bail_unless; use std::collections::BTreeMap; use std::collections::BTreeSet; use std::num::NonZeroU32; @@ -798,54 +797,311 @@ impl DataStore { Ok(()) } -} -/// Extra interfaces that are not intended (and potentially unsafe) for use in -/// Nexus, but useful for testing and `omdb` -pub trait DataStoreInventoryTest: Send + Sync { - /// List all collections - /// - /// This does not paginate. - fn inventory_collections(&self) -> BoxFuture>>; + /// Attempt to read the latest collection while limiting queries to `limit` + /// records + pub async fn inventory_get_latest_collection( + &self, + opctx: &OpContext, + limit: NonZeroU32, + ) -> Result { + opctx.authorize(authz::Action::Read, &authz::INVENTORY).await?; + let conn = self.pool_connection_authorized(opctx).await?; + use db::schema::inv_collection::dsl; + let collection_id = dsl::inv_collection + .select(dsl::id) + .order_by(dsl::time_started.desc()) + .limit(1) + .first_async::(&*conn) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; - /// Make a best effort to read the given collection while limiting queries - /// to `limit` results. Returns as much as it was able to get. The - /// returned bool indicates whether the returned collection might be - /// incomplete because the limit was reached. - fn inventory_collection_read_best_effort( + self.inventory_collection_read_all_or_nothing( + opctx, + collection_id, + limit, + ) + .await + } + + /// Attempt to read the given collection while limiting queries to `limit` + /// records and returning nothing if `limit` is not large enough. + async fn inventory_collection_read_all_or_nothing( &self, + opctx: &OpContext, id: Uuid, limit: NonZeroU32, - ) -> BoxFuture>; + ) -> Result { + let (collection, limit_reached) = self + .inventory_collection_read_best_effort(opctx, id, limit) + .await?; + bail_unless!( + !limit_reached, + "hit limit of {} records while loading collection", + limit + ); + Ok(collection) + } - /// Attempt to read the given collection while limiting queries to `limit` - /// records - fn inventory_collection_read_all_or_nothing( + /// Make a best effort to read the given collection while limiting queries + /// to `limit` results. Returns as much as it was able to get. 
The + /// returned bool indicates whether the returned collection might be + /// incomplete because the limit was reached. + pub async fn inventory_collection_read_best_effort( &self, + opctx: &OpContext, id: Uuid, limit: NonZeroU32, - ) -> BoxFuture> { - async move { - let (collection, limit_reached) = - self.inventory_collection_read_best_effort(id, limit).await?; - anyhow::ensure!( - !limit_reached, - "hit limit of {} records while loading collection", - limit + ) -> Result<(Collection, bool), Error> { + let conn = self.pool_connection_authorized(opctx).await?; + let sql_limit = i64::from(u32::from(limit)); + let usize_limit = usize::try_from(u32::from(limit)).unwrap(); + let mut limit_reached = false; + let (time_started, time_done, collector) = { + use db::schema::inv_collection::dsl; + + let collections = dsl::inv_collection + .filter(dsl::id.eq(id)) + .limit(2) + .select(InvCollection::as_select()) + .load_async(&*conn) + .await + .map_err(|e| { + public_error_from_diesel(e, ErrorHandler::Server) + })?; + bail_unless!(collections.len() == 1); + let collection = collections.into_iter().next().unwrap(); + ( + collection.time_started, + collection.time_done, + collection.collector, + ) + }; + + let errors: Vec = { + use db::schema::inv_collection_error::dsl; + dsl::inv_collection_error + .filter(dsl::inv_collection_id.eq(id)) + .order_by(dsl::idx) + .limit(sql_limit) + .select(InvCollectionError::as_select()) + .load_async(&*conn) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))? + .into_iter() + .map(|e| e.message) + .collect() + }; + limit_reached = limit_reached || errors.len() == usize_limit; + + let sps: BTreeMap<_, _> = { + use db::schema::inv_service_processor::dsl; + dsl::inv_service_processor + .filter(dsl::inv_collection_id.eq(id)) + .limit(sql_limit) + .select(InvServiceProcessor::as_select()) + .load_async(&*conn) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))? + .into_iter() + .map(|sp_row| { + let baseboard_id = sp_row.hw_baseboard_id; + ( + baseboard_id, + nexus_types::inventory::ServiceProcessor::from(sp_row), + ) + }) + .collect() + }; + limit_reached = limit_reached || sps.len() == usize_limit; + + let rots: BTreeMap<_, _> = { + use db::schema::inv_root_of_trust::dsl; + dsl::inv_root_of_trust + .filter(dsl::inv_collection_id.eq(id)) + .limit(sql_limit) + .select(InvRootOfTrust::as_select()) + .load_async(&*conn) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))? + .into_iter() + .map(|rot_row| { + let baseboard_id = rot_row.hw_baseboard_id; + ( + baseboard_id, + nexus_types::inventory::RotState::from(rot_row), + ) + }) + .collect() + }; + limit_reached = limit_reached || rots.len() == usize_limit; + + // Collect the unique baseboard ids referenced by SPs and RoTs. + let baseboard_id_ids: BTreeSet<_> = + sps.keys().chain(rots.keys()).cloned().collect(); + // Fetch the corresponding baseboard records. + let baseboards_by_id: BTreeMap<_, _> = { + use db::schema::hw_baseboard_id::dsl; + dsl::hw_baseboard_id + .filter(dsl::id.eq_any(baseboard_id_ids)) + .limit(sql_limit) + .select(HwBaseboardId::as_select()) + .load_async(&*conn) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))? 
+ .into_iter() + .map(|bb| { + ( + bb.id, + Arc::new(nexus_types::inventory::BaseboardId::from(bb)), + ) + }) + .collect() + }; + limit_reached = limit_reached || baseboards_by_id.len() == usize_limit; + + // Having those, we can replace the keys in the maps above with + // references to the actual baseboard rather than the uuid. + let sps = sps + .into_iter() + .map(|(id, sp)| { + baseboards_by_id.get(&id).map(|bb| (bb.clone(), sp)).ok_or_else( + || { + Error::internal_error( + "missing baseboard that we should have fetched", + ) + }, + ) + }) + .collect::, _>>()?; + let rots = rots + .into_iter() + .map(|(id, rot)| { + baseboards_by_id + .get(&id) + .map(|bb| (bb.clone(), rot)) + .ok_or_else(|| { + Error::internal_error( + "missing baseboard that we should have fetched", + ) + }) + }) + .collect::, _>>()?; + + // Fetch records of cabooses found. + let inv_caboose_rows = { + use db::schema::inv_caboose::dsl; + dsl::inv_caboose + .filter(dsl::inv_collection_id.eq(id)) + .limit(sql_limit) + .select(InvCaboose::as_select()) + .load_async(&*conn) + .await + .map_err(|e| { + public_error_from_diesel(e, ErrorHandler::Server) + })? + }; + limit_reached = limit_reached || inv_caboose_rows.len() == usize_limit; + + // Collect the unique sw_caboose_ids for those cabooses. + let sw_caboose_ids: BTreeSet<_> = inv_caboose_rows + .iter() + .map(|inv_caboose| inv_caboose.sw_caboose_id) + .collect(); + // Fetch the corresponing records. + let cabooses_by_id: BTreeMap<_, _> = { + use db::schema::sw_caboose::dsl; + dsl::sw_caboose + .filter(dsl::id.eq_any(sw_caboose_ids)) + .limit(sql_limit) + .select(SwCaboose::as_select()) + .load_async(&*conn) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))? + .into_iter() + .map(|sw_caboose_row| { + ( + sw_caboose_row.id, + Arc::new(nexus_types::inventory::Caboose::from( + sw_caboose_row, + )), + ) + }) + .collect() + }; + limit_reached = limit_reached || cabooses_by_id.len() == usize_limit; + + // Assemble the lists of cabooses found. + let mut cabooses_found = BTreeMap::new(); + for c in inv_caboose_rows { + let by_baseboard = cabooses_found + .entry(nexus_types::inventory::CabooseWhich::from(c.which)) + .or_insert_with(BTreeMap::new); + let Some(bb) = baseboards_by_id.get(&c.hw_baseboard_id) else { + let msg = format!( + "unknown baseboard found in inv_caboose: {}", + c.hw_baseboard_id + ); + return Err(Error::internal_error(&msg)); + }; + let Some(sw_caboose) = cabooses_by_id.get(&c.sw_caboose_id) else { + let msg = format!( + "unknown caboose found in inv_caboose: {}", + c.sw_caboose_id + ); + return Err(Error::internal_error(&msg)); + }; + + let previous = by_baseboard.insert( + bb.clone(), + nexus_types::inventory::CabooseFound { + time_collected: c.time_collected, + source: c.source, + caboose: sw_caboose.clone(), + }, + ); + bail_unless!( + previous.is_none(), + "duplicate caboose found: {:?} baseboard {:?}", + c.which, + c.hw_baseboard_id ); - Ok(collection) } - .boxed() + + Ok(( + Collection { + id, + errors, + time_started, + time_done, + collector, + baseboards: baseboards_by_id.values().cloned().collect(), + cabooses: cabooses_by_id.values().cloned().collect(), + sps, + rots, + cabooses_found, + }, + limit_reached, + )) } } +/// Extra interfaces that are not intended (and potentially unsafe) for use in +/// Nexus, but useful for testing and `omdb` +pub trait DataStoreInventoryTest: Send + Sync { + /// List all collections + /// + /// This does not paginate. 
+ fn inventory_collections(&self) -> BoxFuture>>; +} + impl DataStoreInventoryTest for DataStore { fn inventory_collections(&self) -> BoxFuture>> { async { let conn = self .pool_connection_for_tests() .await - .context("getting connectoin")?; + .context("getting connection")?; conn.transaction_async(|conn| async move { conn.batch_execute_async(ALLOW_FULL_TABLE_SCAN_SQL) .await @@ -863,257 +1119,11 @@ impl DataStoreInventoryTest for DataStore { } .boxed() } - - // This function could move into the datastore if it proves helpful. We'd - // need to work out how to report the usual type of Error. For now we don't - // need it so we limit its scope to the test suite. - fn inventory_collection_read_best_effort( - &self, - id: Uuid, - limit: NonZeroU32, - ) -> BoxFuture> { - async move { - let conn = &self - .pool_connection_for_tests() - .await - .context("getting connection")?; - let sql_limit = i64::from(u32::from(limit)); - let usize_limit = usize::try_from(u32::from(limit)).unwrap(); - let mut limit_reached = false; - let (time_started, time_done, collector) = { - use db::schema::inv_collection::dsl; - - let collections = dsl::inv_collection - .filter(dsl::id.eq(id)) - .limit(2) - .select(InvCollection::as_select()) - .load_async(&**conn) - .await - .context("loading collection")?; - anyhow::ensure!(collections.len() == 1); - let collection = collections.into_iter().next().unwrap(); - ( - collection.time_started, - collection.time_done, - collection.collector, - ) - }; - - let errors: Vec = { - use db::schema::inv_collection_error::dsl; - dsl::inv_collection_error - .filter(dsl::inv_collection_id.eq(id)) - .order_by(dsl::idx) - .limit(sql_limit) - .select(InvCollectionError::as_select()) - .load_async(&**conn) - .await - .context("loading collection errors")? - .into_iter() - .map(|e| e.message) - .collect() - }; - limit_reached = limit_reached || errors.len() == usize_limit; - - let sps: BTreeMap<_, _> = { - use db::schema::inv_service_processor::dsl; - dsl::inv_service_processor - .filter(dsl::inv_collection_id.eq(id)) - .limit(sql_limit) - .select(InvServiceProcessor::as_select()) - .load_async(&**conn) - .await - .context("loading service processors")? - .into_iter() - .map(|sp_row| { - let baseboard_id = sp_row.hw_baseboard_id; - ( - baseboard_id, - nexus_types::inventory::ServiceProcessor::from( - sp_row, - ), - ) - }) - .collect() - }; - limit_reached = limit_reached || sps.len() == usize_limit; - - let rots: BTreeMap<_, _> = { - use db::schema::inv_root_of_trust::dsl; - dsl::inv_root_of_trust - .filter(dsl::inv_collection_id.eq(id)) - .limit(sql_limit) - .select(InvRootOfTrust::as_select()) - .load_async(&**conn) - .await - .context("loading roots of trust")? - .into_iter() - .map(|rot_row| { - let baseboard_id = rot_row.hw_baseboard_id; - ( - baseboard_id, - nexus_types::inventory::RotState::from(rot_row), - ) - }) - .collect() - }; - limit_reached = limit_reached || rots.len() == usize_limit; - - // Collect the unique baseboard ids referenced by SPs and RoTs. - let baseboard_id_ids: BTreeSet<_> = - sps.keys().chain(rots.keys()).cloned().collect(); - // Fetch the corresponding baseboard records. - let baseboards_by_id: BTreeMap<_, _> = { - use db::schema::hw_baseboard_id::dsl; - dsl::hw_baseboard_id - .filter(dsl::id.eq_any(baseboard_id_ids)) - .limit(sql_limit) - .select(HwBaseboardId::as_select()) - .load_async(&**conn) - .await - .context("loading baseboards")? 
- .into_iter() - .map(|bb| { - ( - bb.id, - Arc::new( - nexus_types::inventory::BaseboardId::from(bb), - ), - ) - }) - .collect() - }; - limit_reached = - limit_reached || baseboards_by_id.len() == usize_limit; - - // Having those, we can replace the keys in the maps above with - // references to the actual baseboard rather than the uuid. - let sps = sps - .into_iter() - .map(|(id, sp)| { - baseboards_by_id - .get(&id) - .map(|bb| (bb.clone(), sp)) - .ok_or_else(|| { - anyhow!( - "missing baseboard that we should have fetched" - ) - }) - }) - .collect::, _>>()?; - let rots = - rots.into_iter() - .map(|(id, rot)| { - baseboards_by_id - .get(&id) - .map(|bb| (bb.clone(), rot)) - .ok_or_else(|| { - anyhow!("missing baseboard that we should have fetched") - }) - }) - .collect::, _>>()?; - - // Fetch records of cabooses found. - let inv_caboose_rows = { - use db::schema::inv_caboose::dsl; - dsl::inv_caboose - .filter(dsl::inv_collection_id.eq(id)) - .limit(sql_limit) - .select(InvCaboose::as_select()) - .load_async(&**conn) - .await - .context("loading inv_cabooses")? - }; - limit_reached = - limit_reached || inv_caboose_rows.len() == usize_limit; - - // Collect the unique sw_caboose_ids for those cabooses. - let sw_caboose_ids: BTreeSet<_> = inv_caboose_rows - .iter() - .map(|inv_caboose| inv_caboose.sw_caboose_id) - .collect(); - // Fetch the corresponing records. - let cabooses_by_id: BTreeMap<_, _> = { - use db::schema::sw_caboose::dsl; - dsl::sw_caboose - .filter(dsl::id.eq_any(sw_caboose_ids)) - .limit(sql_limit) - .select(SwCaboose::as_select()) - .load_async(&**conn) - .await - .context("loading sw_cabooses")? - .into_iter() - .map(|sw_caboose_row| { - ( - sw_caboose_row.id, - Arc::new(nexus_types::inventory::Caboose::from( - sw_caboose_row, - )), - ) - }) - .collect() - }; - limit_reached = - limit_reached || cabooses_by_id.len() == usize_limit; - - // Assemble the lists of cabooses found. 
- let mut cabooses_found = BTreeMap::new(); - for c in inv_caboose_rows { - let by_baseboard = cabooses_found - .entry(nexus_types::inventory::CabooseWhich::from(c.which)) - .or_insert_with(BTreeMap::new); - let Some(bb) = baseboards_by_id.get(&c.hw_baseboard_id) else { - bail!( - "unknown baseboard found in inv_caboose: {}", - c.hw_baseboard_id - ); - }; - let Some(sw_caboose) = cabooses_by_id.get(&c.sw_caboose_id) - else { - bail!( - "unknown caboose found in inv_caboose: {}", - c.sw_caboose_id - ); - }; - - let previous = by_baseboard.insert( - bb.clone(), - nexus_types::inventory::CabooseFound { - time_collected: c.time_collected, - source: c.source, - caboose: sw_caboose.clone(), - }, - ); - anyhow::ensure!( - previous.is_none(), - "duplicate caboose found: {:?} baseboard {:?}", - c.which, - c.hw_baseboard_id - ); - } - - Ok(( - Collection { - id, - errors, - time_started, - time_done, - collector, - baseboards: baseboards_by_id.values().cloned().collect(), - cabooses: cabooses_by_id.values().cloned().collect(), - sps, - rots, - cabooses_found, - }, - limit_reached, - )) - } - .boxed() - } } #[cfg(test)] mod test { + use crate::context::OpContext; use crate::db::datastore::datastore_test; use crate::db::datastore::inventory::DataStoreInventoryTest; use crate::db::datastore::DataStore; @@ -1136,11 +1146,14 @@ mod test { use uuid::Uuid; async fn read_collection( + opctx: &OpContext, datastore: &DataStore, id: Uuid, ) -> anyhow::Result { let limit = NonZeroU32::new(1000).unwrap(); - datastore.inventory_collection_read_all_or_nothing(id, limit).await + Ok(datastore + .inventory_collection_read_all_or_nothing(opctx, id, limit) + .await?) } async fn count_baseboards_cabooses( @@ -1186,9 +1199,10 @@ mod test { // Read it back. let conn = datastore.pool_connection_for_tests().await.unwrap(); - let collection_read = read_collection(&datastore, collection1.id) - .await - .expect("failed to read collection back"); + let collection_read = + read_collection(&opctx, &datastore, collection1.id) + .await + .expect("failed to read collection back"); assert_eq!(collection1, collection_read); // There ought to be no baseboards or cabooses in the databases from @@ -1208,9 +1222,10 @@ mod test { .inventory_insert_collection(&opctx, &collection2) .await .expect("failed to insert collection"); - let collection_read = read_collection(&datastore, collection2.id) - .await - .expect("failed to read collection back"); + let collection_read = + read_collection(&opctx, &datastore, collection2.id) + .await + .expect("failed to read collection back"); assert_eq!(collection2, collection_read); // Verify that we have exactly the set of cabooses and baseboards in the // databases that came from this first non-empty collection. @@ -1221,6 +1236,18 @@ mod test { assert_eq!(collection2.baseboards.len(), nbaseboards); assert_eq!(collection2.cabooses.len(), ncabooses); + // Check that we get an error on the limit being reached for + // `read_all_or_nothing` + let limit = NonZeroU32::new(1).unwrap(); + assert!(datastore + .inventory_collection_read_all_or_nothing( + &opctx, + collection2.id, + limit + ) + .await + .is_err()); + // Now insert an equivalent collection again. Verify the distinct // baseboards and cabooses again. 
This is important: the insertion // process should re-use the baseboards and cabooses from the previous @@ -1231,9 +1258,10 @@ mod test { .inventory_insert_collection(&opctx, &collection3) .await .expect("failed to insert collection"); - let collection_read = read_collection(&datastore, collection3.id) - .await - .expect("failed to read collection back"); + let collection_read = + read_collection(&opctx, &datastore, collection3.id) + .await + .expect("failed to read collection back"); assert_eq!(collection3, collection_read); // Verify that we have the same number of cabooses and baseboards, since // those didn't change. @@ -1275,9 +1303,10 @@ mod test { .inventory_insert_collection(&opctx, &collection4) .await .expect("failed to insert collection"); - let collection_read = read_collection(&datastore, collection4.id) - .await - .expect("failed to read collection back"); + let collection_read = + read_collection(&opctx, &datastore, collection4.id) + .await + .expect("failed to read collection back"); assert_eq!(collection4, collection_read); // Verify the number of baseboards and collections again. assert_eq!( @@ -1302,9 +1331,10 @@ mod test { .inventory_insert_collection(&opctx, &collection5) .await .expect("failed to insert collection"); - let collection_read = read_collection(&datastore, collection5.id) - .await - .expect("failed to read collection back"); + let collection_read = + read_collection(&opctx, &datastore, collection5.id) + .await + .expect("failed to read collection back"); assert_eq!(collection5, collection_read); assert_eq!(collection5.baseboards.len(), collection3.baseboards.len()); assert_eq!(collection5.cabooses.len(), collection3.cabooses.len()); @@ -1433,19 +1463,19 @@ mod test { ); // If we try to fetch a pruned collection, we should get nothing. - let _ = read_collection(&datastore, collection4.id) + let _ = read_collection(&opctx, &datastore, collection4.id) .await .expect_err("unexpectedly read pruned collection"); // But we should still be able to fetch the collections that do exist. let collection_read = - read_collection(&datastore, collection5.id).await.unwrap(); + read_collection(&opctx, &datastore, collection5.id).await.unwrap(); assert_eq!(collection5, collection_read); let collection_read = - read_collection(&datastore, collection6.id).await.unwrap(); + read_collection(&opctx, &datastore, collection6.id).await.unwrap(); assert_eq!(collection6, collection_read); let collection_read = - read_collection(&datastore, collection7.id).await.unwrap(); + read_collection(&opctx, &datastore, collection7.id).await.unwrap(); assert_eq!(collection7, collection_read); // We should prune more than one collection, if needed. 
We'll wind up diff --git a/nexus/src/app/rack.rs b/nexus/src/app/rack.rs index 163f3bd5bb..1c2e49e260 100644 --- a/nexus/src/app/rack.rs +++ b/nexus/src/app/rack.rs @@ -9,6 +9,7 @@ use crate::external_api::params; use crate::external_api::params::CertificateCreate; use crate::external_api::shared::ServiceUsingCertificate; use crate::internal_api::params::RackInitializationRequest; +use gateway_client::types::SpType; use ipnetwork::IpNetwork; use nexus_db_model::DnsGroup; use nexus_db_model::InitialDnsGroup; @@ -31,6 +32,9 @@ use nexus_types::external_api::params::{ use nexus_types::external_api::shared::FleetRole; use nexus_types::external_api::shared::SiloIdentityMode; use nexus_types::external_api::shared::SiloRole; +use nexus_types::external_api::views; +use nexus_types::external_api::views::Baseboard; +use nexus_types::external_api::views::UninitializedSled; use nexus_types::internal_api::params::DnsRecord; use omicron_common::api::external::AddressLotKind; use omicron_common::api::external::DataPageParams; @@ -51,6 +55,7 @@ use std::collections::BTreeSet; use std::collections::HashMap; use std::net::IpAddr; use std::net::Ipv4Addr; +use std::num::NonZeroU32; use std::str::FromStr; use uuid::Uuid; @@ -614,20 +619,7 @@ impl super::Nexus { opctx: &OpContext, ) -> Result { let rack = self.rack_lookup(opctx, &self.rack_id).await?; - - let subnet = match rack.rack_subnet { - Some(IpNetwork::V6(subnet)) => subnet, - Some(IpNetwork::V4(_)) => { - return Err(Error::InternalError { - internal_message: "rack subnet not IPv6".into(), - }) - } - None => { - return Err(Error::InternalError { - internal_message: "rack subnet not set".into(), - }) - } - }; + let subnet = rack.subnet()?; let db_ports = self.active_port_settings(opctx).await?; let mut ports = Vec::new(); @@ -724,4 +716,55 @@ impl super::Nexus { Ok(result) } + + /// Return the list of sleds that are inserted into an initialized rack + /// but not yet initialized as part of a rack. + // + // TODO-multirack: We currently limit sleds to a single rack and we also + // retrieve the `rack_uuid` from the Nexus instance used. 
+ pub(crate) async fn uninitialized_sled_list( + &self, + opctx: &OpContext, + ) -> ListResultVec { + // Grab the SPs from the last collection + let limit = NonZeroU32::new(50).unwrap(); + let collection = self + .db_datastore + .inventory_get_latest_collection(opctx, limit) + .await?; + let pagparams = DataPageParams { + marker: None, + direction: dropshot::PaginationOrder::Descending, + // TODO: This limit is only suitable for a single sled cluster + limit: NonZeroU32::new(32).unwrap(), + }; + let sleds = self.db_datastore.sled_list(opctx, &pagparams).await?; + + let mut uninitialized_sleds: Vec = collection + .sps + .into_iter() + .filter_map(|(k, v)| { + if v.sp_type == SpType::Sled { + Some(UninitializedSled { + baseboard: Baseboard { + serial: k.serial_number.clone(), + part: k.part_number.clone(), + revision: v.baseboard_revision.into(), + }, + rack_id: self.rack_id, + cubby: v.sp_slot, + }) + } else { + None + } + }) + .collect(); + + let sled_baseboards: BTreeSet = + sleds.into_iter().map(|s| views::Sled::from(s).baseboard).collect(); + + // Retain all sleds that exist but are not in the sled table + uninitialized_sleds.retain(|s| !sled_baseboards.contains(&s.baseboard)); + Ok(uninitialized_sleds) + } } diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index eba97a88ec..428632bcf5 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -8,8 +8,8 @@ use super::{ console_api, device_auth, params, views::{ self, Certificate, Group, IdentityProvider, Image, IpPool, IpPoolRange, - PhysicalDisk, Project, Rack, Role, Silo, Sled, Snapshot, SshKey, User, - UserBuiltin, Vpc, VpcRouter, VpcSubnet, + PhysicalDisk, Project, Rack, Role, Silo, Sled, Snapshot, SshKey, + UninitializedSled, User, UserBuiltin, Vpc, VpcRouter, VpcSubnet, }, }; use crate::external_api::shared; @@ -222,6 +222,7 @@ pub(crate) fn external_api() -> NexusApiDescription { api.register(physical_disk_list)?; api.register(switch_list)?; api.register(switch_view)?; + api.register(uninitialized_sled_list)?; api.register(user_builtin_list)?; api.register(user_builtin_view)?; @@ -4382,6 +4383,25 @@ async fn rack_view( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } +/// List uninitialized sleds in a given rack +#[endpoint { + method = GET, + path = "/v1/system/hardware/uninitialized-sleds", + tags = ["system/hardware"] +}] +async fn uninitialized_sled_list( + rqctx: RequestContext>, +) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.nexus; + let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + let sleds = nexus.uninitialized_sled_list(&opctx).await?; + Ok(HttpResponseOk(sleds)) + }; + apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await +} + // Sleds /// List sleds diff --git a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs index 8fba22fb2f..64790c49c2 100644 --- a/nexus/tests/integration_tests/endpoints.rs +++ b/nexus/tests/integration_tests/endpoints.rs @@ -43,6 +43,8 @@ use std::str::FromStr; lazy_static! 
{ pub static ref HARDWARE_RACK_URL: String = format!("/v1/system/hardware/racks/{}", RACK_UUID); + pub static ref HARDWARE_UNINITIALIZED_SLEDS: String = + format!("/v1/system/hardware/uninitialized-sleds"); pub static ref HARDWARE_SLED_URL: String = format!("/v1/system/hardware/sleds/{}", SLED_AGENT_UUID); pub static ref HARDWARE_SWITCH_URL: String = @@ -1564,6 +1566,13 @@ lazy_static! { allowed_methods: vec![AllowedMethod::Get], }, + VerifyEndpoint { + url: &HARDWARE_UNINITIALIZED_SLEDS, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Get], + }, + VerifyEndpoint { url: "/v1/system/hardware/sleds", visibility: Visibility::Public, diff --git a/nexus/tests/output/nexus_tags.txt b/nexus/tests/output/nexus_tags.txt index 8c5fe953e3..7f0c30c471 100644 --- a/nexus/tests/output/nexus_tags.txt +++ b/nexus/tests/output/nexus_tags.txt @@ -122,6 +122,7 @@ sled_physical_disk_list GET /v1/system/hardware/sleds/{sle sled_view GET /v1/system/hardware/sleds/{sled_id} switch_list GET /v1/system/hardware/switches switch_view GET /v1/system/hardware/switches/{switch_id} +uninitialized_sled_list GET /v1/system/hardware/uninitialized-sleds API operations found with tag "system/metrics" OPERATION ID METHOD URL PATH diff --git a/nexus/types/src/external_api/views.rs b/nexus/types/src/external_api/views.rs index ef3835c618..b34fc7a542 100644 --- a/nexus/types/src/external_api/views.rs +++ b/nexus/types/src/external_api/views.rs @@ -274,10 +274,38 @@ pub struct Rack { pub identity: AssetIdentityMetadata, } +/// View of a sled that has not been added to an initialized rack yet +#[derive( + Clone, + Debug, + Serialize, + Deserialize, + JsonSchema, + PartialOrd, + Ord, + PartialEq, + Eq, +)] +pub struct UninitializedSled { + pub baseboard: Baseboard, + pub rack_id: Uuid, + pub cubby: u16, +} + // FRUs /// Properties that uniquely identify an Oxide hardware component -#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema)] +#[derive( + Clone, + Debug, + Serialize, + Deserialize, + JsonSchema, + PartialOrd, + Ord, + PartialEq, + Eq, +)] pub struct Baseboard { pub serial: String, pub part: String, diff --git a/openapi/nexus.json b/openapi/nexus.json index 74162a9b2b..0d19e81d9a 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -4064,6 +4064,37 @@ } } }, + "/v1/system/hardware/uninitialized-sleds": { + "get": { + "tags": [ + "system/hardware" + ], + "summary": "List uninitialized sleds in a given rack", + "operationId": "uninitialized_sled_list", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "title": "Array_of_UninitializedSled", + "type": "array", + "items": { + "$ref": "#/components/schemas/UninitializedSled" + } + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, "/v1/system/identity-providers": { "get": { "tags": [ @@ -13939,6 +13970,29 @@ "vlan_id" ] }, + "UninitializedSled": { + "description": "View of a sled that has not been added to an initialized rack yet", + "type": "object", + "properties": { + "baseboard": { + "$ref": "#/components/schemas/Baseboard" + }, + "cubby": { + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "rack_id": { + "type": "string", + "format": "uuid" + } + }, + "required": [ + "baseboard", + "cubby", + "rack_id" + ] + }, "User": { "description": "View of a User", "type": "object", From 
7f42bc9f3f58647b1030fd0dab42451a0bd79200 Mon Sep 17 00:00:00 2001 From: James MacMahon Date: Mon, 20 Nov 2023 14:31:07 -0500 Subject: [PATCH 22/56] Add `omdb db validate` subcommands (#4376) In order to diagnose if customer-support#57 is caused by _not_ having the fixes for omicron#3866, add a few commands to omdb to validate the contents of the database: Usage: omdb db validate Commands: validate-volume-references Validate each `volume_references` column in the region snapshots table validate-region-snapshots Find either region snapshots Nexus knows about that the corresponding Crucible agent says were deleted, or region snapshots that Nexus doesn't know about help Print this message or the help of the given subcommand(s) This commit also adds an environment variable OMDB_FETCH_LIMIT, which overrides the default fetch limit. --- Cargo.lock | 1 + dev-tools/omdb/Cargo.toml | 3 +- dev-tools/omdb/src/bin/omdb/db.rs | 463 +++++++++++++++++++- dev-tools/omdb/tests/usage_errors.out | 8 +- nexus/db-queries/src/db/datastore/mod.rs | 1 + nexus/db-queries/src/db/datastore/volume.rs | 2 +- 6 files changed, 473 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 609e1699cf..487227e187 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4684,6 +4684,7 @@ dependencies = [ "async-bb8-diesel", "chrono", "clap 4.4.3", + "crucible-agent-client", "diesel", "dropshot", "expectorate", diff --git a/dev-tools/omdb/Cargo.toml b/dev-tools/omdb/Cargo.toml index ff3c650d6d..a8834a0b29 100644 --- a/dev-tools/omdb/Cargo.toml +++ b/dev-tools/omdb/Cargo.toml @@ -12,6 +12,7 @@ anyhow.workspace = true async-bb8-diesel.workspace = true chrono.workspace = true clap.workspace = true +crucible-agent-client.workspace = true diesel.workspace = true dropshot.workspace = true futures.workspace = true @@ -39,10 +40,10 @@ tokio = { workspace = true, features = [ "full" ] } uuid.workspace = true ipnetwork.workspace = true omicron-workspace-hack.workspace = true +nexus-test-utils.workspace = true [dev-dependencies] expectorate.workspace = true -nexus-test-utils.workspace = true nexus-test-utils-macros.workspace = true omicron-nexus.workspace = true omicron-test-utils.workspace = true diff --git a/dev-tools/omdb/src/bin/omdb/db.rs b/dev-tools/omdb/src/bin/omdb/db.rs index d009c05f86..5fa19a1a27 100644 --- a/dev-tools/omdb/src/bin/omdb/db.rs +++ b/dev-tools/omdb/src/bin/omdb/db.rs @@ -19,7 +19,9 @@ use crate::Omdb; use anyhow::anyhow; use anyhow::bail; use anyhow::Context; +use async_bb8_diesel::AsyncConnection; use async_bb8_diesel::AsyncRunQueryDsl; +use async_bb8_diesel::AsyncSimpleConnection; use chrono::SecondsFormat; use clap::Args; use clap::Subcommand; @@ -30,6 +32,7 @@ use diesel::BoolExpressionMethods; use diesel::ExpressionMethods; use diesel::JoinOnDsl; use diesel::NullableExpressionMethods; +use diesel::TextExpressionMethods; use gateway_client::types::SpType; use nexus_db_model::Dataset; use nexus_db_model::Disk; @@ -49,15 +52,19 @@ use nexus_db_model::Snapshot; use nexus_db_model::SnapshotState; use nexus_db_model::SwCaboose; use nexus_db_model::Vmm; +use nexus_db_model::Volume; use nexus_db_model::Zpool; use nexus_db_queries::context::OpContext; use nexus_db_queries::db; +use nexus_db_queries::db::datastore::read_only_resources_associated_with_volume; +use nexus_db_queries::db::datastore::CrucibleTargets; use nexus_db_queries::db::datastore::DataStoreConnection; use nexus_db_queries::db::datastore::InstanceAndActiveVmm; use nexus_db_queries::db::identity::Asset; use 
nexus_db_queries::db::lookup::LookupPath; use nexus_db_queries::db::model::ServiceKind; use nexus_db_queries::db::DataStore; +use nexus_test_utils::db::ALLOW_FULL_TABLE_SCAN_SQL; use nexus_types::identity::Resource; use nexus_types::internal_api::params::DnsRecord; use nexus_types::internal_api::params::Srv; @@ -66,6 +73,7 @@ use nexus_types::inventory::Collection; use omicron_common::api::external::DataPageParams; use omicron_common::api::external::Generation; use omicron_common::postgres_config::PostgresConfigWithUrl; +use sled_agent_client::types::VolumeConstructionRequest; use std::cmp::Ordering; use std::collections::BTreeMap; use std::collections::BTreeSet; @@ -125,7 +133,8 @@ pub struct DbArgs { /// limit to apply to queries that fetch rows #[clap( long = "fetch-limit", - default_value_t = NonZeroU32::new(500).unwrap() + default_value_t = NonZeroU32::new(500).unwrap(), + env("OMDB_FETCH_LIMIT"), )] fetch_limit: NonZeroU32, @@ -152,6 +161,8 @@ enum DbCommands { Network(NetworkArgs), /// Print information about snapshots Snapshots(SnapshotArgs), + /// Validate the contents of the database + Validate(ValidateArgs), } #[derive(Debug, Args)] @@ -308,6 +319,23 @@ struct SnapshotInfoArgs { uuid: Uuid, } +#[derive(Debug, Args)] +struct ValidateArgs { + #[command(subcommand)] + command: ValidateCommands, +} + +#[derive(Debug, Subcommand)] +enum ValidateCommands { + /// Validate each `volume_references` column in the region snapshots table + ValidateVolumeReferences, + + /// Find either region snapshots Nexus knows about that the corresponding + /// Crucible agent says were deleted, or region snapshots that Nexus doesn't + /// know about. + ValidateRegionSnapshots, +} + impl DbArgs { /// Run a `omdb db` subcommand. pub(crate) async fn run_cmd( @@ -429,6 +457,18 @@ impl DbArgs { DbCommands::Snapshots(SnapshotArgs { command: SnapshotCommands::List, }) => cmd_db_snapshot_list(&datastore, self.fetch_limit).await, + DbCommands::Validate(ValidateArgs { + command: ValidateCommands::ValidateVolumeReferences, + }) => { + cmd_db_validate_volume_references(&datastore, self.fetch_limit) + .await + } + DbCommands::Validate(ValidateArgs { + command: ValidateCommands::ValidateRegionSnapshots, + }) => { + cmd_db_validate_region_snapshots(&datastore, self.fetch_limit) + .await + } } } } @@ -1705,6 +1745,427 @@ async fn cmd_db_eips( Ok(()) } +/// Validate the `volume_references` column of the region snapshots table +async fn cmd_db_validate_volume_references( + datastore: &DataStore, + limit: NonZeroU32, +) -> Result<(), anyhow::Error> { + // First, get all region snapshot records + let region_snapshots: Vec = { + let region_snapshots: Vec = datastore + .pool_connection_for_tests() + .await? 
+ .transaction_async(|conn| async move { + // Selecting all region snapshots requires a full table scan + conn.batch_execute_async(ALLOW_FULL_TABLE_SCAN_SQL).await?; + + use db::schema::region_snapshot::dsl; + dsl::region_snapshot + .select(RegionSnapshot::as_select()) + .get_results_async(&conn) + .await + }) + .await?; + + check_limit(®ion_snapshots, limit, || { + String::from("listing region snapshots") + }); + + region_snapshots + }; + + #[derive(Tabled)] + struct Row { + dataset_id: Uuid, + region_id: Uuid, + snapshot_id: Uuid, + error: String, + } + + let mut rows = Vec::new(); + + // Then, for each, make sure that the `volume_references` matches what is in + // the volume table + for region_snapshot in region_snapshots { + let matching_volumes: Vec = { + let snapshot_addr = region_snapshot.snapshot_addr.clone(); + + let matching_volumes = datastore + .pool_connection_for_tests() + .await? + .transaction_async(|conn| async move { + // Selecting all volumes based on the data column requires a + // full table scan + conn.batch_execute_async(ALLOW_FULL_TABLE_SCAN_SQL).await?; + + let pattern = format!("%{}%", &snapshot_addr); + + use db::schema::volume::dsl; + + // Find all volumes that have not been deleted that contain + // this snapshot_addr. If a Volume has been soft deleted, + // then the region snapshot record should have had its + // volume references column updated accordingly. + dsl::volume + .filter(dsl::time_deleted.is_null()) + .filter(dsl::data.like(pattern)) + .select(Volume::as_select()) + .get_results_async(&conn) + .await + }) + .await?; + + check_limit(&matching_volumes, limit, || { + String::from("finding matching volumes") + }); + + matching_volumes + }; + + // The Crucible Agent will reuse ports for regions and running snapshots + // when they're deleted. Check that the matching volume construction requests + // reference this snapshot addr as a read-only target. + let matching_volumes = matching_volumes + .into_iter() + .filter(|volume| { + let vcr: VolumeConstructionRequest = + serde_json::from_str(&volume.data()).unwrap(); + + let mut targets = CrucibleTargets::default(); + read_only_resources_associated_with_volume(&vcr, &mut targets); + + targets + .read_only_targets + .contains(®ion_snapshot.snapshot_addr) + }) + .count(); + + if matching_volumes != region_snapshot.volume_references as usize { + rows.push(Row { + dataset_id: region_snapshot.dataset_id, + region_id: region_snapshot.region_id, + snapshot_id: region_snapshot.snapshot_id, + error: format!( + "record has {} volume references when it should be {}!", + region_snapshot.volume_references, matching_volumes, + ), + }); + } else { + // The volume references are correct, but additionally check to see + // deleting is true when matching_volumes is 0. Be careful: in the + // snapshot create saga, the region snapshot record is created + // before the snapshot's volume is inserted into the DB. There's a + // time between these operations that this function would flag that + // this region snapshot should have `deleting` set to true. 
+ + if matching_volumes == 0 && !region_snapshot.deleting { + rows.push(Row { + dataset_id: region_snapshot.dataset_id, + region_id: region_snapshot.region_id, + snapshot_id: region_snapshot.snapshot_id, + error: String::from( + "record has 0 volume references but deleting is false!", + ), + }); + } + } + } + + let table = tabled::Table::new(rows) + .with(tabled::settings::Style::empty()) + .to_string(); + + println!("{}", table); + + Ok(()) +} + +async fn cmd_db_validate_region_snapshots( + datastore: &DataStore, + limit: NonZeroU32, +) -> Result<(), anyhow::Error> { + let mut regions_to_snapshots_map: BTreeMap> = + BTreeMap::default(); + + // First, get all region snapshot records (with their corresponding dataset) + let datasets_and_region_snapshots: Vec<(Dataset, RegionSnapshot)> = { + let datasets_region_snapshots: Vec<(Dataset, RegionSnapshot)> = + datastore + .pool_connection_for_tests() + .await? + .transaction_async(|conn| async move { + // Selecting all datasets and region snapshots requires a full table scan + conn.batch_execute_async(ALLOW_FULL_TABLE_SCAN_SQL).await?; + + use db::schema::dataset::dsl as dataset_dsl; + use db::schema::region_snapshot::dsl; + + dsl::region_snapshot + .inner_join( + dataset_dsl::dataset + .on(dsl::dataset_id.eq(dataset_dsl::id)), + ) + .select(( + Dataset::as_select(), + RegionSnapshot::as_select(), + )) + .get_results_async(&conn) + .await + }) + .await?; + + check_limit(&datasets_region_snapshots, limit, || { + String::from("listing datasets and region snapshots") + }); + + datasets_region_snapshots + }; + + #[derive(Tabled)] + struct Row { + dataset_id: Uuid, + region_id: Uuid, + snapshot_id: Uuid, + dataset_addr: std::net::SocketAddrV6, + error: String, + } + + let mut rows = Vec::new(); + + // Then, for each one, reconcile with the corresponding Crucible Agent: do + // the region_snapshot records match reality? + for (dataset, region_snapshot) in datasets_and_region_snapshots { + regions_to_snapshots_map + .entry(region_snapshot.region_id) + .or_default() + .insert(region_snapshot.snapshot_id); + + use crucible_agent_client::types::RegionId; + use crucible_agent_client::types::State; + use crucible_agent_client::Client as CrucibleAgentClient; + + let url = format!("http://{}", dataset.address()); + let client = CrucibleAgentClient::new(&url); + + let actual_region_snapshots = client + .region_get_snapshots(&RegionId( + region_snapshot.region_id.to_string(), + )) + .await?; + + let snapshot_id = region_snapshot.snapshot_id.to_string(); + + if actual_region_snapshots + .snapshots + .iter() + .any(|x| x.name == snapshot_id) + { + // A snapshot currently exists, matching the database entry + } else { + // In this branch, there's a database entry for a snapshot that was + // deleted. Due to how the snapshot create saga is currently + // written, a database entry would not have been created unless a + // snapshot was successfully made: unless that saga changes, we can + // be reasonably sure that this snapshot existed at some point. + + match actual_region_snapshots.running_snapshots.get(&snapshot_id) { + Some(running_snapshot) => { + match running_snapshot.state { + State::Destroyed | State::Failed => { + // In this branch, we can be sure a snapshot previously + // existed and was deleted: a running snapshot was made + // from it, then deleted, and the snapshot does not + // currently exist in the list of snapshots for this + // region. This record should be deleted. 
+ + // Before recommending anything, validate the higher + // level Snapshot object too: it should have been + // destroyed. + + let snapshot: Snapshot = { + use db::schema::snapshot::dsl; + + dsl::snapshot + .filter( + dsl::id.eq(region_snapshot.snapshot_id), + ) + .select(Snapshot::as_select()) + .first_async( + &*datastore + .pool_connection_for_tests() + .await?, + ) + .await? + }; + + if snapshot.time_deleted().is_some() { + // This is ok - Nexus currently soft-deletes its + // resource records. + rows.push(Row { + dataset_id: region_snapshot.dataset_id, + region_id: region_snapshot.region_id, + snapshot_id: region_snapshot.snapshot_id, + dataset_addr: dataset.address(), + error: String::from( + "region snapshot was deleted, please remove its record", + ), + }); + } else { + // If the higher level Snapshot was _not_ + // deleted, this is a Nexus bug: something told + // the Agent to delete the snapshot when the + // higher level Snapshot was not deleted! + + rows.push(Row { + dataset_id: region_snapshot.dataset_id, + region_id: region_snapshot.region_id, + snapshot_id: region_snapshot.snapshot_id, + dataset_addr: dataset.address(), + error: String::from( + "NEXUS BUG: region snapshot was deleted, but the higher level snapshot was not!", + ), + }); + } + } + + State::Requested + | State::Created + | State::Tombstoned => { + // The agent is in a bad state: we did not find the + // snapshot in the list of snapshots for this + // region, but either: + // + // - there's a requested or existing running + // snapshot for it, or + // + // - there's a running snapshot that should have + // been completely deleted before the snapshot + // itself was deleted. + // + // This should have never been allowed to happen by + // the Agent, so it's a bug. + + rows.push(Row { + dataset_id: region_snapshot.dataset_id, + region_id: region_snapshot.region_id, + snapshot_id: region_snapshot.snapshot_id, + dataset_addr: dataset.address(), + error: format!( + "AGENT BUG: region snapshot was deleted but has a running snapshot in state {:?}!", + running_snapshot.state, + ), + }); + } + } + } + + None => { + // A running snapshot never existed for this snapshot + } + } + } + } + + // Second, get all regions + let datasets_and_regions: Vec<(Dataset, Region)> = { + let datasets_and_regions: Vec<(Dataset, Region)> = datastore + .pool_connection_for_tests() + .await? + .transaction_async(|conn| async move { + // Selecting all datasets and regions requires a full table scan + conn.batch_execute_async(ALLOW_FULL_TABLE_SCAN_SQL).await?; + + use db::schema::dataset::dsl as dataset_dsl; + use db::schema::region::dsl; + + dsl::region + .inner_join( + dataset_dsl::dataset + .on(dsl::dataset_id.eq(dataset_dsl::id)), + ) + .select((Dataset::as_select(), Region::as_select())) + .get_results_async(&conn) + .await + }) + .await?; + + check_limit(&datasets_and_regions, limit, || { + String::from("listing datasets and regions") + }); + + datasets_and_regions + }; + + // Reconcile with the Crucible agents: are there snapshots that Nexus does + // not know about? 
+ for (dataset, region) in datasets_and_regions { + use crucible_agent_client::types::RegionId; + use crucible_agent_client::types::State; + use crucible_agent_client::Client as CrucibleAgentClient; + + let url = format!("http://{}", dataset.address()); + let client = CrucibleAgentClient::new(&url); + + let actual_region_snapshots = client + .region_get_snapshots(&RegionId(region.id().to_string())) + .await?; + + let default = HashSet::default(); + let nexus_region_snapshots: &HashSet = + regions_to_snapshots_map.get(®ion.id()).unwrap_or(&default); + + for actual_region_snapshot in &actual_region_snapshots.snapshots { + let snapshot_id: Uuid = actual_region_snapshot.name.parse()?; + if !nexus_region_snapshots.contains(&snapshot_id) { + rows.push(Row { + dataset_id: dataset.id(), + region_id: region.id(), + snapshot_id, + dataset_addr: dataset.address(), + error: String::from( + "Nexus does not know about this snapshot!", + ), + }); + } + } + + for (_, actual_region_running_snapshot) in + &actual_region_snapshots.running_snapshots + { + let snapshot_id: Uuid = + actual_region_running_snapshot.name.parse()?; + + match actual_region_running_snapshot.state { + State::Destroyed | State::Failed | State::Tombstoned => { + // don't check, Nexus would consider this gone + } + + State::Requested | State::Created => { + if !nexus_region_snapshots.contains(&snapshot_id) { + rows.push(Row { + dataset_id: dataset.id(), + region_id: region.id(), + snapshot_id, + dataset_addr: dataset.address(), + error: String::from( + "Nexus does not know about this running snapshot!" + ), + }); + } + } + } + } + } + + let table = tabled::Table::new(rows) + .with(tabled::settings::Style::empty()) + .to_string(); + + println!("{}", table); + + Ok(()) +} + fn print_name( prefix: &str, name: &str, diff --git a/dev-tools/omdb/tests/usage_errors.out b/dev-tools/omdb/tests/usage_errors.out index e859c325a5..eaabf970a6 100644 --- a/dev-tools/omdb/tests/usage_errors.out +++ b/dev-tools/omdb/tests/usage_errors.out @@ -98,11 +98,13 @@ Commands: instances Print information about customer instances network Print information about the network snapshots Print information about snapshots + validate Validate the contents of the database help Print this message or the help of the given subcommand(s) Options: --db-url URL of the database SQL interface [env: OMDB_DB_URL=] - --fetch-limit limit to apply to queries that fetch rows [default: 500] + --fetch-limit limit to apply to queries that fetch rows [env: + OMDB_FETCH_LIMIT=] [default: 500] -h, --help Print help ============================================= EXECUTING COMMAND: omdb ["db", "--help"] @@ -122,11 +124,13 @@ Commands: instances Print information about customer instances network Print information about the network snapshots Print information about snapshots + validate Validate the contents of the database help Print this message or the help of the given subcommand(s) Options: --db-url URL of the database SQL interface [env: OMDB_DB_URL=] - --fetch-limit limit to apply to queries that fetch rows [default: 500] + --fetch-limit limit to apply to queries that fetch rows [env: + OMDB_FETCH_LIMIT=] [default: 500] -h, --help Print help --------------------------------------------- stderr: diff --git a/nexus/db-queries/src/db/datastore/mod.rs b/nexus/db-queries/src/db/datastore/mod.rs index 7385970fb1..8be3386183 100644 --- a/nexus/db-queries/src/db/datastore/mod.rs +++ b/nexus/db-queries/src/db/datastore/mod.rs @@ -103,6 +103,7 @@ pub use rack::RackInit; pub use silo::Discoverability; 
pub use switch_port::SwitchPortSettingsCombinedResult; pub use virtual_provisioning_collection::StorageType; +pub use volume::read_only_resources_associated_with_volume; pub use volume::CrucibleResources; pub use volume::CrucibleTargets; diff --git a/nexus/db-queries/src/db/datastore/volume.rs b/nexus/db-queries/src/db/datastore/volume.rs index 5d753f0742..1e64d784f7 100644 --- a/nexus/db-queries/src/db/datastore/volume.rs +++ b/nexus/db-queries/src/db/datastore/volume.rs @@ -1024,7 +1024,7 @@ impl DataStore { /// Return the targets from a VolumeConstructionRequest. /// /// The targets of a volume construction request map to resources. -fn read_only_resources_associated_with_volume( +pub fn read_only_resources_associated_with_volume( vcr: &VolumeConstructionRequest, crucible_targets: &mut CrucibleTargets, ) { From e37f3d003fd11571d296db0fe6217d768a5a1341 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Mon, 20 Nov 2023 11:48:12 -0800 Subject: [PATCH 23/56] Update Rust crate reedline to 0.26.0 (#4526) Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- wicket-dbg/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 487227e187..113bc6f003 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6436,9 +6436,9 @@ dependencies = [ [[package]] name = "reedline" -version = "0.25.0" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7dc1d1d369c194cf79acc204397aca1fecc4248df3e1c1eabb15e5ef2d16991" +checksum = "d0a093a20a6c473247c2e9971aaf4cedf9041bcd3f444dc7fad667d3b6b7a5fd" dependencies = [ "chrono", "crossterm", diff --git a/wicket-dbg/Cargo.toml b/wicket-dbg/Cargo.toml index a00bcb9c1b..f9047297af 100644 --- a/wicket-dbg/Cargo.toml +++ b/wicket-dbg/Cargo.toml @@ -20,7 +20,7 @@ tokio = { workspace = true, features = ["full"] } wicket.workspace = true # used only by wicket-dbg binary -reedline = "0.25.0" +reedline = "0.26.0" omicron-workspace-hack.workspace = true [[bin]] From 74d3bf98a5586a9fd855a7b464ea4735df4f78d8 Mon Sep 17 00:00:00 2001 From: John Gallagher Date: Mon, 20 Nov 2023 13:13:30 -0800 Subject: [PATCH 24/56] Restore schema 10.0.0 (#4529) This was accidentally merged in with 11.0.0 in #3804 Fixes #4530 (but see #4531 for followup work) --- schema/crdb/{11.0.0 => 10.0.0}/README.md | 0 schema/crdb/{11.0.0 => 10.0.0}/up01.sql | 0 schema/crdb/{11.0.0 => 10.0.0}/up02.sql | 0 schema/crdb/{11.0.0 => 10.0.0}/up03.sql | 0 schema/crdb/{11.0.0 => 10.0.0}/up04.sql | 0 schema/crdb/{11.0.0 => 10.0.0}/up05.sql | 0 schema/crdb/{11.0.0 => 10.0.0}/up06.sql | 0 schema/crdb/{11.0.0 => 10.0.0}/up07.sql | 0 schema/crdb/{11.0.0 => 10.0.0}/up08.sql | 0 schema/crdb/{11.0.0 => 10.0.0}/up09.sql | 0 schema/crdb/{11.0.0 => 10.0.0}/up10.sql | 0 schema/crdb/{11.0.0 => 10.0.0}/up11.sql | 0 schema/crdb/{11.0.0 => 10.0.0}/up12.sql | 0 13 files changed, 0 insertions(+), 0 deletions(-) rename schema/crdb/{11.0.0 => 10.0.0}/README.md (100%) rename schema/crdb/{11.0.0 => 10.0.0}/up01.sql (100%) rename schema/crdb/{11.0.0 => 10.0.0}/up02.sql (100%) rename schema/crdb/{11.0.0 => 10.0.0}/up03.sql (100%) rename schema/crdb/{11.0.0 => 10.0.0}/up04.sql (100%) rename schema/crdb/{11.0.0 => 10.0.0}/up05.sql (100%) rename schema/crdb/{11.0.0 => 10.0.0}/up06.sql (100%) rename schema/crdb/{11.0.0 => 10.0.0}/up07.sql (100%) rename schema/crdb/{11.0.0 => 10.0.0}/up08.sql (100%) rename schema/crdb/{11.0.0 => 
10.0.0}/up09.sql (100%) rename schema/crdb/{11.0.0 => 10.0.0}/up10.sql (100%) rename schema/crdb/{11.0.0 => 10.0.0}/up11.sql (100%) rename schema/crdb/{11.0.0 => 10.0.0}/up12.sql (100%) diff --git a/schema/crdb/11.0.0/README.md b/schema/crdb/10.0.0/README.md similarity index 100% rename from schema/crdb/11.0.0/README.md rename to schema/crdb/10.0.0/README.md diff --git a/schema/crdb/11.0.0/up01.sql b/schema/crdb/10.0.0/up01.sql similarity index 100% rename from schema/crdb/11.0.0/up01.sql rename to schema/crdb/10.0.0/up01.sql diff --git a/schema/crdb/11.0.0/up02.sql b/schema/crdb/10.0.0/up02.sql similarity index 100% rename from schema/crdb/11.0.0/up02.sql rename to schema/crdb/10.0.0/up02.sql diff --git a/schema/crdb/11.0.0/up03.sql b/schema/crdb/10.0.0/up03.sql similarity index 100% rename from schema/crdb/11.0.0/up03.sql rename to schema/crdb/10.0.0/up03.sql diff --git a/schema/crdb/11.0.0/up04.sql b/schema/crdb/10.0.0/up04.sql similarity index 100% rename from schema/crdb/11.0.0/up04.sql rename to schema/crdb/10.0.0/up04.sql diff --git a/schema/crdb/11.0.0/up05.sql b/schema/crdb/10.0.0/up05.sql similarity index 100% rename from schema/crdb/11.0.0/up05.sql rename to schema/crdb/10.0.0/up05.sql diff --git a/schema/crdb/11.0.0/up06.sql b/schema/crdb/10.0.0/up06.sql similarity index 100% rename from schema/crdb/11.0.0/up06.sql rename to schema/crdb/10.0.0/up06.sql diff --git a/schema/crdb/11.0.0/up07.sql b/schema/crdb/10.0.0/up07.sql similarity index 100% rename from schema/crdb/11.0.0/up07.sql rename to schema/crdb/10.0.0/up07.sql diff --git a/schema/crdb/11.0.0/up08.sql b/schema/crdb/10.0.0/up08.sql similarity index 100% rename from schema/crdb/11.0.0/up08.sql rename to schema/crdb/10.0.0/up08.sql diff --git a/schema/crdb/11.0.0/up09.sql b/schema/crdb/10.0.0/up09.sql similarity index 100% rename from schema/crdb/11.0.0/up09.sql rename to schema/crdb/10.0.0/up09.sql diff --git a/schema/crdb/11.0.0/up10.sql b/schema/crdb/10.0.0/up10.sql similarity index 100% rename from schema/crdb/11.0.0/up10.sql rename to schema/crdb/10.0.0/up10.sql diff --git a/schema/crdb/11.0.0/up11.sql b/schema/crdb/10.0.0/up11.sql similarity index 100% rename from schema/crdb/11.0.0/up11.sql rename to schema/crdb/10.0.0/up11.sql diff --git a/schema/crdb/11.0.0/up12.sql b/schema/crdb/10.0.0/up12.sql similarity index 100% rename from schema/crdb/11.0.0/up12.sql rename to schema/crdb/10.0.0/up12.sql From cd2d23b9e852885ba55c9a4790f116700483b326 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Mon, 20 Nov 2023 22:20:33 +0000 Subject: [PATCH 25/56] Update Rust crate tough to 0.15 (#4477) Co-authored-by: Rain --- Cargo.lock | 60 +++-- Cargo.toml | 4 +- nexus/Cargo.toml | 1 + nexus/src/app/update/mod.rs | 16 +- nexus/src/updates.rs | 24 +- nexus/tests/integration_tests/updates.rs | 38 ++- tufaceous-lib/Cargo.toml | 4 + tufaceous-lib/src/artifact.rs | 14 +- tufaceous-lib/src/assemble/build.rs | 15 +- tufaceous-lib/src/key.rs | 13 +- tufaceous-lib/src/repository.rs | 67 +++-- tufaceous-lib/src/root.rs | 5 +- tufaceous/Cargo.toml | 2 + tufaceous/src/dispatch.rs | 20 +- tufaceous/src/main.rs | 5 +- .../tests/integration-tests/command_tests.rs | 8 +- wicketd/Cargo.toml | 1 + wicketd/src/artifacts/artifacts_with_plan.rs | 35 ++- wicketd/src/artifacts/error.rs | 8 + wicketd/src/artifacts/extracted_artifacts.rs | 41 ++- wicketd/src/artifacts/store.rs | 9 +- wicketd/src/artifacts/update_plan.rs | 239 +++++++++++++----- wicketd/tests/integration_tests/updates.rs | 18 
+- workspace-hack/Cargo.toml | 4 + 24 files changed, 434 insertions(+), 217 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 113bc6f003..c8cfe908c1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -259,6 +259,17 @@ dependencies = [ "tokio", ] +[[package]] +name = "async-recursion" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5fd55a5ba1179988837d24ab4c7cc8ed6efdeff578ede0416b4225a5fca35bd0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.32", +] + [[package]] name = "async-stream" version = "0.3.5" @@ -4577,6 +4588,7 @@ dependencies = [ "async-bb8-diesel", "async-trait", "base64 0.21.5", + "buf-list", "camino", "cancel-safe-futures", "chrono", @@ -4974,6 +4986,7 @@ dependencies = [ "signature 2.1.0", "similar", "slog", + "snafu", "spin 0.9.8", "string_cache", "subtle", @@ -4984,6 +4997,7 @@ dependencies = [ "tokio", "tokio-postgres", "tokio-stream", + "tokio-util", "toml 0.7.8", "toml_datetime", "toml_edit 0.19.15", @@ -5548,24 +5562,6 @@ version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" -[[package]] -name = "path-absolutize" -version = "3.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43eb3595c63a214e1b37b44f44b0a84900ef7ae0b4c5efce59e123d246d7a0de" -dependencies = [ - "path-dedot", -] - -[[package]] -name = "path-dedot" -version = "3.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d55e486337acb9973cdea3ec5638c1b3bcb22e573b2b7b41969e0c744d5a15e" -dependencies = [ - "once_cell", -] - [[package]] name = "path-slash" version = "0.1.5" @@ -7753,6 +7749,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e4de37ad025c587a29e8f3f5605c00f70b98715ef90b9061a815b9e59e9042d6" dependencies = [ "doc-comment", + "futures-core", + "pin-project", "snafu-derive", ] @@ -8649,18 +8647,22 @@ checksum = "ea68304e134ecd095ac6c3574494fc62b909f416c4fca77e440530221e549d3d" [[package]] name = "tough" -version = "0.14.0" +version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eda3efa9005cf9c1966984c3b9a44c3f37b7ed2c95ba338d6ad51bba70e989a0" +checksum = "d16dc5f42fc7ce7cb51eebc7a6ef91f4d69a6d41bb13f34a09674ec47e454d9b" dependencies = [ + "async-recursion", + "async-trait", + "bytes", "chrono", "dyn-clone", + "futures", + "futures-core", "globset", "hex", "log", "olpc-cjson", - "path-absolutize", - "pem 1.1.1", + "pem 3.0.2", "percent-encoding", "reqwest", "ring 0.16.20", @@ -8669,6 +8671,9 @@ dependencies = [ "serde_plain", "snafu", "tempfile", + "tokio", + "tokio-util", + "typed-path", "untrusted 0.7.1", "url", "walkdir", @@ -8843,6 +8848,7 @@ dependencies = [ "slog-envlogger", "slog-term", "tempfile", + "tokio", "tufaceous-lib", ] @@ -8851,6 +8857,7 @@ name = "tufaceous-lib" version = "0.1.0" dependencies = [ "anyhow", + "async-trait", "buf-list", "bytes", "bytesize", @@ -8860,6 +8867,7 @@ dependencies = [ "debug-ignore", "flate2", "fs-err", + "futures", "hex", "hubtools", "itertools 0.12.0", @@ -8874,6 +8882,7 @@ dependencies = [ "sha2", "slog", "tar", + "tokio", "toml 0.8.8", "tough", "url", @@ -8928,6 +8937,12 @@ dependencies = [ "utf-8", ] +[[package]] +name = "typed-path" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbb9d13b8242894ff21f9990082b90a6410a43dcc6029ac4227a1467853ba781" + [[package]] name = "typenum" version = "1.16.0" @@ -9511,6 
+9526,7 @@ dependencies = [ "async-trait", "base64 0.21.5", "bootstrap-agent-client", + "buf-list", "bytes", "camino", "camino-tempfile", diff --git a/Cargo.toml b/Cargo.toml index b55c4fca6a..b18b20aec7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -367,11 +367,11 @@ tokio = "1.33.0" tokio-postgres = { version = "0.7", features = [ "with-chrono-0_4", "with-uuid-1" ] } tokio-stream = "0.1.14" tokio-tungstenite = "0.18" -tokio-util = "0.7.10" +tokio-util = { version = "0.7.10", features = ["io", "io-util"] } toml = "0.8.8" toml_edit = "0.21.0" topological-sort = "0.2.2" -tough = { version = "0.14", features = [ "http" ] } +tough = { version = "0.15", features = [ "http" ] } trust-dns-client = "0.22" trust-dns-proto = "0.22" trust-dns-resolver = "0.22" diff --git a/nexus/Cargo.toml b/nexus/Cargo.toml index 4fc13a31d8..704a7ab7bd 100644 --- a/nexus/Cargo.toml +++ b/nexus/Cargo.toml @@ -12,6 +12,7 @@ anyhow.workspace = true assert_matches.workspace = true async-trait.workspace = true base64.workspace = true +buf-list.workspace = true cancel-safe-futures.workspace = true camino.workspace = true clap.workspace = true diff --git a/nexus/src/app/update/mod.rs b/nexus/src/app/update/mod.rs index 4196cd8a71..165a6ae23b 100644 --- a/nexus/src/app/update/mod.rs +++ b/nexus/src/app/update/mod.rs @@ -69,14 +69,14 @@ impl super::Nexus { ), })?; - let artifacts = tokio::task::spawn_blocking(move || { - crate::updates::read_artifacts(&trusted_root, base_url) - }) - .await - .unwrap() - .map_err(|e| Error::InternalError { - internal_message: format!("error trying to refresh updates: {}", e), - })?; + let artifacts = crate::updates::read_artifacts(&trusted_root, base_url) + .await + .map_err(|e| Error::InternalError { + internal_message: format!( + "error trying to refresh updates: {}", + e + ), + })?; // FIXME: if we hit an error in any of these database calls, the // available artifact table will be out of sync with the current diff --git a/nexus/src/updates.rs b/nexus/src/updates.rs index c2265096dc..2f57868acc 100644 --- a/nexus/src/updates.rs +++ b/nexus/src/updates.rs @@ -2,38 +2,38 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. +use buf_list::BufList; +use futures::TryStreamExt; use nexus_db_queries::db; use omicron_common::update::ArtifactsDocument; use std::convert::TryInto; -// TODO(iliana): make async/.await. awslabs/tough#213 -pub(crate) fn read_artifacts( +pub(crate) async fn read_artifacts( trusted_root: &[u8], mut base_url: String, ) -> Result< Vec, Box, > { - use std::io::Read; - if !base_url.ends_with('/') { base_url.push('/'); } let repository = tough::RepositoryLoader::new( - trusted_root, + &trusted_root, format!("{}metadata/", base_url).parse()?, format!("{}targets/", base_url).parse()?, ) - .load()?; + .load() + .await?; - let mut artifact_document = Vec::new(); - match repository.read_target(&"artifacts.json".parse()?)? { - Some(mut target) => target.read_to_end(&mut artifact_document)?, - None => return Err("artifacts.json missing".into()), - }; + let artifact_document = + match repository.read_target(&"artifacts.json".parse()?).await? 
{ + Some(target) => target.try_collect::().await?, + None => return Err("artifacts.json missing".into()), + }; let artifacts: ArtifactsDocument = - serde_json::from_slice(&artifact_document)?; + serde_json::from_reader(buf_list::Cursor::new(&artifact_document))?; let valid_until = repository .root() diff --git a/nexus/tests/integration_tests/updates.rs b/nexus/tests/integration_tests/updates.rs index 918d5ac100..891166ed19 100644 --- a/nexus/tests/integration_tests/updates.rs +++ b/nexus/tests/integration_tests/updates.rs @@ -7,6 +7,7 @@ // - test that an unknown artifact returns 404, not 500 // - tests around target names and artifact names that contain dangerous paths like `../` +use async_trait::async_trait; use chrono::{Duration, Utc}; use dropshot::test_util::LogContext; use dropshot::{ @@ -45,17 +46,22 @@ const UPDATE_COMPONENT: &'static str = "omicron-test-component"; #[tokio::test] async fn test_update_end_to_end() { let mut config = load_test_config(); + let logctx = LogContext::new("test_update_end_to_end", &config.pkg.log); // build the TUF repo let rng = SystemRandom::new(); - let tuf_repo = new_tuf_repo(&rng); + let tuf_repo = new_tuf_repo(&rng).await; + slog::info!( + logctx.log, + "TUF repo created at {}", + tuf_repo.path().display() + ); // serve it over HTTP let dropshot_config = Default::default(); let mut api = ApiDescription::new(); api.register(static_content).unwrap(); let context = FileServerContext { base: tuf_repo.path().to_owned() }; - let logctx = LogContext::new("test_update_end_to_end", &config.pkg.log); let server = HttpServerStarter::new(&dropshot_config, api, context, &logctx.log) .unwrap() @@ -122,9 +128,14 @@ async fn static_content( for component in path.into_inner().path { fs_path.push(component); } - let body = tokio::fs::read(fs_path) - .await - .map_err(|e| HttpError::for_bad_request(None, e.to_string()))?; + let body = tokio::fs::read(fs_path).await.map_err(|e| { + // tough 0.15+ depend on ENOENT being translated into 404. + if e.kind() == std::io::ErrorKind::NotFound { + HttpError::for_not_found(None, e.to_string()) + } else { + HttpError::for_bad_request(None, e.to_string()) + } + })?; Ok(Response::builder().status(StatusCode::OK).body(body.into())?) } @@ -132,7 +143,7 @@ async fn static_content( const TARGET_CONTENTS: &[u8] = b"hello world".as_slice(); -fn new_tuf_repo(rng: &dyn SecureRandom) -> TempDir { +async fn new_tuf_repo(rng: &(dyn SecureRandom + Sync)) -> TempDir { let version = NonZeroU64::new(Utc::now().timestamp().try_into().unwrap()).unwrap(); let expires = Utc::now() + Duration::minutes(5); @@ -180,13 +191,14 @@ fn new_tuf_repo(rng: &dyn SecureRandom) -> TempDir { &signing_keys, rng, ) + .await .unwrap(); // TODO(iliana): there's no way to create a `RepositoryEditor` without having the root.json on // disk. this is really unergonomic. 
write and upstream a fix let mut root_tmp = NamedTempFile::new().unwrap(); root_tmp.as_file_mut().write_all(signed_root.buffer()).unwrap(); - let mut editor = RepositoryEditor::new(&root_tmp).unwrap(); + let mut editor = RepositoryEditor::new(&root_tmp).await.unwrap(); root_tmp.close().unwrap(); editor @@ -200,19 +212,20 @@ fn new_tuf_repo(rng: &dyn SecureRandom) -> TempDir { .timestamp_expires(expires); let (targets_dir, target_names) = generate_targets(); for target in target_names { - editor.add_target_path(targets_dir.path().join(target)).unwrap(); + editor.add_target_path(targets_dir.path().join(target)).await.unwrap(); } - let signed_repo = editor.sign(&signing_keys).unwrap(); + let signed_repo = editor.sign(&signing_keys).await.unwrap(); let repo = TempDir::new().unwrap(); - signed_repo.write(repo.path().join("metadata")).unwrap(); + signed_repo.write(repo.path().join("metadata")).await.unwrap(); signed_repo .copy_targets( targets_dir, repo.path().join("targets"), PathExists::Fail, ) + .await .unwrap(); repo @@ -257,8 +270,9 @@ impl Debug for KeyKeySource { } } +#[async_trait] impl KeySource for KeyKeySource { - fn as_sign( + async fn as_sign( &self, ) -> Result, Box> { @@ -267,7 +281,7 @@ impl KeySource for KeyKeySource { Ok(Box::new(Ed25519KeyPair::from_pkcs8(self.0.as_ref()).unwrap())) } - fn write( + async fn write( &self, _value: &str, _key_id_hex: &str, diff --git a/tufaceous-lib/Cargo.toml b/tufaceous-lib/Cargo.toml index bcfcee6b9c..0df3a33f98 100644 --- a/tufaceous-lib/Cargo.toml +++ b/tufaceous-lib/Cargo.toml @@ -7,6 +7,7 @@ publish = false [dependencies] anyhow = { workspace = true, features = ["backtrace"] } +async-trait.workspace = true buf-list.workspace = true bytes.workspace = true bytesize = { workspace = true, features = ["serde"] } @@ -16,6 +17,7 @@ chrono.workspace = true debug-ignore.workspace = true flate2.workspace = true fs-err.workspace = true +futures.workspace = true hex.workspace = true hubtools.workspace = true itertools.workspace = true @@ -28,6 +30,7 @@ serde_path_to_error.workspace = true sha2.workspace = true slog.workspace = true tar.workspace = true +tokio.workspace = true toml.workspace = true tough.workspace = true url = "2.4.1" @@ -36,3 +39,4 @@ omicron-workspace-hack.workspace = true [dev-dependencies] omicron-test-utils.workspace = true +tokio = { workspace = true, features = ["test-util"] } diff --git a/tufaceous-lib/src/artifact.rs b/tufaceous-lib/src/artifact.rs index 56f3e34ecb..23cf31e8c3 100644 --- a/tufaceous-lib/src/artifact.rs +++ b/tufaceous-lib/src/artifact.rs @@ -127,7 +127,7 @@ pub struct HostPhaseImages { } impl HostPhaseImages { - pub fn extract(reader: R) -> Result { + pub fn extract(reader: R) -> Result { let mut phase_1 = Vec::new(); let mut phase_2 = Vec::new(); Self::extract_into( @@ -138,13 +138,12 @@ impl HostPhaseImages { Ok(Self { phase_1: phase_1.into(), phase_2: phase_2.into() }) } - pub fn extract_into( + pub fn extract_into( reader: R, phase_1: W, phase_2: W, ) -> Result<()> { - let uncompressed = - flate2::bufread::GzDecoder::new(BufReader::new(reader)); + let uncompressed = flate2::bufread::GzDecoder::new(reader); let mut archive = tar::Archive::new(uncompressed); let mut oxide_json_found = false; @@ -248,7 +247,7 @@ pub struct RotArchives { } impl RotArchives { - pub fn extract(reader: R) -> Result { + pub fn extract(reader: R) -> Result { let mut archive_a = Vec::new(); let mut archive_b = Vec::new(); Self::extract_into( @@ -259,13 +258,12 @@ impl RotArchives { Ok(Self { archive_a: archive_a.into(), archive_b: 
archive_b.into() }) } - pub fn extract_into( + pub fn extract_into( reader: R, archive_a: W, archive_b: W, ) -> Result<()> { - let uncompressed = - flate2::bufread::GzDecoder::new(BufReader::new(reader)); + let uncompressed = flate2::bufread::GzDecoder::new(reader); let mut archive = tar::Archive::new(uncompressed); let mut oxide_json_found = false; diff --git a/tufaceous-lib/src/assemble/build.rs b/tufaceous-lib/src/assemble/build.rs index 081e2e10d5..4cb636c9d3 100644 --- a/tufaceous-lib/src/assemble/build.rs +++ b/tufaceous-lib/src/assemble/build.rs @@ -44,7 +44,7 @@ impl OmicronRepoAssembler { self } - pub fn build(&self) -> Result<()> { + pub async fn build(&self) -> Result<()> { let (build_dir, is_temp) = match &self.build_dir { Some(dir) => (dir.clone(), false), None => { @@ -61,7 +61,7 @@ impl OmicronRepoAssembler { slog::info!(self.log, "assembling repository in `{build_dir}`"); - match self.build_impl(&build_dir) { + match self.build_impl(&build_dir).await { Ok(()) => { if is_temp { slog::debug!(self.log, "assembly successful, cleaning up"); @@ -92,15 +92,17 @@ impl OmicronRepoAssembler { Ok(()) } - fn build_impl(&self, build_dir: &Utf8Path) -> Result<()> { + async fn build_impl(&self, build_dir: &Utf8Path) -> Result<()> { let mut repository = OmicronRepo::initialize( &self.log, build_dir, self.manifest.system_version.clone(), self.keys.clone(), self.expiry, - )? - .into_editor()?; + ) + .await? + .into_editor() + .await?; // Add all the artifacts. for (kind, entries) in &self.manifest.artifacts { @@ -118,10 +120,11 @@ impl OmicronRepoAssembler { } // Write out the repository. - repository.sign_and_finish(self.keys.clone(), self.expiry)?; + repository.sign_and_finish(self.keys.clone(), self.expiry).await?; // Now reopen the repository to archive it into a zip file. 
let repo2 = OmicronRepo::load_untrusted(&self.log, build_dir) + .await .context("error reopening repository to archive")?; repo2 .archive(&self.output_path) diff --git a/tufaceous-lib/src/key.rs b/tufaceous-lib/src/key.rs index 8a5054b331..96282ee377 100644 --- a/tufaceous-lib/src/key.rs +++ b/tufaceous-lib/src/key.rs @@ -5,6 +5,7 @@ use ring::rand::SecureRandom; use ring::signature::Ed25519KeyPair; use std::fmt::Display; use std::str::FromStr; +use tough::async_trait; use tough::key_source::KeySource; use tough::sign::{Sign, SignKeyPair}; @@ -38,30 +39,32 @@ impl Key { } } +#[async_trait] impl Sign for Key { fn tuf_key(&self) -> tough::schema::key::Key { self.as_sign().tuf_key() } - fn sign( + async fn sign( &self, msg: &[u8], - rng: &dyn SecureRandom, + rng: &(dyn SecureRandom + Sync), ) -> Result, Box> { - self.as_sign().sign(msg, rng) + self.as_sign().sign(msg, rng).await } } +#[async_trait] impl KeySource for Key { - fn as_sign( + async fn as_sign( &self, ) -> Result, Box> { Ok(Box::new(self.clone())) } - fn write( + async fn write( &self, _value: &str, _key_id_hex: &str, diff --git a/tufaceous-lib/src/repository.rs b/tufaceous-lib/src/repository.rs index 11a6064602..416d5c9990 100644 --- a/tufaceous-lib/src/repository.rs +++ b/tufaceous-lib/src/repository.rs @@ -4,9 +4,11 @@ use crate::{key::Key, target::TargetWriter, AddArtifact, ArchiveBuilder}; use anyhow::{anyhow, bail, Context, Result}; +use buf_list::BufList; use camino::{Utf8Path, Utf8PathBuf}; use chrono::{DateTime, Utc}; -use fs_err::{self as fs, File}; +use fs_err::{self as fs}; +use futures::TryStreamExt; use omicron_common::{ api::external::SemverVersion, update::{Artifact, ArtifactsDocument}, @@ -28,38 +30,41 @@ pub struct OmicronRepo { impl OmicronRepo { /// Initializes a new repository at the given path, writing it to disk. - pub fn initialize( + pub async fn initialize( log: &slog::Logger, repo_path: &Utf8Path, system_version: SemverVersion, keys: Vec, expiry: DateTime, ) -> Result { - let root = crate::root::new_root(keys.clone(), expiry)?; + let root = crate::root::new_root(keys.clone(), expiry).await?; let editor = OmicronRepoEditor::initialize( repo_path.to_owned(), root, system_version, - )?; + ) + .await?; editor .sign_and_finish(keys, expiry) + .await .context("error signing new repository")?; // In theory we "trust" the key we just used to sign this repository, // but the code path is equivalent to `load_untrusted`. - Self::load_untrusted(log, repo_path) + Self::load_untrusted(log, repo_path).await } /// Loads a repository from the given path. /// /// This method enforces expirations. To load without expiration enforcement, use /// [`Self::load_untrusted_ignore_expiration`]. - pub fn load_untrusted( + pub async fn load_untrusted( log: &slog::Logger, repo_path: &Utf8Path, ) -> Result { Self::load_untrusted_impl(log, repo_path, ExpirationEnforcement::Safe) + .await } /// Loads a repository from the given path, ignoring expiration. @@ -68,30 +73,36 @@ impl OmicronRepo { /// /// 1. When you're editing an existing repository and will re-sign it afterwards. /// 2. In an environment in which time isn't available. 
- pub fn load_untrusted_ignore_expiration( + pub async fn load_untrusted_ignore_expiration( log: &slog::Logger, repo_path: &Utf8Path, ) -> Result { Self::load_untrusted_impl(log, repo_path, ExpirationEnforcement::Unsafe) + .await } - fn load_untrusted_impl( + async fn load_untrusted_impl( log: &slog::Logger, repo_path: &Utf8Path, exp: ExpirationEnforcement, ) -> Result { let log = log.new(slog::o!("component" => "OmicronRepo")); let repo_path = repo_path.canonicalize_utf8()?; + let root_json = repo_path.join("metadata").join("1.root.json"); + let root = tokio::fs::read(&root_json) + .await + .with_context(|| format!("error reading from {root_json}"))?; let repo = RepositoryLoader::new( - File::open(repo_path.join("metadata").join("1.root.json"))?, + &root, Url::from_file_path(repo_path.join("metadata")) .expect("the canonical path is not absolute?"), Url::from_file_path(repo_path.join("targets")) .expect("the canonical path is not absolute?"), ) .expiration_enforcement(exp) - .load()?; + .load() + .await?; Ok(Self { log, repo, repo_path }) } @@ -107,12 +118,17 @@ impl OmicronRepo { } /// Reads the artifacts document from the repo. - pub fn read_artifacts(&self) -> Result { + pub async fn read_artifacts(&self) -> Result { let reader = self .repo - .read_target(&"artifacts.json".try_into()?)? + .read_target(&"artifacts.json".try_into()?) + .await? .ok_or_else(|| anyhow!("artifacts.json should be present"))?; - serde_json::from_reader(reader) + let buf_list = reader + .try_collect::() + .await + .context("error reading from artifacts.json")?; + serde_json::from_reader(buf_list::Cursor::new(&buf_list)) .context("error deserializing artifacts.json") } @@ -177,8 +193,8 @@ impl OmicronRepo { /// Converts `self` into an `OmicronRepoEditor`, which can be used to perform /// modifications to the repository. - pub fn into_editor(self) -> Result { - OmicronRepoEditor::new(self) + pub async fn into_editor(self) -> Result { + OmicronRepoEditor::new(self).await } /// Prepends the target digest to the name if using consistent snapshots. Returns both the @@ -210,8 +226,8 @@ pub struct OmicronRepoEditor { } impl OmicronRepoEditor { - fn new(repo: OmicronRepo) -> Result { - let artifacts = repo.read_artifacts()?; + async fn new(repo: OmicronRepo) -> Result { + let artifacts = repo.read_artifacts().await?; let existing_target_names = repo .repo @@ -226,7 +242,8 @@ impl OmicronRepoEditor { .join("metadata") .join(format!("{}.root.json", repo.repo.root().signed.version)), repo.repo, - )?; + ) + .await?; Ok(Self { editor, @@ -236,7 +253,7 @@ impl OmicronRepoEditor { }) } - fn initialize( + async fn initialize( repo_path: Utf8PathBuf, root: SignedRole, system_version: SemverVersion, @@ -250,7 +267,7 @@ impl OmicronRepoEditor { fs::create_dir_all(&targets_dir)?; fs::write(&root_path, root.buffer())?; - let editor = RepositoryEditor::new(&root_path)?; + let editor = RepositoryEditor::new(&root_path).await?; Ok(Self { editor, @@ -297,7 +314,7 @@ impl OmicronRepoEditor { } /// Consumes self, signing the repository and writing out this repository to disk. 
- pub fn sign_and_finish( + pub async fn sign_and_finish( mut self, keys: Vec, expiry: DateTime, @@ -313,9 +330,11 @@ impl OmicronRepoEditor { let signed = self .editor .sign(&crate::key::boxed_keys(keys)) + .await .context("error signing keys")?; signed .write(self.repo_path.join("metadata")) + .await .context("error writing repository")?; Ok(()) } @@ -346,8 +365,8 @@ mod tests { use chrono::Days; use omicron_test_utils::dev::test_setup_log; - #[test] - fn reject_artifacts_with_the_same_filename() { + #[tokio::test] + async fn reject_artifacts_with_the_same_filename() { let logctx = test_setup_log("reject_artifacts_with_the_same_filename"); let tempdir = Utf8TempDir::new().unwrap(); let mut repo = OmicronRepo::initialize( @@ -357,8 +376,10 @@ mod tests { vec![Key::generate_ed25519()], Utc::now() + Days::new(1), ) + .await .unwrap() .into_editor() + .await .unwrap(); // Targets are uniquely identified by their kind/name/version triple; diff --git a/tufaceous-lib/src/root.rs b/tufaceous-lib/src/root.rs index 8ecd1cdf9d..cf5f7129c5 100644 --- a/tufaceous-lib/src/root.rs +++ b/tufaceous-lib/src/root.rs @@ -8,7 +8,7 @@ use tough::editor::signed::SignedRole; use tough::schema::{KeyHolder, RoleKeys, RoleType, Root}; use tough::sign::Sign; -pub(crate) fn new_root( +pub(crate) async fn new_root( keys: Vec, expires: DateTime, ) -> Result> { @@ -47,5 +47,6 @@ pub(crate) fn new_root( &KeyHolder::Root(root), &keys, &SystemRandom::new(), - )?) + ) + .await?) } diff --git a/tufaceous/Cargo.toml b/tufaceous/Cargo.toml index e48513e24c..81248af57d 100644 --- a/tufaceous/Cargo.toml +++ b/tufaceous/Cargo.toml @@ -17,6 +17,7 @@ slog.workspace = true slog-async.workspace = true slog-envlogger.workspace = true slog-term.workspace = true +tokio.workspace = true tufaceous-lib.workspace = true omicron-workspace-hack.workspace = true @@ -27,6 +28,7 @@ fs-err.workspace = true omicron-test-utils.workspace = true predicates.workspace = true tempfile.workspace = true +tokio = { workspace = true, features = ["test-util"] } [[test]] name = "manifest-tests" diff --git a/tufaceous/src/dispatch.rs b/tufaceous/src/dispatch.rs index ea0db63fce..fc86c948df 100644 --- a/tufaceous/src/dispatch.rs +++ b/tufaceous/src/dispatch.rs @@ -36,7 +36,7 @@ pub struct Args { impl Args { /// Executes these arguments. - pub fn exec(self, log: &slog::Logger) -> Result<()> { + pub async fn exec(self, log: &slog::Logger) -> Result<()> { let repo_path = match self.repo { Some(repo) => repo, None => std::env::current_dir()?.try_into()?, @@ -52,7 +52,8 @@ impl Args { system_version, keys, self.expiry, - )?; + ) + .await?; slog::info!( log, "Initialized TUF repository in {}", @@ -87,8 +88,9 @@ impl Args { let repo = OmicronRepo::load_untrusted_ignore_expiration( &log, &repo_path, - )?; - let mut editor = repo.into_editor()?; + ) + .await?; + let mut editor = repo.into_editor().await?; let new_artifact = AddArtifact::from_path(kind, name, version, path)?; @@ -96,7 +98,7 @@ impl Args { editor .add_artifact(&new_artifact) .context("error adding artifact")?; - editor.sign_and_finish(self.keys, self.expiry)?; + editor.sign_and_finish(self.keys, self.expiry).await?; println!( "added {} {}, version {}", new_artifact.kind(), @@ -113,7 +115,8 @@ impl Args { let repo = OmicronRepo::load_untrusted_ignore_expiration( &log, &repo_path, - )?; + ) + .await?; repo.archive(&output_path)?; Ok(()) @@ -124,13 +127,14 @@ impl Args { // Now load the repository and ensure it's valid. 
let repo = OmicronRepo::load_untrusted(&log, &dest) + .await .with_context(|| { format!( "error loading extracted repository at `{dest}` \ (extracted files are still available)" ) })?; - repo.read_artifacts().with_context(|| { + repo.read_artifacts().await.with_context(|| { format!( "error loading artifacts.json from extracted archive \ at `{dest}`" @@ -169,7 +173,7 @@ impl Args { assembler.set_build_dir(dir); } - assembler.build()?; + assembler.build().await?; Ok(()) } diff --git a/tufaceous/src/main.rs b/tufaceous/src/main.rs index 30832cffbf..014817ee53 100644 --- a/tufaceous/src/main.rs +++ b/tufaceous/src/main.rs @@ -7,10 +7,11 @@ use clap::Parser; use slog::Drain; use tufaceous::Args; -fn main() -> Result<()> { +#[tokio::main] +async fn main() -> Result<()> { let log = setup_log(); let args = Args::parse(); - args.exec(&log) + args.exec(&log).await } fn setup_log() -> slog::Logger { diff --git a/tufaceous/tests/integration-tests/command_tests.rs b/tufaceous/tests/integration-tests/command_tests.rs index 73c94572eb..72c3a1a13a 100644 --- a/tufaceous/tests/integration-tests/command_tests.rs +++ b/tufaceous/tests/integration-tests/command_tests.rs @@ -14,8 +14,8 @@ use omicron_test_utils::dev::test_setup_log; use predicates::prelude::*; use tufaceous_lib::{Key, OmicronRepo}; -#[test] -fn test_init_and_add() -> Result<()> { +#[tokio::test] +async fn test_init_and_add() -> Result<()> { let logctx = test_setup_log("test_init_and_add"); let tempdir = tempfile::tempdir().unwrap(); let key = Key::generate_ed25519(); @@ -54,9 +54,9 @@ fn test_init_and_add() -> Result<()> { // Now read the repository and ensure the list of expected artifacts. let repo_path: Utf8PathBuf = tempdir.path().join("repo").try_into()?; - let repo = OmicronRepo::load_untrusted(&logctx.log, &repo_path)?; + let repo = OmicronRepo::load_untrusted(&logctx.log, &repo_path).await?; - let artifacts = repo.read_artifacts()?; + let artifacts = repo.read_artifacts().await?; assert_eq!( artifacts.artifacts.len(), 2, diff --git a/wicketd/Cargo.toml b/wicketd/Cargo.toml index db1ac9c04a..1360c28b19 100644 --- a/wicketd/Cargo.toml +++ b/wicketd/Cargo.toml @@ -8,6 +8,7 @@ license = "MPL-2.0" anyhow.workspace = true async-trait.workspace = true base64.workspace = true +buf-list.workspace = true bytes.workspace = true camino.workspace = true camino-tempfile.workspace = true diff --git a/wicketd/src/artifacts/artifacts_with_plan.rs b/wicketd/src/artifacts/artifacts_with_plan.rs index 331aecfc70..d3319d7f6b 100644 --- a/wicketd/src/artifacts/artifacts_with_plan.rs +++ b/wicketd/src/artifacts/artifacts_with_plan.rs @@ -50,7 +50,7 @@ pub(super) struct ArtifactsWithPlan { } impl ArtifactsWithPlan { - pub(super) fn from_zip( + pub(super) async fn from_zip( zip_data: T, log: &Logger, ) -> Result @@ -68,10 +68,12 @@ impl ArtifactsWithPlan { // anyone can sign the repositories and this code will accept that. 
let repository = OmicronRepo::load_untrusted_ignore_expiration(log, dir.path()) + .await .map_err(RepositoryError::LoadRepository)?; let artifacts = repository .read_artifacts() + .await .map_err(RepositoryError::ReadArtifactsDocument)?; // Create another temporary directory where we'll "permanently" (as long @@ -132,9 +134,10 @@ impl ArtifactsWithPlan { .map_err(RepositoryError::TargetHashLength)?, ); - let reader = repository + let stream = repository .repo() .read_target(&target_name) + .await .map_err(|error| RepositoryError::LocateTarget { target: artifact.target.clone(), error: Box::new(error), @@ -143,13 +146,15 @@ impl ArtifactsWithPlan { RepositoryError::MissingTarget(artifact.target.clone()) })?; - plan_builder.add_artifact( - artifact.into_id(), - artifact_hash, - io::BufReader::new(reader), - &mut by_id, - &mut by_hash, - )?; + plan_builder + .add_artifact( + artifact.into_id(), + artifact_hash, + stream, + &mut by_id, + &mut by_hash, + ) + .await?; } // Ensure we know how to apply updates from this set of artifacts; we'll @@ -218,8 +223,11 @@ mod tests { /// Test that `ArtifactsWithPlan` can extract the fake repository generated /// by tufaceous. - #[test] - fn test_extract_fake() -> Result<()> { + /// + /// See documentation for extract_nested_artifact_pair in update_plan.rs + /// for why multi_thread is required. + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn test_extract_fake() -> Result<()> { let logctx = test_setup_log("test_extract_fake"); let temp_dir = Utf8TempDir::new()?; let archive_path = temp_dir.path().join("archive.zip"); @@ -233,12 +241,15 @@ mod tests { ]) .context("error parsing args")?; - args.exec(&logctx.log).context("error executing assemble command")?; + args.exec(&logctx.log) + .await + .context("error executing assemble command")?; // Now check that it can be read by the archive extractor. let zip_bytes = std::fs::File::open(&archive_path) .context("error opening archive.zip")?; let plan = ArtifactsWithPlan::from_zip(zip_bytes, &logctx.log) + .await .context("error reading archive.zip")?; // Check that all known artifact kinds are present in the map. let by_id_kinds: BTreeSet<_> = diff --git a/wicketd/src/artifacts/error.rs b/wicketd/src/artifacts/error.rs index ef81ec66f3..ada8fbe011 100644 --- a/wicketd/src/artifacts/error.rs +++ b/wicketd/src/artifacts/error.rs @@ -57,6 +57,13 @@ pub(super) enum RepositoryError { )] MissingTarget(String), + #[error("error reading artifact of kind `{kind}` from repository")] + ReadArtifact { + kind: ArtifactKind, + #[source] + error: Box, + }, + #[error("error copying artifact of kind `{kind}` from repository")] CopyExtractedArtifact { kind: ArtifactKind, @@ -160,6 +167,7 @@ impl RepositoryError { | RepositoryError::LoadRepository(_) | RepositoryError::ReadArtifactsDocument(_) | RepositoryError::TargetHashRead { .. } + | RepositoryError::ReadArtifact { .. } | RepositoryError::CopyExtractedArtifact { .. 
} => { HttpError::for_bad_request(None, message) } diff --git a/wicketd/src/artifacts/extracted_artifacts.rs b/wicketd/src/artifacts/extracted_artifacts.rs index b796201936..5683cd1c13 100644 --- a/wicketd/src/artifacts/extracted_artifacts.rs +++ b/wicketd/src/artifacts/extracted_artifacts.rs @@ -7,6 +7,8 @@ use anyhow::Context; use camino::Utf8PathBuf; use camino_tempfile::NamedUtf8TempFile; use camino_tempfile::Utf8TempDir; +use futures::Stream; +use futures::StreamExt; use omicron_common::update::ArtifactHash; use omicron_common::update::ArtifactHashId; use omicron_common::update::ArtifactKind; @@ -14,13 +16,11 @@ use sha2::Digest; use sha2::Sha256; use slog::info; use slog::Logger; -use std::fs::File; use std::io; -use std::io::BufWriter; -use std::io::Read; use std::io::Write; use std::sync::Arc; use tokio::io::AsyncRead; +use tokio::io::AsyncWriteExt; use tokio_util::io::ReaderStream; /// Handle to the data of an extracted artifact. @@ -123,17 +123,18 @@ impl ExtractedArtifacts { self.tempdir.path().join(format!("{}", artifact_hash_id.hash)) } - /// Copy from `reader` into our temp directory, returning a handle to the + /// Copy from `stream` into our temp directory, returning a handle to the /// extracted artifact on success. - pub(super) fn store( + pub(super) async fn store( &mut self, artifact_hash_id: ArtifactHashId, - mut reader: impl Read, + stream: impl Stream>, ) -> Result { let output_path = self.path_for_artifact(&artifact_hash_id); - let mut writer = BufWriter::new( - File::create(&output_path) + let mut writer = tokio::io::BufWriter::new( + tokio::fs::File::create(&output_path) + .await .with_context(|| { format!("failed to create temp file {output_path}") }) @@ -143,15 +144,29 @@ impl ExtractedArtifacts { })?, ); - let file_size = io::copy(&mut reader, &mut writer) - .with_context(|| format!("failed writing to {output_path}")) - .map_err(|error| RepositoryError::CopyExtractedArtifact { + let mut stream = std::pin::pin!(stream); + + let mut file_size = 0; + + while let Some(res) = stream.next().await { + let chunk = res.map_err(|error| RepositoryError::ReadArtifact { kind: artifact_hash_id.kind.clone(), - error, - })? 
as usize; + error: Box::new(error), + })?; + file_size += chunk.len(); + writer + .write_all(&chunk) + .await + .with_context(|| format!("failed writing to {output_path}")) + .map_err(|error| RepositoryError::CopyExtractedArtifact { + kind: artifact_hash_id.kind.clone(), + error, + })?; + } writer .flush() + .await .with_context(|| format!("failed flushing {output_path}")) .map_err(|error| RepositoryError::CopyExtractedArtifact { kind: artifact_hash_id.kind.clone(), diff --git a/wicketd/src/artifacts/store.rs b/wicketd/src/artifacts/store.rs index 29e1ecef0a..2a7b4a646b 100644 --- a/wicketd/src/artifacts/store.rs +++ b/wicketd/src/artifacts/store.rs @@ -42,12 +42,9 @@ impl WicketdArtifactStore { slog::debug!(self.log, "adding repository"); let log = self.log.clone(); - let new_artifacts = tokio::task::spawn_blocking(move || { - ArtifactsWithPlan::from_zip(data, &log) - .map_err(|error| error.to_http_error()) - }) - .await - .unwrap()?; + let new_artifacts = ArtifactsWithPlan::from_zip(data, &log) + .await + .map_err(|error| error.to_http_error())?; self.replace(new_artifacts); Ok(()) diff --git a/wicketd/src/artifacts/update_plan.rs b/wicketd/src/artifacts/update_plan.rs index 5d7bee629a..c6db7c1b65 100644 --- a/wicketd/src/artifacts/update_plan.rs +++ b/wicketd/src/artifacts/update_plan.rs @@ -14,7 +14,10 @@ use super::extracted_artifacts::HashingNamedUtf8TempFile; use super::ArtifactIdData; use super::Board; use super::ExtractedArtifactDataHandle; -use anyhow::anyhow; +use bytes::Bytes; +use futures::Stream; +use futures::StreamExt; +use futures::TryStreamExt; use hubtools::RawHubrisArchive; use omicron_common::api::external::SemverVersion; use omicron_common::api::internal::nexus::KnownArtifactKind; @@ -28,7 +31,6 @@ use std::collections::btree_map; use std::collections::BTreeMap; use std::collections::HashMap; use std::io; -use std::io::Read; use tufaceous_lib::HostPhaseImages; use tufaceous_lib::RotArchives; @@ -143,24 +145,26 @@ impl<'a> UpdatePlanBuilder<'a> { }) } - pub(super) fn add_artifact( + pub(super) async fn add_artifact( &mut self, artifact_id: ArtifactId, artifact_hash: ArtifactHash, - reader: io::BufReader, + stream: impl Stream> + Send, by_id: &mut BTreeMap>, by_hash: &mut HashMap, ) -> Result<(), RepositoryError> { // If we don't know this artifact kind, we'll still serve it up by hash, // but we don't do any further processing on it. 
let Some(artifact_kind) = artifact_id.kind.to_known() else { - return self.add_unknown_artifact( - artifact_id, - artifact_hash, - reader, - by_id, - by_hash, - ); + return self + .add_unknown_artifact( + artifact_id, + artifact_hash, + stream, + by_id, + by_hash, + ) + .await; }; // If we do know the artifact kind, we may have additional work to do, @@ -170,48 +174,57 @@ impl<'a> UpdatePlanBuilder<'a> { match artifact_kind { KnownArtifactKind::GimletSp | KnownArtifactKind::PscSp - | KnownArtifactKind::SwitchSp => self.add_sp_artifact( - artifact_id, - artifact_kind, - artifact_hash, - reader, - by_id, - by_hash, - ), + | KnownArtifactKind::SwitchSp => { + self.add_sp_artifact( + artifact_id, + artifact_kind, + artifact_hash, + stream, + by_id, + by_hash, + ) + .await + } KnownArtifactKind::GimletRot | KnownArtifactKind::PscRot - | KnownArtifactKind::SwitchRot => self.add_rot_artifact( - artifact_id, - artifact_kind, - reader, - by_id, - by_hash, - ), + | KnownArtifactKind::SwitchRot => { + self.add_rot_artifact( + artifact_id, + artifact_kind, + stream, + by_id, + by_hash, + ) + .await + } KnownArtifactKind::Host => { - self.add_host_artifact(artifact_id, reader, by_id, by_hash) + self.add_host_artifact(artifact_id, stream, by_id, by_hash) } KnownArtifactKind::Trampoline => self.add_trampoline_artifact( artifact_id, - reader, - by_id, - by_hash, - ), - KnownArtifactKind::ControlPlane => self.add_control_plane_artifact( - artifact_id, - artifact_hash, - reader, + stream, by_id, by_hash, ), + KnownArtifactKind::ControlPlane => { + self.add_control_plane_artifact( + artifact_id, + artifact_hash, + stream, + by_id, + by_hash, + ) + .await + } } } - fn add_sp_artifact( + async fn add_sp_artifact( &mut self, artifact_id: ArtifactId, artifact_kind: KnownArtifactKind, artifact_hash: ArtifactHash, - mut reader: io::BufReader, + stream: impl Stream> + Send, by_id: &mut BTreeMap>, by_hash: &mut HashMap, ) -> Result<(), RepositoryError> { @@ -228,15 +241,18 @@ impl<'a> UpdatePlanBuilder<'a> { | KnownArtifactKind::SwitchRot => unreachable!(), }; + let mut stream = std::pin::pin!(stream); + // SP images are small, and hubtools wants a `&[u8]` to parse, so we'll // read the whole thing into memory. 
let mut data = Vec::new(); - reader.read_to_end(&mut data).map_err(|error| { - RepositoryError::CopyExtractedArtifact { + while let Some(res) = stream.next().await { + let chunk = res.map_err(|error| RepositoryError::ReadArtifact { kind: artifact_kind.into(), - error: anyhow!(error), - } - })?; + error: Box::new(error), + })?; + data.extend_from_slice(&chunk); + } let (artifact_id, board) = read_hubris_board_from_archive(artifact_id, data.clone())?; @@ -255,7 +271,11 @@ impl<'a> UpdatePlanBuilder<'a> { ArtifactHashId { kind: artifact_kind.into(), hash: artifact_hash }; let data = self .extracted_artifacts - .store(artifact_hash_id, io::Cursor::new(&data))?; + .store( + artifact_hash_id, + futures::stream::iter([Ok(Bytes::from(data))]), + ) + .await?; slot.insert(ArtifactIdData { id: artifact_id.clone(), data: data.clone(), @@ -273,11 +293,11 @@ impl<'a> UpdatePlanBuilder<'a> { Ok(()) } - fn add_rot_artifact( + async fn add_rot_artifact( &mut self, artifact_id: ArtifactId, artifact_kind: KnownArtifactKind, - reader: io::BufReader, + stream: impl Stream> + Send, by_id: &mut BTreeMap>, by_hash: &mut HashMap, ) -> Result<(), RepositoryError> { @@ -310,9 +330,12 @@ impl<'a> UpdatePlanBuilder<'a> { }; let (rot_a_data, rot_b_data) = Self::extract_nested_artifact_pair( + stream, &mut self.extracted_artifacts, artifact_kind, - |out_a, out_b| RotArchives::extract_into(reader, out_a, out_b), + |reader, out_a, out_b| { + RotArchives::extract_into(reader, out_a, out_b) + }, )?; // Technically we've done all we _need_ to do with the RoT images. We @@ -358,7 +381,7 @@ impl<'a> UpdatePlanBuilder<'a> { fn add_host_artifact( &mut self, artifact_id: ArtifactId, - reader: io::BufReader, + stream: impl Stream> + Send, by_id: &mut BTreeMap>, by_hash: &mut HashMap, ) -> Result<(), RepositoryError> { @@ -369,9 +392,12 @@ impl<'a> UpdatePlanBuilder<'a> { } let (phase_1_data, phase_2_data) = Self::extract_nested_artifact_pair( + stream, &mut self.extracted_artifacts, KnownArtifactKind::Host, - |out_1, out_2| HostPhaseImages::extract_into(reader, out_1, out_2), + |reader, out_1, out_2| { + HostPhaseImages::extract_into(reader, out_1, out_2) + }, )?; // Similarly to the RoT, we need to create new, non-conflicting artifact @@ -409,7 +435,7 @@ impl<'a> UpdatePlanBuilder<'a> { fn add_trampoline_artifact( &mut self, artifact_id: ArtifactId, - reader: io::BufReader, + stream: impl Stream> + Send, by_id: &mut BTreeMap>, by_hash: &mut HashMap, ) -> Result<(), RepositoryError> { @@ -422,9 +448,12 @@ impl<'a> UpdatePlanBuilder<'a> { } let (phase_1_data, phase_2_data) = Self::extract_nested_artifact_pair( + stream, &mut self.extracted_artifacts, KnownArtifactKind::Trampoline, - |out_1, out_2| HostPhaseImages::extract_into(reader, out_1, out_2), + |reader, out_1, out_2| { + HostPhaseImages::extract_into(reader, out_1, out_2) + }, )?; // Similarly to the RoT, we need to create new, non-conflicting artifact @@ -466,11 +495,11 @@ impl<'a> UpdatePlanBuilder<'a> { Ok(()) } - fn add_control_plane_artifact( + async fn add_control_plane_artifact( &mut self, artifact_id: ArtifactId, artifact_hash: ArtifactHash, - reader: io::BufReader, + stream: impl Stream> + Send, by_id: &mut BTreeMap>, by_hash: &mut HashMap, ) -> Result<(), RepositoryError> { @@ -487,7 +516,8 @@ impl<'a> UpdatePlanBuilder<'a> { hash: artifact_hash, }; - let data = self.extracted_artifacts.store(artifact_hash_id, reader)?; + let data = + self.extracted_artifacts.store(artifact_hash_id, stream).await?; self.control_plane_hash = Some(data.hash()); @@ -503,11 +533,11 @@ 
impl<'a> UpdatePlanBuilder<'a> { Ok(()) } - fn add_unknown_artifact( + async fn add_unknown_artifact( &mut self, artifact_id: ArtifactId, artifact_hash: ArtifactHash, - reader: io::BufReader, + stream: impl Stream> + Send, by_id: &mut BTreeMap>, by_hash: &mut HashMap, ) -> Result<(), RepositoryError> { @@ -515,7 +545,8 @@ impl<'a> UpdatePlanBuilder<'a> { let artifact_hash_id = ArtifactHashId { kind: artifact_kind.clone(), hash: artifact_hash }; - let data = self.extracted_artifacts.store(artifact_hash_id, reader)?; + let data = + self.extracted_artifacts.store(artifact_hash_id, stream).await?; record_extracted_artifact( artifact_id, @@ -529,11 +560,80 @@ impl<'a> UpdatePlanBuilder<'a> { Ok(()) } - // RoT, host OS, and trampoline OS artifacts all contain a pair of artifacts - // we actually care about (RoT: A/B images; host/trampoline: phase1/phase2 - // images). This method is a helper that converts a single artifact `reader` - // into a pair of extracted artifacts. + /// A helper that converts a single artifact `stream` into a pair of + /// extracted artifacts. + /// + /// RoT, host OS, and trampoline OS artifacts all contain a pair of + /// artifacts we actually care about (RoT: A/B images; host/trampoline: + /// phase1/phase2 images). This method is a helper to extract that. + /// + /// This method uses a `block_in_place` into synchronous code, because the + /// value of changing tufaceous to do async tarball extraction is honestly + /// pretty dubious. + /// + /// The main costs of this are that: + /// 1. This code can only be used with multithreaded Tokio executors. (This + /// is OK for production, but does require that our tests use `flavor = + /// "multi_thread`.) + /// 2. Parallelizing extraction is harder if we ever want to do that in the + /// future. (It can be done using the async-scoped crate, though.) + /// + /// Depending on how things shake out, we may want to revisit this in the + /// future. fn extract_nested_artifact_pair( + stream: impl Stream> + Send, + extracted_artifacts: &mut ExtractedArtifacts, + kind: KnownArtifactKind, + extract: F, + ) -> Result< + (ExtractedArtifactDataHandle, ExtractedArtifactDataHandle), + RepositoryError, + > + where + F: FnOnce( + &mut dyn io::BufRead, + &mut HashingNamedUtf8TempFile, + &mut HashingNamedUtf8TempFile, + ) -> anyhow::Result<()> + + Send, + { + // Since stream isn't guaranteed to be 'static, we have to use + // block_in_place here, not spawn_blocking. This does mean that the + // current task is taken over, and that this function can only be used + // from a multithreaded Tokio runtime. + // + // An alternative would be to use the `async-scoped` crate. However: + // + // - We would only spawn one task there. + // - The only safe use of async-scoped is with the `scope_and_block` + // call, which uses `tokio::task::block_in_place` anyway. + // - async-scoped also requires a multithreaded Tokio runtime. + // + // If we ever want to parallelize extraction across all the different + // artifacts, `async-scoped` would be a good fit. + tokio::task::block_in_place(|| { + let stream = std::pin::pin!(stream); + let reader = + tokio_util::io::StreamReader::new(stream.map_err(|error| { + // StreamReader requires a conversion from tough's errors to + // std::io::Error. + std::io::Error::new(io::ErrorKind::Other, error) + })); + + // RotArchives::extract_into takes a synchronous reader, so we need + // to use this bridge. The bridge can only be used from a blocking + // context. 
+ let mut reader = tokio_util::io::SyncIoBridge::new(reader); + + Self::extract_nested_artifact_pair_impl( + extracted_artifacts, + kind, + |out_a, out_b| extract(&mut reader, out_a, out_b), + ) + }) + } + + fn extract_nested_artifact_pair_impl( extracted_artifacts: &mut ExtractedArtifacts, kind: KnownArtifactKind, extract: F, @@ -838,7 +938,9 @@ mod tests { builder.build_to_vec().unwrap() } - #[tokio::test] + // See documentation for extract_nested_artifact_pair for why multi_thread + // is required. + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_update_plan_from_artifacts() { const VERSION_0: SemverVersion = SemverVersion::new(0, 0, 0); @@ -867,10 +969,11 @@ mod tests { .add_artifact( id, hash, - io::BufReader::new(io::Cursor::new(&data)), + futures::stream::iter([Ok(Bytes::from(data))]), &mut by_id, &mut by_hash, ) + .await .unwrap(); } @@ -889,10 +992,11 @@ mod tests { .add_artifact( id, hash, - io::BufReader::new(io::Cursor::new(&data)), + futures::stream::iter([Ok(Bytes::from(data))]), &mut by_id, &mut by_hash, ) + .await .unwrap(); } @@ -917,10 +1021,11 @@ mod tests { .add_artifact( id, hash, - io::BufReader::new(io::Cursor::new(&data)), + futures::stream::iter([Ok(Bytes::from(data))]), &mut by_id, &mut by_hash, ) + .await .unwrap(); } } @@ -945,10 +1050,11 @@ mod tests { .add_artifact( id, hash, - io::BufReader::new(io::Cursor::new(data)), + futures::stream::iter([Ok(data.clone())]), &mut by_id, &mut by_hash, ) + .await .unwrap(); } @@ -972,10 +1078,11 @@ mod tests { .add_artifact( id, hash, - io::BufReader::new(io::Cursor::new(data)), + futures::stream::iter([Ok(data.clone())]), &mut by_id, &mut by_hash, ) + .await .unwrap(); } diff --git a/wicketd/tests/integration_tests/updates.rs b/wicketd/tests/integration_tests/updates.rs index fb1637f44e..b65833a74b 100644 --- a/wicketd/tests/integration_tests/updates.rs +++ b/wicketd/tests/integration_tests/updates.rs @@ -31,7 +31,9 @@ use wicketd_client::types::{ StartUpdateParams, }; -#[tokio::test] +// See documentation for extract_nested_artifact_pair in update_plan.rs for why +// multi_thread is required. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_updates() { let gateway = gateway_setup::test_setup("test_updates", SpPort::One).await; let wicketd_testctx = WicketdTestContext::setup(gateway).await; @@ -48,7 +50,7 @@ async fn test_updates() { ]) .expect("args parsed correctly"); - args.exec(log).expect("assemble command completed successfully"); + args.exec(log).await.expect("assemble command completed successfully"); // Read the archive and upload it to the server. let zip_bytes = @@ -258,7 +260,9 @@ async fn test_updates() { wicketd_testctx.teardown().await; } -#[tokio::test] +// See documentation for extract_nested_artifact_pair in update_plan.rs for why +// multi_thread is required. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_installinator_fetch() { let gateway = gateway_setup::test_setup("test_updates", SpPort::One).await; let wicketd_testctx = WicketdTestContext::setup(gateway).await; @@ -275,7 +279,7 @@ async fn test_installinator_fetch() { ]) .expect("args parsed correctly"); - args.exec(log).expect("assemble command completed successfully"); + args.exec(log).await.expect("assemble command completed successfully"); // Read the archive and upload it to the server. 
let zip_bytes = @@ -391,7 +395,9 @@ async fn test_installinator_fetch() { wicketd_testctx.teardown().await; } -#[tokio::test] +// See documentation for extract_nested_artifact_pair in update_plan.rs for why +// multi_thread is required. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_update_races() { let gateway = gateway_setup::test_setup( "test_artifact_upload_while_updating", @@ -412,7 +418,7 @@ async fn test_update_races() { ]) .expect("args parsed correctly"); - args.exec(log).expect("assemble command completed successfully"); + args.exec(log).await.expect("assemble command completed successfully"); // Read the archive and upload it to the server. let zip_bytes = diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index 4d416eca02..47ea83f8f2 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -85,6 +85,7 @@ sha2 = { version = "0.10.8", features = ["oid"] } signature = { version = "2.1.0", default-features = false, features = ["digest", "rand_core", "std"] } similar = { version = "2.2.1", features = ["inline", "unicode"] } slog = { version = "2.7.0", features = ["dynamic-keys", "max_level_trace", "release_max_level_debug", "release_max_level_trace"] } +snafu = { version = "0.7.5", features = ["futures"] } spin = { version = "0.9.8" } string_cache = { version = "0.8.7" } subtle = { version = "2.5.0" } @@ -94,6 +95,7 @@ time = { version = "0.3.27", features = ["formatting", "local-offset", "macros", tokio = { version = "1.33.0", features = ["full", "test-util"] } tokio-postgres = { version = "0.7.10", features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } tokio-stream = { version = "0.1.14", features = ["net"] } +tokio-util = { version = "0.7.10", features = ["codec", "io-util"] } toml = { version = "0.7.8" } toml_edit-647d43efb71741da = { package = "toml_edit", version = "0.21.0", features = ["serde"] } tracing = { version = "0.1.37", features = ["log"] } @@ -178,6 +180,7 @@ sha2 = { version = "0.10.8", features = ["oid"] } signature = { version = "2.1.0", default-features = false, features = ["digest", "rand_core", "std"] } similar = { version = "2.2.1", features = ["inline", "unicode"] } slog = { version = "2.7.0", features = ["dynamic-keys", "max_level_trace", "release_max_level_debug", "release_max_level_trace"] } +snafu = { version = "0.7.5", features = ["futures"] } spin = { version = "0.9.8" } string_cache = { version = "0.8.7" } subtle = { version = "2.5.0" } @@ -188,6 +191,7 @@ time-macros = { version = "0.2.13", default-features = false, features = ["forma tokio = { version = "1.33.0", features = ["full", "test-util"] } tokio-postgres = { version = "0.7.10", features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } tokio-stream = { version = "0.1.14", features = ["net"] } +tokio-util = { version = "0.7.10", features = ["codec", "io-util"] } toml = { version = "0.7.8" } toml_edit-647d43efb71741da = { package = "toml_edit", version = "0.21.0", features = ["serde"] } tracing = { version = "0.1.37", features = ["log"] } From 08041d6c30c183692bdc28e9ed4e9df558140892 Mon Sep 17 00:00:00 2001 From: bnaecker Date: Mon, 20 Nov 2023 15:27:47 -0800 Subject: [PATCH 26/56] Add a producer kind to oximeter metric producers (#4497) - Adds the `kind` enum to metric producer information, including DB schema, model, and various client parameter types. This records the supported types of metric producers, and is intended to aid debugging and future work around updates and instance lifecycle management. 
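  For illustration only (not part of this change): a minimal sketch of a registration payload carrying the new optional field, using the `ProducerEndpoint` and `ProducerKind` definitions introduced further down in this patch; the UUID, address, and route values here are placeholders, and the schema-update details follow in the next item.

  ```rust
  use omicron_common::api::internal::nexus::{ProducerEndpoint, ProducerKind};
  use std::net::SocketAddr;
  use std::time::Duration;
  use uuid::Uuid;

  // Hypothetical helper: build an endpoint the way a service-style producer
  // (e.g. Nexus itself) would after this patch.
  fn example_producer_endpoint(address: SocketAddr) -> ProducerEndpoint {
      ProducerEndpoint {
          // Placeholder ID; real callers use their own service or instance ID.
          id: Uuid::new_v4(),
          // The new field. It is optional for now so out-of-repo clients that
          // do not yet send it remain compatible.
          kind: Some(ProducerKind::Service),
          address,
          base_route: String::from("/metrics/collect"),
          interval: Duration::from_secs(10),
      }
  }
  ```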
- Add schema update files which create the DB enum type and add it as a column to the `metric_producer` table. This currently _drops_ the existing table and recreates it with the new column, rather than adding the column using `ALTER TABLE`. That is intended to remove old entries in bulk, since nothing previously removed the records for Propolis servers when their instance was stopped. This is the initial PR in a sequence that will eventually make this field _required_ in both the database and API requests. As there are consumers of this API outside of the Omicron repository, this field needs to start as optional, to avoid introducing a commit with incompatible clients. --- clients/nexus-client/src/lib.rs | 14 ++++++ clients/oximeter-client/src/lib.rs | 14 ++++++ common/src/api/internal/nexus.rs | 21 +++++++++ nexus/db-model/src/producer_endpoint.rs | 37 +++++++++++++++ nexus/db-model/src/schema.rs | 3 +- nexus/db-queries/src/db/datastore/oximeter.rs | 1 + nexus/src/app/oximeter.rs | 4 ++ nexus/test-utils/src/lib.rs | 2 + nexus/tests/integration_tests/oximeter.rs | 2 + openapi/nexus-internal.json | 45 ++++++++++++++++++- openapi/oximeter.json | 45 ++++++++++++++++++- oximeter/collector/src/agent.rs | 4 ++ oximeter/producer/examples/producer.rs | 2 + schema/crdb/12.0.0/up01.sql | 27 +++++++++++ schema/crdb/12.0.0/up02.sql | 11 +++++ schema/crdb/12.0.0/up03.sql | 17 +++++++ schema/crdb/12.0.0/up04.sql | 8 ++++ schema/crdb/dbinit.sql | 15 ++++++- sled-agent/src/sim/disk.rs | 2 + sled-agent/src/sled_agent.rs | 2 + 20 files changed, 272 insertions(+), 4 deletions(-) create mode 100644 schema/crdb/12.0.0/up01.sql create mode 100644 schema/crdb/12.0.0/up02.sql create mode 100644 schema/crdb/12.0.0/up03.sql create mode 100644 schema/crdb/12.0.0/up04.sql diff --git a/clients/nexus-client/src/lib.rs b/clients/nexus-client/src/lib.rs index 23ceb114fc..6667f759e4 100644 --- a/clients/nexus-client/src/lib.rs +++ b/clients/nexus-client/src/lib.rs @@ -202,6 +202,19 @@ impl From<&types::InstanceState> } } +impl From + for types::ProducerKind +{ + fn from(kind: omicron_common::api::internal::nexus::ProducerKind) -> Self { + use omicron_common::api::internal::nexus::ProducerKind; + match kind { + ProducerKind::SledAgent => Self::SledAgent, + ProducerKind::Service => Self::Service, + ProducerKind::Instance => Self::Instance, + } + } +} + impl From<&omicron_common::api::internal::nexus::ProducerEndpoint> for types::ProducerEndpoint { @@ -212,6 +225,7 @@ impl From<&omicron_common::api::internal::nexus::ProducerEndpoint> address: s.address.to_string(), base_route: s.base_route.clone(), id: s.id, + kind: s.kind.map(Into::into), interval: s.interval.into(), } } diff --git a/clients/oximeter-client/src/lib.rs b/clients/oximeter-client/src/lib.rs index 7bd17d7e76..8a03304e06 100644 --- a/clients/oximeter-client/src/lib.rs +++ b/clients/oximeter-client/src/lib.rs @@ -20,6 +20,19 @@ impl From for types::Duration { } } +impl From + for types::ProducerKind +{ + fn from(kind: omicron_common::api::internal::nexus::ProducerKind) -> Self { + use omicron_common::api::internal::nexus; + match kind { + nexus::ProducerKind::Service => Self::Service, + nexus::ProducerKind::SledAgent => Self::SledAgent, + nexus::ProducerKind::Instance => Self::Instance, + } + } +} + impl From<&omicron_common::api::internal::nexus::ProducerEndpoint> for types::ProducerEndpoint { @@ -30,6 +43,7 @@ impl From<&omicron_common::api::internal::nexus::ProducerEndpoint> address: s.address.to_string(), base_route: s.base_route.clone(), id: s.id, + kind: 
s.kind.map(Into::into), interval: s.interval.into(), } } diff --git a/common/src/api/internal/nexus.rs b/common/src/api/internal/nexus.rs index a4a539ad9b..1daa85dbe7 100644 --- a/common/src/api/internal/nexus.rs +++ b/common/src/api/internal/nexus.rs @@ -84,13 +84,34 @@ pub struct SledInstanceState { // Oximeter producer/collector objects. +/// The kind of metric producer this is. +#[derive(Clone, Copy, Debug, Deserialize, JsonSchema, PartialEq, Serialize)] +#[serde(rename_all = "snake_case")] +pub enum ProducerKind { + /// The producer is a sled-agent. + SledAgent, + /// The producer is an Omicron-managed service. + Service, + /// The producer is a Propolis VMM managing a guest instance. + Instance, +} + /// Information announced by a metric server, used so that clients can contact it and collect /// available metric data from it. #[derive(Clone, Debug, Deserialize, JsonSchema, Serialize, PartialEq)] pub struct ProducerEndpoint { + /// A unique ID for this producer. pub id: Uuid, + /// The kind of producer. + pub kind: Option, + /// The IP address and port at which `oximeter` can collect metrics from the + /// producer. pub address: SocketAddr, + /// The API base route from which `oximeter` can collect metrics. + /// + /// The full route is `{base_route}/{id}`. pub base_route: String, + /// The interval on which `oximeter` should collect metrics. pub interval: Duration, } diff --git a/nexus/db-model/src/producer_endpoint.rs b/nexus/db-model/src/producer_endpoint.rs index 29e57b0877..52a69e0508 100644 --- a/nexus/db-model/src/producer_endpoint.rs +++ b/nexus/db-model/src/producer_endpoint.rs @@ -3,12 +3,47 @@ // file, You can obtain one at https://mozilla.org/MPL/2.0/. use super::SqlU16; +use crate::impl_enum_type; use crate::schema::metric_producer; use db_macros::Asset; use nexus_types::identity::Asset; use omicron_common::api::internal; use uuid::Uuid; +impl_enum_type!( + #[derive(SqlType, Copy, Clone, Debug, QueryId)] + #[diesel(postgres_type(name = "producer_kind"))] + pub struct ProducerKindEnum; + + #[derive(AsExpression, Copy, Clone, Debug, FromSqlRow, PartialEq)] + #[diesel(sql_type = ProducerKindEnum)] + pub enum ProducerKind; + + SledAgent => b"sled_agent" + Service => b"service" + Instance => b"instance" +); + +impl From for ProducerKind { + fn from(kind: internal::nexus::ProducerKind) -> Self { + match kind { + internal::nexus::ProducerKind::SledAgent => ProducerKind::SledAgent, + internal::nexus::ProducerKind::Service => ProducerKind::Service, + internal::nexus::ProducerKind::Instance => ProducerKind::Instance, + } + } +} + +impl From for internal::nexus::ProducerKind { + fn from(kind: ProducerKind) -> Self { + match kind { + ProducerKind::SledAgent => internal::nexus::ProducerKind::SledAgent, + ProducerKind::Service => internal::nexus::ProducerKind::Service, + ProducerKind::Instance => internal::nexus::ProducerKind::Instance, + } + } +} + /// Information announced by a metric server, used so that clients can contact it and collect /// available metric data from it. 
#[derive(Queryable, Insertable, Debug, Clone, Selectable, Asset)] @@ -17,6 +52,7 @@ pub struct ProducerEndpoint { #[diesel(embed)] identity: ProducerEndpointIdentity, + pub kind: Option<ProducerKind>, pub ip: ipnetwork::IpNetwork, pub port: SqlU16, pub interval: f64, @@ -33,6 +69,7 @@ impl ProducerEndpoint { ) -> Self { Self { identity: ProducerEndpointIdentity::new(endpoint.id), + kind: endpoint.kind.map(Into::into), ip: endpoint.address.ip().into(), port: endpoint.address.port().into(), base_route: endpoint.base_route.clone(), diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 4844f2a33f..e7d625e854 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -399,6 +399,7 @@ table! { id -> Uuid, time_created -> Timestamptz, time_modified -> Timestamptz, + kind -> Nullable<crate::ProducerKindEnum>, ip -> Inet, port -> Int4, interval -> Float8, @@ -1269,7 +1270,7 @@ table! { /// /// This should be updated whenever the schema is changed. For more details, /// refer to: schema/crdb/README.adoc -pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(11, 0, 0); +pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(12, 0, 0); allow_tables_to_appear_in_same_query!( system_update, diff --git a/nexus/db-queries/src/db/datastore/oximeter.rs b/nexus/db-queries/src/db/datastore/oximeter.rs index 55b650ea53..116e8586b0 100644 --- a/nexus/db-queries/src/db/datastore/oximeter.rs +++ b/nexus/db-queries/src/db/datastore/oximeter.rs @@ -96,6 +96,7 @@ impl DataStore { .do_update() .set(( dsl::time_modified.eq(Utc::now()), + dsl::kind.eq(producer.kind), dsl::ip.eq(producer.ip), dsl::port.eq(producer.port), dsl::interval.eq(producer.interval), diff --git a/nexus/src/app/oximeter.rs b/nexus/src/app/oximeter.rs index 7dfa2fb68b..66f39a32b6 100644 --- a/nexus/src/app/oximeter.rs +++ b/nexus/src/app/oximeter.rs @@ -127,6 +127,9 @@ impl super::Nexus { for producer in producers.into_iter() { let producer_info = oximeter_client::types::ProducerEndpoint { id: producer.id(), + kind: producer + .kind + .map(|kind| nexus::ProducerKind::from(kind).into()), address: SocketAddr::new( producer.ip.ip(), producer.port.try_into().unwrap(), @@ -149,6 +152,7 @@ impl super::Nexus { pub(crate) async fn register_as_producer(&self, address: SocketAddr) { let producer_endpoint = nexus::ProducerEndpoint { id: self.id, + kind: Some(nexus::ProducerKind::Service), address, base_route: String::from("/metrics/collect"), interval: Duration::from_secs(10), diff --git a/nexus/test-utils/src/lib.rs b/nexus/test-utils/src/lib.rs index 647232031d..1e7de6132b 100644 --- a/nexus/test-utils/src/lib.rs +++ b/nexus/test-utils/src/lib.rs @@ -30,6 +30,7 @@ use omicron_common::address::NEXUS_OPTE_IPV4_SUBNET; use omicron_common::api::external::MacAddr; use omicron_common::api::external::{IdentityMetadata, Name}; use omicron_common::api::internal::nexus::ProducerEndpoint; +use omicron_common::api::internal::nexus::ProducerKind; use omicron_common::api::internal::shared::SwitchLocation; use omicron_common::nexus_config; use omicron_common::nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; @@ -1092,6 +1093,7 @@ pub async fn start_producer_server( let producer_address = SocketAddr::new(Ipv6Addr::LOCALHOST.into(), 0); let server_info = ProducerEndpoint { id, + kind: Some(ProducerKind::Service), address: producer_address, base_route: "/collect".to_string(), interval: Duration::from_secs(1), diff --git a/nexus/tests/integration_tests/oximeter.rs b/nexus/tests/integration_tests/oximeter.rs index 65aaa18642..e97f36daf4 100644 ---
a/nexus/tests/integration_tests/oximeter.rs +++ b/nexus/tests/integration_tests/oximeter.rs @@ -9,6 +9,7 @@ use http::StatusCode; use nexus_test_interface::NexusServer; use nexus_test_utils_macros::nexus_test; use omicron_common::api::internal::nexus::ProducerEndpoint; +use omicron_common::api::internal::nexus::ProducerKind; use omicron_test_utils::dev::poll::{wait_for_condition, CondCheckError}; use oximeter_db::DbWrite; use std::collections::BTreeSet; @@ -360,6 +361,7 @@ async fn test_oximeter_collector_reregistration_gets_all_assignments() { ids.insert(id); let info = ProducerEndpoint { id, + kind: Some(ProducerKind::Service), address: SocketAddr::new(Ipv6Addr::LOCALHOST.into(), 12345), base_route: String::from("/collect"), interval: Duration::from_secs(1), diff --git a/openapi/nexus-internal.json b/openapi/nexus-internal.json index fcb285d9eb..c358b4109b 100644 --- a/openapi/nexus-internal.json +++ b/openapi/nexus-internal.json @@ -4322,17 +4322,34 @@ "type": "object", "properties": { "address": { + "description": "The IP address and port at which `oximeter` can collect metrics from the producer.", "type": "string" }, "base_route": { + "description": "The API base route from which `oximeter` can collect metrics.\n\nThe full route is `{base_route}/{id}`.", "type": "string" }, "id": { + "description": "A unique ID for this producer.", "type": "string", "format": "uuid" }, "interval": { - "$ref": "#/components/schemas/Duration" + "description": "The interval on which `oximeter` should collect metrics.", + "allOf": [ + { + "$ref": "#/components/schemas/Duration" + } + ] + }, + "kind": { + "nullable": true, + "description": "The kind of producer.", + "allOf": [ + { + "$ref": "#/components/schemas/ProducerKind" + } + ] } }, "required": [ @@ -4342,6 +4359,32 @@ "interval" ] }, + "ProducerKind": { + "description": "The kind of metric producer this is.", + "oneOf": [ + { + "description": "The producer is a sled-agent.", + "type": "string", + "enum": [ + "sled_agent" + ] + }, + { + "description": "The producer is an Omicron-managed service.", + "type": "string", + "enum": [ + "service" + ] + }, + { + "description": "The producer is a Propolis VMM managing a guest instance.", + "type": "string", + "enum": [ + "instance" + ] + } + ] + }, "ProducerResultsItem": { "oneOf": [ { diff --git a/openapi/oximeter.json b/openapi/oximeter.json index 529d20e921..f7e534c95d 100644 --- a/openapi/oximeter.json +++ b/openapi/oximeter.json @@ -191,17 +191,34 @@ "type": "object", "properties": { "address": { + "description": "The IP address and port at which `oximeter` can collect metrics from the producer.", "type": "string" }, "base_route": { + "description": "The API base route from which `oximeter` can collect metrics.\n\nThe full route is `{base_route}/{id}`.", "type": "string" }, "id": { + "description": "A unique ID for this producer.", "type": "string", "format": "uuid" }, "interval": { - "$ref": "#/components/schemas/Duration" + "description": "The interval on which `oximeter` should collect metrics.", + "allOf": [ + { + "$ref": "#/components/schemas/Duration" + } + ] + }, + "kind": { + "nullable": true, + "description": "The kind of producer.", + "allOf": [ + { + "$ref": "#/components/schemas/ProducerKind" + } + ] } }, "required": [ @@ -231,6 +248,32 @@ "required": [ "items" ] + }, + "ProducerKind": { + "description": "The kind of metric producer this is.", + "oneOf": [ + { + "description": "The producer is a sled-agent.", + "type": "string", + "enum": [ + "sled_agent" + ] + }, + { + "description": "The 
producer is an Omicron-managed service.", + "type": "string", + "enum": [ + "service" + ] + }, + { + "description": "The producer is a Propolis VMM managing a guest instance.", + "type": "string", + "enum": [ + "instance" + ] + } + ] } }, "responses": { diff --git a/oximeter/collector/src/agent.rs b/oximeter/collector/src/agent.rs index 23ff32ed66..f6da172909 100644 --- a/oximeter/collector/src/agent.rs +++ b/oximeter/collector/src/agent.rs @@ -648,6 +648,7 @@ mod tests { use hyper::Response; use hyper::Server; use hyper::StatusCode; + use omicron_common::api::internal::nexus::ProducerKind; use omicron_test_utils::dev::test_setup_log; use std::convert::Infallible; use std::net::Ipv6Addr; @@ -694,6 +695,7 @@ mod tests { let interval = Duration::from_secs(1); let endpoint = ProducerEndpoint { id: Uuid::new_v4(), + kind: Some(ProducerKind::Service), address, base_route: String::from("/"), interval, @@ -752,6 +754,7 @@ mod tests { let interval = Duration::from_secs(1); let endpoint = ProducerEndpoint { id: Uuid::new_v4(), + kind: Some(ProducerKind::Service), address: SocketAddr::V6(SocketAddrV6::new( Ipv6Addr::LOCALHOST, 0, @@ -840,6 +843,7 @@ mod tests { let interval = Duration::from_secs(1); let endpoint = ProducerEndpoint { id: Uuid::new_v4(), + kind: Some(ProducerKind::Service), address, base_route: String::from("/"), interval, diff --git a/oximeter/producer/examples/producer.rs b/oximeter/producer/examples/producer.rs index dd9722c80a..baa4f57bf7 100644 --- a/oximeter/producer/examples/producer.rs +++ b/oximeter/producer/examples/producer.rs @@ -15,6 +15,7 @@ use dropshot::ConfigLogging; use dropshot::ConfigLoggingLevel; use dropshot::HandlerTaskMode; use omicron_common::api::internal::nexus::ProducerEndpoint; +use omicron_common::api::internal::nexus::ProducerKind; use oximeter::types::Cumulative; use oximeter::types::ProducerRegistry; use oximeter::types::Sample; @@ -124,6 +125,7 @@ async fn main() -> anyhow::Result<()> { registry.register_producer(producer).unwrap(); let server_info = ProducerEndpoint { id: registry.producer_id(), + kind: Some(ProducerKind::Service), address: args.address, base_route: "/collect".to_string(), interval: Duration::from_secs(10), diff --git a/schema/crdb/12.0.0/up01.sql b/schema/crdb/12.0.0/up01.sql new file mode 100644 index 0000000000..36f2f810ca --- /dev/null +++ b/schema/crdb/12.0.0/up01.sql @@ -0,0 +1,27 @@ +/* + * Drop the entire metric producer assignment table. + * + * Programs wishing to produce metrics need to register with Nexus. That creates + * an assignment of the producer to a collector, which is recorded in this + * table. That registration is idempotent, and every _current_ producer will + * register when it restarts. For example, `dpd` includes a task that registers + * with Nexus, so each time it (re)starts, that registration will happen. + * + * With that in mind, dropping this table is safe, because as of today, all + * software updates require that the whole control plane be offline. We know + * that these entries will be recreated shortly, as the services registering + * producers are restarted. + * + * The current metric producers are: + * + * - `dpd` + * - Each `nexus` instance + * - Each `sled-agent` instance + * - The Propolis server for each guest Instance + * + * Another reason we're dropping the table is because we will add a new column, + * `kind`, in a following update file, but we don't have a good way to backfill + * that value for existing rows.
We also don't need to, because these services + * will soon reregister, and provide us with a value. + */ +DROP TABLE IF EXISTS omicron.public.metric_producer; diff --git a/schema/crdb/12.0.0/up02.sql b/schema/crdb/12.0.0/up02.sql new file mode 100644 index 0000000000..96c4c5d6b4 --- /dev/null +++ b/schema/crdb/12.0.0/up02.sql @@ -0,0 +1,11 @@ +/* + * The kind of metric producer each record corresponds to. + */ +CREATE TYPE IF NOT EXISTS omicron.public.producer_kind AS ENUM ( + -- A sled agent for an entry in the sled table. + 'sled_agent', + -- A service in the omicron.public.service table + 'service', + -- A Propolis VMM for an instance in the omicron.public.instance table + 'instance' +); diff --git a/schema/crdb/12.0.0/up03.sql b/schema/crdb/12.0.0/up03.sql new file mode 100644 index 0000000000..fc57667541 --- /dev/null +++ b/schema/crdb/12.0.0/up03.sql @@ -0,0 +1,17 @@ +/* + * Recreate the metric producer assignment table. + * + * Note that we're adding the `kind` column here, using the new enum in the + * previous update SQL file. + */ +CREATE TABLE IF NOT EXISTS omicron.public.metric_producer ( + id UUID PRIMARY KEY, + time_created TIMESTAMPTZ NOT NULL, + time_modified TIMESTAMPTZ NOT NULL, + kind omicron.public.producer_kind, + ip INET NOT NULL, + port INT4 CHECK (port BETWEEN 0 AND 65535) NOT NULL, + interval FLOAT NOT NULL, + base_route STRING(512) NOT NULL, + oximeter_id UUID NOT NULL +); diff --git a/schema/crdb/12.0.0/up04.sql b/schema/crdb/12.0.0/up04.sql new file mode 100644 index 0000000000..cad33ddcf2 --- /dev/null +++ b/schema/crdb/12.0.0/up04.sql @@ -0,0 +1,8 @@ +/* + * Recreate index to support looking up a producer by its assigned oximeter + * collector ID. + */ +CREATE UNIQUE INDEX IF NOT EXISTS lookup_producer_by_oximeter ON omicron.public.metric_producer ( + oximeter_id, + id +); diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index a74cabfe6e..7bd83439e8 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -1108,6 +1108,18 @@ CREATE TABLE IF NOT EXISTS omicron.public.oximeter ( port INT4 CHECK (port BETWEEN 0 AND 65535) NOT NULL ); +/* + * The kind of metric producer each record corresponds to. + */ +CREATE TYPE IF NOT EXISTS omicron.public.producer_kind AS ENUM ( + -- A sled agent for an entry in the sled table. + 'sled_agent', + -- A service in the omicron.public.service table + 'service', + -- A Propolis VMM for an instance in the omicron.public.instance table + 'instance' +); + /* * Information about registered metric producers. 
*/ @@ -1115,6 +1127,7 @@ CREATE TABLE IF NOT EXISTS omicron.public.metric_producer ( id UUID PRIMARY KEY, time_created TIMESTAMPTZ NOT NULL, time_modified TIMESTAMPTZ NOT NULL, + kind omicron.public.producer_kind, ip INET NOT NULL, port INT4 CHECK (port BETWEEN 0 AND 65535) NOT NULL, interval FLOAT NOT NULL, @@ -2906,7 +2919,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - ( TRUE, NOW(), NOW(), '11.0.0', NULL) + ( TRUE, NOW(), NOW(), '12.0.0', NULL) ON CONFLICT DO NOTHING; COMMIT; diff --git a/sled-agent/src/sim/disk.rs b/sled-agent/src/sim/disk.rs index 0f08289b74..f131fd2bff 100644 --- a/sled-agent/src/sim/disk.rs +++ b/sled-agent/src/sim/disk.rs @@ -17,6 +17,7 @@ use omicron_common::api::external::Generation; use omicron_common::api::external::ResourceType; use omicron_common::api::internal::nexus::DiskRuntimeState; use omicron_common::api::internal::nexus::ProducerEndpoint; +use omicron_common::api::internal::nexus::ProducerKind; use oximeter_producer::LogConfig; use oximeter_producer::Server as ProducerServer; use propolis_client::types::DiskAttachmentState as PropolisDiskState; @@ -168,6 +169,7 @@ impl SimDisk { let producer_address = SocketAddr::new(Ipv6Addr::LOCALHOST.into(), 0); let server_info = ProducerEndpoint { id, + kind: Some(ProducerKind::SledAgent), address: producer_address, base_route: "/collect".to_string(), interval: Duration::from_millis(200), diff --git a/sled-agent/src/sled_agent.rs b/sled-agent/src/sled_agent.rs index 9826a987d4..cfa8c5d7ca 100644 --- a/sled-agent/src/sled_agent.rs +++ b/sled-agent/src/sled_agent.rs @@ -43,6 +43,7 @@ use omicron_common::address::{ }; use omicron_common::api::external::Vni; use omicron_common::api::internal::nexus::ProducerEndpoint; +use omicron_common::api::internal::nexus::ProducerKind; use omicron_common::api::internal::nexus::{ SledInstanceState, VmmRuntimeState, }; @@ -504,6 +505,7 @@ impl SledAgent { // Nexus. This should not block progress here. let endpoint = ProducerEndpoint { id: request.body.id, + kind: Some(ProducerKind::SledAgent), address: sled_address.into(), base_route: String::from("/metrics/collect"), interval: crate::metrics::METRIC_COLLECTION_INTERVAL, From e3e99ee62e8c2d78a02bed5ab8925e39a07dddfb Mon Sep 17 00:00:00 2001 From: Rain Date: Mon, 20 Nov 2023 17:01:04 -0800 Subject: [PATCH 27/56] [nexus-db-model] separate out SledUpdate from Sled (#4533) `Sled` consists of several columns that aren't controlled by sled-agent, and we end up in this weird place where we have `Sled` instances that don't reflect reality. I'm working on adding a `provision_state` column which is controlled by the operator, and again for which sled-agent doesn't know. Clean this up by defining a new struct, `SledUpdate`, which only contains the columns sled-agent knows about. The other columns get defaults when `into_insertable` is called. 
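A rough usage sketch of the new shape, assuming only the `SledUpdate::new` and `sled_upsert` signatures shown in the diff below (`baseboard`, `hardware`, and `datastore` are placeholders for values the caller already has):

```rust
// Sketch only: sled-agent-reported data goes into a SledUpdate; the
// datastore fills in the columns sled-agent doesn't control.
let sled_update = SledUpdate::new(
    sled_id,      // Uuid reported by sled-agent
    sled_address, // SocketAddrV6 of the sled-agent API
    baseboard,    // SledBaseboard { serial_number, part_number, revision }
    hardware,     // SledSystemHardware { is_scrimlet, threads, ram, reservoir }
    rack_id,      // Uuid of the rack this sled belongs to
);
// sled_upsert converts this into a full Sled row via into_insertable(),
// which supplies defaults for rcgen, time_deleted, last_used_address, etc.
let sled: Sled = datastore.sled_upsert(sled_update).await?;
```
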
--- nexus/db-model/src/sled.rs | 133 +++++++++++++----- nexus/db-queries/src/db/datastore/mod.rs | 12 +- .../src/db/datastore/physical_disk.rs | 6 +- nexus/db-queries/src/db/datastore/rack.rs | 6 +- nexus/db-queries/src/db/datastore/sled.rs | 62 ++++---- nexus/src/app/sled.rs | 2 +- 6 files changed, 150 insertions(+), 71 deletions(-) diff --git a/nexus/db-model/src/sled.rs b/nexus/db-model/src/sled.rs index 5e059946ff..ba572901c6 100644 --- a/nexus/db-model/src/sled.rs +++ b/nexus/db-model/src/sled.rs @@ -62,38 +62,6 @@ pub struct Sled { } impl Sled { - pub fn new( - id: Uuid, - addr: SocketAddrV6, - baseboard: SledBaseboard, - hardware: SledSystemHardware, - rack_id: Uuid, - ) -> Self { - let last_used_address = { - let mut segments = addr.ip().segments(); - segments[7] += omicron_common::address::RSS_RESERVED_ADDRESSES; - ipv6::Ipv6Addr::from(Ipv6Addr::from(segments)) - }; - Self { - identity: SledIdentity::new(id), - time_deleted: None, - rcgen: Generation::new(), - rack_id, - is_scrimlet: hardware.is_scrimlet, - serial_number: baseboard.serial_number, - part_number: baseboard.part_number, - revision: baseboard.revision, - usable_hardware_threads: SqlU32::new( - hardware.usable_hardware_threads, - ), - usable_physical_ram: hardware.usable_physical_ram, - reservoir_size: hardware.reservoir_size, - ip: ipv6::Ipv6Addr::from(addr.ip()), - port: addr.port().into(), - last_used_address, - } - } - pub fn is_scrimlet(&self) -> bool { self.is_scrimlet } @@ -153,6 +121,107 @@ impl DatastoreCollectionConfig for Sled { type CollectionIdColumn = service::dsl::sled_id; } +/// Form of `Sled` used for updates from sled-agent. This is missing some +/// columns that are present in `Sled` because sled-agent doesn't control them. +#[derive(Debug, Clone)] +pub struct SledUpdate { + id: Uuid, + + pub rack_id: Uuid, + + is_scrimlet: bool, + serial_number: String, + part_number: String, + revision: i64, + + pub usable_hardware_threads: SqlU32, + pub usable_physical_ram: ByteCount, + pub reservoir_size: ByteCount, + + // ServiceAddress (Sled Agent). + pub ip: ipv6::Ipv6Addr, + pub port: SqlU16, +} + +impl SledUpdate { + pub fn new( + id: Uuid, + addr: SocketAddrV6, + baseboard: SledBaseboard, + hardware: SledSystemHardware, + rack_id: Uuid, + ) -> Self { + Self { + id, + rack_id, + is_scrimlet: hardware.is_scrimlet, + serial_number: baseboard.serial_number, + part_number: baseboard.part_number, + revision: baseboard.revision, + usable_hardware_threads: SqlU32::new( + hardware.usable_hardware_threads, + ), + usable_physical_ram: hardware.usable_physical_ram, + reservoir_size: hardware.reservoir_size, + ip: addr.ip().into(), + port: addr.port().into(), + } + } + + /// Converts self into a form used for inserts of new sleds into the + /// database. + /// + /// This form adds default values for fields that are not present in + /// `SledUpdate`. 
+ pub fn into_insertable(self) -> Sled { + let last_used_address = { + let mut segments = self.ip().segments(); + segments[7] += omicron_common::address::RSS_RESERVED_ADDRESSES; + ipv6::Ipv6Addr::from(Ipv6Addr::from(segments)) + }; + Sled { + identity: SledIdentity::new(self.id), + rcgen: Generation::new(), + time_deleted: None, + rack_id: self.rack_id, + is_scrimlet: self.is_scrimlet, + serial_number: self.serial_number, + part_number: self.part_number, + revision: self.revision, + usable_hardware_threads: self.usable_hardware_threads, + usable_physical_ram: self.usable_physical_ram, + reservoir_size: self.reservoir_size, + ip: self.ip, + port: self.port, + last_used_address, + } + } + + pub fn id(&self) -> Uuid { + self.id + } + + pub fn is_scrimlet(&self) -> bool { + self.is_scrimlet + } + + pub fn ip(&self) -> Ipv6Addr { + self.ip.into() + } + + pub fn address(&self) -> SocketAddrV6 { + self.address_with_port(self.port.into()) + } + + pub fn address_with_port(&self, port: u16) -> SocketAddrV6 { + SocketAddrV6::new(self.ip(), port, 0, 0) + } + + pub fn serial_number(&self) -> &str { + &self.serial_number + } +} + /// A set of constraints that can be placed on operations that select a sled. #[derive(Debug)] pub struct SledReservationConstraints { diff --git a/nexus/db-queries/src/db/datastore/mod.rs b/nexus/db-queries/src/db/datastore/mod.rs index 8be3386183..0612b960c9 100644 --- a/nexus/db-queries/src/db/datastore/mod.rs +++ b/nexus/db-queries/src/db/datastore/mod.rs @@ -371,8 +371,8 @@ mod test { use crate::db::model::{ BlockSize, ComponentUpdate, ComponentUpdateIdentity, ConsoleSession, Dataset, DatasetKind, ExternalIp, PhysicalDisk, PhysicalDiskKind, - Project, Rack, Region, Service, ServiceKind, SiloUser, Sled, - SledBaseboard, SledSystemHardware, SshKey, SystemUpdate, + Project, Rack, Region, Service, ServiceKind, SiloUser, SledBaseboard, + SledSystemHardware, SledUpdate, SshKey, SystemUpdate, UpdateableComponentType, VpcSubnet, Zpool, }; use crate::db::queries::vpc_subnet::FilterConflictingVpcSubnetRangesQuery; @@ -599,14 +599,14 @@ mod test { let rack_id = Uuid::new_v4(); let sled_id = Uuid::new_v4(); - let sled = Sled::new( + let sled_update = SledUpdate::new( sled_id, bogus_addr, sled_baseboard_for_test(), sled_system_hardware_for_test(), rack_id, ); - datastore.sled_upsert(sled).await.unwrap(); + datastore.sled_upsert(sled_update).await.unwrap(); sled_id } @@ -1205,7 +1205,7 @@ mod test { let rack_id = Uuid::new_v4(); let addr1 = "[fd00:1de::1]:12345".parse().unwrap(); let sled1_id = "0de4b299-e0b4-46f0-d528-85de81a7095f".parse().unwrap(); - let sled1 = db::model::Sled::new( + let sled1 = db::model::SledUpdate::new( sled1_id, addr1, sled_baseboard_for_test(), @@ -1216,7 +1216,7 @@ mod test { let addr2 = "[fd00:1df::1]:12345".parse().unwrap(); let sled2_id = "66285c18-0c79-43e0-e54f-95271f271314".parse().unwrap(); - let sled2 = db::model::Sled::new( + let sled2 = db::model::SledUpdate::new( sled2_id, addr2, sled_baseboard_for_test(), diff --git a/nexus/db-queries/src/db/datastore/physical_disk.rs b/nexus/db-queries/src/db/datastore/physical_disk.rs index 3c83b91d21..ecb583ee29 100644 --- a/nexus/db-queries/src/db/datastore/physical_disk.rs +++ b/nexus/db-queries/src/db/datastore/physical_disk.rs @@ -141,7 +141,7 @@ mod test { use crate::db::datastore::test::{ sled_baseboard_for_test, sled_system_hardware_for_test, }; - use crate::db::model::{PhysicalDiskKind, Sled}; + use crate::db::model::{PhysicalDiskKind, Sled, SledUpdate}; use dropshot::PaginationOrder; use 
nexus_test_utils::db::test_setup_database; use nexus_types::identity::Asset; @@ -153,14 +153,14 @@ mod test { let sled_id = Uuid::new_v4(); let addr = SocketAddrV6::new(Ipv6Addr::LOCALHOST, 0, 0, 0); let rack_id = Uuid::new_v4(); - let sled = Sled::new( + let sled_update = SledUpdate::new( sled_id, addr, sled_baseboard_for_test(), sled_system_hardware_for_test(), rack_id, ); - db.sled_upsert(sled) + db.sled_upsert(sled_update) .await .expect("Could not upsert sled during test prep") } diff --git a/nexus/db-queries/src/db/datastore/rack.rs b/nexus/db-queries/src/db/datastore/rack.rs index ae982d86f8..2cc5880470 100644 --- a/nexus/db-queries/src/db/datastore/rack.rs +++ b/nexus/db-queries/src/db/datastore/rack.rs @@ -680,7 +680,7 @@ mod test { use crate::db::model::Sled; use async_bb8_diesel::AsyncSimpleConnection; use internal_params::DnsRecord; - use nexus_db_model::{DnsGroup, InitialDnsGroup}; + use nexus_db_model::{DnsGroup, InitialDnsGroup, SledUpdate}; use nexus_test_utils::db::test_setup_database; use nexus_types::external_api::shared::SiloIdentityMode; use nexus_types::identity::Asset; @@ -870,14 +870,14 @@ mod test { async fn create_test_sled(db: &DataStore) -> Sled { let sled_id = Uuid::new_v4(); let addr = SocketAddrV6::new(Ipv6Addr::LOCALHOST, 0, 0, 0); - let sled = Sled::new( + let sled_update = SledUpdate::new( sled_id, addr, sled_baseboard_for_test(), sled_system_hardware_for_test(), rack_id(), ); - db.sled_upsert(sled) + db.sled_upsert(sled_update) .await .expect("Could not upsert sled during test prep") } diff --git a/nexus/db-queries/src/db/datastore/sled.rs b/nexus/db-queries/src/db/datastore/sled.rs index f4f5188057..130c36b496 100644 --- a/nexus/db-queries/src/db/datastore/sled.rs +++ b/nexus/db-queries/src/db/datastore/sled.rs @@ -11,9 +11,9 @@ use crate::db; use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::error::TransactionError; -use crate::db::identity::Asset; use crate::db::model::Sled; use crate::db::model::SledResource; +use crate::db::model::SledUpdate; use crate::db::pagination::paginated; use async_bb8_diesel::AsyncConnection; use async_bb8_diesel::AsyncRunQueryDsl; @@ -29,21 +29,25 @@ use uuid::Uuid; impl DataStore { /// Stores a new sled in the database. - pub async fn sled_upsert(&self, sled: Sled) -> CreateResult { + pub async fn sled_upsert( + &self, + sled_update: SledUpdate, + ) -> CreateResult { use db::schema::sled::dsl; diesel::insert_into(dsl::sled) - .values(sled.clone()) + .values(sled_update.clone().into_insertable()) .on_conflict(dsl::id) .do_update() .set(( dsl::time_modified.eq(Utc::now()), - dsl::ip.eq(sled.ip), - dsl::port.eq(sled.port), - dsl::rack_id.eq(sled.rack_id), - dsl::is_scrimlet.eq(sled.is_scrimlet()), - dsl::usable_hardware_threads.eq(sled.usable_hardware_threads), - dsl::usable_physical_ram.eq(sled.usable_physical_ram), - dsl::reservoir_size.eq(sled.reservoir_size), + dsl::ip.eq(sled_update.ip), + dsl::port.eq(sled_update.port), + dsl::rack_id.eq(sled_update.rack_id), + dsl::is_scrimlet.eq(sled_update.is_scrimlet()), + dsl::usable_hardware_threads + .eq(sled_update.usable_hardware_threads), + dsl::usable_physical_ram.eq(sled_update.usable_physical_ram), + dsl::reservoir_size.eq(sled_update.reservoir_size), )) .returning(Sled::as_returning()) .get_result_async(&*self.pool_connection_unauthorized().await?) 
@@ -53,7 +57,7 @@ impl DataStore { e, ErrorHandler::Conflict( ResourceType::Sled, - &sled.id().to_string(), + &sled_update.id().to_string(), ), ) }) @@ -241,7 +245,7 @@ mod test { let sled_id = Uuid::new_v4(); let addr = SocketAddrV6::new(Ipv6Addr::LOCALHOST, 0, 0, 0); - let mut sled = Sled::new( + let mut sled_update = SledUpdate::new( sled_id, addr, sled_baseboard_for_test(), @@ -249,44 +253,50 @@ mod test { rack_id(), ); let observed_sled = datastore - .sled_upsert(sled.clone()) + .sled_upsert(sled_update.clone()) .await .expect("Could not upsert sled during test prep"); assert_eq!( observed_sled.usable_hardware_threads, - sled.usable_hardware_threads + sled_update.usable_hardware_threads + ); + assert_eq!( + observed_sled.usable_physical_ram, + sled_update.usable_physical_ram ); - assert_eq!(observed_sled.usable_physical_ram, sled.usable_physical_ram); - assert_eq!(observed_sled.reservoir_size, sled.reservoir_size); + assert_eq!(observed_sled.reservoir_size, sled_update.reservoir_size); // Modify the sizes of hardware - sled.usable_hardware_threads = - SqlU32::new(sled.usable_hardware_threads.0 + 1); + sled_update.usable_hardware_threads = + SqlU32::new(sled_update.usable_hardware_threads.0 + 1); const MIB: u64 = 1024 * 1024; - sled.usable_physical_ram = ByteCount::from( + sled_update.usable_physical_ram = ByteCount::from( external::ByteCount::try_from( - sled.usable_physical_ram.0.to_bytes() + MIB, + sled_update.usable_physical_ram.0.to_bytes() + MIB, ) .unwrap(), ); - sled.reservoir_size = ByteCount::from( + sled_update.reservoir_size = ByteCount::from( external::ByteCount::try_from( - sled.reservoir_size.0.to_bytes() + MIB, + sled_update.reservoir_size.0.to_bytes() + MIB, ) .unwrap(), ); // Test that upserting the sled propagates those changes to the DB. 
let observed_sled = datastore - .sled_upsert(sled.clone()) + .sled_upsert(sled_update.clone()) .await .expect("Could not upsert sled during test prep"); assert_eq!( observed_sled.usable_hardware_threads, - sled.usable_hardware_threads + sled_update.usable_hardware_threads + ); + assert_eq!( + observed_sled.usable_physical_ram, + sled_update.usable_physical_ram ); - assert_eq!(observed_sled.usable_physical_ram, sled.usable_physical_ram); - assert_eq!(observed_sled.reservoir_size, sled.reservoir_size); + assert_eq!(observed_sled.reservoir_size, sled_update.reservoir_size); db.cleanup().await.unwrap(); logctx.cleanup_successful(); diff --git a/nexus/src/app/sled.rs b/nexus/src/app/sled.rs index da89e7e25a..8189c0a93d 100644 --- a/nexus/src/app/sled.rs +++ b/nexus/src/app/sled.rs @@ -51,7 +51,7 @@ impl super::Nexus { SledRole::Scrimlet => true, }; - let sled = db::model::Sled::new( + let sled = db::model::SledUpdate::new( id, info.sa_address, db::model::SledBaseboard { From 39512b7966eed7032d584302c94e507776bacc5d Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Tue, 21 Nov 2023 05:24:41 +0000 Subject: [PATCH 28/56] Update taiki-e/install-action digest to 8f354f3 (#4537) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [taiki-e/install-action](https://togithub.com/taiki-e/install-action) | action | digest | [`ccc14bd` -> `8f354f3`](https://togithub.com/taiki-e/install-action/compare/ccc14bd...8f354f3) | --- ### Configuration 📅 **Schedule**: Branch creation - "after 8pm,before 6am" in timezone America/Los_Angeles, Automerge - "after 8pm,before 6am" in timezone America/Los_Angeles. 🚦 **Automerge**: Enabled. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [Renovate Bot](https://togithub.com/renovatebot/renovate). 
Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- .github/workflows/hakari.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/hakari.yml b/.github/workflows/hakari.yml index cc67b91fce..d79c836fba 100644 --- a/.github/workflows/hakari.yml +++ b/.github/workflows/hakari.yml @@ -24,7 +24,7 @@ jobs: with: toolchain: stable - name: Install cargo-hakari - uses: taiki-e/install-action@ccc14bdc8d34cddf54e4f9fb2da0c208427207a3 # v2 + uses: taiki-e/install-action@8f354f35e51028c902e8ab954045e37739acf562 # v2 with: tool: cargo-hakari - name: Check workspace-hack Cargo.toml is up-to-date From aee9602d50d96d168a3308a661a3ad2b5c5c64c2 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Mon, 20 Nov 2023 22:15:41 -0800 Subject: [PATCH 29/56] Update Rust crate fs-err to 2.11.0 (#4538) Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c8cfe908c1..7c30892c8c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2291,9 +2291,9 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" [[package]] name = "fs-err" -version = "2.10.0" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5fd9bcbe8b1087cbd395b51498c01bc997cef73e778a80b77a811af5e2d29f" +checksum = "88a41f105fe1d5b6b34b2055e3dc59bb79b46b48b2040b9e6c7b4b5de097aa41" dependencies = [ "autocfg", ] diff --git a/Cargo.toml b/Cargo.toml index b18b20aec7..7aa6482bf2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -193,7 +193,7 @@ filetime = "0.2.22" flate2 = "1.0.28" flume = "0.11.0" foreign-types = "0.3.2" -fs-err = "2.10.0" +fs-err = "2.11.0" futures = "0.3.29" gateway-client = { path = "clients/gateway-client" } gateway-messages = { git = "https://github.com/oxidecomputer/management-gateway-service", rev = "2739c18e80697aa6bc235c935176d14b4d757ee9", default-features = false, features = ["std"] } From 74120386aca7de0f9f6d41428cc21e543d795e37 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Tue, 21 Nov 2023 06:51:31 +0000 Subject: [PATCH 30/56] Update Rust crate rpassword to 7.3.1 (#4539) Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- Cargo.lock | 6 +++--- Cargo.toml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7c30892c8c..82dd13e0ab 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6646,13 +6646,13 @@ dependencies = [ [[package]] name = "rpassword" -version = "7.2.0" +version = "7.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6678cf63ab3491898c0d021b493c94c9b221d91295294a2a5746eacbe5928322" +checksum = "80472be3c897911d0137b2d2b9055faf6eeac5b14e324073d83bc17b191d7e3f" dependencies = [ "libc", "rtoolbox", - "winapi", + "windows-sys 0.48.0", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 7aa6482bf2..0d0bf07abf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -304,7 +304,7 @@ regex = "1.10.2" regress = "0.7.1" reqwest = { version = "0.11", default-features = false } ring = "0.16" -rpassword = "7.2.0" +rpassword = "7.3.1" rstest = "0.18.2" rustfmt-wrapper = "0.2" rustls = "0.21.9" From 837f646a0f04d821af46384608a346487549e5d3 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" 
<146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Tue, 21 Nov 2023 00:53:06 -0800 Subject: [PATCH 31/56] Update Rust crate tokio to 1.34.0 (#4540) --- Cargo.lock | 24 ++++++++++++------------ Cargo.toml | 2 +- workspace-hack/Cargo.toml | 20 ++++++++++---------- 3 files changed, 23 insertions(+), 23 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 82dd13e0ab..cc2daa0e01 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2501,7 +2501,7 @@ dependencies = [ "serde", "serde-big-array 0.5.1", "slog", - "socket2 0.5.4", + "socket2 0.5.5", "string_cache", "thiserror", "tlvc 0.3.1 (git+https://github.com/oxidecomputer/tlvc.git?branch=main)", @@ -3354,7 +3354,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.4", + "socket2 0.5.5", "widestring", "windows-sys 0.48.0", "winreg", @@ -3835,9 +3835,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" +checksum = "3dce281c5e46beae905d4de1870d8b1509a9142b62eedf18b443b011ca8343d0" dependencies = [ "libc", "log", @@ -7778,9 +7778,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.4" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4031e820eb552adee9295814c0ced9e5cf38ddf1e8b7d566d6de8e2538ea989e" +checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" dependencies = [ "libc", "windows-sys 0.48.0", @@ -8435,9 +8435,9 @@ dependencies = [ [[package]] name = "tokio" -version = "1.33.0" +version = "1.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f38200e3ef7995e5ef13baec2f432a6da0aa9ac495b2c0e8f3b7eec2c92d653" +checksum = "d0c014766411e834f7af5b8f4cf46257aab4036ca95e9d2c144a10f59ad6f5b9" dependencies = [ "backtrace", "bytes", @@ -8447,16 +8447,16 @@ dependencies = [ "parking_lot 0.12.1", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.4", + "socket2 0.5.5", "tokio-macros", "windows-sys 0.48.0", ] [[package]] name = "tokio-macros" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" +checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", @@ -8493,7 +8493,7 @@ dependencies = [ "postgres-protocol", "postgres-types", "rand 0.8.5", - "socket2 0.5.4", + "socket2 0.5.5", "tokio", "tokio-util", "whoami", diff --git a/Cargo.toml b/Cargo.toml index 0d0bf07abf..881bbf1cee 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -363,7 +363,7 @@ textwrap = "0.16.0" test-strategy = "0.3.1" thiserror = "1.0" tofino = { git = "http://github.com/oxidecomputer/tofino", branch = "main" } -tokio = "1.33.0" +tokio = "1.34.0" tokio-postgres = { version = "0.7", features = [ "with-chrono-0_4", "with-uuid-1" ] } tokio-stream = "0.1.14" tokio-tungstenite = "0.18" diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index 47ea83f8f2..7aad62ee38 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -92,7 +92,7 @@ subtle = { version = "2.5.0" } syn-dff4ba8e3ae991db = { package = "syn", version = "1.0.109", features = ["extra-traits", "fold", "full", "visit"] } syn-f595c2ba2a3f28df = { package = "syn", version = 
"2.0.32", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } time = { version = "0.3.27", features = ["formatting", "local-offset", "macros", "parsing"] } -tokio = { version = "1.33.0", features = ["full", "test-util"] } +tokio = { version = "1.34.0", features = ["full", "test-util"] } tokio-postgres = { version = "0.7.10", features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } tokio-stream = { version = "0.1.14", features = ["net"] } tokio-util = { version = "0.7.10", features = ["codec", "io-util"] } @@ -188,7 +188,7 @@ syn-dff4ba8e3ae991db = { package = "syn", version = "1.0.109", features = ["extr syn-f595c2ba2a3f28df = { package = "syn", version = "2.0.32", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } time = { version = "0.3.27", features = ["formatting", "local-offset", "macros", "parsing"] } time-macros = { version = "0.2.13", default-features = false, features = ["formatting", "parsing"] } -tokio = { version = "1.33.0", features = ["full", "test-util"] } +tokio = { version = "1.34.0", features = ["full", "test-util"] } tokio-postgres = { version = "0.7.10", features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } tokio-stream = { version = "0.1.14", features = ["net"] } tokio-util = { version = "0.7.10", features = ["codec", "io-util"] } @@ -207,49 +207,49 @@ zip = { version = "0.6.6", default-features = false, features = ["bzip2", "defla [target.x86_64-unknown-linux-gnu.dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } hyper-rustls = { version = "0.24.2" } -mio = { version = "0.8.8", features = ["net", "os-ext"] } +mio = { version = "0.8.9", features = ["net", "os-ext"] } once_cell = { version = "1.18.0", features = ["unstable"] } rustix = { version = "0.38.9", features = ["fs", "termios"] } [target.x86_64-unknown-linux-gnu.build-dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } hyper-rustls = { version = "0.24.2" } -mio = { version = "0.8.8", features = ["net", "os-ext"] } +mio = { version = "0.8.9", features = ["net", "os-ext"] } once_cell = { version = "1.18.0", features = ["unstable"] } rustix = { version = "0.38.9", features = ["fs", "termios"] } [target.x86_64-apple-darwin.dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } hyper-rustls = { version = "0.24.2" } -mio = { version = "0.8.8", features = ["net", "os-ext"] } +mio = { version = "0.8.9", features = ["net", "os-ext"] } once_cell = { version = "1.18.0", features = ["unstable"] } rustix = { version = "0.38.9", features = ["fs", "termios"] } [target.x86_64-apple-darwin.build-dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } hyper-rustls = { version = "0.24.2" } -mio = { version = "0.8.8", features = ["net", "os-ext"] } +mio = { version = "0.8.9", features = ["net", "os-ext"] } once_cell = { version = "1.18.0", features = ["unstable"] } rustix = { version = "0.38.9", features = ["fs", "termios"] } [target.aarch64-apple-darwin.dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } hyper-rustls = { version = "0.24.2" } -mio = { version = "0.8.8", features = ["net", "os-ext"] } +mio = { version = "0.8.9", features = ["net", "os-ext"] } once_cell = { version = 
"1.18.0", features = ["unstable"] } rustix = { version = "0.38.9", features = ["fs", "termios"] } [target.aarch64-apple-darwin.build-dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } hyper-rustls = { version = "0.24.2" } -mio = { version = "0.8.8", features = ["net", "os-ext"] } +mio = { version = "0.8.9", features = ["net", "os-ext"] } once_cell = { version = "1.18.0", features = ["unstable"] } rustix = { version = "0.38.9", features = ["fs", "termios"] } [target.x86_64-unknown-illumos.dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } hyper-rustls = { version = "0.24.2" } -mio = { version = "0.8.8", features = ["net", "os-ext"] } +mio = { version = "0.8.9", features = ["net", "os-ext"] } once_cell = { version = "1.18.0", features = ["unstable"] } rustix = { version = "0.38.9", features = ["fs", "termios"] } toml_datetime = { version = "0.6.5", default-features = false, features = ["serde"] } @@ -258,7 +258,7 @@ toml_edit-cdcf2f9584511fe6 = { package = "toml_edit", version = "0.19.15", featu [target.x86_64-unknown-illumos.build-dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } hyper-rustls = { version = "0.24.2" } -mio = { version = "0.8.8", features = ["net", "os-ext"] } +mio = { version = "0.8.9", features = ["net", "os-ext"] } once_cell = { version = "1.18.0", features = ["unstable"] } rustix = { version = "0.38.9", features = ["fs", "termios"] } toml_datetime = { version = "0.6.5", default-features = false, features = ["serde"] } From 828021fc023460a7be9ad628ce5ff672b672e461 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Tue, 21 Nov 2023 00:53:40 -0800 Subject: [PATCH 32/56] Update Rust crate uuid to 1.6.1 (#4541) --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- workspace-hack/Cargo.toml | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cc2daa0e01..b324f4919b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9206,9 +9206,9 @@ checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" [[package]] name = "uuid" -version = "1.5.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88ad59a7560b41a70d191093a945f0b87bc1deeda46fb237479708a1d6b6cdfc" +checksum = "5e395fcf16a7a3d8127ec99782007af141946b4795001f876d54fb0d55978560" dependencies = [ "getrandom 0.2.10", "serde", diff --git a/Cargo.toml b/Cargo.toml index 881bbf1cee..fb220ba53d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -382,7 +382,7 @@ tufaceous-lib = { path = "tufaceous-lib" } unicode-width = "0.1.11" update-engine = { path = "update-engine" } usdt = "0.3" -uuid = { version = "1.5.0", features = ["serde", "v4"] } +uuid = { version = "1.6.1", features = ["serde", "v4"] } walkdir = "2.4" wicket = { path = "wicket" } wicket-common = { path = "wicket-common" } diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index 7aad62ee38..1a289bd0cb 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -103,7 +103,7 @@ trust-dns-proto = { version = "0.22.0" } unicode-bidi = { version = "0.3.13" } unicode-normalization = { version = "0.1.22" } usdt = { version = "0.3.5" } -uuid = { version = "1.5.0", features = ["serde", "v4"] } +uuid = { version = "1.6.1", features = ["serde", "v4"] } yasna = { 
version = "0.5.2", features = ["bit-vec", "num-bigint", "std", "time"] } zeroize = { version = "1.6.0", features = ["std", "zeroize_derive"] } zip = { version = "0.6.6", default-features = false, features = ["bzip2", "deflate"] } @@ -199,7 +199,7 @@ trust-dns-proto = { version = "0.22.0" } unicode-bidi = { version = "0.3.13" } unicode-normalization = { version = "0.1.22" } usdt = { version = "0.3.5" } -uuid = { version = "1.5.0", features = ["serde", "v4"] } +uuid = { version = "1.6.1", features = ["serde", "v4"] } yasna = { version = "0.5.2", features = ["bit-vec", "num-bigint", "std", "time"] } zeroize = { version = "1.6.0", features = ["std", "zeroize_derive"] } zip = { version = "0.6.6", default-features = false, features = ["bzip2", "deflate"] } From 745eac2d0dba169824c11d57be681f1ec4f2ccf4 Mon Sep 17 00:00:00 2001 From: Ryan Goodfellow Date: Tue, 21 Nov 2023 07:57:49 -0800 Subject: [PATCH 33/56] Fix #4509 and #4512 (#4528) --- nexus/db-model/src/schema.rs | 5 + .../src/db/datastore/switch_port.rs | 130 +++- nexus/src/app/sagas/mod.rs | 1 + .../app/sagas/switch_port_settings_apply.rs | 646 ++---------------- .../app/sagas/switch_port_settings_clear.rs | 51 +- .../app/sagas/switch_port_settings_common.rs | 577 ++++++++++++++++ nexus/tests/integration_tests/switch_port.rs | 15 + 7 files changed, 827 insertions(+), 598 deletions(-) create mode 100644 nexus/src/app/sagas/switch_port_settings_common.rs diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index e7d625e854..960b53873a 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -1329,3 +1329,8 @@ allow_tables_to_appear_in_same_query!( switch_port, switch_port_settings_route_config ); + +allow_tables_to_appear_in_same_query!( + switch_port, + switch_port_settings_bgp_peer_config +); diff --git a/nexus/db-queries/src/db/datastore/switch_port.rs b/nexus/db-queries/src/db/datastore/switch_port.rs index f301750ee9..d7319347f0 100644 --- a/nexus/db-queries/src/db/datastore/switch_port.rs +++ b/nexus/db-queries/src/db/datastore/switch_port.rs @@ -23,8 +23,8 @@ use crate::db::pagination::paginated; use async_bb8_diesel::{AsyncConnection, AsyncRunQueryDsl}; use diesel::result::Error as DieselError; use diesel::{ - ExpressionMethods, JoinOnDsl, NullableExpressionMethods, QueryDsl, - SelectableHelper, + CombineDsl, ExpressionMethods, JoinOnDsl, NullableExpressionMethods, + QueryDsl, SelectableHelper, }; use nexus_types::external_api::params; use omicron_common::api::external::http_pagination::PaginatedBy; @@ -1110,6 +1110,7 @@ impl DataStore { ) -> ListResultVec { use db::schema::{ switch_port::dsl as switch_port_dsl, + switch_port_settings_bgp_peer_config::dsl as bgp_peer_config_dsl, switch_port_settings_route_config::dsl as route_config_dsl, }; @@ -1126,6 +1127,18 @@ impl DataStore { // pagination in the future, or maybe a way to constrain the query to // a rack? 
.limit(64) + .union( + switch_port_dsl::switch_port + .filter(switch_port_dsl::port_settings_id.is_not_null()) + .inner_join( + bgp_peer_config_dsl::switch_port_settings_bgp_peer_config + .on(switch_port_dsl::port_settings_id + .eq(bgp_peer_config_dsl::port_settings_id.nullable()), + ), + ) + .select(SwitchPort::as_select()) + .limit(64), + ) .load_async::( &*self.pool_connection_authorized(opctx).await?, ) @@ -1133,3 +1146,116 @@ impl DataStore { .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } } + +#[cfg(test)] +mod test { + use crate::db::datastore::{datastore_test, UpdatePrecondition}; + use nexus_test_utils::db::test_setup_database; + use nexus_types::external_api::params::{ + BgpAnnounceSetCreate, BgpConfigCreate, BgpPeerConfig, SwitchPortConfig, + SwitchPortGeometry, SwitchPortSettingsCreate, + }; + use omicron_common::api::external::{ + IdentityMetadataCreateParams, Name, NameOrId, + }; + use omicron_test_utils::dev; + use std::collections::HashMap; + use uuid::Uuid; + + #[tokio::test] + async fn test_bgp_boundary_switches() { + let logctx = dev::test_setup_log("test_bgp_boundary_switches"); + let mut db = test_setup_database(&logctx.log).await; + let (opctx, datastore) = datastore_test(&logctx, &db).await; + + let rack_id: Uuid = + nexus_test_utils::RACK_UUID.parse().expect("parse uuid"); + let switch0: Name = "switch0".parse().expect("parse switch location"); + let qsfp0: Name = "qsfp0".parse().expect("parse qsfp0"); + + let port_result = datastore + .switch_port_create(&opctx, rack_id, switch0.into(), qsfp0.into()) + .await + .expect("switch port create"); + + let announce_set = BgpAnnounceSetCreate { + identity: IdentityMetadataCreateParams { + name: "test-announce-set".parse().unwrap(), + description: "test bgp announce set".into(), + }, + announcement: Vec::new(), + }; + + datastore.bgp_create_announce_set(&opctx, &announce_set).await.unwrap(); + + let bgp_config = BgpConfigCreate { + identity: IdentityMetadataCreateParams { + name: "test-bgp-config".parse().unwrap(), + description: "test bgp config".into(), + }, + asn: 47, + bgp_announce_set_id: NameOrId::Name( + "test-announce-set".parse().unwrap(), + ), + vrf: None, + }; + + datastore.bgp_config_set(&opctx, &bgp_config).await.unwrap(); + + let settings = SwitchPortSettingsCreate { + identity: IdentityMetadataCreateParams { + name: "test-settings".parse().unwrap(), + description: "test settings".into(), + }, + port_config: SwitchPortConfig { + geometry: SwitchPortGeometry::Qsfp28x1, + }, + groups: Vec::new(), + links: HashMap::new(), + interfaces: HashMap::new(), + routes: HashMap::new(), + bgp_peers: HashMap::from([( + "phy0".into(), + BgpPeerConfig { + bgp_announce_set: NameOrId::Name( + "test-announce-set".parse().unwrap(), + ), + bgp_config: NameOrId::Name( + "test-bgp-config".parse().unwrap(), + ), + interface_name: "qsfp0".into(), + addr: "192.168.1.1".parse().unwrap(), + hold_time: 0, + idle_hold_time: 0, + delay_open: 0, + connect_retry: 0, + keepalive: 0, + }, + )]), + addresses: HashMap::new(), + }; + + let settings_result = datastore + .switch_port_settings_create(&opctx, &settings, None) + .await + .unwrap(); + + datastore + .switch_port_set_settings_id( + &opctx, + port_result.id, + Some(settings_result.settings.identity.id), + UpdatePrecondition::DontCare, + ) + .await + .unwrap(); + + let uplink_ports = + datastore.switch_ports_with_uplinks(&opctx).await.unwrap(); + + assert_eq!(uplink_ports.len(), 1); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } +} diff --git 
a/nexus/src/app/sagas/mod.rs b/nexus/src/app/sagas/mod.rs index 5b1843be3d..89e1a10052 100644 --- a/nexus/src/app/sagas/mod.rs +++ b/nexus/src/app/sagas/mod.rs @@ -36,6 +36,7 @@ pub mod snapshot_create; pub mod snapshot_delete; pub mod switch_port_settings_apply; pub mod switch_port_settings_clear; +pub mod switch_port_settings_common; pub mod test_saga; pub mod volume_delete; pub mod volume_remove_rop; diff --git a/nexus/src/app/sagas/switch_port_settings_apply.rs b/nexus/src/app/sagas/switch_port_settings_apply.rs index 0c06d6ff83..aba62b6937 100644 --- a/nexus/src/app/sagas/switch_port_settings_apply.rs +++ b/nexus/src/app/sagas/switch_port_settings_apply.rs @@ -3,53 +3,32 @@ // file, You can obtain one at https://mozilla.org/MPL/2.0/. use super::{NexusActionContext, NEXUS_DPD_TAG}; -use crate::app::map_switch_zone_addrs; use crate::app::sagas::retry_until_known_result; +use crate::app::sagas::switch_port_settings_common::{ + api_to_dpd_port_settings, ensure_switch_port_bgp_settings, + ensure_switch_port_uplink, select_mg_client, switch_sled_agent, + write_bootstore_config, +}; use crate::app::sagas::{ declare_saga_actions, ActionRegistry, NexusSaga, SagaInitError, }; -use crate::Nexus; use anyhow::Error; use db::datastore::SwitchPortSettingsCombinedResult; -use dpd_client::types::{ - LinkCreate, LinkId, LinkSettings, PortFec, PortId, PortSettings, PortSpeed, - RouteSettingsV4, RouteSettingsV6, -}; -use dpd_client::{Ipv4Cidr, Ipv6Cidr}; -use internal_dns::ServiceName; -use ipnetwork::IpNetwork; -use mg_admin_client::types::Prefix4; -use mg_admin_client::types::{ApplyRequest, BgpPeerConfig}; -use nexus_db_model::{SwitchLinkFec, SwitchLinkSpeed, NETWORK_KEY}; -use nexus_db_queries::context::OpContext; +use dpd_client::types::PortId; +use nexus_db_model::NETWORK_KEY; use nexus_db_queries::db::datastore::UpdatePrecondition; use nexus_db_queries::{authn, db}; -use nexus_types::external_api::params; -use omicron_common::address::SLED_AGENT_PORT; use omicron_common::api::external::{self, NameOrId}; use omicron_common::api::internal::shared::{ ParseSwitchLocationError, SwitchLocation, }; use serde::{Deserialize, Serialize}; -use sled_agent_client::types::PortConfigV1; -use sled_agent_client::types::RouteConfig; -use sled_agent_client::types::{BgpConfig, EarlyNetworkConfig}; -use sled_agent_client::types::{ - BgpPeerConfig as OmicronBgpPeerConfig, HostPortConfig, -}; -use std::collections::HashMap; -use std::net::SocketAddrV6; -use std::net::{IpAddr, Ipv6Addr}; +use std::net::IpAddr; use std::str::FromStr; use std::sync::Arc; use steno::ActionError; use uuid::Uuid; -// This is more of an implementation detail of the BGP implementation. It -// defines the maximum time the peering engine will wait for external messages -// before breaking to check for shutdown conditions. 
-const BGP_SESSION_RESOLUTION: u64 = 100; - // switch port settings apply saga: input parameters #[derive(Debug, Deserialize, Serialize)] @@ -176,91 +155,6 @@ async fn spa_get_switch_port_settings( Ok(port_settings) } -pub(crate) fn api_to_dpd_port_settings( - settings: &SwitchPortSettingsCombinedResult, -) -> Result { - let mut dpd_port_settings = PortSettings { - links: HashMap::new(), - v4_routes: HashMap::new(), - v6_routes: HashMap::new(), - }; - - //TODO breakouts - let link_id = LinkId(0); - - for l in settings.links.iter() { - dpd_port_settings.links.insert( - link_id.to_string(), - LinkSettings { - params: LinkCreate { - autoneg: false, - lane: Some(LinkId(0)), - kr: false, - fec: match l.fec { - SwitchLinkFec::Firecode => PortFec::Firecode, - SwitchLinkFec::Rs => PortFec::Rs, - SwitchLinkFec::None => PortFec::None, - }, - speed: match l.speed { - SwitchLinkSpeed::Speed0G => PortSpeed::Speed0G, - SwitchLinkSpeed::Speed1G => PortSpeed::Speed1G, - SwitchLinkSpeed::Speed10G => PortSpeed::Speed10G, - SwitchLinkSpeed::Speed25G => PortSpeed::Speed25G, - SwitchLinkSpeed::Speed40G => PortSpeed::Speed40G, - SwitchLinkSpeed::Speed50G => PortSpeed::Speed50G, - SwitchLinkSpeed::Speed100G => PortSpeed::Speed100G, - SwitchLinkSpeed::Speed200G => PortSpeed::Speed200G, - SwitchLinkSpeed::Speed400G => PortSpeed::Speed400G, - }, - }, - //TODO won't work for breakouts - addrs: settings - .addresses - .iter() - .map(|a| a.address.ip()) - .collect(), - }, - ); - } - - for r in &settings.routes { - match &r.dst { - IpNetwork::V4(n) => { - let gw = match r.gw.ip() { - IpAddr::V4(gw) => gw, - IpAddr::V6(_) => { - return Err( - "IPv4 destination cannot have IPv6 nexthop".into() - ) - } - }; - dpd_port_settings.v4_routes.insert( - Ipv4Cidr { prefix: n.ip(), prefix_len: n.prefix() } - .to_string(), - vec![RouteSettingsV4 { link_id: link_id.0, nexthop: gw }], - ); - } - IpNetwork::V6(n) => { - let gw = match r.gw.ip() { - IpAddr::V6(gw) => gw, - IpAddr::V4(_) => { - return Err( - "IPv6 destination cannot have IPv4 nexthop".into() - ) - } - }; - dpd_port_settings.v6_routes.insert( - Ipv6Cidr { prefix: n.ip(), prefix_len: n.prefix() } - .to_string(), - vec![RouteSettingsV6 { link_id: link_id.0, nexthop: gw }], - ); - } - } - } - - Ok(dpd_port_settings) -} - async fn spa_ensure_switch_port_settings( sagactx: NexusActionContext, ) -> Result<(), ActionError> { @@ -380,101 +274,6 @@ async fn spa_undo_ensure_switch_port_settings( Ok(()) } -async fn spa_ensure_switch_port_bgp_settings( - sagactx: NexusActionContext, -) -> Result<(), ActionError> { - let settings = sagactx - .lookup::("switch_port_settings") - .map_err(|e| { - ActionError::action_failed(format!( - "lookup switch port settings: {e}" - )) - })?; - - ensure_switch_port_bgp_settings(sagactx, settings).await -} - -pub(crate) async fn ensure_switch_port_bgp_settings( - sagactx: NexusActionContext, - settings: SwitchPortSettingsCombinedResult, -) -> Result<(), ActionError> { - let osagactx = sagactx.user_data(); - let nexus = osagactx.nexus(); - let params = sagactx.saga_params::()?; - - let opctx = crate::context::op_context_for_saga_action( - &sagactx, - ¶ms.serialized_authn, - ); - let mg_client: Arc = - select_mg_client(&sagactx).await.map_err(|e| { - ActionError::action_failed(format!("select mg client: {e}")) - })?; - - let mut bgp_peer_configs = Vec::new(); - - for peer in settings.bgp_peers { - let config = nexus - .bgp_config_get(&opctx, peer.bgp_config_id.into()) - .await - .map_err(|e| { - ActionError::action_failed(format!("get bgp config: 
{e}")) - })?; - - let announcements = nexus - .bgp_announce_list( - &opctx, - ¶ms::BgpAnnounceSetSelector { - name_or_id: NameOrId::Id(config.bgp_announce_set_id), - }, - ) - .await - .map_err(|e| { - ActionError::action_failed(format!( - "get bgp announcements: {e}" - )) - })?; - - let mut prefixes = Vec::new(); - for a in &announcements { - let value = match a.network.ip() { - IpAddr::V4(value) => Ok(value), - IpAddr::V6(_) => Err(ActionError::action_failed( - "IPv6 announcement not yet supported".to_string(), - )), - }?; - prefixes.push(Prefix4 { value, length: a.network.prefix() }); - } - - let bpc = BgpPeerConfig { - asn: *config.asn, - name: format!("{}", peer.addr.ip()), //TODO user defined name? - host: format!("{}:179", peer.addr.ip()), - hold_time: peer.hold_time.0.into(), - idle_hold_time: peer.idle_hold_time.0.into(), - delay_open: peer.delay_open.0.into(), - connect_retry: peer.connect_retry.0.into(), - keepalive: peer.keepalive.0.into(), - resolution: BGP_SESSION_RESOLUTION, - originate: prefixes, - }; - - bgp_peer_configs.push(bpc); - } - - mg_client - .inner - .bgp_apply(&ApplyRequest { - peer_group: params.switch_port_name.clone(), - peers: bgp_peer_configs, - }) - .await - .map_err(|e| { - ActionError::action_failed(format!("apply bgp settings: {e}")) - })?; - - Ok(()) -} async fn spa_undo_ensure_switch_port_bgp_settings( sagactx: NexusActionContext, ) -> Result<(), Error> { @@ -497,9 +296,13 @@ async fn spa_undo_ensure_switch_port_bgp_settings( })?; let mg_client: Arc = - select_mg_client(&sagactx).await.map_err(|e| { - ActionError::action_failed(format!("select mg client (undo): {e}")) - })?; + select_mg_client(&sagactx, &opctx, params.switch_port_id) + .await + .map_err(|e| { + ActionError::action_failed(format!( + "select mg client (undo): {e}" + )) + })?; for peer in settings.bgp_peers { let config = nexus @@ -592,96 +395,39 @@ async fn spa_undo_ensure_switch_port_bootstore_network_settings( async fn spa_ensure_switch_port_uplink( sagactx: NexusActionContext, ) -> Result<(), ActionError> { - ensure_switch_port_uplink(sagactx, false, None).await + let params = sagactx.saga_params::()?; + let opctx = crate::context::op_context_for_saga_action( + &sagactx, + ¶ms.serialized_authn, + ); + ensure_switch_port_uplink( + sagactx, + &opctx, + false, + None, + params.switch_port_id, + params.switch_port_name, + ) + .await } async fn spa_undo_ensure_switch_port_uplink( sagactx: NexusActionContext, ) -> Result<(), Error> { - Ok(ensure_switch_port_uplink(sagactx, true, None).await?) -} - -pub(crate) async fn ensure_switch_port_uplink( - sagactx: NexusActionContext, - skip_self: bool, - inject: Option, -) -> Result<(), ActionError> { let params = sagactx.saga_params::()?; - let opctx = crate::context::op_context_for_saga_action( &sagactx, ¶ms.serialized_authn, ); - let osagactx = sagactx.user_data(); - let nexus = osagactx.nexus(); - - let switch_port = nexus - .get_switch_port(&opctx, params.switch_port_id) - .await - .map_err(|e| { - ActionError::action_failed(format!( - "get switch port for uplink: {e}" - )) - })?; - - let switch_location: SwitchLocation = - switch_port.switch_location.parse().map_err(|e| { - ActionError::action_failed(format!( - "get switch location for uplink: {e:?}", - )) - })?; - - let mut uplinks: Vec = Vec::new(); - - // The sled agent uplinks interface is an all or nothing interface, so we - // need to get all the uplink configs for all the ports. 
- let active_ports = - nexus.active_port_settings(&opctx).await.map_err(|e| { - ActionError::action_failed(format!( - "get active switch port settings: {e}" - )) - })?; - - for (port, info) in &active_ports { - // Since we are undoing establishing uplinks for the settings - // associated with this port we skip adding this ports uplinks - // to the list - effectively removing them. - if skip_self && port.id == switch_port.id { - continue; - } - uplinks.push(HostPortConfig { - port: port.port_name.clone(), - addrs: info.addresses.iter().map(|a| a.address).collect(), - }) - } - - if let Some(id) = inject { - let opctx = crate::context::op_context_for_saga_action( - &sagactx, - ¶ms.serialized_authn, - ); - let settings = nexus - .switch_port_settings_get(&opctx, &id.into()) - .await - .map_err(|e| { - ActionError::action_failed(format!( - "get switch port settings for injection: {e}" - )) - })?; - uplinks.push(HostPortConfig { - port: params.switch_port_name.clone(), - addrs: settings.addresses.iter().map(|a| a.address).collect(), - }) - } - - let sc = switch_sled_agent(switch_location, &sagactx).await?; - sc.uplink_ensure(&sled_agent_client::types::SwitchPorts { uplinks }) - .await - .map_err(|e| { - ActionError::action_failed(format!("ensure uplink: {e}")) - })?; - - Ok(()) + Ok(ensure_switch_port_uplink( + sagactx, + &opctx, + true, + None, + params.switch_port_id, + params.switch_port_name, + ) + .await?) } // a common route representation for dendrite and port settings @@ -767,307 +513,29 @@ pub(crate) async fn select_dendrite_client( Ok(dpd_client) } -pub(crate) async fn select_mg_client( - sagactx: &NexusActionContext, -) -> Result, ActionError> { - let osagactx = sagactx.user_data(); - let params = sagactx.saga_params::()?; - let nexus = osagactx.nexus(); - let opctx = crate::context::op_context_for_saga_action( - &sagactx, - ¶ms.serialized_authn, - ); - - let switch_port = nexus - .get_switch_port(&opctx, params.switch_port_id) - .await - .map_err(|e| { - ActionError::action_failed(format!( - "get switch port for mg client selection: {e}" - )) - })?; - - let switch_location: SwitchLocation = - switch_port.switch_location.parse().map_err( - |e: ParseSwitchLocationError| { - ActionError::action_failed(format!( - "get switch location for uplink: {e:?}", - )) - }, - )?; - - let mg_client: Arc = osagactx - .nexus() - .mg_clients - .get(&switch_location) - .ok_or_else(|| { - ActionError::action_failed(format!( - "requested switch not available: {switch_location}" - )) - })? 
- .clone(); - Ok(mg_client) -} - -pub(crate) async fn get_scrimlet_address( - location: SwitchLocation, - nexus: &Arc, -) -> Result { - /* TODO this depends on DNS entries only coming from RSS, it's broken - on the upgrade path - nexus - .resolver() - .await - .lookup_socket_v6(ServiceName::Scrimlet(location)) - .await - .map_err(|e| e.to_string()) - .map_err(|e| { - ActionError::action_failed(format!( - "scrimlet dns lookup failed {e}", - )) - }) - */ - let result = nexus - .resolver() - .await - .lookup_all_ipv6(ServiceName::Dendrite) - .await +async fn spa_ensure_switch_port_bgp_settings( + sagactx: NexusActionContext, +) -> Result<(), ActionError> { + let settings = sagactx + .lookup::("switch_port_settings") .map_err(|e| { ActionError::action_failed(format!( - "scrimlet dns lookup failed {e}", - )) - }); - - let mappings = match result { - Ok(addrs) => map_switch_zone_addrs(&nexus.log, addrs).await, - Err(e) => { - warn!(nexus.log, "Failed to lookup Dendrite address: {e}"); - return Err(ActionError::action_failed(format!( - "switch mapping failed {e}", - ))); - } - }; - - let addr = match mappings.get(&location) { - Some(addr) => addr, - None => { - return Err(ActionError::action_failed(format!( - "address for switch at location: {location} not found", - ))); - } - }; - - let mut segments = addr.segments(); - segments[7] = 1; - let addr = Ipv6Addr::from(segments); - - Ok(SocketAddrV6::new(addr, SLED_AGENT_PORT, 0, 0)) -} - -#[derive(Clone, Debug)] -pub struct EarlyNetworkPortUpdate { - port: PortConfigV1, - bgp_configs: Vec, -} - -pub(crate) async fn bootstore_update( - nexus: &Arc, - opctx: &OpContext, - switch_port_id: Uuid, - switch_port_name: &str, - settings: &SwitchPortSettingsCombinedResult, -) -> Result { - let switch_port = - nexus.get_switch_port(&opctx, switch_port_id).await.map_err(|e| { - ActionError::action_failed(format!( - "get switch port for uplink: {e}" + "lookup switch port settings: {e}" )) })?; - let switch_location: SwitchLocation = - switch_port.switch_location.parse().map_err( - |e: ParseSwitchLocationError| { - ActionError::action_failed(format!( - "get switch location for uplink: {e:?}", - )) - }, - )?; - - let mut peer_info = Vec::new(); - let mut bgp_configs = Vec::new(); - for p in &settings.bgp_peers { - let bgp_config = nexus - .bgp_config_get(&opctx, p.bgp_config_id.into()) - .await - .map_err(|e| { - ActionError::action_failed(format!("get bgp config: {e}")) - })?; - - let announcements = nexus - .bgp_announce_list( - &opctx, - ¶ms::BgpAnnounceSetSelector { - name_or_id: NameOrId::Id(bgp_config.bgp_announce_set_id), - }, - ) - .await - .map_err(|e| { - ActionError::action_failed(format!( - "get bgp announcements: {e}" - )) - })?; - - peer_info.push((p, bgp_config.asn.0)); - bgp_configs.push(BgpConfig { - asn: bgp_config.asn.0, - originate: announcements - .iter() - .filter_map(|a| match a.network { - IpNetwork::V4(net) => Some(net.into()), - //TODO v6 - _ => None, - }) - .collect(), - }); - } - - let update = EarlyNetworkPortUpdate { - port: PortConfigV1 { - routes: settings - .routes - .iter() - .map(|r| RouteConfig { destination: r.dst, nexthop: r.gw.ip() }) - .collect(), - addresses: settings.addresses.iter().map(|a| a.address).collect(), - switch: switch_location, - port: switch_port_name.into(), - uplink_port_fec: settings - .links - .get(0) - .map(|l| l.fec) - .unwrap_or(SwitchLinkFec::None) - .into(), - uplink_port_speed: settings - .links - .get(0) - .map(|l| l.speed) - .unwrap_or(SwitchLinkSpeed::Speed100G) - .into(), - bgp_peers: peer_info - 
.iter() - .filter_map(|(p, asn)| { - //TODO v6 - match p.addr.ip() { - IpAddr::V4(addr) => Some(OmicronBgpPeerConfig { - asn: *asn, - port: switch_port_name.into(), - addr, - hold_time: Some(p.hold_time.0.into()), - connect_retry: Some(p.connect_retry.0.into()), - delay_open: Some(p.delay_open.0.into()), - idle_hold_time: Some(p.idle_hold_time.0.into()), - keepalive: Some(p.keepalive.0.into()), - }), - IpAddr::V6(_) => { - warn!(opctx.log, "IPv6 peers not yet supported"); - None - } - } - }) - .collect(), - }, - bgp_configs, - }; - - Ok(update) -} - -pub(crate) async fn read_bootstore_config( - sa: &sled_agent_client::Client, -) -> Result { - Ok(sa - .read_network_bootstore_config_cache() - .await - .map_err(|e| { - ActionError::action_failed(format!( - "read bootstore network config: {e}" - )) - })? - .into_inner()) -} - -pub(crate) async fn write_bootstore_config( - sa: &sled_agent_client::Client, - config: &EarlyNetworkConfig, -) -> Result<(), ActionError> { - sa.write_network_bootstore_config(config).await.map_err(|e| { - ActionError::action_failed(format!( - "write bootstore network config: {e}" - )) - })?; - Ok(()) -} - -#[derive(Clone, Debug, Default)] -pub(crate) struct BootstoreNetworkPortChange { - previous_port_config: Option, - changed_bgp_configs: Vec, - added_bgp_configs: Vec, -} - -pub(crate) fn apply_bootstore_update( - config: &mut EarlyNetworkConfig, - update: &EarlyNetworkPortUpdate, -) -> Result { - let mut change = BootstoreNetworkPortChange::default(); - - let rack_net_config = match &mut config.body.rack_network_config { - Some(cfg) => cfg, - None => { - return Err(ActionError::action_failed( - "rack network config not yet initialized".to_string(), - )) - } - }; - - for port in &mut rack_net_config.ports { - if port.port == update.port.port { - change.previous_port_config = Some(port.clone()); - *port = update.port.clone(); - break; - } - } - if change.previous_port_config.is_none() { - rack_net_config.ports.push(update.port.clone()); - } - - for updated_bgp in &update.bgp_configs { - let mut exists = false; - for resident_bgp in &mut rack_net_config.bgp { - if resident_bgp.asn == updated_bgp.asn { - change.changed_bgp_configs.push(resident_bgp.clone()); - *resident_bgp = updated_bgp.clone(); - exists = true; - break; - } - } - if !exists { - change.added_bgp_configs.push(updated_bgp.clone()); - } - } - rack_net_config.bgp.extend_from_slice(&change.added_bgp_configs); - - Ok(change) -} + let params = sagactx.saga_params::()?; + let opctx = crate::context::op_context_for_saga_action( + &sagactx, + ¶ms.serialized_authn, + ); -pub(crate) async fn switch_sled_agent( - location: SwitchLocation, - sagactx: &NexusActionContext, -) -> Result { - let nexus = sagactx.user_data().nexus(); - let sled_agent_addr = get_scrimlet_address(location, nexus).await?; - Ok(sled_agent_client::Client::new( - &format!("http://{}", sled_agent_addr), - sagactx.user_data().log().clone(), - )) + ensure_switch_port_bgp_settings( + sagactx, + &opctx, + settings, + params.switch_port_name.clone(), + params.switch_port_id, + ) + .await } diff --git a/nexus/src/app/sagas/switch_port_settings_clear.rs b/nexus/src/app/sagas/switch_port_settings_clear.rs index 1ab2f6be0c..bcbd5bf894 100644 --- a/nexus/src/app/sagas/switch_port_settings_clear.rs +++ b/nexus/src/app/sagas/switch_port_settings_clear.rs @@ -5,7 +5,7 @@ use super::switch_port_settings_apply::select_dendrite_client; use super::{NexusActionContext, NEXUS_DPD_TAG}; use crate::app::sagas::retry_until_known_result; -use 
crate::app::sagas::switch_port_settings_apply::{ +use crate::app::sagas::switch_port_settings_common::{ api_to_dpd_port_settings, apply_bootstore_update, bootstore_update, ensure_switch_port_bgp_settings, ensure_switch_port_uplink, read_bootstore_config, select_mg_client, switch_sled_agent, @@ -214,7 +214,20 @@ async fn spa_undo_clear_switch_port_settings( async fn spa_clear_switch_port_uplink( sagactx: NexusActionContext, ) -> Result<(), ActionError> { - ensure_switch_port_uplink(sagactx, true, None).await + let params = sagactx.saga_params::()?; + let opctx = crate::context::op_context_for_saga_action( + &sagactx, + ¶ms.serialized_authn, + ); + ensure_switch_port_uplink( + sagactx, + &opctx, + true, + None, + params.switch_port_id, + params.port_name.clone(), + ) + .await } async fn spa_undo_clear_switch_port_uplink( @@ -223,8 +236,21 @@ async fn spa_undo_clear_switch_port_uplink( let id = sagactx .lookup::>("original_switch_port_settings_id") .map_err(|e| external::Error::internal_error(&e.to_string()))?; + let params = sagactx.saga_params::()?; + let opctx = crate::context::op_context_for_saga_action( + &sagactx, + ¶ms.serialized_authn, + ); - Ok(ensure_switch_port_uplink(sagactx, false, id).await?) + Ok(ensure_switch_port_uplink( + sagactx, + &opctx, + false, + id, + params.switch_port_id, + params.port_name.clone(), + ) + .await?) } async fn spa_clear_switch_port_bgp_settings( @@ -257,9 +283,13 @@ async fn spa_clear_switch_port_bgp_settings( .map_err(ActionError::action_failed)?; let mg_client: Arc = - select_mg_client(&sagactx).await.map_err(|e| { - ActionError::action_failed(format!("select mg client (undo): {e}")) - })?; + select_mg_client(&sagactx, &opctx, params.switch_port_id) + .await + .map_err(|e| { + ActionError::action_failed(format!( + "select mg client (undo): {e}" + )) + })?; for peer in settings.bgp_peers { let config = nexus @@ -306,7 +336,14 @@ async fn spa_undo_clear_switch_port_bgp_settings( let settings = nexus.switch_port_settings_get(&opctx, &NameOrId::Id(id)).await?; - Ok(ensure_switch_port_bgp_settings(sagactx, settings).await?) + Ok(ensure_switch_port_bgp_settings( + sagactx, + &opctx, + settings, + params.port_name.clone(), + params.switch_port_id, + ) + .await?) 
} async fn spa_clear_switch_port_bootstore_network_settings( diff --git a/nexus/src/app/sagas/switch_port_settings_common.rs b/nexus/src/app/sagas/switch_port_settings_common.rs new file mode 100644 index 0000000000..8e66aa12f8 --- /dev/null +++ b/nexus/src/app/sagas/switch_port_settings_common.rs @@ -0,0 +1,577 @@ +use super::NexusActionContext; +use crate::app::map_switch_zone_addrs; +use crate::Nexus; +use db::datastore::SwitchPortSettingsCombinedResult; +use dpd_client::types::{ + LinkCreate, LinkId, LinkSettings, PortFec, PortSettings, PortSpeed, + RouteSettingsV4, RouteSettingsV6, +}; +use dpd_client::{Ipv4Cidr, Ipv6Cidr}; +use internal_dns::ServiceName; +use ipnetwork::IpNetwork; +use mg_admin_client::types::Prefix4; +use mg_admin_client::types::{ApplyRequest, BgpPeerConfig}; +use nexus_db_model::{SwitchLinkFec, SwitchLinkSpeed}; +use nexus_db_queries::context::OpContext; +use nexus_db_queries::db; +use nexus_types::external_api::params; +use omicron_common::address::SLED_AGENT_PORT; +use omicron_common::api::external::NameOrId; +use omicron_common::api::internal::shared::{ + ParseSwitchLocationError, SwitchLocation, +}; +use sled_agent_client::types::PortConfigV1; +use sled_agent_client::types::RouteConfig; +use sled_agent_client::types::{BgpConfig, EarlyNetworkConfig}; +use sled_agent_client::types::{ + BgpPeerConfig as OmicronBgpPeerConfig, HostPortConfig, +}; +use std::collections::HashMap; +use std::net::SocketAddrV6; +use std::net::{IpAddr, Ipv6Addr}; +use std::sync::Arc; +use steno::ActionError; +use uuid::Uuid; + +// This is more of an implementation detail of the BGP implementation. It +// defines the maximum time the peering engine will wait for external messages +// before breaking to check for shutdown conditions. +const BGP_SESSION_RESOLUTION: u64 = 100; + +pub(crate) fn api_to_dpd_port_settings( + settings: &SwitchPortSettingsCombinedResult, +) -> Result { + let mut dpd_port_settings = PortSettings { + links: HashMap::new(), + v4_routes: HashMap::new(), + v6_routes: HashMap::new(), + }; + + //TODO breakouts + let link_id = LinkId(0); + + for l in settings.links.iter() { + dpd_port_settings.links.insert( + link_id.to_string(), + LinkSettings { + params: LinkCreate { + autoneg: false, + lane: Some(LinkId(0)), + kr: false, + fec: match l.fec { + SwitchLinkFec::Firecode => PortFec::Firecode, + SwitchLinkFec::Rs => PortFec::Rs, + SwitchLinkFec::None => PortFec::None, + }, + speed: match l.speed { + SwitchLinkSpeed::Speed0G => PortSpeed::Speed0G, + SwitchLinkSpeed::Speed1G => PortSpeed::Speed1G, + SwitchLinkSpeed::Speed10G => PortSpeed::Speed10G, + SwitchLinkSpeed::Speed25G => PortSpeed::Speed25G, + SwitchLinkSpeed::Speed40G => PortSpeed::Speed40G, + SwitchLinkSpeed::Speed50G => PortSpeed::Speed50G, + SwitchLinkSpeed::Speed100G => PortSpeed::Speed100G, + SwitchLinkSpeed::Speed200G => PortSpeed::Speed200G, + SwitchLinkSpeed::Speed400G => PortSpeed::Speed400G, + }, + }, + //TODO won't work for breakouts + addrs: settings + .addresses + .iter() + .map(|a| a.address.ip()) + .collect(), + }, + ); + } + + for r in &settings.routes { + match &r.dst { + IpNetwork::V4(n) => { + let gw = match r.gw.ip() { + IpAddr::V4(gw) => gw, + IpAddr::V6(_) => { + return Err( + "IPv4 destination cannot have IPv6 nexthop".into() + ) + } + }; + dpd_port_settings.v4_routes.insert( + Ipv4Cidr { prefix: n.ip(), prefix_len: n.prefix() } + .to_string(), + vec![RouteSettingsV4 { link_id: link_id.0, nexthop: gw }], + ); + } + IpNetwork::V6(n) => { + let gw = match r.gw.ip() { + IpAddr::V6(gw) => gw, + 
IpAddr::V4(_) => { + return Err( + "IPv6 destination cannot have IPv4 nexthop".into() + ) + } + }; + dpd_port_settings.v6_routes.insert( + Ipv6Cidr { prefix: n.ip(), prefix_len: n.prefix() } + .to_string(), + vec![RouteSettingsV6 { link_id: link_id.0, nexthop: gw }], + ); + } + } + } + + Ok(dpd_port_settings) +} + +pub(crate) fn apply_bootstore_update( + config: &mut EarlyNetworkConfig, + update: &EarlyNetworkPortUpdate, +) -> Result { + let mut change = BootstoreNetworkPortChange::default(); + + let rack_net_config = match &mut config.body.rack_network_config { + Some(cfg) => cfg, + None => { + return Err(ActionError::action_failed( + "rack network config not yet initialized".to_string(), + )) + } + }; + + for port in &mut rack_net_config.ports { + if port.port == update.port.port { + change.previous_port_config = Some(port.clone()); + *port = update.port.clone(); + break; + } + } + if change.previous_port_config.is_none() { + rack_net_config.ports.push(update.port.clone()); + } + + for updated_bgp in &update.bgp_configs { + let mut exists = false; + for resident_bgp in &mut rack_net_config.bgp { + if resident_bgp.asn == updated_bgp.asn { + change.changed_bgp_configs.push(resident_bgp.clone()); + *resident_bgp = updated_bgp.clone(); + exists = true; + break; + } + } + if !exists { + change.added_bgp_configs.push(updated_bgp.clone()); + } + } + rack_net_config.bgp.extend_from_slice(&change.added_bgp_configs); + + Ok(change) +} + +pub(crate) async fn bootstore_update( + nexus: &Arc, + opctx: &OpContext, + switch_port_id: Uuid, + switch_port_name: &str, + settings: &SwitchPortSettingsCombinedResult, +) -> Result { + let switch_port = + nexus.get_switch_port(&opctx, switch_port_id).await.map_err(|e| { + ActionError::action_failed(format!( + "get switch port for uplink: {e}" + )) + })?; + + let switch_location: SwitchLocation = + switch_port.switch_location.parse().map_err( + |e: ParseSwitchLocationError| { + ActionError::action_failed(format!( + "get switch location for uplink: {e:?}", + )) + }, + )?; + + let mut peer_info = Vec::new(); + let mut bgp_configs = Vec::new(); + for p in &settings.bgp_peers { + let bgp_config = nexus + .bgp_config_get(&opctx, p.bgp_config_id.into()) + .await + .map_err(|e| { + ActionError::action_failed(format!("get bgp config: {e}")) + })?; + + let announcements = nexus + .bgp_announce_list( + &opctx, + ¶ms::BgpAnnounceSetSelector { + name_or_id: NameOrId::Id(bgp_config.bgp_announce_set_id), + }, + ) + .await + .map_err(|e| { + ActionError::action_failed(format!( + "get bgp announcements: {e}" + )) + })?; + + peer_info.push((p, bgp_config.asn.0)); + bgp_configs.push(BgpConfig { + asn: bgp_config.asn.0, + originate: announcements + .iter() + .filter_map(|a| match a.network { + IpNetwork::V4(net) => Some(net.into()), + //TODO v6 + _ => None, + }) + .collect(), + }); + } + + let update = EarlyNetworkPortUpdate { + port: PortConfigV1 { + routes: settings + .routes + .iter() + .map(|r| RouteConfig { destination: r.dst, nexthop: r.gw.ip() }) + .collect(), + addresses: settings.addresses.iter().map(|a| a.address).collect(), + switch: switch_location, + port: switch_port_name.into(), + uplink_port_fec: settings + .links + .get(0) + .map(|l| l.fec) + .unwrap_or(SwitchLinkFec::None) + .into(), + uplink_port_speed: settings + .links + .get(0) + .map(|l| l.speed) + .unwrap_or(SwitchLinkSpeed::Speed100G) + .into(), + bgp_peers: peer_info + .iter() + .filter_map(|(p, asn)| { + //TODO v6 + match p.addr.ip() { + IpAddr::V4(addr) => Some(OmicronBgpPeerConfig { + asn: *asn, + 
port: switch_port_name.into(), + addr, + hold_time: Some(p.hold_time.0.into()), + connect_retry: Some(p.connect_retry.0.into()), + delay_open: Some(p.delay_open.0.into()), + idle_hold_time: Some(p.idle_hold_time.0.into()), + keepalive: Some(p.keepalive.0.into()), + }), + IpAddr::V6(_) => { + warn!(opctx.log, "IPv6 peers not yet supported"); + None + } + } + }) + .collect(), + }, + bgp_configs, + }; + + Ok(update) +} + +pub(crate) async fn ensure_switch_port_uplink( + sagactx: NexusActionContext, + opctx: &OpContext, + skip_self: bool, + inject: Option, + switch_port_id: Uuid, + switch_port_name: String, +) -> Result<(), ActionError> { + let osagactx = sagactx.user_data(); + let nexus = osagactx.nexus(); + + let switch_port = + nexus.get_switch_port(&opctx, switch_port_id).await.map_err(|e| { + ActionError::action_failed(format!( + "get switch port for uplink: {e}" + )) + })?; + + let switch_location: SwitchLocation = + switch_port.switch_location.parse().map_err(|e| { + ActionError::action_failed(format!( + "get switch location for uplink: {e:?}", + )) + })?; + + let mut uplinks: Vec = Vec::new(); + + // The sled agent uplinks interface is an all or nothing interface, so we + // need to get all the uplink configs for all the ports. + let active_ports = + nexus.active_port_settings(&opctx).await.map_err(|e| { + ActionError::action_failed(format!( + "get active switch port settings: {e}" + )) + })?; + + for (port, info) in &active_ports { + // Since we are undoing establishing uplinks for the settings + // associated with this port we skip adding this ports uplinks + // to the list - effectively removing them. + if skip_self && port.id == switch_port.id { + continue; + } + uplinks.push(HostPortConfig { + port: port.port_name.clone(), + addrs: info.addresses.iter().map(|a| a.address).collect(), + }) + } + + if let Some(id) = inject { + let settings = nexus + .switch_port_settings_get(&opctx, &id.into()) + .await + .map_err(|e| { + ActionError::action_failed(format!( + "get switch port settings for injection: {e}" + )) + })?; + uplinks.push(HostPortConfig { + port: switch_port_name.clone(), + addrs: settings.addresses.iter().map(|a| a.address).collect(), + }) + } + + let sc = switch_sled_agent(switch_location, &sagactx).await?; + sc.uplink_ensure(&sled_agent_client::types::SwitchPorts { uplinks }) + .await + .map_err(|e| { + ActionError::action_failed(format!("ensure uplink: {e}")) + })?; + + Ok(()) +} + +pub(crate) async fn read_bootstore_config( + sa: &sled_agent_client::Client, +) -> Result { + Ok(sa + .read_network_bootstore_config_cache() + .await + .map_err(|e| { + ActionError::action_failed(format!( + "read bootstore network config: {e}" + )) + })? 
+ .into_inner()) +} + +pub(crate) async fn write_bootstore_config( + sa: &sled_agent_client::Client, + config: &EarlyNetworkConfig, +) -> Result<(), ActionError> { + sa.write_network_bootstore_config(config).await.map_err(|e| { + ActionError::action_failed(format!( + "write bootstore network config: {e}" + )) + })?; + Ok(()) +} + +pub(crate) async fn select_mg_client( + sagactx: &NexusActionContext, + opctx: &OpContext, + switch_port_id: Uuid, +) -> Result, ActionError> { + let osagactx = sagactx.user_data(); + let nexus = osagactx.nexus(); + + let switch_port = + nexus.get_switch_port(&opctx, switch_port_id).await.map_err(|e| { + ActionError::action_failed(format!( + "get switch port for mg client selection: {e}" + )) + })?; + + let switch_location: SwitchLocation = + switch_port.switch_location.parse().map_err( + |e: ParseSwitchLocationError| { + ActionError::action_failed(format!( + "get switch location for uplink: {e:?}", + )) + }, + )?; + + let mg_client: Arc = osagactx + .nexus() + .mg_clients + .get(&switch_location) + .ok_or_else(|| { + ActionError::action_failed(format!( + "requested switch not available: {switch_location}" + )) + })? + .clone(); + Ok(mg_client) +} + +pub(crate) async fn switch_sled_agent( + location: SwitchLocation, + sagactx: &NexusActionContext, +) -> Result { + let nexus = sagactx.user_data().nexus(); + let sled_agent_addr = get_scrimlet_address(location, nexus).await?; + Ok(sled_agent_client::Client::new( + &format!("http://{}", sled_agent_addr), + sagactx.user_data().log().clone(), + )) +} + +pub(crate) async fn ensure_switch_port_bgp_settings( + sagactx: NexusActionContext, + opctx: &OpContext, + settings: SwitchPortSettingsCombinedResult, + switch_port_name: String, + switch_port_id: Uuid, +) -> Result<(), ActionError> { + let osagactx = sagactx.user_data(); + let nexus = osagactx.nexus(); + let mg_client: Arc = + select_mg_client(&sagactx, opctx, switch_port_id).await.map_err( + |e| ActionError::action_failed(format!("select mg client: {e}")), + )?; + + let mut bgp_peer_configs = Vec::new(); + + for peer in settings.bgp_peers { + let config = nexus + .bgp_config_get(&opctx, peer.bgp_config_id.into()) + .await + .map_err(|e| { + ActionError::action_failed(format!("get bgp config: {e}")) + })?; + + let announcements = nexus + .bgp_announce_list( + &opctx, + ¶ms::BgpAnnounceSetSelector { + name_or_id: NameOrId::Id(config.bgp_announce_set_id), + }, + ) + .await + .map_err(|e| { + ActionError::action_failed(format!( + "get bgp announcements: {e}" + )) + })?; + + let mut prefixes = Vec::new(); + for a in &announcements { + let value = match a.network.ip() { + IpAddr::V4(value) => Ok(value), + IpAddr::V6(_) => Err(ActionError::action_failed( + "IPv6 announcement not yet supported".to_string(), + )), + }?; + prefixes.push(Prefix4 { value, length: a.network.prefix() }); + } + + let bpc = BgpPeerConfig { + asn: *config.asn, + name: format!("{}", peer.addr.ip()), //TODO user defined name? 
+ host: format!("{}:179", peer.addr.ip()), + hold_time: peer.hold_time.0.into(), + idle_hold_time: peer.idle_hold_time.0.into(), + delay_open: peer.delay_open.0.into(), + connect_retry: peer.connect_retry.0.into(), + keepalive: peer.keepalive.0.into(), + resolution: BGP_SESSION_RESOLUTION, + originate: prefixes, + }; + + bgp_peer_configs.push(bpc); + } + + mg_client + .inner + .bgp_apply(&ApplyRequest { + peer_group: switch_port_name, + peers: bgp_peer_configs, + }) + .await + .map_err(|e| { + ActionError::action_failed(format!("apply bgp settings: {e}")) + })?; + + Ok(()) +} + +pub(crate) async fn get_scrimlet_address( + location: SwitchLocation, + nexus: &Arc, +) -> Result { + /* TODO this depends on DNS entries only coming from RSS, it's broken + on the upgrade path + nexus + .resolver() + .await + .lookup_socket_v6(ServiceName::Scrimlet(location)) + .await + .map_err(|e| e.to_string()) + .map_err(|e| { + ActionError::action_failed(format!( + "scrimlet dns lookup failed {e}", + )) + }) + */ + let result = nexus + .resolver() + .await + .lookup_all_ipv6(ServiceName::Dendrite) + .await + .map_err(|e| { + ActionError::action_failed(format!( + "scrimlet dns lookup failed {e}", + )) + }); + + let mappings = match result { + Ok(addrs) => map_switch_zone_addrs(&nexus.log, addrs).await, + Err(e) => { + warn!(nexus.log, "Failed to lookup Dendrite address: {e}"); + return Err(ActionError::action_failed(format!( + "switch mapping failed {e}", + ))); + } + }; + + let addr = match mappings.get(&location) { + Some(addr) => addr, + None => { + return Err(ActionError::action_failed(format!( + "address for switch at location: {location} not found", + ))); + } + }; + + let mut segments = addr.segments(); + segments[7] = 1; + let addr = Ipv6Addr::from(segments); + + Ok(SocketAddrV6::new(addr, SLED_AGENT_PORT, 0, 0)) +} + +#[derive(Clone, Debug, Default)] +pub(crate) struct BootstoreNetworkPortChange { + previous_port_config: Option, + changed_bgp_configs: Vec, + added_bgp_configs: Vec, +} + +#[derive(Clone, Debug)] +pub struct EarlyNetworkPortUpdate { + port: PortConfigV1, + bgp_configs: Vec, +} diff --git a/nexus/tests/integration_tests/switch_port.rs b/nexus/tests/integration_tests/switch_port.rs index ccd0b50fbe..d163fc6b06 100644 --- a/nexus/tests/integration_tests/switch_port.rs +++ b/nexus/tests/integration_tests/switch_port.rs @@ -318,4 +318,19 @@ async fn test_port_settings_basic_crud(ctx: &ControlPlaneTestContext) { .execute() .await .unwrap(); + + // clear port settings + + NexusRequest::new( + RequestBuilder::new( + client, + Method::DELETE, + &format!("/v1/system/hardware/switch-port/qsfp0/settings?rack_id={rack_id}&switch_location=switch0"), + ) + .expect_status(Some(StatusCode::NO_CONTENT)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap(); } From 15e307e3d2f8679b397423eb5a81ca1c9f3635bd Mon Sep 17 00:00:00 2001 From: John Gallagher Date: Tue, 21 Nov 2023 12:22:34 -0800 Subject: [PATCH 34/56] [nexus] Add `RotUpdater` (#4502) This is analogous to #4427, and like it, we only add this type without attempting any integration into Nexus-at-large. Hopefully this PR looks bigger than it really is; the majority of changes are either: * Copy/paste additions from #4427 and change the details to make them relevant for RoT updates instead of SP updates (the tests are a particularly egregious case of this, but I think it makes sense to have duplication here as opposed to trying to make them too general?) 
* Refactoring to extract identical / near-identical bits after step 1 (most of this landed in the new `MgsClients` type) --- .../tests/output/collector_basic.txt | 20 +- .../tests/output/collector_errors.txt | 20 +- nexus/src/app/test_interfaces.rs | 3 + nexus/src/app/update/mgs_clients.rs | 240 +++++++ nexus/src/app/update/mod.rs | 15 +- nexus/src/app/update/rot_updater.rs | 272 ++++++++ nexus/src/app/update/sp_updater.rs | 266 ++------ nexus/tests/integration_tests/mod.rs | 1 + nexus/tests/integration_tests/rot_updater.rs | 627 ++++++++++++++++++ nexus/tests/integration_tests/sp_updater.rs | 59 +- sp-sim/src/gimlet.rs | 34 +- sp-sim/src/lib.rs | 11 +- sp-sim/src/sidecar.rs | 34 +- sp-sim/src/update.rs | 44 +- wicket-common/src/update_events.rs | 14 +- wicketd/src/update_tracker.rs | 176 +++-- wicketd/tests/integration_tests/updates.rs | 10 +- 17 files changed, 1489 insertions(+), 357 deletions(-) create mode 100644 nexus/src/app/update/mgs_clients.rs create mode 100644 nexus/src/app/update/rot_updater.rs create mode 100644 nexus/tests/integration_tests/rot_updater.rs diff --git a/nexus/inventory/tests/output/collector_basic.txt b/nexus/inventory/tests/output/collector_basic.txt index 4a3bf62d63..76b929bfba 100644 --- a/nexus/inventory/tests/output/collector_basic.txt +++ b/nexus/inventory/tests/output/collector_basic.txt @@ -5,9 +5,9 @@ baseboards: part "FAKE_SIM_SIDECAR" serial "SimSidecar1" cabooses: - board "SimGimletRot" name "SimGimlet" version "0.0.1" git_commit "eeeeeeee" board "SimGimletSp" name "SimGimlet" version "0.0.1" git_commit "ffffffff" - board "SimSidecarRot" name "SimSidecar" version "0.0.1" git_commit "eeeeeeee" + board "SimRot" name "SimGimlet" version "0.0.1" git_commit "eeeeeeee" + board "SimRot" name "SimSidecar" version "0.0.1" git_commit "eeeeeeee" board "SimSidecarSp" name "SimSidecar" version "0.0.1" git_commit "ffffffff" SPs: @@ -31,13 +31,13 @@ cabooses found: SpSlot1 baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet01": board "SimGimletSp" SpSlot1 baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar0": board "SimSidecarSp" SpSlot1 baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar1": board "SimSidecarSp" - RotSlotA baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet00": board "SimGimletRot" - RotSlotA baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet01": board "SimGimletRot" - RotSlotA baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar0": board "SimSidecarRot" - RotSlotA baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar1": board "SimSidecarRot" - RotSlotB baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet00": board "SimGimletRot" - RotSlotB baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet01": board "SimGimletRot" - RotSlotB baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar0": board "SimSidecarRot" - RotSlotB baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar1": board "SimSidecarRot" + RotSlotA baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet00": board "SimRot" + RotSlotA baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet01": board "SimRot" + RotSlotA baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar0": board "SimRot" + RotSlotA baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar1": board "SimRot" + RotSlotB baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet00": board "SimRot" + RotSlotB baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet01": board "SimRot" + RotSlotB baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar0": board "SimRot" + RotSlotB baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar1": board "SimRot" errors: diff --git 
a/nexus/inventory/tests/output/collector_errors.txt b/nexus/inventory/tests/output/collector_errors.txt index 4404046253..c61d2e7c29 100644 --- a/nexus/inventory/tests/output/collector_errors.txt +++ b/nexus/inventory/tests/output/collector_errors.txt @@ -5,9 +5,9 @@ baseboards: part "FAKE_SIM_SIDECAR" serial "SimSidecar1" cabooses: - board "SimGimletRot" name "SimGimlet" version "0.0.1" git_commit "eeeeeeee" board "SimGimletSp" name "SimGimlet" version "0.0.1" git_commit "ffffffff" - board "SimSidecarRot" name "SimSidecar" version "0.0.1" git_commit "eeeeeeee" + board "SimRot" name "SimGimlet" version "0.0.1" git_commit "eeeeeeee" + board "SimRot" name "SimSidecar" version "0.0.1" git_commit "eeeeeeee" board "SimSidecarSp" name "SimSidecar" version "0.0.1" git_commit "ffffffff" SPs: @@ -31,14 +31,14 @@ cabooses found: SpSlot1 baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet01": board "SimGimletSp" SpSlot1 baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar0": board "SimSidecarSp" SpSlot1 baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar1": board "SimSidecarSp" - RotSlotA baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet00": board "SimGimletRot" - RotSlotA baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet01": board "SimGimletRot" - RotSlotA baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar0": board "SimSidecarRot" - RotSlotA baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar1": board "SimSidecarRot" - RotSlotB baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet00": board "SimGimletRot" - RotSlotB baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet01": board "SimGimletRot" - RotSlotB baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar0": board "SimSidecarRot" - RotSlotB baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar1": board "SimSidecarRot" + RotSlotA baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet00": board "SimRot" + RotSlotA baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet01": board "SimRot" + RotSlotA baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar0": board "SimRot" + RotSlotA baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar1": board "SimRot" + RotSlotB baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet00": board "SimRot" + RotSlotB baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet01": board "SimRot" + RotSlotB baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar0": board "SimRot" + RotSlotB baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar1": board "SimRot" errors: error: MGS "http://[100::1]:12345": listing ignition targets: Communication Error <> diff --git a/nexus/src/app/test_interfaces.rs b/nexus/src/app/test_interfaces.rs index ad2ea50e07..6161a9a1c1 100644 --- a/nexus/src/app/test_interfaces.rs +++ b/nexus/src/app/test_interfaces.rs @@ -10,6 +10,9 @@ use sled_agent_client::Client as SledAgentClient; use std::sync::Arc; use uuid::Uuid; +pub use super::update::MgsClients; +pub use super::update::RotUpdateError; +pub use super::update::RotUpdater; pub use super::update::SpUpdateError; pub use super::update::SpUpdater; pub use super::update::UpdateProgress; diff --git a/nexus/src/app/update/mgs_clients.rs b/nexus/src/app/update/mgs_clients.rs new file mode 100644 index 0000000000..5915505829 --- /dev/null +++ b/nexus/src/app/update/mgs_clients.rs @@ -0,0 +1,240 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! 
Module providing support for handling failover between multiple MGS clients + +use futures::Future; +use gateway_client::types::SpType; +use gateway_client::types::SpUpdateStatus; +use gateway_client::Client; +use slog::Logger; +use std::collections::VecDeque; +use std::sync::Arc; +use uuid::Uuid; + +pub(super) type GatewayClientError = + gateway_client::Error<gateway_client::types::Error>; + +pub(super) enum PollUpdateStatus { + Preparing { progress: Option<f64> }, + InProgress { progress: Option<f64> }, + Complete, +} + +#[derive(Debug, thiserror::Error)] +pub enum UpdateStatusError { + #[error("different update is now preparing ({0})")] + DifferentUpdatePreparing(Uuid), + #[error("different update is now in progress ({0})")] + DifferentUpdateInProgress(Uuid), + #[error("different update is now complete ({0})")] + DifferentUpdateComplete(Uuid), + #[error("different update is now aborted ({0})")] + DifferentUpdateAborted(Uuid), + #[error("different update failed ({0})")] + DifferentUpdateFailed(Uuid), + #[error("update status lost (did the SP reset?)")] + UpdateStatusLost, + #[error("update was aborted")] + UpdateAborted, + #[error("update failed (error code {0})")] + UpdateFailedWithCode(u32), + #[error("update failed (error message {0})")] + UpdateFailedWithMessage(String), +} + +#[derive(Debug, thiserror::Error)] +pub(super) enum PollUpdateStatusError { + #[error(transparent)] + StatusError(#[from] UpdateStatusError), + #[error(transparent)] + ClientError(#[from] GatewayClientError), +} + +#[derive(Debug, Clone)] +pub struct MgsClients { + clients: VecDeque<Arc<Client>>, +} + +impl MgsClients { + /// Create a new `MgsClients` with the given `clients`. + /// + /// # Panics + /// + /// Panics if `clients` is empty. + pub fn new<T: Into<VecDeque<Arc<Client>>>>(clients: T) -> Self { + let clients = clients.into(); + assert!(!clients.is_empty()); + Self { clients } + } + + /// Create a new `MgsClients` with the given `clients`. + /// + /// # Panics + /// + /// Panics if `clients` is empty. + pub fn from_clients<I: IntoIterator<Item = Client>>(clients: I) -> Self { + let clients = clients + .into_iter() + .map(Arc::new) + .collect::<VecDeque<_>>(); + Self::new(clients) + } + + /// Run `op` against all clients in sequence until either one succeeds (in + /// which case the success value is returned), one fails with a + /// non-communication error (in which case that error is returned), or all + /// of them fail with communication errors (in which case the communication + /// error from the last-attempted client is returned). + /// + /// On a successful return, the internal client list will be reordered so + /// any future accesses will attempt the most-recently-successful client.
+ pub(super) async fn try_all_serially( + &mut self, + log: &Logger, + op: F, + ) -> Result + where + // Seems like it would be nicer to take `&Client` here instead of + // needing to clone each `Arc`, but there's currently no decent way of + // doing that without boxing the returned future: + // https://users.rust-lang.org/t/how-to-express-that-the-future-returned-by-a-closure-lives-only-as-long-as-its-argument/90039/10 + F: Fn(Arc) -> Fut, + Fut: Future>, + { + let mut last_err = None; + for (i, client) in self.clients.iter().enumerate() { + match op(Arc::clone(client)).await { + Ok(value) => { + self.clients.rotate_left(i); + return Ok(value); + } + Err(GatewayClientError::CommunicationError(err)) => { + if i < self.clients.len() { + warn!( + log, "communication error with MGS; \ + will try next client"; + "mgs_addr" => client.baseurl(), + "err" => %err, + ); + } + last_err = Some(err); + continue; + } + Err(err) => return Err(err), + } + } + + // The only way to get here is if all clients failed with communication + // errors. Return the error from the last MGS we tried. + Err(GatewayClientError::CommunicationError(last_err.unwrap())) + } + + /// Poll for the status of an expected-to-be-in-progress update. + pub(super) async fn poll_update_status( + &mut self, + sp_type: SpType, + sp_slot: u32, + component: &'static str, + update_id: Uuid, + log: &Logger, + ) -> Result { + let update_status = self + .try_all_serially(log, |client| async move { + let update_status = client + .sp_component_update_status(sp_type, sp_slot, component) + .await?; + + debug!( + log, "got update status"; + "mgs_addr" => client.baseurl(), + "status" => ?update_status, + ); + + Ok(update_status) + }) + .await? + .into_inner(); + + match update_status { + SpUpdateStatus::Preparing { id, progress } => { + if id == update_id { + let progress = progress.and_then(|progress| { + if progress.current > progress.total { + warn!( + log, "nonsense preparing progress"; + "current" => progress.current, + "total" => progress.total, + ); + None + } else if progress.total == 0 { + None + } else { + Some( + f64::from(progress.current) + / f64::from(progress.total), + ) + } + }); + Ok(PollUpdateStatus::Preparing { progress }) + } else { + Err(UpdateStatusError::DifferentUpdatePreparing(id).into()) + } + } + SpUpdateStatus::InProgress { id, bytes_received, total_bytes } => { + if id == update_id { + let progress = if bytes_received > total_bytes { + warn!( + log, "nonsense update progress"; + "bytes_received" => bytes_received, + "total_bytes" => total_bytes, + ); + None + } else if total_bytes == 0 { + None + } else { + Some(f64::from(bytes_received) / f64::from(total_bytes)) + }; + Ok(PollUpdateStatus::InProgress { progress }) + } else { + Err(UpdateStatusError::DifferentUpdateInProgress(id).into()) + } + } + SpUpdateStatus::Complete { id } => { + if id == update_id { + Ok(PollUpdateStatus::Complete) + } else { + Err(UpdateStatusError::DifferentUpdateComplete(id).into()) + } + } + SpUpdateStatus::None => { + Err(UpdateStatusError::UpdateStatusLost.into()) + } + SpUpdateStatus::Aborted { id } => { + if id == update_id { + Err(UpdateStatusError::UpdateAborted.into()) + } else { + Err(UpdateStatusError::DifferentUpdateAborted(id).into()) + } + } + SpUpdateStatus::Failed { code, id } => { + if id == update_id { + Err(UpdateStatusError::UpdateFailedWithCode(code).into()) + } else { + Err(UpdateStatusError::DifferentUpdateFailed(id).into()) + } + } + SpUpdateStatus::RotError { id, message } => { + if id == update_id { + 
+ Err(UpdateStatusError::UpdateFailedWithMessage(format!( + "rot error: {message}" + )) + .into()) + } else { + Err(UpdateStatusError::DifferentUpdateFailed(id).into()) + } + } + } + } +} diff --git a/nexus/src/app/update/mod.rs b/nexus/src/app/update/mod.rs index 165a6ae23b..7d5c642822 100644 --- a/nexus/src/app/update/mod.rs +++ b/nexus/src/app/update/mod.rs @@ -26,9 +26,22 @@ use std::path::Path; use tokio::io::AsyncWriteExt; use uuid::Uuid; +mod mgs_clients; +mod rot_updater; mod sp_updater; -pub use sp_updater::{SpUpdateError, SpUpdater, UpdateProgress}; +pub use mgs_clients::{MgsClients, UpdateStatusError}; +pub use rot_updater::{RotUpdateError, RotUpdater}; +pub use sp_updater::{SpUpdateError, SpUpdater}; + +#[derive(Debug, PartialEq, Clone)] +pub enum UpdateProgress { + Started, + Preparing { progress: Option<f64> }, + InProgress { progress: Option<f64> }, + Complete, + Failed(String), +} static BASE_ARTIFACT_DIR: &str = "/var/tmp/oxide_artifacts"; diff --git a/nexus/src/app/update/rot_updater.rs b/nexus/src/app/update/rot_updater.rs new file mode 100644 index 0000000000..d7d21e3b3a --- /dev/null +++ b/nexus/src/app/update/rot_updater.rs @@ -0,0 +1,272 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Module containing types for updating RoTs via MGS. + +use super::mgs_clients::PollUpdateStatusError; +use super::MgsClients; +use super::UpdateProgress; +use super::UpdateStatusError; +use crate::app::update::mgs_clients::PollUpdateStatus; +use gateway_client::types::RotSlot; +use gateway_client::types::SpComponentFirmwareSlot; +use gateway_client::types::SpType; +use gateway_client::SpComponent; +use slog::Logger; +use std::time::Duration; +use tokio::sync::watch; +use uuid::Uuid; + +type GatewayClientError = gateway_client::Error<gateway_client::types::Error>; + +#[derive(Debug, thiserror::Error)] +pub enum RotUpdateError { + #[error("error communicating with MGS")] + MgsCommunication(#[from] GatewayClientError), + + #[error("failed checking update status: {0}")] + PollUpdateStatus(#[from] UpdateStatusError), +} + +impl From<PollUpdateStatusError> for RotUpdateError { + fn from(err: PollUpdateStatusError) -> Self { + match err { + PollUpdateStatusError::StatusError(err) => err.into(), + PollUpdateStatusError::ClientError(err) => err.into(), + } + } +} + +pub struct RotUpdater { + log: Logger, + progress: watch::Sender<Option<UpdateProgress>>, + sp_type: SpType, + sp_slot: u32, + target_rot_slot: RotSlot, + update_id: Uuid, + // TODO-clarity maybe a newtype for this? TBD how we get this from + // wherever it's stored, which might give us a stronger type already. + rot_hubris_archive: Vec<u8>, +} + +impl RotUpdater { + pub fn new( + sp_type: SpType, + sp_slot: u32, + target_rot_slot: RotSlot, + update_id: Uuid, + rot_hubris_archive: Vec<u8>, + log: &Logger, + ) -> Self { + let log = log.new(slog::o!( + "component" => "RotUpdater", + "sp_type" => format!("{sp_type:?}"), + "sp_slot" => sp_slot, + "target_rot_slot" => format!("{target_rot_slot:?}"), + "update_id" => format!("{update_id}"), + )); + let progress = watch::Sender::new(None); + Self { + log, + progress, + sp_type, + sp_slot, + target_rot_slot, + update_id, + rot_hubris_archive, + } + } + + pub fn progress_watcher(&self) -> watch::Receiver<Option<UpdateProgress>> { + self.progress.subscribe() + } + + /// Drive this RoT update to completion (or failure).
+ /// + /// Only one MGS instance is required to drive an update; however, if + /// multiple MGS instances are available and passed to this method and an + /// error occurs communicating with one instance, `RotUpdater` will try the + /// remaining instances before failing. + pub async fn update( + self, + mut mgs_clients: MgsClients, + ) -> Result<(), RotUpdateError> { + // The async blocks below want `&self` references, but we take `self` + // for API clarity (to start a new update, the caller should construct a + // new updater). Create a `&self` ref that we use through the remainder + // of this method. + let me = &self; + + mgs_clients + .try_all_serially(&self.log, |client| async move { + me.start_update_one_mgs(&client).await + }) + .await?; + + // `wait_for_update_completion` uses `try_all_mgs_clients` internally, + // so we don't wrap it here. + me.wait_for_update_completion(&mut mgs_clients).await?; + + mgs_clients + .try_all_serially(&self.log, |client| async move { + me.mark_target_slot_active_one_mgs(&client).await + }) + .await?; + + mgs_clients + .try_all_serially(&self.log, |client| async move { + me.finalize_update_via_reset_one_mgs(&client).await + }) + .await?; + + // wait for any progress watchers to be dropped before we return; + // otherwise, they'll get `RecvError`s when trying to check the current + // status + self.progress.closed().await; + + Ok(()) + } + + async fn start_update_one_mgs( + &self, + client: &gateway_client::Client, + ) -> Result<(), GatewayClientError> { + let firmware_slot = self.target_rot_slot.as_u16(); + + // Start the update. + client + .sp_component_update( + self.sp_type, + self.sp_slot, + SpComponent::ROT.const_as_str(), + firmware_slot, + &self.update_id, + reqwest::Body::from(self.rot_hubris_archive.clone()), + ) + .await?; + + self.progress.send_replace(Some(UpdateProgress::Started)); + + info!( + self.log, "RoT update started"; + "mgs_addr" => client.baseurl(), + ); + + Ok(()) + } + + async fn wait_for_update_completion( + &self, + mgs_clients: &mut MgsClients, + ) -> Result<(), RotUpdateError> { + // How frequently do we poll MGS for the update progress? + const STATUS_POLL_INTERVAL: Duration = Duration::from_secs(3); + + loop { + let status = mgs_clients + .poll_update_status( + self.sp_type, + self.sp_slot, + SpComponent::ROT.const_as_str(), + self.update_id, + &self.log, + ) + .await?; + + // For `Preparing` and `InProgress`, we could check the progress + // information returned by these steps and try to check that + // we're still _making_ progress, but every Nexus instance needs + // to do that anyway in case we (or the MGS instance delivering + // the update) crash, so we'll omit that check here. Instead, we + // just sleep and we'll poll again shortly. + match status { + PollUpdateStatus::Preparing { progress } => { + self.progress.send_replace(Some( + UpdateProgress::Preparing { progress }, + )); + } + PollUpdateStatus::InProgress { progress } => { + self.progress.send_replace(Some( + UpdateProgress::InProgress { progress }, + )); + } + PollUpdateStatus::Complete => { + self.progress.send_replace(Some( + UpdateProgress::InProgress { progress: Some(1.0) }, + )); + return Ok(()); + } + } + + tokio::time::sleep(STATUS_POLL_INTERVAL).await; + } + } + + async fn mark_target_slot_active_one_mgs( + &self, + client: &gateway_client::Client, + ) -> Result<(), GatewayClientError> { + // RoT currently doesn't support non-persistent slot swapping, so always + // tell it to persist our choice. 
+ let persist = true; + + let slot = self.target_rot_slot.as_u16(); + + client + .sp_component_active_slot_set( + self.sp_type, + self.sp_slot, + SpComponent::ROT.const_as_str(), + persist, + &SpComponentFirmwareSlot { slot }, + ) + .await?; + + // TODO-correctness Should we send some kind of update to + // `self.progress`? We already sent `InProgress(1.0)` when the update + // finished delivering. Or perhaps we shouldn't even be doing this step + // and the reset, and let our caller handle the finalization? + + info!( + self.log, "RoT target slot marked active"; + "mgs_addr" => client.baseurl(), + ); + + Ok(()) + } + + async fn finalize_update_via_reset_one_mgs( + &self, + client: &gateway_client::Client, + ) -> Result<(), GatewayClientError> { + client + .sp_component_reset( + self.sp_type, + self.sp_slot, + SpComponent::ROT.const_as_str(), + ) + .await?; + + self.progress.send_replace(Some(UpdateProgress::Complete)); + info!( + self.log, "RoT update complete"; + "mgs_addr" => client.baseurl(), + ); + + Ok(()) + } +} + +trait RotSlotAsU16 { + fn as_u16(&self) -> u16; +} + +impl RotSlotAsU16 for RotSlot { + fn as_u16(&self) -> u16 { + match self { + RotSlot::A => 0, + RotSlot::B => 1, + } + } +} diff --git a/nexus/src/app/update/sp_updater.rs b/nexus/src/app/update/sp_updater.rs index 9abb2ad222..419a733441 100644 --- a/nexus/src/app/update/sp_updater.rs +++ b/nexus/src/app/update/sp_updater.rs @@ -4,13 +4,15 @@ //! Module containing types for updating SPs via MGS. -use futures::Future; +use crate::app::update::mgs_clients::PollUpdateStatus; + +use super::mgs_clients::PollUpdateStatusError; +use super::MgsClients; +use super::UpdateProgress; +use super::UpdateStatusError; use gateway_client::types::SpType; -use gateway_client::types::SpUpdateStatus; use gateway_client::SpComponent; use slog::Logger; -use std::collections::VecDeque; -use std::sync::Arc; use std::time::Duration; use tokio::sync::watch; use uuid::Uuid; @@ -22,20 +24,17 @@ pub enum SpUpdateError { #[error("error communicating with MGS")] MgsCommunication(#[from] GatewayClientError), - // Error returned when we successfully start an update but it fails to - // complete successfully. - #[error("update failed to complete: {0}")] - FailedToComplete(String), + #[error("failed checking update status: {0}")] + PollUpdateStatus(#[from] UpdateStatusError), } -// TODO-cleanup Probably share this with other update implementations? -#[derive(Debug, PartialEq, Clone)] -pub enum UpdateProgress { - Started, - Preparing { progress: Option }, - InProgress { progress: Option }, - Complete, - Failed(String), +impl From for SpUpdateError { + fn from(err: PollUpdateStatusError) -> Self { + match err { + PollUpdateStatusError::StatusError(err) => err.into(), + PollUpdateStatusError::ClientError(err) => err.into(), + } + } } pub struct SpUpdater { @@ -58,6 +57,7 @@ impl SpUpdater { log: &Logger, ) -> Self { let log = log.new(slog::o!( + "component" => "SpUpdater", "sp_type" => format!("{sp_type:?}"), "sp_slot" => sp_slot, "update_id" => format!("{update_id}"), @@ -76,78 +76,38 @@ impl SpUpdater { /// multiple MGS instances are available and passed to this method and an /// error occurs communicating with one instance, `SpUpdater` will try the /// remaining instances before failing. - /// - /// # Panics - /// - /// If `mgs_clients` is empty. 
- pub async fn update>>>( + pub async fn update( self, - mgs_clients: T, + mut mgs_clients: MgsClients, ) -> Result<(), SpUpdateError> { - let mut mgs_clients = mgs_clients.into(); - assert!(!mgs_clients.is_empty()); - // The async blocks below want `&self` references, but we take `self` // for API clarity (to start a new SP update, the caller should // construct a new `SpUpdater`). Create a `&self` ref that we use // through the remainder of this method. let me = &self; - me.try_all_mgs_clients(&mut mgs_clients, |client| async move { - me.start_update_one_mgs(&client).await - }) - .await?; + mgs_clients + .try_all_serially(&self.log, |client| async move { + me.start_update_one_mgs(&client).await + }) + .await?; // `wait_for_update_completion` uses `try_all_mgs_clients` internally, // so we don't wrap it here. me.wait_for_update_completion(&mut mgs_clients).await?; - me.try_all_mgs_clients(&mut mgs_clients, |client| async move { - me.finalize_update_via_reset_one_mgs(&client).await - }) - .await?; + mgs_clients + .try_all_serially(&self.log, |client| async move { + me.finalize_update_via_reset_one_mgs(&client).await + }) + .await?; - Ok(()) - } + // wait for any progress watchers to be dropped before we return; + // otherwise, they'll get `RecvError`s when trying to check the current + // status + self.progress.closed().await; - // Helper method to run `op` against all clients. If `op` returns - // successfully for one client, that client will be rotated to the front of - // the list (so any subsequent operations can start with the first client). - async fn try_all_mgs_clients( - &self, - mgs_clients: &mut VecDeque>, - op: F, - ) -> Result - where - F: Fn(Arc) -> Fut, - Fut: Future>, - { - let mut last_err = None; - for (i, client) in mgs_clients.iter().enumerate() { - match op(Arc::clone(client)).await { - Ok(val) => { - // Shift our list of MGS clients such that the one we just - // used is at the front for subsequent requests. - mgs_clients.rotate_left(i); - return Ok(val); - } - // If we have an error communicating with an MGS instance - // (timeout, unexpected connection close, etc.), we'll move on - // and try the next MGS client. If this was the last client, - // we'll stash the error in `last_err` and return it below the - // loop. - Err(GatewayClientError::CommunicationError(err)) => { - last_err = Some(err); - continue; - } - Err(err) => return Err(err), - } - } - - // We know we have at least one `mgs_client`, so the only way to get - // here is if all clients failed with connection errors. Return the - // error from the last MGS we tried. - Err(GatewayClientError::CommunicationError(last_err.unwrap())) + Ok(()) } async fn start_update_one_mgs( @@ -183,142 +143,48 @@ impl SpUpdater { async fn wait_for_update_completion( &self, - mgs_clients: &mut VecDeque>, + mgs_clients: &mut MgsClients, ) -> Result<(), SpUpdateError> { // How frequently do we poll MGS for the update progress? const STATUS_POLL_INTERVAL: Duration = Duration::from_secs(3); loop { - let update_status = self - .try_all_mgs_clients(mgs_clients, |client| async move { - let update_status = client - .sp_component_update_status( - self.sp_type, - self.sp_slot, - SpComponent::SP_ITSELF.const_as_str(), - ) - .await?; - - info!( - self.log, "got SP update status"; - "mgs_addr" => client.baseurl(), - "status" => ?update_status, - ); - - Ok(update_status) - }) - .await? 
- .into_inner(); - - // The majority of possible update statuses indicate failure; we'll - // handle the small number of non-failure cases by either - // `continue`ing or `return`ing; all other branches will give us an - // error string that we can report. - let error_message = match update_status { - // For `Preparing` and `InProgress`, we could check the progress - // information returned by these steps and try to check that - // we're still _making_ progress, but every Nexus instance needs - // to do that anyway in case we (or the MGS instance delivering - // the update) crash, so we'll omit that check here. Instead, we - // just sleep and we'll poll again shortly. - SpUpdateStatus::Preparing { id, progress } => { - if id == self.update_id { - let progress = progress.and_then(|progress| { - if progress.current > progress.total { - warn!( - self.log, "nonsense SP preparing progress"; - "current" => progress.current, - "total" => progress.total, - ); - None - } else if progress.total == 0 { - None - } else { - Some( - f64::from(progress.current) - / f64::from(progress.total), - ) - } - }); - self.progress.send_replace(Some( - UpdateProgress::Preparing { progress }, - )); - tokio::time::sleep(STATUS_POLL_INTERVAL).await; - continue; - } else { - format!("different update is now preparing ({id})") - } - } - SpUpdateStatus::InProgress { - id, - bytes_received, - total_bytes, - } => { - if id == self.update_id { - let progress = if bytes_received > total_bytes { - warn!( - self.log, "nonsense SP progress"; - "bytes_received" => bytes_received, - "total_bytes" => total_bytes, - ); - None - } else if total_bytes == 0 { - None - } else { - Some( - f64::from(bytes_received) - / f64::from(total_bytes), - ) - }; - self.progress.send_replace(Some( - UpdateProgress::InProgress { progress }, - )); - tokio::time::sleep(STATUS_POLL_INTERVAL).await; - continue; - } else { - format!("different update is now in progress ({id})") - } - } - SpUpdateStatus::Complete { id } => { - if id == self.update_id { - self.progress.send_replace(Some( - UpdateProgress::InProgress { progress: Some(1.0) }, - )); - return Ok(()); - } else { - format!("different update is now in complete ({id})") - } + let status = mgs_clients + .poll_update_status( + self.sp_type, + self.sp_slot, + SpComponent::SP_ITSELF.const_as_str(), + self.update_id, + &self.log, + ) + .await?; + + // For `Preparing` and `InProgress`, we could check the progress + // information returned by these steps and try to check that + // we're still _making_ progress, but every Nexus instance needs + // to do that anyway in case we (or the MGS instance delivering + // the update) crash, so we'll omit that check here. Instead, we + // just sleep and we'll poll again shortly. 
+ match status { + PollUpdateStatus::Preparing { progress } => { + self.progress.send_replace(Some( + UpdateProgress::Preparing { progress }, + )); } - SpUpdateStatus::None => { - "update status lost (did the SP reset?)".to_string() + PollUpdateStatus::InProgress { progress } => { + self.progress.send_replace(Some( + UpdateProgress::InProgress { progress }, + )); } - SpUpdateStatus::Aborted { id } => { - if id == self.update_id { - "update was aborted".to_string() - } else { - format!("different update is now in complete ({id})") - } + PollUpdateStatus::Complete => { + self.progress.send_replace(Some( + UpdateProgress::InProgress { progress: Some(1.0) }, + )); + return Ok(()); } - SpUpdateStatus::Failed { code, id } => { - if id == self.update_id { - format!("update failed (error code {code})") - } else { - format!("different update failed ({id})") - } - } - SpUpdateStatus::RotError { id, message } => { - if id == self.update_id { - format!("update failed (rot error: {message})") - } else { - format!("different update failed with rot error ({id})") - } - } - }; + } - self.progress.send_replace(Some(UpdateProgress::Failed( - error_message.clone(), - ))); - return Err(SpUpdateError::FailedToComplete(error_message)); + tokio::time::sleep(STATUS_POLL_INTERVAL).await; } } diff --git a/nexus/tests/integration_tests/mod.rs b/nexus/tests/integration_tests/mod.rs index e0bb09de4f..87c5c74f0f 100644 --- a/nexus/tests/integration_tests/mod.rs +++ b/nexus/tests/integration_tests/mod.rs @@ -25,6 +25,7 @@ mod projects; mod rack; mod role_assignments; mod roles_builtin; +mod rot_updater; mod router_routes; mod saml; mod schema; diff --git a/nexus/tests/integration_tests/rot_updater.rs b/nexus/tests/integration_tests/rot_updater.rs new file mode 100644 index 0000000000..750f9571d0 --- /dev/null +++ b/nexus/tests/integration_tests/rot_updater.rs @@ -0,0 +1,627 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Tests `RotUpdater`'s delivery of updates to RoTs via MGS + +use gateway_client::types::{RotSlot, SpType}; +use gateway_messages::{SpPort, UpdateInProgressStatus, UpdateStatus}; +use gateway_test_utils::setup as mgs_setup; +use hubtools::RawHubrisArchive; +use hubtools::{CabooseBuilder, HubrisArchiveBuilder}; +use omicron_nexus::app::test_interfaces::{ + MgsClients, RotUpdater, UpdateProgress, +}; +use sp_sim::SimulatedSp; +use sp_sim::SIM_ROT_BOARD; +use std::mem; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::sync::Arc; +use std::time::Duration; +use tokio::io::AsyncWriteExt; +use tokio::net::TcpListener; +use tokio::net::TcpStream; +use tokio::sync::mpsc; +use uuid::Uuid; + +fn make_fake_rot_image() -> Vec<u8> { + let caboose = CabooseBuilder::default() + .git_commit("fake-git-commit") + .board(SIM_ROT_BOARD) + .version("0.0.0") + .name("fake-name") + .build(); + + let mut builder = HubrisArchiveBuilder::with_fake_image(); + builder.write_caboose(caboose.as_slice()).unwrap(); + builder.build_to_vec().unwrap() +} + +#[tokio::test] +async fn test_rot_updater_updates_sled() { + // Start MGS + Sim SP. + let mgstestctx = + mgs_setup::test_setup("test_rot_updater_updates_sled", SpPort::One) + .await; + + // Configure an MGS client.
+ let mgs_clients = MgsClients::from_clients([gateway_client::Client::new( + &mgstestctx.client.url("/").to_string(), + mgstestctx.logctx.log.new(slog::o!("component" => "MgsClient")), + )]); + + // Configure and instantiate an `RotUpdater`. + let sp_type = SpType::Sled; + let sp_slot = 0; + let update_id = Uuid::new_v4(); + let hubris_archive = make_fake_rot_image(); + let target_rot_slot = RotSlot::B; + + let rot_updater = RotUpdater::new( + sp_type, + sp_slot, + target_rot_slot, + update_id, + hubris_archive.clone(), + &mgstestctx.logctx.log, + ); + + // Run the update. + rot_updater.update(mgs_clients).await.expect("update failed"); + + // Ensure the RoT received the complete update. + let last_update_image = mgstestctx.simrack.gimlets[sp_slot as usize] + .last_rot_update_data() + .await + .expect("simulated RoT did not receive an update"); + + let hubris_archive = RawHubrisArchive::from_vec(hubris_archive).unwrap(); + + assert_eq!( + hubris_archive.image.data.as_slice(), + &*last_update_image, + "simulated RoT update contents (len {}) \ + do not match test generated fake image (len {})", + last_update_image.len(), + hubris_archive.image.data.len() + ); + + mgstestctx.teardown().await; +} + +#[tokio::test] +async fn test_rot_updater_updates_switch() { + // Start MGS + Sim SP. + let mgstestctx = + mgs_setup::test_setup("test_rot_updater_updates_switch", SpPort::One) + .await; + + // Configure an MGS client. + let mgs_clients = MgsClients::from_clients([gateway_client::Client::new( + &mgstestctx.client.url("/").to_string(), + mgstestctx.logctx.log.new(slog::o!("component" => "MgsClient")), + )]); + + let sp_type = SpType::Switch; + let sp_slot = 0; + let update_id = Uuid::new_v4(); + let hubris_archive = make_fake_rot_image(); + let target_rot_slot = RotSlot::B; + + let rot_updater = RotUpdater::new( + sp_type, + sp_slot, + target_rot_slot, + update_id, + hubris_archive.clone(), + &mgstestctx.logctx.log, + ); + + rot_updater.update(mgs_clients).await.expect("update failed"); + + let last_update_image = mgstestctx.simrack.sidecars[sp_slot as usize] + .last_rot_update_data() + .await + .expect("simulated RoT did not receive an update"); + + let hubris_archive = RawHubrisArchive::from_vec(hubris_archive).unwrap(); + + assert_eq!( + hubris_archive.image.data.as_slice(), + &*last_update_image, + "simulated RoT update contents (len {}) \ + do not match test generated fake image (len {})", + last_update_image.len(), + hubris_archive.image.data.len() + ); + + mgstestctx.teardown().await; +} + +#[tokio::test] +async fn test_rot_updater_remembers_successful_mgs_instance() { + // Start MGS + Sim SP. + let mgstestctx = mgs_setup::test_setup( + "test_rot_updater_remembers_successful_mgs_instance", + SpPort::One, + ) + .await; + + // Also start a local TCP server that we will claim is an MGS instance, but + // it will close connections immediately after accepting them. This will + // allow us to count how many connections it receives, while simultaneously + // causing errors in the RotUpdater when it attempts to use this "MGS". 
+ let (failing_mgs_task, failing_mgs_addr, failing_mgs_conn_counter) = { + let socket = TcpListener::bind("[::1]:0").await.unwrap(); + let addr = socket.local_addr().unwrap(); + let conn_count = Arc::new(AtomicUsize::new(0)); + + let task = { + let conn_count = Arc::clone(&conn_count); + tokio::spawn(async move { + loop { + let (mut stream, _peer) = socket.accept().await.unwrap(); + conn_count.fetch_add(1, Ordering::SeqCst); + stream.shutdown().await.unwrap(); + } + }) + }; + + (task, addr, conn_count) + }; + + // Order the MGS clients such that the bogus MGS that immediately closes + // connections comes first. `RotUpdater` should remember that the second MGS + // instance succeeds, and only send subsequent requests to it: we should + // only see a single attempted connection to the bogus MGS, even though + // delivering an update requires a bare minimum of three requests (start the + // update, query the status, reset the RoT) and often more (if repeated + // queries are required to wait for completion). + let mgs_clients = MgsClients::from_clients([ + gateway_client::Client::new( + &format!("http://{failing_mgs_addr}"), + mgstestctx.logctx.log.new(slog::o!("component" => "MgsClient1")), + ), + gateway_client::Client::new( + &mgstestctx.client.url("/").to_string(), + mgstestctx.logctx.log.new(slog::o!("component" => "MgsClient")), + ), + ]); + + let sp_type = SpType::Sled; + let sp_slot = 0; + let update_id = Uuid::new_v4(); + let hubris_archive = make_fake_rot_image(); + let target_rot_slot = RotSlot::B; + + let rot_updater = RotUpdater::new( + sp_type, + sp_slot, + target_rot_slot, + update_id, + hubris_archive.clone(), + &mgstestctx.logctx.log, + ); + + rot_updater.update(mgs_clients).await.expect("update failed"); + + let last_update_image = mgstestctx.simrack.gimlets[sp_slot as usize] + .last_rot_update_data() + .await + .expect("simulated RoT did not receive an update"); + + let hubris_archive = RawHubrisArchive::from_vec(hubris_archive).unwrap(); + + assert_eq!( + hubris_archive.image.data.as_slice(), + &*last_update_image, + "simulated RoT update contents (len {}) \ + do not match test generated fake image (len {})", + last_update_image.len(), + hubris_archive.image.data.len() + ); + + // Check that our bogus MGS only received a single connection attempt. + // (After RotUpdater failed to talk to this instance, it should have fallen + // back to the valid one for all further requests.) + assert_eq!( + failing_mgs_conn_counter.load(Ordering::SeqCst), + 1, + "bogus MGS instance didn't receive the expected number of connections" + ); + failing_mgs_task.abort(); + + mgstestctx.teardown().await; +} + +#[tokio::test] +async fn test_rot_updater_switches_mgs_instances_on_failure() { + enum MgsProxy { + One(TcpStream), + Two(TcpStream), + } + + // Start MGS + Sim SP. + let mgstestctx = mgs_setup::test_setup( + "test_rot_updater_switches_mgs_instances_on_failure", + SpPort::One, + ) + .await; + let mgs_bind_addr = mgstestctx.client.bind_address; + + let spawn_mgs_proxy_task = |mut stream: TcpStream| { + tokio::spawn(async move { + let mut mgs_stream = TcpStream::connect(mgs_bind_addr) + .await + .expect("failed to connect to MGS"); + tokio::io::copy_bidirectional(&mut stream, &mut mgs_stream) + .await + .expect("failed to proxy connection to MGS"); + }) + }; + + // Start two MGS proxy tasks; when each receives an incoming TCP connection, + // it forwards that `TcpStream` along the `mgs_proxy_connections` channel + // along with a tag of which proxy it is. 
We'll use this below to flip flop + // between MGS "instances" (really these two proxies). + let (mgs_proxy_connections_tx, mut mgs_proxy_connections_rx) = + mpsc::unbounded_channel(); + let (mgs_proxy_one_task, mgs_proxy_one_addr) = { + let socket = TcpListener::bind("[::1]:0").await.unwrap(); + let addr = socket.local_addr().unwrap(); + let mgs_proxy_connections_tx = mgs_proxy_connections_tx.clone(); + let task = tokio::spawn(async move { + loop { + let (stream, _peer) = socket.accept().await.unwrap(); + mgs_proxy_connections_tx.send(MgsProxy::One(stream)).unwrap(); + } + }); + (task, addr) + }; + let (mgs_proxy_two_task, mgs_proxy_two_addr) = { + let socket = TcpListener::bind("[::1]:0").await.unwrap(); + let addr = socket.local_addr().unwrap(); + let task = tokio::spawn(async move { + loop { + let (stream, _peer) = socket.accept().await.unwrap(); + mgs_proxy_connections_tx.send(MgsProxy::Two(stream)).unwrap(); + } + }); + (task, addr) + }; + + // Disable connection pooling so each request gets a new TCP connection. + let client = + reqwest::Client::builder().pool_max_idle_per_host(0).build().unwrap(); + + // Configure two MGS clients pointed at our two proxy tasks. + let mgs_clients = MgsClients::from_clients([ + gateway_client::Client::new_with_client( + &format!("http://{mgs_proxy_one_addr}"), + client.clone(), + mgstestctx.logctx.log.new(slog::o!("component" => "MgsClient1")), + ), + gateway_client::Client::new_with_client( + &format!("http://{mgs_proxy_two_addr}"), + client, + mgstestctx.logctx.log.new(slog::o!("component" => "MgsClient2")), + ), + ]); + + let sp_type = SpType::Sled; + let sp_slot = 0; + let update_id = Uuid::new_v4(); + let hubris_archive = make_fake_rot_image(); + let target_rot_slot = RotSlot::B; + + let rot_updater = RotUpdater::new( + sp_type, + sp_slot, + target_rot_slot, + update_id, + hubris_archive.clone(), + &mgstestctx.logctx.log, + ); + + // Spawn the actual update task. + let mut update_task = tokio::spawn(rot_updater.update(mgs_clients)); + + // Loop over incoming requests. We expect this sequence: + // + // 1. Connection arrives on the first proxy + // 2. We spawn a task to service that request, and set `should_swap` + // 3. Connection arrives on the first proxy + // 4. We drop that connection, flip `expected_proxy`, and clear + // `should_swap` + // 5. Connection arrives on the second proxy + // 6. We spawn a task to service that request, and set `should_swap` + // 7. Connection arrives on the second proxy + // 8. We drop that connection, flip `expected_proxy`, and clear + // `should_swap` + // + // ... repeat until the update is complete. + let mut expected_proxy = 0; + let mut proxy_one_count = 0; + let mut proxy_two_count = 0; + let mut total_requests_handled = 0; + let mut should_swap = false; + loop { + tokio::select! { + Some(proxy_stream) = mgs_proxy_connections_rx.recv() => { + let stream = match proxy_stream { + MgsProxy::One(stream) => { + assert_eq!(expected_proxy, 0); + proxy_one_count += 1; + stream + } + MgsProxy::Two(stream) => { + assert_eq!(expected_proxy, 1); + proxy_two_count += 1; + stream + } + }; + + // Should we trigger `RotUpdater` to swap to the other MGS + // (proxy)? If so, do that by dropping this connection (which + // will cause a client failure) and note that we expect the next + // incoming request to come on the other proxy. + if should_swap { + mem::drop(stream); + expected_proxy ^= 1; + should_swap = false; + } else { + // Otherwise, handle this connection. 
+ total_requests_handled += 1; + spawn_mgs_proxy_task(stream); + should_swap = true; + } + } + + result = &mut update_task => { + match result { + Ok(Ok(())) => { + mgs_proxy_one_task.abort(); + mgs_proxy_two_task.abort(); + break; + } + Ok(Err(err)) => panic!("update failed: {err}"), + Err(err) => panic!("update task panicked: {err}"), + } + } + } + } + + // An RoT update requires a minimum of 4 requests to MGS: post the update, + // check the status, post to mark the new target slot active, and post an + // RoT reset. There may be more requests if the update is not yet complete + // when the status is checked, but we can just check that each of our + // proxies received at least 2 incoming requests; based on our outline + // above, if we got the minimum of 4 requests, it would look like this: + // + // 1. POST update -> first proxy (success) + // 2. GET status -> first proxy (fail) + // 3. GET status retry -> second proxy (success) + // 4. POST new target slot -> second proxy (fail) + // 5. POST new target slot -> first proxy (success) + // 6. POST reset -> first proxy (fail) + // 7. POST reset -> second proxy (success) + // + // This pattern would repeat if multiple status requests were required, so + // we always expect the first proxy to see exactly one more connection + // attempt than the second (because it went first before they started + // swapping), and the two together should see a total of one less than + // double the number of successful requests required. + assert!(total_requests_handled >= 3); + assert_eq!(proxy_one_count, proxy_two_count + 1); + assert_eq!( + (proxy_one_count + proxy_two_count + 1) / 2, + total_requests_handled + ); + + let last_update_image = mgstestctx.simrack.gimlets[sp_slot as usize] + .last_rot_update_data() + .await + .expect("simulated RoT did not receive an update"); + + let hubris_archive = RawHubrisArchive::from_vec(hubris_archive).unwrap(); + + assert_eq!( + hubris_archive.image.data.as_slice(), + &*last_update_image, + "simulated RoT update contents (len {}) \ + do not match test generated fake image (len {})", + last_update_image.len(), + hubris_archive.image.data.len() + ); + + mgstestctx.teardown().await; +} + +#[tokio::test] +async fn test_rot_updater_delivers_progress() { + // Start MGS + Sim SP. + let mgstestctx = mgs_setup::test_setup( + "test_rot_updater_delivers_progress", + SpPort::One, + ) + .await; + + // Configure an MGS client. + let mgs_clients = MgsClients::from_clients([gateway_client::Client::new( + &mgstestctx.client.url("/").to_string(), + mgstestctx.logctx.log.new(slog::o!("component" => "MgsClient")), + )]); + + let sp_type = SpType::Sled; + let sp_slot = 0; + let update_id = Uuid::new_v4(); + let hubris_archive = make_fake_rot_image(); + let target_rot_slot = RotSlot::B; + + let rot_updater = RotUpdater::new( + sp_type, + sp_slot, + target_rot_slot, + update_id, + hubris_archive.clone(), + &mgstestctx.logctx.log, + ); + + let hubris_archive = RawHubrisArchive::from_vec(hubris_archive).unwrap(); + let rot_image_len = hubris_archive.image.data.len() as u32; + + // Subscribe to update progress, and check that there is no status yet; we + // haven't started the update. + let mut progress = rot_updater.progress_watcher(); + assert_eq!(*progress.borrow_and_update(), None); + + // Install a semaphore on the requests our target SP will receive so we can + // inspect progress messages without racing. 
+ let target_sp = &mgstestctx.simrack.gimlets[sp_slot as usize]; + let sp_accept_sema = target_sp.install_udp_accept_semaphore().await; + let mut sp_responses = target_sp.responses_sent_count().unwrap(); + + // Spawn the update on a background task so we can watch `progress` as it is + // applied. + let do_update_task = tokio::spawn(rot_updater.update(mgs_clients)); + + // Allow the SP to respond to 1 message: the "prepare update" message that + // triggers the start of an update, then ensure we see the "started" + // progress. + sp_accept_sema.send(1).unwrap(); + progress.changed().await.unwrap(); + assert_eq!(*progress.borrow_and_update(), Some(UpdateProgress::Started)); + + // Ensure our simulated SP is in the state we expect: it's prepared for an + // update but has not yet received any data. + assert_eq!( + target_sp.current_update_status().await, + UpdateStatus::InProgress(UpdateInProgressStatus { + id: update_id.into(), + bytes_received: 0, + total_size: rot_image_len, + }) + ); + + // Record the number of responses the SP has sent; we'll use + // `sp_responses.changed()` in the loop below, and want to mark whatever + // value this watch channel currently has as seen. + sp_responses.borrow_and_update(); + + // At this point, there are two clients racing each other to talk to our + // simulated SP: + // + // 1. MGS is trying to deliver the update + // 2. `rot_updater` is trying to poll (via MGS) for update status + // + // and we want to ensure that we see any relevant progress reports from + // `rot_updater`. We'll let one MGS -> SP message through at a time (waiting + // until our SP has responded by waiting for a change to `sp_responses`) + // then check its update state: if it changed, the packet we let through was + // data from MGS; otherwise, it was a status request from `rot_updater`. + // + // This loop will continue until either: + // + // 1. We see an `UpdateStatus::InProgress` message indicating 100% delivery, + // at which point we break out of the loop + // 2. We time out waiting for the previous step (by timing out for either + // the SP to process a request or `rot_updater` to realize there's been + // progress), at which point we panic and fail this test. + let mut prev_bytes_received = 0; + let mut expect_progress_change = false; + loop { + // Allow the SP to accept and respond to a single UDP packet. + sp_accept_sema.send(1).unwrap(); + + // Wait until the SP has sent a response, with a safety rail that we + // haven't screwed up our untangle-the-race logic: if we don't see the + // SP process any new messages after several seconds, our test is + // broken, so fail. + tokio::time::timeout(Duration::from_secs(10), sp_responses.changed()) + .await + .expect("timeout waiting for SP response count to change") + .expect("sp response count sender dropped"); + + // Inspect the SP's in-memory update state; we expect only `InProgress` + // or `Complete`, and in either case we note whether we expect to see + // status changes from `rot_updater`. + match target_sp.current_update_status().await { + UpdateStatus::InProgress(rot_progress) => { + if rot_progress.bytes_received > prev_bytes_received { + prev_bytes_received = rot_progress.bytes_received; + expect_progress_change = true; + continue; + } + } + UpdateStatus::Complete(_) => { + if prev_bytes_received < rot_image_len { + prev_bytes_received = rot_image_len; + continue; + } + } + status @ (UpdateStatus::None + | UpdateStatus::Preparing(_) + | UpdateStatus::SpUpdateAuxFlashChckScan { ..
} + | UpdateStatus::Aborted(_) + | UpdateStatus::Failed { .. } + | UpdateStatus::RotError { .. }) => { + panic!("unexpected status {status:?}"); + } + } + + // If we get here, the most recent packet did _not_ change the SP's + // internal update state, so it was a status request from `rot_updater`. + // If we expect the updater to see new progress, wait for that change + // here. + if expect_progress_change || prev_bytes_received == rot_image_len { + // Safety rail that we haven't screwed up our untangle-the-race + // logic: if we don't see new progress after several seconds, our + // test is broken, so fail. + tokio::time::timeout(Duration::from_secs(10), progress.changed()) + .await + .expect("progress timeout") + .expect("progress watch sender dropped"); + let status = progress.borrow_and_update().clone().unwrap(); + expect_progress_change = false; + + // We're done if we've observed the final progress message. + if let UpdateProgress::InProgress { progress: Some(value) } = status + { + if value == 1.0 { + break; + } + } else { + panic!("unexpected progress status {status:?}"); + } + } + } + + // The update has been fully delivered to the SP, but we don't see an + // `UpdateStatus::Complete` message until the RoT is reset. Release the SP + // semaphore since we're no longer racing to observe intermediate progress, + // and wait for the completion message. + sp_accept_sema.send(usize::MAX).unwrap(); + progress.changed().await.unwrap(); + assert_eq!(*progress.borrow_and_update(), Some(UpdateProgress::Complete)); + + // drop our progress receiver so `do_update_task` can complete + mem::drop(progress); + + do_update_task.await.expect("update task panicked").expect("update failed"); + + let last_update_image = target_sp + .last_rot_update_data() + .await + .expect("simulated RoT did not receive an update"); + + assert_eq!( + hubris_archive.image.data.as_slice(), + &*last_update_image, + "simulated RoT update contents (len {}) \ + do not match test generated fake image (len {})", + last_update_image.len(), + hubris_archive.image.data.len() + ); + + mgstestctx.teardown().await; +} diff --git a/nexus/tests/integration_tests/sp_updater.rs b/nexus/tests/integration_tests/sp_updater.rs index 351c28ad9c..89735ac3d9 100644 --- a/nexus/tests/integration_tests/sp_updater.rs +++ b/nexus/tests/integration_tests/sp_updater.rs @@ -9,7 +9,9 @@ use gateway_messages::{SpPort, UpdateInProgressStatus, UpdateStatus}; use gateway_test_utils::setup as mgs_setup; use hubtools::RawHubrisArchive; use hubtools::{CabooseBuilder, HubrisArchiveBuilder}; -use omicron_nexus::app::test_interfaces::{SpUpdater, UpdateProgress}; +use omicron_nexus::app::test_interfaces::{ + MgsClients, SpUpdater, UpdateProgress, +}; use sp_sim::SimulatedSp; use sp_sim::SIM_GIMLET_BOARD; use sp_sim::SIM_SIDECAR_BOARD; @@ -44,10 +46,10 @@ async fn test_sp_updater_updates_sled() { .await; // Configure an MGS client. - let mgs_client = Arc::new(gateway_client::Client::new( + let mgs_clients = MgsClients::from_clients([gateway_client::Client::new( &mgstestctx.client.url("/").to_string(), mgstestctx.logctx.log.new(slog::o!("component" => "MgsClient")), - )); + )]); // Configure and instantiate an `SpUpdater`. let sp_type = SpType::Sled; @@ -64,11 +66,11 @@ async fn test_sp_updater_updates_sled() { ); // Run the update. - sp_updater.update([mgs_client]).await.expect("update failed"); + sp_updater.update(mgs_clients).await.expect("update failed"); // Ensure the SP received the complete update.
let last_update_image = mgstestctx.simrack.gimlets[sp_slot as usize] - .last_update_data() + .last_sp_update_data() .await .expect("simulated SP did not receive an update"); @@ -94,10 +96,10 @@ async fn test_sp_updater_updates_switch() { .await; // Configure an MGS client. - let mgs_client = Arc::new(gateway_client::Client::new( + let mgs_clients = MgsClients::from_clients([gateway_client::Client::new( &mgstestctx.client.url("/").to_string(), mgstestctx.logctx.log.new(slog::o!("component" => "MgsClient")), - )); + )]); let sp_type = SpType::Switch; let sp_slot = 0; @@ -112,10 +114,10 @@ async fn test_sp_updater_updates_switch() { &mgstestctx.logctx.log, ); - sp_updater.update([mgs_client]).await.expect("update failed"); + sp_updater.update(mgs_clients).await.expect("update failed"); let last_update_image = mgstestctx.simrack.sidecars[sp_slot as usize] - .last_update_data() + .last_sp_update_data() .await .expect("simulated SP did not receive an update"); @@ -172,16 +174,16 @@ async fn test_sp_updater_remembers_successful_mgs_instance() { // delivering an update requires a bare minimum of three requests (start the // update, query the status, reset the SP) and often more (if repeated // queries are required to wait for completion). - let mgs_clients = [ - Arc::new(gateway_client::Client::new( + let mgs_clients = MgsClients::from_clients([ + gateway_client::Client::new( &format!("http://{failing_mgs_addr}"), mgstestctx.logctx.log.new(slog::o!("component" => "MgsClient1")), - )), - Arc::new(gateway_client::Client::new( + ), + gateway_client::Client::new( &mgstestctx.client.url("/").to_string(), mgstestctx.logctx.log.new(slog::o!("component" => "MgsClient")), - )), - ]; + ), + ]); let sp_type = SpType::Sled; let sp_slot = 0; @@ -199,7 +201,7 @@ async fn test_sp_updater_remembers_successful_mgs_instance() { sp_updater.update(mgs_clients).await.expect("update failed"); let last_update_image = mgstestctx.simrack.gimlets[sp_slot as usize] - .last_update_data() + .last_sp_update_data() .await .expect("simulated SP did not receive an update"); @@ -288,18 +290,18 @@ async fn test_sp_updater_switches_mgs_instances_on_failure() { reqwest::Client::builder().pool_max_idle_per_host(0).build().unwrap(); // Configure two MGS clients pointed at our two proxy tasks. - let mgs_clients = [ - Arc::new(gateway_client::Client::new_with_client( + let mgs_clients = MgsClients::from_clients([ + gateway_client::Client::new_with_client( &format!("http://{mgs_proxy_one_addr}"), client.clone(), mgstestctx.logctx.log.new(slog::o!("component" => "MgsClient1")), - )), - Arc::new(gateway_client::Client::new_with_client( + ), + gateway_client::Client::new_with_client( &format!("http://{mgs_proxy_two_addr}"), client, mgstestctx.logctx.log.new(slog::o!("component" => "MgsClient2")), - )), - ]; + ), + ]); let sp_type = SpType::Sled; let sp_slot = 0; @@ -408,7 +410,7 @@ async fn test_sp_updater_switches_mgs_instances_on_failure() { ); let last_update_image = mgstestctx.simrack.gimlets[sp_slot as usize] - .last_update_data() + .last_sp_update_data() .await .expect("simulated SP did not receive an update"); @@ -434,10 +436,10 @@ async fn test_sp_updater_delivers_progress() { .await; // Configure an MGS client. 
- let mgs_client = Arc::new(gateway_client::Client::new( + let mgs_clients = MgsClients::from_clients([gateway_client::Client::new( &mgstestctx.client.url("/").to_string(), mgstestctx.logctx.log.new(slog::o!("component" => "MgsClient")), - )); + )]); let sp_type = SpType::Sled; let sp_slot = 0; @@ -468,7 +470,7 @@ async fn test_sp_updater_delivers_progress() { // Spawn the update on a background task so we can watch `progress` as it is // applied. - let do_update_task = tokio::spawn(sp_updater.update([mgs_client])); + let do_update_task = tokio::spawn(sp_updater.update(mgs_clients)); // Allow the SP to respond to 2 messages: the caboose check and the "prepare // update" messages that trigger the start of an update, then ensure we see @@ -589,10 +591,13 @@ async fn test_sp_updater_delivers_progress() { progress.changed().await.unwrap(); assert_eq!(*progress.borrow_and_update(), Some(UpdateProgress::Complete)); + // drop our progress receiver so `do_update_task` can complete + mem::drop(progress); + do_update_task.await.expect("update task panicked").expect("update failed"); let last_update_image = target_sp - .last_update_data() + .last_sp_update_data() .await .expect("simulated SP did not receive an update"); diff --git a/sp-sim/src/gimlet.rs b/sp-sim/src/gimlet.rs index be8d903d3f..0c753b62b5 100644 --- a/sp-sim/src/gimlet.rs +++ b/sp-sim/src/gimlet.rs @@ -14,6 +14,7 @@ use crate::server::UdpServer; use crate::update::SimSpUpdate; use crate::Responsiveness; use crate::SimulatedSp; +use crate::SIM_ROT_BOARD; use anyhow::{anyhow, bail, Context, Result}; use async_trait::async_trait; use futures::future; @@ -107,10 +108,16 @@ impl SimulatedSp for Gimlet { self.rot.lock().unwrap().handle_deserialized(request) } - async fn last_update_data(&self) -> Option> { + async fn last_sp_update_data(&self) -> Option> { let handler = self.handler.as_ref()?; let handler = handler.lock().await; - handler.update_state.last_update_data() + handler.update_state.last_sp_update_data() + } + + async fn last_rot_update_data(&self) -> Option> { + let handler = self.handler.as_ref()?; + let handler = handler.lock().await; + handler.update_state.last_rot_update_data() } async fn current_update_status(&self) -> gateway_messages::UpdateStatus { @@ -573,7 +580,7 @@ struct Handler { power_state: PowerState, startup_options: StartupOptions, update_state: SimSpUpdate, - reset_pending: bool, + reset_pending: Option, // To simulate an SP reset, we should (after doing whatever housekeeping we // need to track the reset) intentionally _fail_ to respond to the request, @@ -615,7 +622,7 @@ impl Handler { power_state: PowerState::A2, startup_options: StartupOptions::empty(), update_state: SimSpUpdate::default(), - reset_pending: false, + reset_pending: None, should_fail_to_respond_signal: None, } } @@ -1065,8 +1072,9 @@ impl SpHandler for Handler { "port" => ?port, "component" => ?component, ); - if component == SpComponent::SP_ITSELF { - self.reset_pending = true; + if component == SpComponent::SP_ITSELF || component == SpComponent::ROT + { + self.reset_pending = Some(component); Ok(()) } else { Err(SpError::RequestUnsupportedForComponent) @@ -1086,9 +1094,9 @@ impl SpHandler for Handler { "component" => ?component, ); if component == SpComponent::SP_ITSELF { - if self.reset_pending { + if self.reset_pending == Some(SpComponent::SP_ITSELF) { self.update_state.sp_reset(); - self.reset_pending = false; + self.reset_pending = None; if let Some(signal) = self.should_fail_to_respond_signal.take() { // Instruct 
`server::handle_request()` to _not_ respond to @@ -1099,6 +1107,14 @@ impl SpHandler for Handler { } else { Err(SpError::ResetComponentTriggerWithoutPrepare) } + } else if component == SpComponent::ROT { + if self.reset_pending == Some(SpComponent::ROT) { + self.update_state.rot_reset(); + self.reset_pending = None; + Ok(()) + } else { + Err(SpError::ResetComponentTriggerWithoutPrepare) + } } else { Err(SpError::RequestUnsupportedForComponent) } @@ -1322,7 +1338,7 @@ impl SpHandler for Handler { static SP_VERS: &[u8] = b"0.0.1"; static ROT_GITC: &[u8] = b"eeeeeeee"; - static ROT_BORD: &[u8] = b"SimGimletRot"; + static ROT_BORD: &[u8] = SIM_ROT_BOARD.as_bytes(); static ROT_NAME: &[u8] = b"SimGimlet"; static ROT_VERS: &[u8] = b"0.0.1"; diff --git a/sp-sim/src/lib.rs b/sp-sim/src/lib.rs index 668c7c3311..0958e8a177 100644 --- a/sp-sim/src/lib.rs +++ b/sp-sim/src/lib.rs @@ -28,6 +28,8 @@ use std::net::SocketAddrV6; use tokio::sync::mpsc; use tokio::sync::watch; +pub const SIM_ROT_BOARD: &str = "SimRot"; + #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum Responsiveness { Responsive, @@ -58,8 +60,13 @@ pub trait SimulatedSp { /// Get the last completed update delivered to this simulated SP. /// - /// Only returns data after a simulated reset. - async fn last_update_data(&self) -> Option>; + /// Only returns data after a simulated reset of the SP. + async fn last_sp_update_data(&self) -> Option>; + + /// Get the last completed update delivered to this simulated RoT. + /// + /// Only returns data after a simulated reset of the RoT. + async fn last_rot_update_data(&self) -> Option>; /// Get the current update status, just as would be returned by an MGS /// request to get the update status. diff --git a/sp-sim/src/sidecar.rs b/sp-sim/src/sidecar.rs index c8fb4c5481..46fe8b5df7 100644 --- a/sp-sim/src/sidecar.rs +++ b/sp-sim/src/sidecar.rs @@ -16,6 +16,7 @@ use crate::server::UdpServer; use crate::update::SimSpUpdate; use crate::Responsiveness; use crate::SimulatedSp; +use crate::SIM_ROT_BOARD; use anyhow::Result; use async_trait::async_trait; use futures::future; @@ -118,10 +119,16 @@ impl SimulatedSp for Sidecar { self.rot.lock().unwrap().handle_deserialized(request) } - async fn last_update_data(&self) -> Option> { + async fn last_sp_update_data(&self) -> Option> { let handler = self.handler.as_ref()?; let handler = handler.lock().await; - handler.update_state.last_update_data() + handler.update_state.last_sp_update_data() + } + + async fn last_rot_update_data(&self) -> Option> { + let handler = self.handler.as_ref()?; + let handler = handler.lock().await; + handler.update_state.last_rot_update_data() } async fn current_update_status(&self) -> gateway_messages::UpdateStatus { @@ -380,7 +387,7 @@ struct Handler { power_state: PowerState, update_state: SimSpUpdate, - reset_pending: bool, + reset_pending: Option, // To simulate an SP reset, we should (after doing whatever housekeeping we // need to track the reset) intentionally _fail_ to respond to the request, @@ -419,7 +426,7 @@ impl Handler { rot_active_slot: RotSlotId::A, power_state: PowerState::A2, update_state: SimSpUpdate::default(), - reset_pending: false, + reset_pending: None, should_fail_to_respond_signal: None, } } @@ -846,8 +853,9 @@ impl SpHandler for Handler { "port" => ?port, "component" => ?component, ); - if component == SpComponent::SP_ITSELF { - self.reset_pending = true; + if component == SpComponent::SP_ITSELF || component == SpComponent::ROT + { + self.reset_pending = Some(component); Ok(()) } else { 
Err(SpError::RequestUnsupportedForComponent) @@ -867,9 +875,9 @@ impl SpHandler for Handler { "component" => ?component, ); if component == SpComponent::SP_ITSELF { - if self.reset_pending { + if self.reset_pending == Some(SpComponent::SP_ITSELF) { self.update_state.sp_reset(); - self.reset_pending = false; + self.reset_pending = None; if let Some(signal) = self.should_fail_to_respond_signal.take() { // Instruct `server::handle_request()` to _not_ respond to @@ -880,6 +888,14 @@ impl SpHandler for Handler { } else { Err(SpError::ResetComponentTriggerWithoutPrepare) } + } else if component == SpComponent::ROT { + if self.reset_pending == Some(SpComponent::ROT) { + self.update_state.rot_reset(); + self.reset_pending = None; + Ok(()) + } else { + Err(SpError::ResetComponentTriggerWithoutPrepare) + } } else { Err(SpError::RequestUnsupportedForComponent) } @@ -1101,7 +1117,7 @@ impl SpHandler for Handler { static SP_VERS: &[u8] = b"0.0.1"; static ROT_GITC: &[u8] = b"eeeeeeee"; - static ROT_BORD: &[u8] = b"SimSidecarRot"; + static ROT_BORD: &[u8] = SIM_ROT_BOARD.as_bytes(); static ROT_NAME: &[u8] = b"SimSidecar"; static ROT_VERS: &[u8] = b"0.0.1"; diff --git a/sp-sim/src/update.rs b/sp-sim/src/update.rs index e57659ca1a..9879a3ecde 100644 --- a/sp-sim/src/update.rs +++ b/sp-sim/src/update.rs @@ -13,12 +13,17 @@ use gateway_messages::UpdateInProgressStatus; pub(crate) struct SimSpUpdate { state: UpdateState, - last_update_data: Option>, + last_sp_update_data: Option>, + last_rot_update_data: Option>, } impl Default for SimSpUpdate { fn default() -> Self { - Self { state: UpdateState::NotPrepared, last_update_data: None } + Self { + state: UpdateState::NotPrepared, + last_sp_update_data: None, + last_rot_update_data: None, + } } } @@ -80,6 +85,7 @@ impl SimSpUpdate { let mut stolen = Cursor::new(Box::default()); mem::swap(data, &mut stolen); self.state = UpdateState::Completed { + component: *component, id: *id, data: stolen.into_inner(), }; @@ -112,16 +118,37 @@ impl SimSpUpdate { } pub(crate) fn sp_reset(&mut self) { - self.last_update_data = match &self.state { - UpdateState::Completed { data, .. } => Some(data.clone()), + match &self.state { + UpdateState::Completed { data, component, .. } => { + if *component == SpComponent::SP_ITSELF { + self.last_sp_update_data = Some(data.clone()); + } + } + UpdateState::NotPrepared + | UpdateState::Prepared { .. } + | UpdateState::Aborted(_) => (), + } + } + + pub(crate) fn rot_reset(&mut self) { + match &self.state { + UpdateState::Completed { data, component, .. } => { + if *component == SpComponent::ROT { + self.last_rot_update_data = Some(data.clone()); + } + } UpdateState::NotPrepared | UpdateState::Prepared { .. 
} - | UpdateState::Aborted(_) => None, - }; + | UpdateState::Aborted(_) => (), + } + } + + pub(crate) fn last_sp_update_data(&self) -> Option> { + self.last_sp_update_data.clone() } - pub(crate) fn last_update_data(&self) -> Option> { - self.last_update_data.clone() + pub(crate) fn last_rot_update_data(&self) -> Option> { + self.last_rot_update_data.clone() } } @@ -138,6 +165,7 @@ enum UpdateState { }, Aborted(UpdateId), Completed { + component: SpComponent, id: UpdateId, data: Box<[u8]>, }, diff --git a/wicket-common/src/update_events.rs b/wicket-common/src/update_events.rs index e0f9d4b228..fe92887646 100644 --- a/wicket-common/src/update_events.rs +++ b/wicket-common/src/update_events.rs @@ -10,6 +10,7 @@ use schemars::JsonSchema; use serde::Deserialize; use serde::Serialize; use std::fmt; +use std::sync::Arc; use thiserror::Error; use update_engine::errors::NestedEngineError; use update_engine::StepSpec; @@ -197,12 +198,13 @@ pub enum UpdateTerminalError { #[source] error: gateway_client::Error, }, - #[error("failed to upload trampoline phase 2 to MGS (was a new TUF repo uploaded?)")] - // Currently, the only way this error variant can be produced is if the - // upload task died or was replaced because a new TUF repository was - // uploaded. In the future, we may want to produce errors here if the upload - // to MGS fails too many times, for example. - TrampolinePhase2UploadFailed, + #[error("uploading trampoline phase 2 to MGS failed")] + TrampolinePhase2UploadFailed { + #[source] + error: Arc, + }, + #[error("uploading trampoline phase 2 to MGS cancelled (was a new TUF repo uploaded?)")] + TrampolinePhase2UploadCancelled, #[error("downloading installinator failed")] DownloadingInstallinatorFailed { #[source] diff --git a/wicketd/src/update_tracker.rs b/wicketd/src/update_tracker.rs index f4b5db2476..a86ea35cc3 100644 --- a/wicketd/src/update_tracker.rs +++ b/wicketd/src/update_tracker.rs @@ -41,7 +41,6 @@ use installinator_common::InstallinatorSpec; use installinator_common::M2Slot; use installinator_common::WriteOutput; use omicron_common::api::external::SemverVersion; -use omicron_common::backoff; use omicron_common::update::ArtifactHash; use slog::error; use slog::info; @@ -103,12 +102,22 @@ struct SpUpdateData { } #[derive(Debug)] -struct UploadTrampolinePhase2ToMgsStatus { - hash: ArtifactHash, - // The upload task retries forever until it succeeds, so we don't need to - // keep a "tried but failed" variant here; we just need to know the ID of - // the uploaded image once it's done. - uploaded_image_id: Option, +enum UploadTrampolinePhase2ToMgsStatus { + Running { hash: ArtifactHash }, + Done { hash: ArtifactHash, uploaded_image_id: HostPhase2RecoveryImageId }, + Failed(Arc), +} + +impl UploadTrampolinePhase2ToMgsStatus { + fn hash(&self) -> Option { + match self { + UploadTrampolinePhase2ToMgsStatus::Running { hash } + | UploadTrampolinePhase2ToMgsStatus::Done { hash, .. } => { + Some(*hash) + } + UploadTrampolinePhase2ToMgsStatus::Failed(_) => None, + } + } } #[derive(Debug)] @@ -308,9 +317,8 @@ impl UpdateTracker { ) -> UploadTrampolinePhase2ToMgs { let artifact = plan.trampoline_phase_2.clone(); let (status_tx, status_rx) = - watch::channel(UploadTrampolinePhase2ToMgsStatus { + watch::channel(UploadTrampolinePhase2ToMgsStatus::Running { hash: artifact.data.hash(), - uploaded_image_id: None, }); let task = tokio::spawn(upload_trampoline_phase_2_to_mgs( self.mgs_client.clone(), @@ -426,8 +434,8 @@ impl<'tr> SpawnUpdateDriver for RealSpawnUpdateDriver<'tr> { // this artifact? 
If not, cancel the old task (which // might still be trying to upload) and start a new one // with our current image. - if prev.status.borrow().hash - != plan.trampoline_phase_2.data.hash() + if prev.status.borrow().hash() + != Some(plan.trampoline_phase_2.data.hash()) { // It does _not_ match - we have a new plan with a // different trampoline image. If the old task is @@ -1147,19 +1155,38 @@ impl UpdateDriver { // We expect this loop to run just once, but iterate just in // case the image ID doesn't get populated the first time. loop { + match &*upload_trampoline_phase_2_to_mgs.borrow_and_update() + { + UploadTrampolinePhase2ToMgsStatus::Running { .. } => { + // fall through to `.changed()` below + }, + UploadTrampolinePhase2ToMgsStatus::Done { + uploaded_image_id, + .. + } => { + return StepSuccess::new( + uploaded_image_id.clone(), + ).into(); + } + UploadTrampolinePhase2ToMgsStatus::Failed(error) => { + let error = Arc::clone(error); + return Err(UpdateTerminalError::TrampolinePhase2UploadFailed { + error, + }); + } + } + + // `upload_trampoline_phase_2_to_mgs` holds onto the sending + // half of this channel until all receivers are gone, so the + // only way we can fail to receive here is if that task + // panicked (which would abort our process) or was cancelled + // (because a new TUF repo has been uploaded), in which case + // we should fail the current update. upload_trampoline_phase_2_to_mgs.changed().await.map_err( |_recv_err| { - UpdateTerminalError::TrampolinePhase2UploadFailed + UpdateTerminalError::TrampolinePhase2UploadCancelled } )?; - - if let Some(image_id) = upload_trampoline_phase_2_to_mgs - .borrow() - .uploaded_image_id - .as_ref() - { - return StepSuccess::new(image_id.clone()).into(); - } } }, ).register(); @@ -2149,59 +2176,68 @@ async fn upload_trampoline_phase_2_to_mgs( status: watch::Sender, log: Logger, ) { - let data = artifact.data; - let hash = data.hash(); - let upload_task = move || { - let mgs_client = mgs_client.clone(); - let data = data.clone(); - - async move { - let image_stream = data.reader_stream().await.map_err(|e| { - // TODO-correctness If we get an I/O error opening the file - // associated with `data`, is it actually a transient error? If - // we change this to `permanent` we'll have to do some different - // error handling below and at our call site to retry. We - // _shouldn't_ get errors from `reader_stream()` in general, so - // it's probably okay either way? - backoff::BackoffError::transient(format!("{e:#}")) - })?; - mgs_client - .recovery_host_phase2_upload(reqwest::Body::wrap_stream( - image_stream, - )) - .await - .map_err(|e| backoff::BackoffError::transient(e.to_string())) - } - }; + // We make at most 3 attempts to upload the trampoline to our local MGS, + // sleeping briefly between attempts if we fail. 
+ const MAX_ATTEMPTS: usize = 3; + const SLEEP_BETWEEN_ATTEMPTS: Duration = Duration::from_secs(1); + + let mut attempt = 1; + let final_status = loop { + let image_stream = match artifact.data.reader_stream().await { + Ok(stream) => stream, + Err(err) => { + error!( + log, "failed to read trampoline phase 2"; + "err" => #%err, + ); + break UploadTrampolinePhase2ToMgsStatus::Failed(Arc::new( + err.context("failed to read trampoline phase 2"), + )); + } + }; - let log_failure = move |err, delay| { - warn!( - log, - "failed to upload trampoline phase 2 to MGS, will retry in {:?}", - delay; - "err" => %err, - ); + match mgs_client + .recovery_host_phase2_upload(reqwest::Body::wrap_stream( + image_stream, + )) + .await + { + Ok(response) => { + break UploadTrampolinePhase2ToMgsStatus::Done { + hash: artifact.data.hash(), + uploaded_image_id: response.into_inner(), + }; + } + Err(err) => { + if attempt < MAX_ATTEMPTS { + error!( + log, "failed to upload trampoline phase 2 to MGS; \ + will retry after {SLEEP_BETWEEN_ATTEMPTS:?}"; + "attempt" => attempt, + "err" => %DisplayErrorChain::new(&err), + ); + tokio::time::sleep(SLEEP_BETWEEN_ATTEMPTS).await; + attempt += 1; + continue; + } else { + error!( + log, "failed to upload trampoline phase 2 to MGS; \ + giving up"; + "attempt" => attempt, + "err" => %DisplayErrorChain::new(&err), + ); + break UploadTrampolinePhase2ToMgsStatus::Failed(Arc::new( + anyhow::Error::new(err) + .context("failed to upload trampoline phase 2"), + )); + } + } + } }; - // retry_policy_internal_service_aggressive() retries forever, so we can - // unwrap this call to retry_notify - let uploaded_image_id = backoff::retry_notify( - backoff::retry_policy_internal_service_aggressive(), - upload_task, - log_failure, - ) - .await - .unwrap() - .into_inner(); - - // Notify all receivers that we've uploaded the image. - _ = status.send(UploadTrampolinePhase2ToMgsStatus { - hash, - uploaded_image_id: Some(uploaded_image_id), - }); - - // Wait for all receivers to be gone before we exit, so they don't get recv - // errors unless we're cancelled. + // Send our final status, then wait for all receivers to be gone before we + // exit, so they don't get recv errors unless we're cancelled. + status.send_replace(final_status); status.closed().await; } diff --git a/wicketd/tests/integration_tests/updates.rs b/wicketd/tests/integration_tests/updates.rs index b65833a74b..52bf1d1283 100644 --- a/wicketd/tests/integration_tests/updates.rs +++ b/wicketd/tests/integration_tests/updates.rs @@ -177,15 +177,15 @@ async fn test_updates() { StepEventKind::ExecutionFailed { failed_step, .. } => { // TODO: obviously we shouldn't stop here, get past more of the // update process in this test. - assert_eq!(failed_step.info.component, UpdateComponent::Rot); + assert_eq!(failed_step.info.component, UpdateComponent::Host); } other => { panic!("unexpected terminal event kind: {other:?}"); } } - // Try starting the update again -- this should fail because we require that update state is - // cleared before starting a new one. + // Try starting the update again -- this should fail because we require that + // update state is cleared before starting a new one. { let error = wicketd_testctx .wicketd_client @@ -197,8 +197,8 @@ async fn test_updates() { ); let error_str = error.to_string(); assert!( - // Errors lose type information across the OpenAPI boundary, so sadly we have to match on - // the error string. 
+ // Errors lose type information across the OpenAPI boundary, so + // sadly we have to match on the error string. error_str.contains("existing update data found"), "unexpected error: {error_str}" ); From 26a8db3cd8b850c3bbf221b1d1d129148a788e08 Mon Sep 17 00:00:00 2001 From: James MacMahon Date: Tue, 21 Nov 2023 15:54:15 -0500 Subject: [PATCH 35/56] Various volume management fixes (#4410) This commit bundles up a few fixes related to volume management: - `find_deleted_volume_regions` now returns the `Option` that resulted from the `left_join` in the query for freed regions. This is then consulted to see if sending a DELETE for that region is safe: if the `Option` is `Some`, then the region snapshot has not been deleted yet, and sending a region DELETE will surely result in a `must delete snapshots first` error from the corresponding Crucible agent. - Fix a few typos in nexus/src/app/sagas/common_storage.rs - Nexus now waits for the Agent's `zfs destroy` of a snapshot to take place. Otherwise if illumos doesn't immediately remove the snapshot it may be returned by a subsequent `zfs list` later. - Either `decrease_crucible_resource_count_and_soft_delete_volume` or `volume_hard_delete` should be called when unwinding a saga, calling both is not required. - In the snapshot deletion saga, use `append_parallel` for the two volume delete sub sagas: in order to _not_ orphan Crucible resources, it's important that both volumes be soft deleted, and that failing to delete one volume's Crucible resources does not cause the other to _not_ be soft deleted. - Also fix a very confusing typo when building the destination volume delete sub saga. --- nexus/db-queries/src/db/datastore/volume.rs | 3 +- nexus/src/app/sagas/common_storage.rs | 218 ++++++++++++++++---- nexus/src/app/sagas/snapshot_create.rs | 23 ++- nexus/src/app/sagas/snapshot_delete.rs | 35 +++- nexus/src/app/sagas/volume_delete.rs | 138 ++++++++++--- 5 files changed, 319 insertions(+), 98 deletions(-) diff --git a/nexus/db-queries/src/db/datastore/volume.rs b/nexus/db-queries/src/db/datastore/volume.rs index 1e64d784f7..5f126050ae 100644 --- a/nexus/db-queries/src/db/datastore/volume.rs +++ b/nexus/db-queries/src/db/datastore/volume.rs @@ -457,7 +457,7 @@ impl DataStore { /// snapshots. pub async fn find_deleted_volume_regions( &self, - ) -> ListResultVec<(Dataset, Region, Volume)> { + ) -> ListResultVec<(Dataset, Region, Option, Volume)> { use db::schema::dataset::dsl as dataset_dsl; use db::schema::region::dsl as region_dsl; use db::schema::region_snapshot::dsl; @@ -494,6 +494,7 @@ impl DataStore { .select(( Dataset::as_select(), Region::as_select(), + Option::::as_select(), Volume::as_select(), )) .load_async(&*self.pool_connection_unauthorized().await?) diff --git a/nexus/src/app/sagas/common_storage.rs b/nexus/src/app/sagas/common_storage.rs index a57afb215d..a7350d91fd 100644 --- a/nexus/src/app/sagas/common_storage.rs +++ b/nexus/src/app/sagas/common_storage.rs @@ -73,7 +73,9 @@ pub(crate) async fn ensure_region_in_dataset( let log_create_failure = |_, delay| { warn!( log, - "Region requested, not yet created. Retrying in {:?}", delay + "Region requested, not yet created. 
Retrying in {:?}", + delay; + "region" => %region.id(), ); }; @@ -157,7 +159,12 @@ pub(super) async fn delete_crucible_region( .await; if let Err(e) = result { - error!(log, "delete_crucible_region: region_get saw {:?}", e); + error!( + log, + "delete_crucible_region: region_get saw {:?}", + e; + "region_id" => %region_id, + ); match e { crucible_agent_client::Error::ErrorResponse(rv) => { match rv.status() { @@ -191,7 +198,12 @@ pub(super) async fn delete_crucible_region( }) .await .map_err(|e| { - error!(log, "delete_crucible_region: region_delete saw {:?}", e); + error!( + log, + "delete_crucible_region: region_delete saw {:?}", + e; + "region_id" => %region_id, + ); match e { crucible_agent_client::Error::ErrorResponse(rv) => { match rv.status() { @@ -226,7 +238,12 @@ pub(super) async fn delete_crucible_region( }) .await .map_err(|e| { - error!(log, "delete_crucible_region: region_get saw {:?}", e); + error!( + log, + "delete_crucible_region: region_get saw {:?}", + e; + "region_id" => %region_id, + ); match e { crucible_agent_client::Error::ErrorResponse(rv) => { @@ -250,29 +267,33 @@ pub(super) async fn delete_crucible_region( })?; match region.state { - RegionState::Tombstoned => { - Err(BackoffError::transient(WaitError::Transient(anyhow!( - "region {} not deleted yet", - region_id.to_string(), - )))) - } + RegionState::Tombstoned => Err(BackoffError::transient( + WaitError::Transient(anyhow!("region not deleted yet")), + )), RegionState::Destroyed => { - info!(log, "region {} deleted", region_id.to_string(),); + info!( + log, + "region deleted"; + "region_id" => %region_id, + ); Ok(()) } - _ => { - Err(BackoffError::transient(WaitError::Transient(anyhow!( - "region {} unexpected state", - region_id.to_string(), - )))) - } + _ => Err(BackoffError::transient(WaitError::Transient( + anyhow!("region unexpected state {:?}", region.state), + ))), } }, |e: WaitError, delay| { - info!(log, "{:?}, trying again in {:?}", e, delay,); + info!( + log, + "{:?}, trying again in {:?}", + e, + delay; + "region_id" => %region_id, + ); }, ) .await @@ -338,8 +359,10 @@ pub(super) async fn delete_crucible_running_snapshot( .map_err(|e| { error!( log, - "delete_crucible_snapshot: region_delete_running_snapshot saw {:?}", - e + "delete_crucible_running_snapshot: region_delete_running_snapshot saw {:?}", + e; + "region_id" => %region_id, + "snapshot_id" => %snapshot_id, ); match e { crucible_agent_client::Error::ErrorResponse(rv) => { @@ -377,7 +400,14 @@ pub(super) async fn delete_crucible_running_snapshot( }) .await .map_err(|e| { - error!(log, "delete_crucible_snapshot: region_get_snapshots saw {:?}", e); + error!( + log, + "delete_crucible_running_snapshot: region_get_snapshots saw {:?}", + e; + "region_id" => %region_id, + "snapshot_id" => %snapshot_id, + ); + match e { crucible_agent_client::Error::ErrorResponse(rv) => { match rv.status() { @@ -409,19 +439,17 @@ pub(super) async fn delete_crucible_running_snapshot( Some(running_snapshot) => { info!( log, - "region {} snapshot {} running_snapshot is Some, state is {}", - region_id.to_string(), - snapshot_id.to_string(), - running_snapshot.state.to_string(), + "running_snapshot is Some, state is {}", + running_snapshot.state.to_string(); + "region_id" => %region_id, + "snapshot_id" => %snapshot_id, ); match running_snapshot.state { RegionState::Tombstoned => { Err(BackoffError::transient( WaitError::Transient(anyhow!( - "region {} snapshot {} running_snapshot not deleted yet", - region_id.to_string(), - snapshot_id.to_string(), + "running_snapshot 
tombstoned, not deleted yet", ) ))) } @@ -429,9 +457,7 @@ pub(super) async fn delete_crucible_running_snapshot( RegionState::Destroyed => { info!( log, - "region {} snapshot {} running_snapshot deleted", - region_id.to_string(), - snapshot_id.to_string(), + "running_snapshot deleted", ); Ok(()) @@ -440,9 +466,7 @@ pub(super) async fn delete_crucible_running_snapshot( _ => { Err(BackoffError::transient( WaitError::Transient(anyhow!( - "region {} snapshot {} running_snapshot unexpected state", - region_id.to_string(), - snapshot_id.to_string(), + "running_snapshot unexpected state", ) ))) } @@ -453,9 +477,9 @@ pub(super) async fn delete_crucible_running_snapshot( // deleted? info!( log, - "region {} snapshot {} running_snapshot is None", - region_id.to_string(), - snapshot_id.to_string(), + "running_snapshot is None"; + "region_id" => %region_id, + "snapshot_id" => %snapshot_id, ); // break here - it's possible that the running snapshot @@ -469,7 +493,9 @@ pub(super) async fn delete_crucible_running_snapshot( log, "{:?}, trying again in {:?}", e, - delay, + delay; + "region_id" => %region_id, + "snapshot_id" => %snapshot_id, ); } ) @@ -494,7 +520,14 @@ pub(super) async fn delete_crucible_snapshot( region_id: Uuid, snapshot_id: Uuid, ) -> Result<(), Error> { - // delete snapshot - this endpoint is synchronous, it is not only a request + // Unlike other Crucible agent endpoints, this one is synchronous in that it + // is not only a request to the Crucible agent: `zfs destroy` is performed + // right away. However this is still a request to illumos that may not take + // effect right away. Wait until the snapshot no longer appears in the list + // of region snapshots, meaning it was not returned from `zfs list`. + + info!(log, "deleting region {region_id} snapshot {snapshot_id}"); + retry_until_known_result(log, || async { client .region_delete_snapshot( @@ -507,7 +540,10 @@ pub(super) async fn delete_crucible_snapshot( .map_err(|e| { error!( log, - "delete_crucible_snapshot: region_delete_snapshot saw {:?}", e + "delete_crucible_snapshot: region_delete_snapshot saw {:?}", + e; + "region_id" => %region_id, + "snapshot_id" => %snapshot_id, ); match e { crucible_agent_client::Error::ErrorResponse(rv) => { @@ -524,7 +560,101 @@ pub(super) async fn delete_crucible_snapshot( } })?; - Ok(()) + #[derive(Debug, thiserror::Error)] + enum WaitError { + #[error("Transient error: {0}")] + Transient(#[from] anyhow::Error), + + #[error("Permanent error: {0}")] + Permanent(#[from] Error), + } + + backoff::retry_notify( + backoff::retry_policy_internal_service_aggressive(), + || async { + let response = retry_until_known_result(log, || async { + client + .region_get_snapshots(&RegionId(region_id.to_string())) + .await + }) + .await + .map_err(|e| { + error!( + log, + "delete_crucible_snapshot: region_get_snapshots saw {:?}", + e; + "region_id" => %region_id, + "snapshot_id" => %snapshot_id, + ); + match e { + crucible_agent_client::Error::ErrorResponse(rv) => { + match rv.status() { + status if status.is_client_error() => { + BackoffError::Permanent(WaitError::Permanent( + Error::invalid_request(&rv.message), + )) + } + _ => BackoffError::Permanent(WaitError::Permanent( + Error::internal_error(&rv.message), + )), + } + } + _ => BackoffError::Permanent(WaitError::Permanent( + Error::internal_error( + "unexpected failure during `region_get_snapshots`", + ), + )), + } + })?; + + if response + .snapshots + .iter() + .any(|x| x.name == snapshot_id.to_string()) + { + info!( + log, + "snapshot still exists, waiting"; 
+ "region_id" => %region_id, + "snapshot_id" => %snapshot_id, + ); + + Err(BackoffError::transient(WaitError::Transient(anyhow!( + "snapshot not deleted yet", + )))) + } else { + info!( + log, + "snapshot deleted"; + "region_id" => %region_id, + "snapshot_id" => %snapshot_id, + ); + + Ok(()) + } + }, + |e: WaitError, delay| { + info!( + log, + "{:?}, trying again in {:?}", + e, + delay; + "region_id" => %region_id, + "snapshot_id" => %snapshot_id, + ); + }, + ) + .await + .map_err(|e| match e { + WaitError::Transient(e) => { + // The backoff crate can be configured with a maximum elapsed time + // before giving up, which means that Transient could be returned + // here. Our current policies do **not** set this though. + Error::internal_error(&e.to_string()) + } + + WaitError::Permanent(e) => e, + }) } // Given a list of datasets and region snapshots, send DELETE calls to the @@ -645,10 +775,8 @@ pub(crate) async fn call_pantry_attach_for_disk( info!( log, - "sending attach for disk {} volume {} to endpoint {}", - disk_id, + "sending attach for disk {disk_id} volume {} to endpoint {endpoint}", disk.volume_id, - endpoint, ); let volume_construction_request: crucible_pantry_client::types::VolumeConstructionRequest = @@ -684,7 +812,7 @@ pub(crate) async fn call_pantry_detach_for_disk( ) -> Result<(), ActionError> { let endpoint = format!("http://{}", pantry_address); - info!(log, "sending detach for disk {} to endpoint {}", disk_id, endpoint,); + info!(log, "sending detach for disk {disk_id} to endpoint {endpoint}"); let client = crucible_pantry_client::Client::new(&endpoint); diff --git a/nexus/src/app/sagas/snapshot_create.rs b/nexus/src/app/sagas/snapshot_create.rs index 5a686b2f3d..3b4dfc0043 100644 --- a/nexus/src/app/sagas/snapshot_create.rs +++ b/nexus/src/app/sagas/snapshot_create.rs @@ -496,17 +496,19 @@ async fn ssc_create_destination_volume_record( async fn ssc_create_destination_volume_record_undo( sagactx: NexusActionContext, ) -> Result<(), anyhow::Error> { + let log = sagactx.user_data().log(); let osagactx = sagactx.user_data(); let destination_volume_id = sagactx.lookup::("destination_volume_id")?; - osagactx - .datastore() - .decrease_crucible_resource_count_and_soft_delete_volume( - destination_volume_id, - ) - .await?; + // This saga contains what is necessary to clean up the destination volume + // resources. It's safe here to perform a volume hard delete without + // decreasing the crucible resource count because the destination volume is + // guaranteed to never have read only resources that require that + // accounting. + + info!(log, "hard deleting volume {}", destination_volume_id,); osagactx.datastore().volume_hard_delete(destination_volume_id).await?; @@ -1396,17 +1398,22 @@ async fn ssc_create_volume_record_undo( let osagactx = sagactx.user_data(); let volume_id = sagactx.lookup::("volume_id")?; + // `volume_create` will increase the resource count for read only resources + // in a volume, which there are guaranteed to be for snapshot volumes. + // decreasing crucible resources is necessary as an undo step. Do not call + // `volume_hard_delete` here: soft deleting volumes is necessary for + // `find_deleted_volume_regions` to work. 
+ info!( log, "calling decrease crucible resource count for volume {}", volume_id ); + osagactx .datastore() .decrease_crucible_resource_count_and_soft_delete_volume(volume_id) .await?; - osagactx.datastore().volume_hard_delete(volume_id).await?; - Ok(()) } diff --git a/nexus/src/app/sagas/snapshot_delete.rs b/nexus/src/app/sagas/snapshot_delete.rs index 0589b1ea03..75fc16754d 100644 --- a/nexus/src/app/sagas/snapshot_delete.rs +++ b/nexus/src/app/sagas/snapshot_delete.rs @@ -26,6 +26,9 @@ declare_saga_actions! { SPACE_ACCOUNT -> "no_result2" { + ssd_account_space } + NOOP -> "no_result3" { + + ssd_noop + } } #[derive(Debug)] @@ -71,7 +74,7 @@ impl NexusSaga for SagaSnapshotDelete { DELETE_VOLUME_DESTINATION_PARAMS, serde_json::to_value(&volume_delete_params).map_err(|e| { super::SagaInitError::SerializeError( - String::from("volume_id"), + String::from("destination_volume_id"), e, ) })?, @@ -83,16 +86,21 @@ impl NexusSaga for SagaSnapshotDelete { )); sagas::volume_delete::create_dag(subsaga_builder) }; - builder.append(steno::Node::subsaga( - "delete_volume", - make_volume_delete_dag()?, - DELETE_VOLUME_PARAMS, - )); - builder.append(steno::Node::subsaga( - "delete_destination_volume", - make_volume_delete_dag()?, - DELETE_VOLUME_DESTINATION_PARAMS, - )); + + builder.append_parallel(vec![ + steno::Node::subsaga( + "delete_volume", + make_volume_delete_dag()?, + DELETE_VOLUME_PARAMS, + ), + steno::Node::subsaga( + "delete_destination_volume", + make_volume_delete_dag()?, + DELETE_VOLUME_DESTINATION_PARAMS, + ), + ]); + + builder.append(noop_action()); Ok(builder.build()?) } @@ -148,3 +156,8 @@ async fn ssd_account_space( .map_err(ActionError::action_failed)?; Ok(()) } + +// Sagas must end in one node, not parallel +async fn ssd_noop(_sagactx: NexusActionContext) -> Result<(), ActionError> { + Ok(()) +} diff --git a/nexus/src/app/sagas/volume_delete.rs b/nexus/src/app/sagas/volume_delete.rs index 43530e913c..22425a0b99 100644 --- a/nexus/src/app/sagas/volume_delete.rs +++ b/nexus/src/app/sagas/volume_delete.rs @@ -332,6 +332,74 @@ async fn svd_delete_crucible_snapshot_records( /// be a different volume id (i.e. for a previously deleted disk) than the one /// in this saga's params struct. /// +/// It's insufficient to rely on the struct of CrucibleResources to clean up +/// that is returned as part of svd_decrease_crucible_resource_count. Imagine a +/// disk that is composed of three regions (a subset of +/// [`VolumeConstructionRequest`] is shown here): +/// +/// { +/// "type": "volume", +/// "id": "6b353c87-afac-4ee2-b71a-6fe35fcf9e46", +/// "sub_volumes": [ +/// { +/// "type": "region", +/// "opts": { +/// "targets": [ +/// "[fd00:1122:3344:101::5]:1000", +/// "[fd00:1122:3344:102::9]:1000", +/// "[fd00:1122:3344:103::2]:1000" +/// ], +/// "read_only": false +/// } +/// } +/// ], +/// "read_only_parent": null, +/// } +/// +/// Taking a snapshot of this will produce the following volume: +/// +/// { +/// "type": "volume", +/// "id": "1ef7282e-a3fb-4222-85a8-b16d3fbfd738", <-- new UUID +/// "sub_volumes": [ +/// { +/// "type": "region", +/// "opts": { +/// "targets": [ +/// "[fd00:1122:3344:101::5]:1001", <-- port changed +/// "[fd00:1122:3344:102::9]:1001", <-- port changed +/// "[fd00:1122:3344:103::2]:1001" <-- port changed +/// ], +/// "read_only": true <-- read_only now true +/// } +/// } +/// ], +/// "read_only_parent": null, +/// } +/// +/// The snapshot targets will use the same IP but different port: snapshots are +/// initially located on the same filesystem as their region. 
+/// +/// The disk's volume has no read only resources, while the snapshot's volume +/// does. The disk volume's targets are all regions (backed by downstairs that +/// are read/write) while the snapshot volume's targets are all snapshots +/// (backed by volumes that are read-only). The two volumes are linked in the +/// sense that the snapshots from the second are contained *within* the regions +/// of the first, reflecting the resource nesting from ZFS. This is also +/// reflected in the REST endpoint that the Crucible agent uses: +/// +/// /crucible/0/regions/{id}/snapshots/{name} +/// +/// If the disk is then deleted, the volume delete saga will run for the first +/// volume shown here. The CrucibleResources struct returned as part of +/// [`svd_decrease_crucible_resource_count`] will contain *nothing* to clean up: +/// the regions contain snapshots that are part of other volumes and cannot be +/// deleted, and the disk's volume doesn't reference any read-only resources. +/// +/// This is expected and normal: regions are "leaked" all the time due to +/// snapshots preventing their deletion. This part of the saga detects when +/// those regions can be cleaned up. +/// /// Note: each delete of a snapshot could trigger another delete of a region, if /// that region's use has gone to zero. A snapshot delete will never trigger /// another snapshot delete. @@ -353,42 +421,46 @@ async fn svd_delete_freed_crucible_regions( }, )?; - // Send DELETE calls to the corresponding Crucible agents - delete_crucible_regions( - log, + for (dataset, region, region_snapshot, volume) in freed_datasets_regions_and_volumes - .iter() - .map(|(d, r, _)| (d.clone(), r.clone())) - .collect(), - ) - .await - .map_err(|e| { - ActionError::action_failed(format!( - "failed to delete_crucible_regions: {:?}", - e, - )) - })?; + { + if region_snapshot.is_some() { + // We cannot delete this region yet, the snapshot has not been + // deleted. This can occur when multiple volume delete sagas run + // concurrently: one will decrement the crucible resources (but + // hasn't made the appropriate DELETE calls to remove the running + // snapshots and snapshots yet), and the other will be here trying + // to delete the region. This race results in the crucible agent + // returning "must delete snapshots first" and causing saga unwinds. + // + // Another volume delete (probably the one racing with this one!) + // will pick up this region and remove it. 
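Condensed, the per-region handling that follows looks roughly like this (a sketch with the saga plumbing and error mapping stripped out; `datastore` stands in for `osagactx.datastore()`, and the call names are the ones used below):

```rust
for (dataset, region, region_snapshot, volume) in
    freed_datasets_regions_and_volumes
{
    // A snapshot row still references this region: leave it for the saga
    // that is deleting that snapshot, instead of provoking the agent's
    // "must delete snapshots first" error and unwinding.
    if region_snapshot.is_some() {
        continue;
    }

    // Otherwise the region is truly free: ask the agent to destroy it,
    // then drop the region and volume database records.
    delete_crucible_regions(log, vec![(dataset.clone(), region.clone())]).await?;
    datastore.regions_hard_delete(log, vec![region.id()]).await?;
    datastore.volume_hard_delete(volume.id()).await?;
}
```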
+ continue; + } + + // Send DELETE calls to the corresponding Crucible agents + delete_crucible_regions(log, vec![(dataset.clone(), region.clone())]) + .await + .map_err(|e| { + ActionError::action_failed(format!( + "failed to delete_crucible_regions: {:?}", + e, + )) + })?; - // Remove region DB records - osagactx - .datastore() - .regions_hard_delete( - log, - freed_datasets_regions_and_volumes - .iter() - .map(|(_, r, _)| r.id()) - .collect(), - ) - .await - .map_err(|e| { - ActionError::action_failed(format!( - "failed to regions_hard_delete: {:?}", - e, - )) - })?; + // Remove region DB record + osagactx + .datastore() + .regions_hard_delete(log, vec![region.id()]) + .await + .map_err(|e| { + ActionError::action_failed(format!( + "failed to regions_hard_delete: {:?}", + e, + )) + })?; - // Remove volume DB records - for (_, _, volume) in &freed_datasets_regions_and_volumes { + // Remove volume DB record osagactx.datastore().volume_hard_delete(volume.id()).await.map_err( |e| { ActionError::action_failed(format!( From 2f92c02a0c9d1585ff1b3210565b0c9f0f25e9a4 Mon Sep 17 00:00:00 2001 From: John Gallagher Date: Tue, 21 Nov 2023 13:55:00 -0800 Subject: [PATCH 36/56] Reject bad combinations of `up*.sql` migrations (#4546) Fixes #4531. If I cherry-pick these changes onto `main` just prior to #4529 (i.e., when the `11.0.0` directory contained upsql files for both 10.0.0 and 11.0.0), the `dbinit_equals_sum_of_all_up` integration test fails as desired: ``` thread 'integration_tests::schema::dbinit_equals_sum_of_all_up' panicked at nexus/tests/integration_tests/schema.rs:133:58: called `Result::unwrap()` on an `Err` value: "invalid `up*.sql` combination: /data/github/omicron/nexus/../schema/crdb/11.0.0/up01.sql, /data/github/omicron/nexus/../schema/crdb/11.0.0/up1.sql" ``` --- Cargo.lock | 2 +- nexus/db-queries/Cargo.toml | 2 +- .../src/db/datastore/db_metadata.rs | 242 +++++++++++++++++- schema/crdb/README.adoc | 11 +- 4 files changed, 237 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b324f4919b..2e0663161d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4007,6 +4007,7 @@ dependencies = [ "base64 0.21.5", "bb8", "camino", + "camino-tempfile", "chrono", "cookie", "db-macros", @@ -4059,7 +4060,6 @@ dependencies = [ "steno", "strum", "subprocess", - "tempfile", "term", "thiserror", "tokio", diff --git a/nexus/db-queries/Cargo.toml b/nexus/db-queries/Cargo.toml index b1b8f3b28f..94e3a56abf 100644 --- a/nexus/db-queries/Cargo.toml +++ b/nexus/db-queries/Cargo.toml @@ -58,6 +58,7 @@ omicron-workspace-hack.workspace = true [dev-dependencies] assert_matches.workspace = true +camino-tempfile.workspace = true expectorate.workspace = true hyper-rustls.workspace = true gateway-client.workspace = true @@ -75,5 +76,4 @@ regex.workspace = true rustls.workspace = true strum.workspace = true subprocess.workspace = true -tempfile.workspace = true term.workspace = true diff --git a/nexus/db-queries/src/db/datastore/db_metadata.rs b/nexus/db-queries/src/db/datastore/db_metadata.rs index 0ae61a7c38..39a70f7a1e 100644 --- a/nexus/db-queries/src/db/datastore/db_metadata.rs +++ b/nexus/db-queries/src/db/datastore/db_metadata.rs @@ -26,12 +26,14 @@ use std::str::FromStr; pub const EARLIEST_SUPPORTED_VERSION: &'static str = "1.0.0"; /// Describes a single file containing a schema change, as SQL. +#[derive(Debug)] pub struct SchemaUpgradeStep { pub path: Utf8PathBuf, pub sql: String, } /// Describes a sequence of files containing schema changes. 
+#[derive(Debug)] pub struct SchemaUpgrade { pub steps: Vec, } @@ -39,10 +41,18 @@ pub struct SchemaUpgrade { /// Reads a "version directory" and reads all SQL changes into /// a result Vec. /// -/// Any file that starts with "up" and ends with "sql" is considered -/// part of the migration, and fully read to a string. +/// Files that do not begin with "up" and end with ".sql" are ignored. The +/// collection of `up*.sql` files must fall into one of these two conventions: /// -/// These are sorted lexicographically. +/// * "up.sql" with no other files +/// * "up1.sql", "up2.sql", ..., beginning from 1, optionally with leading +/// zeroes (e.g., "up01.sql", "up02.sql", ...). There is no maximum value, but +/// there may not be any gaps (e.g., if "up2.sql" and "up4.sql" exist, so must +/// "up3.sql") and there must not be any repeats (e.g., if "up1.sql" exists, +/// "up01.sql" must not exist). +/// +/// Any violation of these two rules will result in an error. Collections of the +/// second form (`up1.sql`, ...) will be sorted numerically. pub async fn all_sql_for_version_migration>( path: P, ) -> Result { @@ -54,19 +64,83 @@ pub async fn all_sql_for_version_migration>( for entry in entries { let entry = entry.map_err(|err| format!("Invalid entry: {err}"))?; let pathbuf = entry.into_path(); - let is_up = pathbuf - .file_name() - .map(|name| name.starts_with("up")) - .unwrap_or(false); - let is_sql = matches!(pathbuf.extension(), Some("sql")); - if is_up && is_sql { - up_sqls.push(pathbuf); + + // Ensure filename ends with ".sql" + if pathbuf.extension() != Some("sql") { + continue; + } + + // Ensure filename begins with "up", and extract anything in between + // "up" and ".sql". + let Some(remaining_filename) = pathbuf + .file_stem() + .and_then(|file_stem| file_stem.strip_prefix("up")) + else { + continue; + }; + + // Ensure the remaining filename is either empty (i.e., the filename is + // exactly "up.sql") or parseable as an unsigned integer. We give + // "up.sql" the "up_number" 0 (checked in the loop below), and require + // any other number to be nonzero. + if remaining_filename.is_empty() { + up_sqls.push((0, pathbuf)); + } else { + let Ok(up_number) = remaining_filename.parse::() else { + return Err(format!( + "invalid filename (non-numeric `up*.sql`): {pathbuf}", + )); + }; + if up_number == 0 { + return Err(format!( + "invalid filename (`up*.sql` numbering must start at 1): \ + {pathbuf}", + )); + } + up_sqls.push((up_number, pathbuf)); } } up_sqls.sort(); + // Validate that we have a reasonable sequence of `up*.sql` numbers. + match up_sqls.as_slice() { + [] => return Err("no `up*.sql` files found".to_string()), + [(up_number, path)] => { + // For a single file, we allow either `up.sql` (keyed as + // up_number=0) or `up1.sql`; reject any higher number. + if *up_number > 1 { + return Err(format!( + "`up*.sql` numbering must start at 1: found first file \ + {path}" + )); + } + } + _ => { + for (i, (up_number, path)) in up_sqls.iter().enumerate() { + // We have 2 or more `up*.sql`; they should be numbered exactly + // 1..=up_sqls.len(). + if i as u64 + 1 != *up_number { + // We know we have at least two elements, so report an error + // referencing either the next item (if we're first) or the + // previous item (if we're not first). 
+ let (path_a, path_b) = if i == 0 { + let (_, next_path) = &up_sqls[1]; + (path, next_path) + } else { + let (_, prev_path) = &up_sqls[i - 1]; + (prev_path, path) + }; + return Err(format!( + "invalid `up*.sql` combination: {path_a}, {path_b}" + )); + } + } + } + } + + // This collection of `up*.sql` files is valid; read them all, in order. let mut result = SchemaUpgrade { steps: vec![] }; - for path in up_sqls.into_iter() { + for (_, path) in up_sqls.into_iter() { let sql = tokio::fs::read_to_string(&path) .await .map_err(|e| format!("Cannot read {path}: {e}"))?; @@ -403,11 +477,150 @@ impl DataStore { #[cfg(test)] mod test { use super::*; + use camino_tempfile::Utf8TempDir; use nexus_db_model::schema::SCHEMA_VERSION; use nexus_test_utils::db as test_db; use omicron_test_utils::dev; use std::sync::Arc; + // Confirm that `all_sql_for_version_migration` rejects `up*.sql` files + // where the `*` doesn't contain a positive integer. + #[tokio::test] + async fn all_sql_for_version_migration_rejects_invalid_up_sql_names() { + for (invalid_filename, error_prefix) in [ + ("upA.sql", "invalid filename (non-numeric `up*.sql`)"), + ("up1a.sql", "invalid filename (non-numeric `up*.sql`)"), + ("upaaa1.sql", "invalid filename (non-numeric `up*.sql`)"), + ("up-3.sql", "invalid filename (non-numeric `up*.sql`)"), + ( + "up0.sql", + "invalid filename (`up*.sql` numbering must start at 1)", + ), + ( + "up00.sql", + "invalid filename (`up*.sql` numbering must start at 1)", + ), + ( + "up000.sql", + "invalid filename (`up*.sql` numbering must start at 1)", + ), + ] { + let tempdir = Utf8TempDir::new().unwrap(); + let filename = tempdir.path().join(invalid_filename); + _ = tokio::fs::File::create(&filename).await.unwrap(); + + match all_sql_for_version_migration(tempdir.path()).await { + Ok(upgrade) => { + panic!( + "unexpected success on {invalid_filename} \ + (produced {upgrade:?})" + ); + } + Err(message) => { + assert_eq!(message, format!("{error_prefix}: {filename}")); + } + } + } + } + + // Confirm that `all_sql_for_version_migration` rejects a directory with no + // appriopriately-named files. + #[tokio::test] + async fn all_sql_for_version_migration_rejects_no_up_sql_files() { + for filenames in [ + &[] as &[&str], + &["README.md"], + &["foo.sql", "bar.sql"], + &["up1sql", "up2sql"], + ] { + let tempdir = Utf8TempDir::new().unwrap(); + for filename in filenames { + _ = tokio::fs::File::create(tempdir.path().join(filename)) + .await + .unwrap(); + } + + match all_sql_for_version_migration(tempdir.path()).await { + Ok(upgrade) => { + panic!( + "unexpected success on {filenames:?} \ + (produced {upgrade:?})" + ); + } + Err(message) => { + assert_eq!(message, "no `up*.sql` files found"); + } + } + } + } + + // Confirm that `all_sql_for_version_migration` rejects collections of + // `up*.sql` files with individually-valid names but that do not pass the + // rules of the entire collection. 
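For reference, a caller consumes this function roughly as follows (a sketch: the version directory path and the anyhow-based handling are illustrative, while `all_sql_for_version_migration`, `SchemaUpgrade`, and `SchemaUpgradeStep` are the items defined above):

```rust
async fn load_one_version() -> anyhow::Result<()> {
    // Errors come back as plain strings describing the naming violation.
    let upgrade = all_sql_for_version_migration("schema/crdb/13.0.0")
        .await
        .map_err(|msg| anyhow::anyhow!("bad up*.sql layout: {msg}"))?;

    // Steps are already ordered: either a lone `up.sql`, or `up1.sql`,
    // `up2.sql`, ... with no gaps and no duplicates.
    for SchemaUpgradeStep { path, sql } in &upgrade.steps {
        println!("{path}: {} bytes of SQL", sql.len());
    }
    Ok(())
}
```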
+ #[tokio::test] + async fn all_sql_for_version_migration_rejects_invalid_up_sql_collections() + { + for invalid_filenames in [ + &["up.sql", "up1.sql"] as &[&str], + &["up1.sql", "up01.sql"], + &["up1.sql", "up3.sql"], + &["up1.sql", "up2.sql", "up3.sql", "up02.sql"], + ] { + let tempdir = Utf8TempDir::new().unwrap(); + for filename in invalid_filenames { + _ = tokio::fs::File::create(tempdir.path().join(filename)) + .await + .unwrap(); + } + + match all_sql_for_version_migration(tempdir.path()).await { + Ok(upgrade) => { + panic!( + "unexpected success on {invalid_filenames:?} \ + (produced {upgrade:?})" + ); + } + Err(message) => { + assert!( + message.starts_with("invalid `up*.sql` combination: "), + "message did not start with expected prefix: \ + {message:?}" + ); + } + } + } + } + + // Confirm that `all_sql_for_version_migration` accepts legal collections of + // `up*.sql` filenames. + #[tokio::test] + async fn all_sql_for_version_migration_allows_valid_up_sql_collections() { + for filenames in [ + &["up.sql"] as &[&str], + &["up1.sql", "up2.sql"], + &[ + "up01.sql", "up02.sql", "up03.sql", "up04.sql", "up05.sql", + "up06.sql", "up07.sql", "up08.sql", "up09.sql", "up10.sql", + "up11.sql", + ], + &["up00001.sql", "up00002.sql", "up00003.sql"], + ] { + let tempdir = Utf8TempDir::new().unwrap(); + for filename in filenames { + _ = tokio::fs::File::create(tempdir.path().join(filename)) + .await + .unwrap(); + } + + match all_sql_for_version_migration(tempdir.path()).await { + Ok(_) => (), + Err(message) => { + panic!("unexpected failure on {filenames:?}: {message:?}"); + } + } + } + } + // Confirms that calling the internal "ensure_schema" function can succeed // when the database is already at that version. #[tokio::test] @@ -444,7 +657,7 @@ mod test { let conn = pool.pool().get().await.unwrap(); // Mimic the layout of "schema/crdb". - let config_dir = tempfile::TempDir::new().unwrap(); + let config_dir = Utf8TempDir::new().unwrap(); // Helper to create the version directory and "up.sql". let add_upgrade = |version: SemverVersion, sql: String| { @@ -499,8 +712,9 @@ mod test { .await; // Show that the datastores can be created concurrently. - let config = - SchemaConfig { schema_dir: config_dir.path().to_path_buf() }; + let config = SchemaConfig { + schema_dir: config_dir.path().to_path_buf().into_std_path_buf(), + }; let _ = futures::future::join_all((0..10).map(|_| { let log = log.clone(); let pool = pool.clone(); diff --git a/schema/crdb/README.adoc b/schema/crdb/README.adoc index fba36ed73b..5b9c2f6a10 100644 --- a/schema/crdb/README.adoc +++ b/schema/crdb/README.adoc @@ -14,9 +14,11 @@ We use the following conventions: appear in each file. More on this below. ** If there's only one statement required, we put it into `up.sql`. ** If more than one change is needed, any number of files starting with `up` - and ending with `.sql` may be used. These files will be sorted in - lexicographic order before being executed. Each will be executed in a - separate transaction. + and ending with `.sql` may be used. These files must follow a + numerically-increasing pattern starting with 1 (leading prefixes are allowed, + so `up1.sql`, `up2.sql`, ..., or `up01.sql`, `up02.sql`, etc.), and they will + be sorted numerically by these values. Each will be executed in a separate + transaction. ** CockroachDB documentation recommends the following: "Execute schema changes ... in an explicit transaction consisting of the single schema change statement.". 
Practically this means: If you want to change multiple @@ -65,7 +67,8 @@ Process: * If only one SQL statement is necessary to get from `OLD_VERSION` to `NEW_VERSION`, put that statement into `schema/crdb/NEW_VERSION/up.sql`. If multiple statements are required, put each one into a separate file, naming - these `schema/crdb/NEW_VERSION/upN.sql` for as many `N` as you need. + these `schema/crdb/NEW_VERSION/upN.sql` for as many `N` as you need, staring + with `N=1`. ** Each file should contain _either_ one schema-modifying statement _or_ some number of data-modifying statements. You can combine multiple data-modifying statements. But you should not mix schema-modifying statements and From 5c6ad0836c41678010ce54b88cf80874382ad1bd Mon Sep 17 00:00:00 2001 From: John Gallagher Date: Tue, 21 Nov 2023 13:57:21 -0800 Subject: [PATCH 37/56] Nexus inventory: Add collection of RoT CMPA and CFPA pages (#4496) The RoT can report four different 512-byte pages (CMPA, and CFPA active/inactive/scratch). Given multiple RoT artifacts that are viable (match the right board, etc.) but are signed with different keys, these pages are required to identify which archive was signed with a key that the RoT will accept. This PR adds collection of these pages to the inventory system added in #4291. The implementation here is fairly bulky but very mechanical, and is implemented almost identically to the way we collect cabooses: there's an `rot_page_which` to identify which of the four kinds of page it is, and a table for storing the relatively small number of raw page data values. Most of the changes in this PR resulted from "find where we're doing something for cabooses, then do the analogous thing for RoT pages". There are a couple minor quibbles in the unit tests that I'll point out by leaving comments below. The RoT pages now show up when viewing a collection through omdb (note that the quite long base64 string is truncated; there's a command line flag to override the truncation and show the full string): ```console $ omdb db inventory collections show e2f84867-010d-4ac3-bbf3-bc1e865da16b > x.txt note: database URL not specified. Will search DNS. note: (override with --db-url or OMDB_DB_URL) note: using database URL postgresql://root@[::1]:43301/omicron?sslmode=disable note: database schema version matches expected (11.0.0) collection: e2f84867-010d-4ac3-bbf3-bc1e865da16b collector: e6bff1ff-24fb-49dc-a54e-c6a350cd4d6c (likely a Nexus instance) started: 2023-11-14T18:51:54.900Z done: 2023-11-14T18:51:54.942Z errors: 0 Sled SimGimlet00 part number: FAKE_SIM_GIMLET power: A2 revision: 0 MGS slot: Sled 0 (cubby 0) found at: 2023-11-14 18:51:54.924602 UTC from http://[::1]:42341 cabooses: SLOT BOARD NAME VERSION GIT_COMMIT SpSlot0 SimGimletSp SimGimlet 0.0.1 ffffffff SpSlot1 SimGimletSp SimGimlet 0.0.1 ffffffff RotSlotA SimGimletRot SimGimlet 0.0.1 eeeeeeee RotSlotB SimGimletRot SimGimlet 0.0.1 eeeeeeee RoT pages: SLOT DATA_BASE64 Cmpa Z2ltbGV0LWNtcGEAAAAAAAAAAAAAAAAA... CfpaActive Z2ltbGV0LWNmcGEtYWN0aXZlAAAAAAAA... CfpaInactive Z2ltbGV0LWNmcGEtaW5hY3RpdmUAAAAA... CfpaScratch Z2ltbGV0LWNmcGEtc2NyYXRjaAAAAAAA... 
RoT: active slot: slot A RoT: persistent boot preference: slot A RoT: pending persistent boot preference: - RoT: transient boot preference: - RoT: slot A SHA3-256: - RoT: slot B SHA3-256: - Sled SimGimlet01 part number: FAKE_SIM_GIMLET power: A2 revision: 0 MGS slot: Sled 1 (cubby 1) found at: 2023-11-14 18:51:54.935038 UTC from http://[::1]:42341 cabooses: SLOT BOARD NAME VERSION GIT_COMMIT SpSlot0 SimGimletSp SimGimlet 0.0.1 ffffffff SpSlot1 SimGimletSp SimGimlet 0.0.1 ffffffff RotSlotA SimGimletRot SimGimlet 0.0.1 eeeeeeee RotSlotB SimGimletRot SimGimlet 0.0.1 eeeeeeee RoT pages: SLOT DATA_BASE64 Cmpa Z2ltbGV0LWNtcGEAAAAAAAAAAAAAAAAA... CfpaActive Z2ltbGV0LWNmcGEtYWN0aXZlAAAAAAAA... CfpaInactive Z2ltbGV0LWNmcGEtaW5hY3RpdmUAAAAA... CfpaScratch Z2ltbGV0LWNmcGEtc2NyYXRjaAAAAAAA... RoT: active slot: slot A RoT: persistent boot preference: slot A RoT: pending persistent boot preference: - RoT: transient boot preference: - RoT: slot A SHA3-256: - RoT: slot B SHA3-256: - Switch SimSidecar0 part number: FAKE_SIM_SIDECAR power: A2 revision: 0 MGS slot: Switch 0 found at: 2023-11-14 18:51:54.904 UTC from http://[::1]:42341 cabooses: SLOT BOARD NAME VERSION GIT_COMMIT SpSlot0 SimSidecarSp SimSidecar 0.0.1 ffffffff SpSlot1 SimSidecarSp SimSidecar 0.0.1 ffffffff RotSlotA SimSidecarRot SimSidecar 0.0.1 eeeeeeee RotSlotB SimSidecarRot SimSidecar 0.0.1 eeeeeeee RoT pages: SLOT DATA_BASE64 Cmpa c2lkZWNhci1jbXBhAAAAAAAAAAAAAAAA... CfpaActive c2lkZWNhci1jZnBhLWFjdGl2ZQAAAAAA... CfpaInactive c2lkZWNhci1jZnBhLWluYWN0aXZlAAAA... CfpaScratch c2lkZWNhci1jZnBhLXNjcmF0Y2gAAAAA... RoT: active slot: slot A RoT: persistent boot preference: slot A RoT: pending persistent boot preference: - RoT: transient boot preference: - RoT: slot A SHA3-256: - RoT: slot B SHA3-256: - Switch SimSidecar1 part number: FAKE_SIM_SIDECAR power: A2 revision: 0 MGS slot: Switch 1 found at: 2023-11-14 18:51:54.915680 UTC from http://[::1]:42341 cabooses: SLOT BOARD NAME VERSION GIT_COMMIT SpSlot0 SimSidecarSp SimSidecar 0.0.1 ffffffff SpSlot1 SimSidecarSp SimSidecar 0.0.1 ffffffff RotSlotA SimSidecarRot SimSidecar 0.0.1 eeeeeeee RotSlotB SimSidecarRot SimSidecar 0.0.1 eeeeeeee RoT pages: SLOT DATA_BASE64 Cmpa c2lkZWNhci1jbXBhAAAAAAAAAAAAAAAA... CfpaActive c2lkZWNhci1jZnBhLWFjdGl2ZQAAAAAA... CfpaInactive c2lkZWNhci1jZnBhLWluYWN0aXZlAAAA... CfpaScratch c2lkZWNhci1jZnBhLXNjcmF0Y2gAAAAA... RoT: active slot: slot A RoT: persistent boot preference: slot A RoT: pending persistent boot preference: - RoT: transient boot preference: - RoT: slot A SHA3-256: - RoT: slot B SHA3-256: - ``` There's also a new `omdb` subcommand to report the RoT pages (which does not truncate, but if we think it should that'd be easy to change): ```console $ omdb db inventory rot-pages note: database URL not specified. Will search DNS. 
note: (override with --db-url or OMDB_DB_URL) note: using database URL postgresql://root@[::1]:43301/omicron?sslmode=disable note: database schema version matches expected (11.0.0) ID DATA_BASE64 099ba572-a978-4592-ae7a-452629377904 c2lkZWNhci1jZnBhLWluYWN0aXZlAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= 0e9dc5b0-b190-43da-acb6-84450fdfdb94 c2lkZWNhci1jbXBhAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= 80923bac-fbcc-46e0-b861-9dba906c14f7 Z2ltbGV0LWNmcGEtaW5hY3RpdmUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= 98cc4225-a791-4092-99c6-81e27e8d8ffa c2lkZWNhci1jZnBhLWFjdGl2ZQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= a32eaf95-a20e-4570-8860-e0fb584a2ff1 
c2lkZWNhci1jZnBhLXNjcmF0Y2gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= c941810a-1c6a-4dda-9c71-41a0caf62ace Z2ltbGV0LWNtcGEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= e96042d0-ae8a-435c-9118-1b71e8a9a651 Z2ltbGV0LWNmcGEtYWN0aXZlAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= fdc27064-4338-4cbe-bfe5-622b11a9afbc Z2ltbGV0LWNmcGEtc2NyYXRjaAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= --- Cargo.lock | 2 + dev-tools/omdb/Cargo.toml | 1 + dev-tools/omdb/src/bin/omdb/db.rs | 129 ++++++- nexus/db-model/src/inventory.rs | 101 ++++- nexus/db-model/src/schema.rs | 26 +- .../db-queries/src/db/datastore/inventory.rs | 360 ++++++++++++++---- nexus/inventory/Cargo.toml | 1 + nexus/inventory/src/builder.rs | 249 +++++++++++- nexus/inventory/src/collector.rs | 104 +++++ nexus/inventory/src/examples.rs | 72 +++- .../tests/output/collector_basic.txt | 28 ++ .../tests/output/collector_errors.txt | 28 ++ nexus/types/src/inventory.rs | 77 ++++ schema/crdb/13.0.0/up1.sql | 4 + schema/crdb/13.0.0/up2.sql | 2 + schema/crdb/13.0.0/up3.sql | 6 + schema/crdb/13.0.0/up4.sql | 17 + 
schema/crdb/dbinit.sql | 39 +- sp-sim/src/gimlet.rs | 19 +- sp-sim/src/sidecar.rs | 19 +- 20 files changed, 1193 insertions(+), 91 deletions(-) create mode 100644 schema/crdb/13.0.0/up1.sql create mode 100644 schema/crdb/13.0.0/up2.sql create mode 100644 schema/crdb/13.0.0/up3.sql create mode 100644 schema/crdb/13.0.0/up4.sql diff --git a/Cargo.lock b/Cargo.lock index 2e0663161d..3c9c31a2ac 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4084,6 +4084,7 @@ name = "nexus-inventory" version = "0.1.0" dependencies = [ "anyhow", + "base64 0.21.5", "chrono", "expectorate", "gateway-client", @@ -4730,6 +4731,7 @@ dependencies = [ "tabled", "textwrap 0.16.0", "tokio", + "unicode-width", "uuid", ] diff --git a/dev-tools/omdb/Cargo.toml b/dev-tools/omdb/Cargo.toml index a8834a0b29..7544374906 100644 --- a/dev-tools/omdb/Cargo.toml +++ b/dev-tools/omdb/Cargo.toml @@ -37,6 +37,7 @@ strum.workspace = true tabled.workspace = true textwrap.workspace = true tokio = { workspace = true, features = [ "full" ] } +unicode-width.workspace = true uuid.workspace = true ipnetwork.workspace = true omicron-workspace-hack.workspace = true diff --git a/dev-tools/omdb/src/bin/omdb/db.rs b/dev-tools/omdb/src/bin/omdb/db.rs index 5fa19a1a27..85c55d4e61 100644 --- a/dev-tools/omdb/src/bin/omdb/db.rs +++ b/dev-tools/omdb/src/bin/omdb/db.rs @@ -51,6 +51,7 @@ use nexus_db_model::Sled; use nexus_db_model::Snapshot; use nexus_db_model::SnapshotState; use nexus_db_model::SwCaboose; +use nexus_db_model::SwRotPage; use nexus_db_model::Vmm; use nexus_db_model::Volume; use nexus_db_model::Zpool; @@ -70,10 +71,12 @@ use nexus_types::internal_api::params::DnsRecord; use nexus_types::internal_api::params::Srv; use nexus_types::inventory::CabooseWhich; use nexus_types::inventory::Collection; +use nexus_types::inventory::RotPageWhich; use omicron_common::api::external::DataPageParams; use omicron_common::api::external::Generation; use omicron_common::postgres_config::PostgresConfigWithUrl; use sled_agent_client::types::VolumeConstructionRequest; +use std::borrow::Cow; use std::cmp::Ordering; use std::collections::BTreeMap; use std::collections::BTreeSet; @@ -247,6 +250,8 @@ enum InventoryCommands { Cabooses, /// list and show details from particular collections Collections(CollectionsArgs), + /// list all root of trust pages ever found + RotPages, } #[derive(Debug, Args)] @@ -267,6 +272,9 @@ enum CollectionsCommands { struct CollectionsShowArgs { /// id of the collection id: Uuid, + /// show long strings in their entirety + #[clap(long)] + show_long_strings: bool, } #[derive(Debug, Args)] @@ -2233,9 +2241,25 @@ async fn cmd_db_inventory( command: CollectionsCommands::List, }) => cmd_db_inventory_collections_list(&conn, limit).await, InventoryCommands::Collections(CollectionsArgs { - command: CollectionsCommands::Show(CollectionsShowArgs { id }), + command: + CollectionsCommands::Show(CollectionsShowArgs { + id, + show_long_strings, + }), }) => { - cmd_db_inventory_collections_show(opctx, datastore, id, limit).await + let long_string_formatter = + LongStringFormatter { show_long_strings }; + cmd_db_inventory_collections_show( + opctx, + datastore, + id, + limit, + long_string_formatter, + ) + .await + } + InventoryCommands::RotPages => { + cmd_db_inventory_rot_pages(&conn, limit).await } } } @@ -2318,6 +2342,41 @@ async fn cmd_db_inventory_cabooses( Ok(()) } +async fn cmd_db_inventory_rot_pages( + conn: &DataStoreConnection<'_>, + limit: NonZeroU32, +) -> Result<(), anyhow::Error> { + #[derive(Tabled)] + #[tabled(rename_all = 
"SCREAMING_SNAKE_CASE")] + struct RotPageRow { + id: Uuid, + data_base64: String, + } + + use db::schema::sw_root_of_trust_page::dsl; + let mut rot_pages = dsl::sw_root_of_trust_page + .limit(i64::from(u32::from(limit))) + .select(SwRotPage::as_select()) + .load_async(&**conn) + .await + .context("loading rot_pages")?; + check_limit(&rot_pages, limit, || "loading rot_pages"); + rot_pages.sort(); + + let rows = rot_pages.into_iter().map(|rot_page| RotPageRow { + id: rot_page.id, + data_base64: rot_page.data_base64, + }); + let table = tabled::Table::new(rows) + .with(tabled::settings::Style::empty()) + .with(tabled::settings::Padding::new(0, 1, 0, 0)) + .to_string(); + + println!("{}", table); + + Ok(()) +} + async fn cmd_db_inventory_collections_list( conn: &DataStoreConnection<'_>, limit: NonZeroU32, @@ -2400,6 +2459,7 @@ async fn cmd_db_inventory_collections_show( datastore: &DataStore, id: Uuid, limit: NonZeroU32, + long_string_formatter: LongStringFormatter, ) -> Result<(), anyhow::Error> { let (collection, incomplete) = datastore .inventory_collection_read_best_effort(opctx, id, limit) @@ -2411,7 +2471,7 @@ async fn cmd_db_inventory_collections_show( inv_collection_print(&collection).await?; let nerrors = inv_collection_print_errors(&collection).await?; - inv_collection_print_devices(&collection).await?; + inv_collection_print_devices(&collection, &long_string_formatter).await?; if nerrors > 0 { eprintln!( @@ -2467,6 +2527,7 @@ async fn inv_collection_print_errors( async fn inv_collection_print_devices( collection: &Collection, + long_string_formatter: &LongStringFormatter, ) -> Result<(), anyhow::Error> { // Assemble a list of baseboard ids, sorted first by device type (sled, // switch, power), then by slot number. This is the order in which we will @@ -2545,6 +2606,30 @@ async fn inv_collection_print_devices( .to_string(); println!("{}", textwrap::indent(&table.to_string(), " ")); + #[derive(Tabled)] + #[tabled(rename_all = "SCREAMING_SNAKE_CASE")] + struct RotPageRow<'a> { + slot: String, + data_base64: Cow<'a, str>, + } + + println!(" RoT pages:"); + let rot_page_rows: Vec<_> = RotPageWhich::iter() + .filter_map(|which| { + collection.rot_page_for(which, baseboard_id).map(|d| (which, d)) + }) + .map(|(which, found_page)| RotPageRow { + slot: format!("{which:?}"), + data_base64: long_string_formatter + .maybe_truncate(&found_page.page.data_base64), + }) + .collect(); + let table = tabled::Table::new(rot_page_rows) + .with(tabled::settings::Style::empty()) + .with(tabled::settings::Padding::new(0, 1, 0, 0)) + .to_string(); + println!("{}", textwrap::indent(&table.to_string(), " ")); + if let Some(rot) = rot { println!(" RoT: active slot: slot {:?}", rot.active_slot); println!( @@ -2617,3 +2702,41 @@ async fn inv_collection_print_devices( Ok(()) } + +#[derive(Debug)] +struct LongStringFormatter { + show_long_strings: bool, +} + +impl LongStringFormatter { + fn maybe_truncate<'a>(&self, s: &'a str) -> Cow<'a, str> { + use unicode_width::UnicodeWidthChar; + + // pick an arbitrary width at which we'll truncate, knowing that these + // strings are probably contained in tables with other columns + const TRUNCATE_AT_WIDTH: usize = 32; + + // quick check for short strings or if we should show long strings in + // their entirety + if self.show_long_strings || s.len() <= TRUNCATE_AT_WIDTH { + return s.into(); + } + + // longer check; we'll do the proper thing here and check the unicode + // width, and we don't really care about speed, so we can just iterate + // over chars + let mut width = 
0; + for (pos, ch) in s.char_indices() { + let ch_width = UnicodeWidthChar::width(ch).unwrap_or(0); + if width + ch_width > TRUNCATE_AT_WIDTH { + let (prefix, _) = s.split_at(pos); + return format!("{prefix}...").into(); + } + width += ch_width; + } + + // if we didn't break out of the loop, `s` in its entirety is not too + // wide, so return it as-is + s.into() + } +} diff --git a/nexus/db-model/src/inventory.rs b/nexus/db-model/src/inventory.rs index 5b09f289bb..d94334787d 100644 --- a/nexus/db-model/src/inventory.rs +++ b/nexus/db-model/src/inventory.rs @@ -6,7 +6,8 @@ use crate::schema::{ hw_baseboard_id, inv_caboose, inv_collection, inv_collection_error, - inv_root_of_trust, inv_service_processor, sw_caboose, + inv_root_of_trust, inv_root_of_trust_page, inv_service_processor, + sw_caboose, sw_root_of_trust_page, }; use crate::{impl_enum_type, SqlU16, SqlU32}; use chrono::DateTime; @@ -18,7 +19,7 @@ use diesel::pg::Pg; use diesel::serialize::ToSql; use diesel::{serialize, sql_types}; use nexus_types::inventory::{ - BaseboardId, Caboose, Collection, PowerState, RotSlot, + BaseboardId, Caboose, Collection, PowerState, RotPage, RotSlot, }; use uuid::Uuid; @@ -132,6 +133,59 @@ impl From for nexus_types::inventory::CabooseWhich { } } +// See [`nexus_types::inventory::RotPageWhich`]. +impl_enum_type!( + #[derive(SqlType, Debug, QueryId)] + #[diesel(postgres_type(name = "root_of_trust_page_which"))] + pub struct RotPageWhichEnum; + + #[derive(Copy, Clone, Debug, AsExpression, FromSqlRow, PartialEq)] + #[diesel(sql_type = RotPageWhichEnum)] + pub enum RotPageWhich; + + // Enum values + Cmpa => b"cmpa" + CfpaActive => b"cfpa_active" + CfpaInactive => b"cfpa_inactive" + CfpaScratch => b"cfpa_scratch" +); + +impl From for RotPageWhich { + fn from(c: nexus_types::inventory::RotPageWhich) -> Self { + use nexus_types::inventory as nexus_inventory; + match c { + nexus_inventory::RotPageWhich::Cmpa => RotPageWhich::Cmpa, + nexus_inventory::RotPageWhich::CfpaActive => { + RotPageWhich::CfpaActive + } + nexus_inventory::RotPageWhich::CfpaInactive => { + RotPageWhich::CfpaInactive + } + nexus_inventory::RotPageWhich::CfpaScratch => { + RotPageWhich::CfpaScratch + } + } + } +} + +impl From for nexus_types::inventory::RotPageWhich { + fn from(row: RotPageWhich) -> Self { + use nexus_types::inventory as nexus_inventory; + match row { + RotPageWhich::Cmpa => nexus_inventory::RotPageWhich::Cmpa, + RotPageWhich::CfpaActive => { + nexus_inventory::RotPageWhich::CfpaActive + } + RotPageWhich::CfpaInactive => { + nexus_inventory::RotPageWhich::CfpaInactive + } + RotPageWhich::CfpaScratch => { + nexus_inventory::RotPageWhich::CfpaScratch + } + } + } +} + // See [`nexus_types::inventory::SpType`]. impl_enum_type!( #[derive(SqlType, Debug, QueryId)] @@ -271,6 +325,36 @@ impl From for Caboose { } } +/// See [`nexus_types::inventory::RotPage`]. +#[derive( + Queryable, + Insertable, + Clone, + Debug, + Selectable, + Eq, + PartialEq, + Ord, + PartialOrd, +)] +#[diesel(table_name = sw_root_of_trust_page)] +pub struct SwRotPage { + pub id: Uuid, + pub data_base64: String, +} + +impl From for SwRotPage { + fn from(p: RotPage) -> Self { + Self { id: Uuid::new_v4(), data_base64: p.data_base64 } + } +} + +impl From for RotPage { + fn from(row: SwRotPage) -> Self { + Self { data_base64: row.data_base64 } + } +} + /// See [`nexus_types::inventory::Collection`]. 
#[derive(Queryable, Insertable, Clone, Debug, Selectable)] #[diesel(table_name = inv_collection_error)] @@ -441,3 +525,16 @@ pub struct InvCaboose { pub which: CabooseWhich, pub sw_caboose_id: Uuid, } + +/// See [`nexus_types::inventory::RotPageFound`]. +#[derive(Queryable, Clone, Debug, Selectable)] +#[diesel(table_name = inv_root_of_trust_page)] +pub struct InvRotPage { + pub inv_collection_id: Uuid, + pub hw_baseboard_id: Uuid, + pub time_collected: DateTime, + pub source: String, + + pub which: RotPageWhich, + pub sw_root_of_trust_page_id: Uuid, +} diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 960b53873a..7f7dd57027 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -1187,6 +1187,13 @@ table! { } } +table! { + sw_root_of_trust_page (id) { + id -> Uuid, + data_base64 -> Text, + } +} + table! { inv_collection (id) { id -> Uuid, @@ -1248,6 +1255,18 @@ table! { } } +table! { + inv_root_of_trust_page (inv_collection_id, hw_baseboard_id, which) { + inv_collection_id -> Uuid, + hw_baseboard_id -> Uuid, + time_collected -> Timestamptz, + source -> Text, + + which -> crate::RotPageWhichEnum, + sw_root_of_trust_page_id -> Uuid, + } +} + table! { bootstore_keys (key, generation) { key -> Text, @@ -1270,7 +1289,7 @@ table! { /// /// This should be updated whenever the schema is changed. For more details, /// refer to: schema/crdb/README.adoc -pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(12, 0, 0); +pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(13, 0, 0); allow_tables_to_appear_in_same_query!( system_update, @@ -1285,6 +1304,11 @@ joinable!(ip_pool_range -> ip_pool (ip_pool_id)); allow_tables_to_appear_in_same_query!(inv_collection, inv_collection_error); joinable!(inv_collection_error -> inv_collection (inv_collection_id)); allow_tables_to_appear_in_same_query!(hw_baseboard_id, sw_caboose, inv_caboose); +allow_tables_to_appear_in_same_query!( + hw_baseboard_id, + sw_root_of_trust_page, + inv_root_of_trust_page +); allow_tables_to_appear_in_same_query!( dataset, diff --git a/nexus/db-queries/src/db/datastore/inventory.rs b/nexus/db-queries/src/db/datastore/inventory.rs index b743d28ee8..28a438629e 100644 --- a/nexus/db-queries/src/db/datastore/inventory.rs +++ b/nexus/db-queries/src/db/datastore/inventory.rs @@ -35,10 +35,13 @@ use nexus_db_model::InvCaboose; use nexus_db_model::InvCollection; use nexus_db_model::InvCollectionError; use nexus_db_model::InvRootOfTrust; +use nexus_db_model::InvRotPage; use nexus_db_model::InvServiceProcessor; +use nexus_db_model::RotPageWhichEnum; use nexus_db_model::SpType; use nexus_db_model::SpTypeEnum; use nexus_db_model::SwCaboose; +use nexus_db_model::SwRotPage; use nexus_types::inventory::Collection; use omicron_common::api::external::Error; use omicron_common::api::external::InternalContext; @@ -76,6 +79,11 @@ impl DataStore { .iter() .map(|s| SwCaboose::from((**s).clone())) .collect::>(); + let rot_pages = collection + .rot_pages + .iter() + .map(|p| SwRotPage::from((**p).clone())) + .collect::>(); let error_values = collection .errors .iter() @@ -140,6 +148,19 @@ impl DataStore { .await?; } + // Insert records (and generate ids) for each distinct RoT page that + // we've found. Like baseboards, these might already be present and + // rows in this table are not scoped to a particular collection + // because they only map (immutable) identifiers to UUIDs. 
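A small sketch of why inserting with freshly minted ids stays idempotent here (the payload string is made up, the `RotPage` literal is constructed directly for illustration, and the unique index over `data_base64` is assumed from the lookup-column description below rather than shown in this hunk):

```rust
// `SwRotPage::from` always generates a new Uuid; dedup relies on the page
// content itself, so re-inserting an already-known page becomes a no-op
// under ON CONFLICT DO NOTHING and the original row keeps its id.
let data = "Z2ltbGV0LWNtcGEAAA...".to_string(); // made-up, truncated payload
let row_a = SwRotPage::from(nexus_types::inventory::RotPage {
    data_base64: data.clone(),
});
let row_b = SwRotPage::from(nexus_types::inventory::RotPage {
    data_base64: data,
});
assert_ne!(row_a.id, row_b.id); // ids always differ...
assert_eq!(row_a.data_base64, row_b.data_base64); // ...content is the real key
```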
+ { + use db::schema::sw_root_of_trust_page::dsl; + let _ = diesel::insert_into(dsl::sw_root_of_trust_page) + .values(rot_pages) + .on_conflict_do_nothing() + .execute_async(&conn) + .await?; + } + // Insert a record describing the collection itself. { use db::schema::inv_collection::dsl; @@ -468,6 +489,85 @@ impl DataStore { } } + // Insert rows for the root of trust pages that we found. This is + // almost identical to inserting cabooses above, and just like for + // cabooses, we do this using INSERT INTO ... SELECT. We have these + // three tables: + // + // - `hw_baseboard` with an "id" primary key and lookup columns + // "part_number" and "serial_number" + // - `sw_root_of_trust_page` with an "id" primary key and lookup + // column "data_base64" + // - `inv_root_of_trust_page` with foreign keys "hw_baseboard_id", + // "sw_root_of_trust_page_id", and various other columns + // + // and generate an INSERT INTO query that is structurally the same + // as the caboose query described above. + for (which, tree) in &collection.rot_pages_found { + use db::schema::hw_baseboard_id::dsl as dsl_baseboard_id; + use db::schema::inv_root_of_trust_page::dsl as dsl_inv_rot_page; + use db::schema::sw_root_of_trust_page::dsl as dsl_sw_rot_page; + let db_which = nexus_db_model::RotPageWhich::from(*which); + for (baseboard_id, found_rot_page) in tree { + let selection = db::schema::hw_baseboard_id::table + .inner_join( + db::schema::sw_root_of_trust_page::table.on( + dsl_baseboard_id::part_number + .eq(baseboard_id.part_number.clone()) + .and( + dsl_baseboard_id::serial_number.eq( + baseboard_id.serial_number.clone(), + ), + ) + .and(dsl_sw_rot_page::data_base64.eq( + found_rot_page.page.data_base64.clone(), + )), + ), + ) + .select(( + dsl_baseboard_id::id, + dsl_sw_rot_page::id, + collection_id.into_sql::(), + found_rot_page + .time_collected + .into_sql::(), + found_rot_page + .source + .clone() + .into_sql::(), + db_which.into_sql::(), + )); + + let _ = diesel::insert_into( + db::schema::inv_root_of_trust_page::table, + ) + .values(selection) + .into_columns(( + dsl_inv_rot_page::hw_baseboard_id, + dsl_inv_rot_page::sw_root_of_trust_page_id, + dsl_inv_rot_page::inv_collection_id, + dsl_inv_rot_page::time_collected, + dsl_inv_rot_page::source, + dsl_inv_rot_page::which, + )) + .execute_async(&conn) + .await?; + + // See the comments above. The same applies here. If you + // update the statement below because the schema for + // `inv_root_of_trust_page` has changed, be sure to update + // the code above, too! + let ( + _hw_baseboard_id, + _sw_root_of_trust_page_id, + _inv_collection_id, + _time_collected, + _source, + _which, + ) = dsl_inv_rot_page::inv_root_of_trust_page::all_columns(); + } + } + // Finally, insert the list of errors. { use db::schema::inv_collection_error::dsl as errors_dsl; @@ -720,7 +820,7 @@ impl DataStore { // start removing it and we'd also need to make sure we didn't leak a // collection if we crash while deleting it. let conn = self.pool_connection_authorized(opctx).await?; - let (ncollections, nsps, nrots, ncabooses, nerrors) = conn + let (ncollections, nsps, nrots, ncabooses, nrot_pages, nerrors) = conn .transaction_async(|conn| async move { // Remove the record describing the collection itself. let ncollections = { @@ -729,7 +829,7 @@ impl DataStore { dsl::inv_collection.filter(dsl::id.eq(collection_id)), ) .execute_async(&conn) - .await?; + .await? }; // Remove rows for service processors. 
@@ -740,7 +840,7 @@ impl DataStore { .filter(dsl::inv_collection_id.eq(collection_id)), ) .execute_async(&conn) - .await?; + .await? }; // Remove rows for roots of trust. @@ -751,7 +851,7 @@ impl DataStore { .filter(dsl::inv_collection_id.eq(collection_id)), ) .execute_async(&conn) - .await?; + .await? }; // Remove rows for cabooses found. @@ -762,7 +862,18 @@ impl DataStore { .filter(dsl::inv_collection_id.eq(collection_id)), ) .execute_async(&conn) - .await?; + .await? + }; + + // Remove rows for root of trust pages found. + let nrot_pages = { + use db::schema::inv_root_of_trust_page::dsl; + diesel::delete( + dsl::inv_root_of_trust_page + .filter(dsl::inv_collection_id.eq(collection_id)), + ) + .execute_async(&conn) + .await? }; // Remove rows for errors encountered. @@ -773,10 +884,10 @@ impl DataStore { .filter(dsl::inv_collection_id.eq(collection_id)), ) .execute_async(&conn) - .await?; + .await? }; - Ok((ncollections, nsps, nrots, ncabooses, nerrors)) + Ok((ncollections, nsps, nrots, ncabooses, nrot_pages, nerrors)) }) .await .map_err(|error| match error { @@ -792,6 +903,7 @@ impl DataStore { "nsps" => nsps, "nrots" => nrots, "ncabooses" => ncabooses, + "nrot_pages" => nrot_pages, "nerrors" => nerrors, ); @@ -1068,6 +1180,88 @@ impl DataStore { ); } + // Fetch records of RoT pages found. + let inv_rot_page_rows = { + use db::schema::inv_root_of_trust_page::dsl; + dsl::inv_root_of_trust_page + .filter(dsl::inv_collection_id.eq(id)) + .limit(sql_limit) + .select(InvRotPage::as_select()) + .load_async(&*conn) + .await + .map_err(|e| { + public_error_from_diesel(e, ErrorHandler::Server) + })? + }; + limit_reached = limit_reached || inv_rot_page_rows.len() == usize_limit; + + // Collect the unique sw_rot_page_ids for those pages. + let sw_rot_page_ids: BTreeSet<_> = inv_rot_page_rows + .iter() + .map(|inv_rot_page| inv_rot_page.sw_root_of_trust_page_id) + .collect(); + // Fetch the corresponing records. + let rot_pages_by_id: BTreeMap<_, _> = { + use db::schema::sw_root_of_trust_page::dsl; + dsl::sw_root_of_trust_page + .filter(dsl::id.eq_any(sw_rot_page_ids)) + .limit(sql_limit) + .select(SwRotPage::as_select()) + .load_async(&*conn) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))? + .into_iter() + .map(|sw_rot_page_row| { + ( + sw_rot_page_row.id, + Arc::new(nexus_types::inventory::RotPage::from( + sw_rot_page_row, + )), + ) + }) + .collect() + }; + limit_reached = limit_reached || rot_pages_by_id.len() == usize_limit; + + // Assemble the lists of rot pages found. 
+ let mut rot_pages_found = BTreeMap::new(); + for p in inv_rot_page_rows { + let by_baseboard = rot_pages_found + .entry(nexus_types::inventory::RotPageWhich::from(p.which)) + .or_insert_with(BTreeMap::new); + let Some(bb) = baseboards_by_id.get(&p.hw_baseboard_id) else { + let msg = format!( + "unknown baseboard found in inv_root_of_trust_page: {}", + p.hw_baseboard_id + ); + return Err(Error::internal_error(&msg)); + }; + let Some(sw_rot_page) = + rot_pages_by_id.get(&p.sw_root_of_trust_page_id) + else { + let msg = format!( + "unknown rot page found in inv_root_of_trust_page: {}", + p.sw_root_of_trust_page_id + ); + return Err(Error::internal_error(&msg)); + }; + + let previous = by_baseboard.insert( + bb.clone(), + nexus_types::inventory::RotPageFound { + time_collected: p.time_collected, + source: p.source, + page: sw_rot_page.clone(), + }, + ); + bail_unless!( + previous.is_none(), + "duplicate rot page found: {:?} baseboard {:?}", + p.which, + p.hw_baseboard_id + ); + } + Ok(( Collection { id, @@ -1077,9 +1271,11 @@ impl DataStore { collector, baseboards: baseboards_by_id.values().cloned().collect(), cabooses: cabooses_by_id.values().cloned().collect(), + rot_pages: rot_pages_by_id.values().cloned().collect(), sps, rots, cabooses_found, + rot_pages_found, }, limit_reached, )) @@ -1141,6 +1337,7 @@ mod test { use nexus_test_utils::db::ALLOW_FULL_TABLE_SCAN_SQL; use nexus_types::inventory::CabooseWhich; use nexus_types::inventory::Collection; + use nexus_types::inventory::RotPageWhich; use omicron_test_utils::dev; use std::num::NonZeroU32; use uuid::Uuid; @@ -1156,28 +1353,44 @@ mod test { .await?) } - async fn count_baseboards_cabooses( - conn: &DataStoreConnection<'_>, - ) -> anyhow::Result<(usize, usize)> { - conn.transaction_async(|conn| async move { - conn.batch_execute_async(ALLOW_FULL_TABLE_SCAN_SQL).await.unwrap(); - let bb_count = schema::hw_baseboard_id::dsl::hw_baseboard_id - .select(diesel::dsl::count_star()) - .first_async::(&conn) - .await - .context("failed to count baseboards")?; - let caboose_count = schema::sw_caboose::dsl::sw_caboose - .select(diesel::dsl::count_star()) - .first_async::(&conn) - .await - .context("failed to count cabooses")?; - let bb_count_usize = usize::try_from(bb_count) - .context("failed to convert baseboard count to usize")?; - let caboose_count_usize = usize::try_from(caboose_count) - .context("failed to convert caboose count to usize")?; - Ok((bb_count_usize, caboose_count_usize)) - }) - .await + struct CollectionCounts { + baseboards: usize, + cabooses: usize, + rot_pages: usize, + } + + impl CollectionCounts { + async fn new(conn: &DataStoreConnection<'_>) -> anyhow::Result { + conn.transaction_async(|conn| async move { + conn.batch_execute_async(ALLOW_FULL_TABLE_SCAN_SQL) + .await + .unwrap(); + let bb_count = schema::hw_baseboard_id::dsl::hw_baseboard_id + .select(diesel::dsl::count_star()) + .first_async::(&conn) + .await + .context("failed to count baseboards")?; + let caboose_count = schema::sw_caboose::dsl::sw_caboose + .select(diesel::dsl::count_star()) + .first_async::(&conn) + .await + .context("failed to count cabooses")?; + let rot_page_count = + schema::sw_root_of_trust_page::dsl::sw_root_of_trust_page + .select(diesel::dsl::count_star()) + .first_async::(&conn) + .await + .context("failed to count rot pages")?; + let baseboards = usize::try_from(bb_count) + .context("failed to convert baseboard count to usize")?; + let cabooses = usize::try_from(caboose_count) + .context("failed to convert caboose count to usize")?; + let 
rot_pages = usize::try_from(rot_page_count) + .context("failed to convert rot page count to usize")?; + Ok(Self { baseboards, cabooses, rot_pages }) + }) + .await + } } /// Tests inserting several collections, reading them back, and making sure @@ -1205,14 +1418,15 @@ mod test { .expect("failed to read collection back"); assert_eq!(collection1, collection_read); - // There ought to be no baseboards or cabooses in the databases from - // that collection. + // There ought to be no baseboards, cabooses, or RoT pages in the + // databases from that collection. assert_eq!(collection1.baseboards.len(), 0); assert_eq!(collection1.cabooses.len(), 0); - let (nbaseboards, ncabooses) = - count_baseboards_cabooses(&conn).await.unwrap(); - assert_eq!(collection1.baseboards.len(), nbaseboards); - assert_eq!(collection1.cabooses.len(), ncabooses); + assert_eq!(collection1.rot_pages.len(), 0); + let coll_counts = CollectionCounts::new(&conn).await.unwrap(); + assert_eq!(collection1.baseboards.len(), coll_counts.baseboards); + assert_eq!(collection1.cabooses.len(), coll_counts.cabooses); + assert_eq!(collection1.rot_pages.len(), coll_counts.rot_pages); // Now insert a more complex collection, write it to the database, and // read it back. @@ -1227,14 +1441,16 @@ mod test { .await .expect("failed to read collection back"); assert_eq!(collection2, collection_read); - // Verify that we have exactly the set of cabooses and baseboards in the - // databases that came from this first non-empty collection. + // Verify that we have exactly the set of cabooses, baseboards, and RoT + // pages in the databases that came from this first non-empty + // collection. assert_ne!(collection2.baseboards.len(), collection1.baseboards.len()); assert_ne!(collection2.cabooses.len(), collection1.cabooses.len()); - let (nbaseboards, ncabooses) = - count_baseboards_cabooses(&conn).await.unwrap(); - assert_eq!(collection2.baseboards.len(), nbaseboards); - assert_eq!(collection2.cabooses.len(), ncabooses); + assert_ne!(collection2.rot_pages.len(), collection1.rot_pages.len()); + let coll_counts = CollectionCounts::new(&conn).await.unwrap(); + assert_eq!(collection2.baseboards.len(), coll_counts.baseboards); + assert_eq!(collection2.cabooses.len(), coll_counts.cabooses); + assert_eq!(collection2.rot_pages.len(), coll_counts.rot_pages); // Check that we get an error on the limit being reached for // `read_all_or_nothing` @@ -1249,9 +1465,9 @@ mod test { .is_err()); // Now insert an equivalent collection again. Verify the distinct - // baseboards and cabooses again. This is important: the insertion - // process should re-use the baseboards and cabooses from the previous - // collection. + // baseboards, cabooses, and RoT pages again. This is important: the + // insertion process should re-use the baseboards, cabooses, and RoT + // pages from the previous collection. let Representative { builder, .. } = representative(); let collection3 = builder.build(); datastore @@ -1263,18 +1479,19 @@ mod test { .await .expect("failed to read collection back"); assert_eq!(collection3, collection_read); - // Verify that we have the same number of cabooses and baseboards, since - // those didn't change. + // Verify that we have the same number of cabooses, baseboards, and RoT + // pages, since those didn't change. 
assert_eq!(collection3.baseboards.len(), collection2.baseboards.len()); assert_eq!(collection3.cabooses.len(), collection2.cabooses.len()); - let (nbaseboards, ncabooses) = - count_baseboards_cabooses(&conn).await.unwrap(); - assert_eq!(collection3.baseboards.len(), nbaseboards); - assert_eq!(collection3.cabooses.len(), ncabooses); + assert_eq!(collection3.rot_pages.len(), collection2.rot_pages.len()); + let coll_counts = CollectionCounts::new(&conn).await.unwrap(); + assert_eq!(collection3.baseboards.len(), coll_counts.baseboards); + assert_eq!(collection3.cabooses.len(), coll_counts.cabooses); + assert_eq!(collection3.rot_pages.len(), coll_counts.rot_pages); // Now insert a collection that's almost equivalent, but has an extra - // couple of baseboards and caboose. Verify that we re-use the existing - // ones, but still insert the new ones. + // couple of baseboards, one caboose, and one RoT page. Verify that we + // re-use the existing ones, but still insert the new ones. let Representative { mut builder, .. } = representative(); builder.found_sp_state( "test suite", @@ -1298,6 +1515,14 @@ mod test { nexus_inventory::examples::caboose("dummy"), ) .unwrap(); + builder + .found_rot_page( + &bb, + RotPageWhich::Cmpa, + "dummy", + nexus_inventory::examples::rot_page("dummy"), + ) + .unwrap(); let collection4 = builder.build(); datastore .inventory_insert_collection(&opctx, &collection4) @@ -1313,14 +1538,15 @@ mod test { collection4.baseboards.len(), collection3.baseboards.len() + 2 ); + assert_eq!(collection4.cabooses.len(), collection3.cabooses.len() + 1); assert_eq!( - collection4.cabooses.len(), - collection3.baseboards.len() + 1 + collection4.rot_pages.len(), + collection3.rot_pages.len() + 1 ); - let (nbaseboards, ncabooses) = - count_baseboards_cabooses(&conn).await.unwrap(); - assert_eq!(collection4.baseboards.len(), nbaseboards); - assert_eq!(collection4.cabooses.len(), ncabooses); + let coll_counts = CollectionCounts::new(&conn).await.unwrap(); + assert_eq!(collection4.baseboards.len(), coll_counts.baseboards); + assert_eq!(collection4.cabooses.len(), coll_counts.cabooses); + assert_eq!(collection4.rot_pages.len(), coll_counts.rot_pages); // This time, go back to our earlier collection. This logically removes // some baseboards. They should still be present in the database, but @@ -1338,12 +1564,14 @@ mod test { assert_eq!(collection5, collection_read); assert_eq!(collection5.baseboards.len(), collection3.baseboards.len()); assert_eq!(collection5.cabooses.len(), collection3.cabooses.len()); + assert_eq!(collection5.rot_pages.len(), collection3.rot_pages.len()); assert_ne!(collection5.baseboards.len(), collection4.baseboards.len()); assert_ne!(collection5.cabooses.len(), collection4.cabooses.len()); - let (nbaseboards, ncabooses) = - count_baseboards_cabooses(&conn).await.unwrap(); - assert_eq!(collection4.baseboards.len(), nbaseboards); - assert_eq!(collection4.cabooses.len(), ncabooses); + assert_ne!(collection5.rot_pages.len(), collection4.rot_pages.len()); + let coll_counts = CollectionCounts::new(&conn).await.unwrap(); + assert_eq!(collection4.baseboards.len(), coll_counts.baseboards); + assert_eq!(collection4.cabooses.len(), coll_counts.cabooses); + assert_eq!(collection4.rot_pages.len(), coll_counts.rot_pages); // Try to insert the same collection again and make sure it fails. let error = datastore @@ -1536,10 +1764,10 @@ mod test { .expect("failed to check that tables were empty"); // We currently keep the baseboard ids and sw_cabooses around. 
- let (nbaseboards, ncabooses) = - count_baseboards_cabooses(&conn).await.unwrap(); - assert_ne!(nbaseboards, 0); - assert_ne!(ncabooses, 0); + let coll_counts = CollectionCounts::new(&conn).await.unwrap(); + assert_ne!(coll_counts.baseboards, 0); + assert_ne!(coll_counts.cabooses, 0); + assert_ne!(coll_counts.rot_pages, 0); // Clean up. db.cleanup().await.unwrap(); diff --git a/nexus/inventory/Cargo.toml b/nexus/inventory/Cargo.toml index 202aff49b2..6bb63cf9f7 100644 --- a/nexus/inventory/Cargo.toml +++ b/nexus/inventory/Cargo.toml @@ -6,6 +6,7 @@ license = "MPL-2.0" [dependencies] anyhow.workspace = true +base64.workspace = true chrono.workspace = true gateway-client.workspace = true gateway-messages.workspace = true diff --git a/nexus/inventory/src/builder.rs b/nexus/inventory/src/builder.rs index ad008ee4df..188a48b553 100644 --- a/nexus/inventory/src/builder.rs +++ b/nexus/inventory/src/builder.rs @@ -19,6 +19,9 @@ use nexus_types::inventory::Caboose; use nexus_types::inventory::CabooseFound; use nexus_types::inventory::CabooseWhich; use nexus_types::inventory::Collection; +use nexus_types::inventory::RotPage; +use nexus_types::inventory::RotPageFound; +use nexus_types::inventory::RotPageWhich; use nexus_types::inventory::RotState; use nexus_types::inventory::ServiceProcessor; use std::collections::BTreeMap; @@ -39,10 +42,13 @@ pub struct CollectionBuilder { collector: String, baseboards: BTreeSet<Arc<BaseboardId>>, cabooses: BTreeSet<Arc<Caboose>>, + rot_pages: BTreeSet<Arc<RotPage>>, sps: BTreeMap<Arc<BaseboardId>, ServiceProcessor>, rots: BTreeMap<Arc<BaseboardId>, RotState>, cabooses_found: BTreeMap<CabooseWhich, BTreeMap<Arc<BaseboardId>, CabooseFound>>, + rot_pages_found: + BTreeMap<RotPageWhich, BTreeMap<Arc<BaseboardId>, RotPageFound>>, } impl CollectionBuilder { @@ -58,9 +64,11 @@ impl CollectionBuilder { collector: collector.to_owned(), baseboards: BTreeSet::new(), cabooses: BTreeSet::new(), + rot_pages: BTreeSet::new(), sps: BTreeMap::new(), rots: BTreeMap::new(), cabooses_found: BTreeMap::new(), + rot_pages_found: BTreeMap::new(), } } @@ -78,9 +86,11 @@ impl CollectionBuilder { collector: self.collector, baseboards: self.baseboards, cabooses: self.cabooses, + rot_pages: self.rot_pages, sps: self.sps, rots: self.rots, cabooses_found: self.cabooses_found, + rot_pages_found: self.rot_pages_found, } } @@ -251,6 +261,75 @@ impl CollectionBuilder { } } + /// Returns true if we already found the root of trust page for `which` for + /// baseboard `baseboard` + /// + /// This is used to avoid requesting it multiple times (from multiple MGS + /// instances). + pub fn found_rot_page_already( + &self, + baseboard: &BaseboardId, + which: RotPageWhich, + ) -> bool { + self.rot_pages_found + .get(&which) + .map(|map| map.contains_key(baseboard)) + .unwrap_or(false) + } + + /// Record the given root of trust page found for the given baseboard + /// + /// The baseboard must previously have been reported using + /// `found_sp_state()`. + /// + /// `source` is an arbitrary string for debugging that describes the MGS + /// that reported this data (generally a URL string). + pub fn found_rot_page( + &mut self, + baseboard: &BaseboardId, + which: RotPageWhich, + source: &str, + page: RotPage, + ) -> Result<(), anyhow::Error> { + // Normalize the page contents: i.e., if we've seen this exact page + // before, use the same record from before. Otherwise, make a new one. 
+ let sw_rot_page = Self::normalize_item(&mut self.rot_pages, page); + let (baseboard, _) = + self.sps.get_key_value(baseboard).ok_or_else(|| { + anyhow!( + "reporting rot page for unknown baseboard: {:?} ({:?})", + baseboard, + sw_rot_page + ) + })?; + let by_id = self.rot_pages_found.entry(which).or_default(); + if let Some(previous) = by_id.insert( + baseboard.clone(), + RotPageFound { + time_collected: now(), + source: source.to_owned(), + page: sw_rot_page.clone(), + }, + ) { + let error = if *previous.page == *sw_rot_page { + anyhow!("reported multiple times (same value)",) + } else { + anyhow!( + "reported rot page multiple times (previously {:?}, \ + now {:?})", + previous, + sw_rot_page + ) + }; + Err(error.context(format!( + "baseboard {:?} rot page {:?}", + baseboard, which + ))) + } else { + Ok(()) + } + } + /// Helper function for normalizing items /// /// If `item` (or its equivalent) is not already in `items`, insert it. @@ -301,6 +380,8 @@ mod test { use crate::examples::representative; use crate::examples::sp_state; use crate::examples::Representative; + use base64::engine::general_purpose::STANDARD as BASE64_STANDARD; + use base64::Engine; use gateway_client::types::PowerState; use gateway_client::types::RotSlot; use gateway_client::types::RotState; @@ -310,6 +391,8 @@ mod test { use nexus_types::inventory::BaseboardId; use nexus_types::inventory::Caboose; use nexus_types::inventory::CabooseWhich; + use nexus_types::inventory::RotPage; + use nexus_types::inventory::RotPageWhich; // Verify the contents of an empty collection. #[test] @@ -326,9 +409,11 @@ mod test { assert_eq!(collection.collector, "test_empty"); assert!(collection.baseboards.is_empty()); assert!(collection.cabooses.is_empty()); + assert!(collection.rot_pages.is_empty()); assert!(collection.sps.is_empty()); assert!(collection.rots.is_empty()); assert!(collection.cabooses_found.is_empty()); + assert!(collection.rot_pages_found.is_empty()); } // Simple test of a single, fairly typical collection that contains just @@ -428,6 +513,33 @@ mod test { } assert!(collection.cabooses.contains(&common_caboose)); + // Verify the common RoT page data. + let common_rot_page_baseboards = [&sled1_bb, &sled3_bb, &switch]; + let common_rot_page = nexus_types::inventory::RotPage { + // base64("1") == "MQ==" + data_base64: "MQ==".to_string(), + }; + for bb in &common_rot_page_baseboards { + let _ = collection.sps.get(*bb).unwrap(); + let p0 = collection.rot_page_for(RotPageWhich::Cmpa, bb).unwrap(); + let p1 = + collection.rot_page_for(RotPageWhich::CfpaActive, bb).unwrap(); + let p2 = collection + .rot_page_for(RotPageWhich::CfpaInactive, bb) + .unwrap(); + let p3 = + collection.rot_page_for(RotPageWhich::CfpaScratch, bb).unwrap(); + assert_eq!(p0.source, "test suite"); + assert_eq!(*p0.page, common_rot_page); + assert_eq!(p1.source, "test suite"); + assert_eq!(*p1.page, common_rot_page); + assert_eq!(p2.source, "test suite"); + assert_eq!(*p2.page, common_rot_page); + assert_eq!(p3.source, "test suite"); + assert_eq!(*p3.page, common_rot_page); + } + assert!(collection.rot_pages.contains(&common_rot_page)); + // Verify the specific, different data for the healthy SPs and RoTs that // we reported. 
// sled1 @@ -474,6 +586,20 @@ mod test { ); assert_eq!(rot.transient_boot_preference, Some(RotSlot::B)); + // sled 2 did not have any RoT pages reported + assert!(collection + .rot_page_for(RotPageWhich::Cmpa, &sled2_bb) + .is_none()); + assert!(collection + .rot_page_for(RotPageWhich::CfpaActive, &sled2_bb) + .is_none()); + assert!(collection + .rot_page_for(RotPageWhich::CfpaInactive, &sled2_bb) + .is_none()); + assert!(collection + .rot_page_for(RotPageWhich::CfpaScratch, &sled2_bb) + .is_none()); + // switch let sp = collection.sps.get(&switch).unwrap(); assert_eq!(sp.source, "fake MGS 2"); @@ -544,6 +670,38 @@ mod test { assert!(collection.cabooses.contains(c)); assert_eq!(c.board, "board_psc_rot_b"); + // The PSC also has four different RoT pages! + let p = + &collection.rot_page_for(RotPageWhich::Cmpa, &psc).unwrap().page; + assert_eq!( + BASE64_STANDARD.decode(&p.data_base64).unwrap(), + b"psc cmpa" + ); + let p = &collection + .rot_page_for(RotPageWhich::CfpaActive, &psc) + .unwrap() + .page; + assert_eq!( + BASE64_STANDARD.decode(&p.data_base64).unwrap(), + b"psc cfpa active" + ); + let p = &collection + .rot_page_for(RotPageWhich::CfpaInactive, &psc) + .unwrap() + .page; + assert_eq!( + BASE64_STANDARD.decode(&p.data_base64).unwrap(), + b"psc cfpa inactive" + ); + let p = &collection + .rot_page_for(RotPageWhich::CfpaScratch, &psc) + .unwrap() + .page; + assert_eq!( + BASE64_STANDARD.decode(&p.data_base64).unwrap(), + b"psc cfpa scratch" + ); + // Verify the reported SP state for sled3, which did not have a healthy // RoT, nor any cabooses. let sp = collection.sps.get(&sled3_bb).unwrap(); @@ -565,8 +723,9 @@ mod test { assert_eq!(collection.sps.len(), collection.rots.len() + 1); // There should be five cabooses: the four used for the PSC (see above), - // plus the common one. + // plus the common one; same for RoT pages. assert_eq!(collection.cabooses.len(), 5); + assert_eq!(collection.rot_pages.len(), 5); } // Exercises all the failure cases that shouldn't happen in real systems. 
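The `data_base64` values in these tests come from the `rot_page()` example helper added later in this patch, which base64-encodes the raw page contents with the standard alphabet; that is why `rot_page("1")` corresponds to the `"MQ=="` literal asserted above. A minimal standalone sketch (not part of the patch) that checks this round-trip, assuming the same base64 0.21-style `Engine` API the test module imports:

    use base64::engine::general_purpose::STANDARD as BASE64_STANDARD;
    use base64::Engine;

    fn main() {
        // base64("1") == "MQ==", matching `common_rot_page` above.
        let encoded = BASE64_STANDARD.encode("1");
        assert_eq!(encoded, "MQ==");
        // Decoding recovers the original page bytes.
        assert_eq!(BASE64_STANDARD.decode(&encoded).unwrap(), b"1");
    }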
@@ -704,7 +863,7 @@ mod test { assert_eq!(error.to_string(), error2.to_string(),); // report the same caboose twice with the same contents - let _ = builder + builder .found_caboose( &sled1_bb, CabooseWhich::SpSlot0, @@ -747,12 +906,74 @@ mod test { )); assert!(message.contains(", now ")); + // report RoT page for an unknown baseboard + let rot_page1 = RotPage { data_base64: "page1".to_string() }; + let rot_page2 = RotPage { data_base64: "page2".to_string() }; + assert!(!builder + .found_rot_page_already(&bogus_baseboard, RotPageWhich::Cmpa)); + let error = builder + .found_rot_page( + &bogus_baseboard, + RotPageWhich::Cmpa, + "dummy", + rot_page1.clone(), + ) + .unwrap_err(); + assert_eq!( + error.to_string(), + "reporting rot page for unknown baseboard: \ + BaseboardId { part_number: \"p1\", serial_number: \"bogus\" } \ + (RotPage { data_base64: \"page1\" })" + ); + assert!(!builder + .found_rot_page_already(&bogus_baseboard, RotPageWhich::Cmpa)); + + // report the same rot page twice with the same contents + builder + .found_rot_page( + &sled1_bb, + RotPageWhich::Cmpa, + "dummy", + rot_page1.clone(), + ) + .unwrap(); + let error = builder + .found_rot_page( + &sled1_bb, + RotPageWhich::Cmpa, + "dummy", + rot_page1.clone(), + ) + .unwrap_err(); + assert_eq!( + format!("{:#}", error), + "baseboard BaseboardId { part_number: \"model1\", \ + serial_number: \"s1\" } rot page Cmpa: reported multiple \ + times (same value)" + ); + // report the same rot page again with different contents + let error = builder + .found_rot_page( + &sled1_bb, + RotPageWhich::Cmpa, + "dummy", + rot_page2.clone(), + ) + .unwrap_err(); + let message = format!("{:#}", error); + println!("found error: {}", message); + assert!(message.contains( + "rot page Cmpa: reported rot page multiple times (previously" + )); + assert!(message.contains(", now RotPage { data_base64: \"page2\" }")); + // We should still get a valid collection. let collection = builder.build(); println!("{:#?}", collection); assert_eq!(collection.collector, "test_problems"); - // We should still have the one sled and its SP slot0 caboose. + // We should still have the one sled, its SP slot0 caboose, and its Cmpa + // RoT page. assert!(collection.baseboards.contains(&sled1_bb)); let _ = collection.sps.get(&sled1_bb).unwrap(); let caboose = @@ -769,6 +990,28 @@ mod test { assert!(collection .caboose_for(CabooseWhich::RotSlotB, &sled1_bb) .is_none()); + let rot_page = + collection.rot_page_for(RotPageWhich::Cmpa, &sled1_bb).unwrap(); + assert!(collection.rot_pages.contains(&rot_page.page)); + + // TODO-correctness Is this test correct? We reported the same RoT page + // with different data (rot_page1, then rot_page2). The second + // `found_rot_page` returned an error, but we overwrote the original + // data and did not record the error in `collection.errors`. Should we + // either have kept the original data or returned Ok while returning an + // error? It seems a little strange we returned Err but accepted the new + // data. + assert_eq!(rot_page.page.data_base64, rot_page2.data_base64); + + assert!(collection + .rot_page_for(RotPageWhich::CfpaActive, &sled1_bb) + .is_none()); + assert!(collection + .rot_page_for(RotPageWhich::CfpaInactive, &sled1_bb) + .is_none()); + assert!(collection + .rot_page_for(RotPageWhich::CfpaScratch, &sled1_bb) + .is_none()); // We should see an error. 
assert_eq!( diff --git a/nexus/inventory/src/collector.rs b/nexus/inventory/src/collector.rs index 1676f44083..7c6570436a 100644 --- a/nexus/inventory/src/collector.rs +++ b/nexus/inventory/src/collector.rs @@ -6,8 +6,13 @@ use crate::builder::CollectionBuilder; use anyhow::Context; +use gateway_client::types::GetCfpaParams; +use gateway_client::types::RotCfpaSlot; +use gateway_messages::SpComponent; use nexus_types::inventory::CabooseWhich; use nexus_types::inventory::Collection; +use nexus_types::inventory::RotPage; +use nexus_types::inventory::RotPageWhich; use slog::{debug, error}; use std::sync::Arc; use strum::IntoEnumIterator; @@ -195,6 +200,84 @@ impl Collector { ); } } + + // For each kind of RoT page that we care about, if it hasn't been + // fetched already, fetch it and record it. Generally, we'd only + // get here for the first MGS client. Assuming that one succeeds, + // the other(s) will skip this loop. + for which in RotPageWhich::iter() { + if self.in_progress.found_rot_page_already(&baseboard_id, which) + { + continue; + } + + let component = SpComponent::ROT.const_as_str(); + + let result = match which { + RotPageWhich::Cmpa => client + .sp_rot_cmpa_get(sp.type_, sp.slot, component) + .await + .map(|response| response.into_inner().base64_data), + RotPageWhich::CfpaActive => client + .sp_rot_cfpa_get( + sp.type_, + sp.slot, + component, + &GetCfpaParams { slot: RotCfpaSlot::Active }, + ) + .await + .map(|response| response.into_inner().base64_data), + RotPageWhich::CfpaInactive => client + .sp_rot_cfpa_get( + sp.type_, + sp.slot, + component, + &GetCfpaParams { slot: RotCfpaSlot::Inactive }, + ) + .await + .map(|response| response.into_inner().base64_data), + RotPageWhich::CfpaScratch => client + .sp_rot_cfpa_get( + sp.type_, + sp.slot, + component, + &GetCfpaParams { slot: RotCfpaSlot::Scratch }, + ) + .await + .map(|response| response.into_inner().base64_data), + } + .with_context(|| { + format!( + "MGS {:?}: SP {:?}: rot page {:?}", + client.baseurl(), + sp, + which + ) + }); + + let page = match result { + Err(error) => { + self.in_progress.found_error(error); + continue; + } + Ok(data_base64) => RotPage { data_base64 }, + }; + if let Err(error) = self.in_progress.found_rot_page( + &baseboard_id, + which, + client.baseurl(), + page, + ) { + error!( + &self.log, + "error reporting rot page: {:?} {:?} {:?}: {:#}", + baseboard_id, + which, + client.baseurl(), + error + ); + } + } } } } @@ -236,6 +319,11 @@ mod test { .unwrap(); } + write!(&mut s, "\nrot pages:\n").unwrap(); + for p in &collection.rot_pages { + write!(&mut s, " data_base64 {:?}\n", p.data_base64).unwrap(); + } + // All we really need to check here is that we're reporting the right // SPs, RoTs, and cabooses. The actual SP data, RoT data, and caboose // data comes straight from MGS. And proper handling of that data is @@ -272,6 +360,22 @@ mod test { } } + write!(&mut s, "\nrot pages found:\n").unwrap(); + for (kind, bb_to_found) in &collection.rot_pages_found { + for (bb, found) in bb_to_found { + write!( + &mut s, + " {:?} baseboard part {:?} serial {:?}: \ + data_base64 {:?}\n", + kind, + bb.part_number, + bb.serial_number, + found.page.data_base64 + ) + .unwrap(); + } + } + write!(&mut s, "\nerrors:\n").unwrap(); for e in &collection.errors { // Some error strings have OS error numbers in them. 
We want to diff --git a/nexus/inventory/src/examples.rs b/nexus/inventory/src/examples.rs index 52aca397bb..0ce3712942 100644 --- a/nexus/inventory/src/examples.rs +++ b/nexus/inventory/src/examples.rs @@ -13,6 +13,8 @@ use gateway_client::types::SpState; use gateway_client::types::SpType; use nexus_types::inventory::BaseboardId; use nexus_types::inventory::CabooseWhich; +use nexus_types::inventory::RotPage; +use nexus_types::inventory::RotPageWhich; use std::sync::Arc; use strum::IntoEnumIterator; @@ -164,7 +166,7 @@ pub fn representative() -> Representative { for bb in &common_caboose_baseboards { for which in CabooseWhich::iter() { assert!(!builder.found_caboose_already(bb, which)); - let _ = builder + builder .found_caboose(bb, which, "test suite", caboose("1")) .unwrap(); assert!(builder.found_caboose_already(bb, which)); @@ -174,7 +176,7 @@ pub fn representative() -> Representative { // For the PSC, use different cabooses for both slots of both the SP and // RoT, just to exercise that we correctly keep track of different // cabooses. - let _ = builder + builder .found_caboose( &psc_bb, CabooseWhich::SpSlot0, @@ -182,7 +184,7 @@ pub fn representative() -> Representative { caboose("psc_sp_0"), ) .unwrap(); - let _ = builder + builder .found_caboose( &psc_bb, CabooseWhich::SpSlot1, @@ -190,7 +192,7 @@ pub fn representative() -> Representative { caboose("psc_sp_1"), ) .unwrap(); - let _ = builder + builder .found_caboose( &psc_bb, CabooseWhich::RotSlotA, @@ -198,7 +200,7 @@ pub fn representative() -> Representative { caboose("psc_rot_a"), ) .unwrap(); - let _ = builder + builder .found_caboose( &psc_bb, CabooseWhich::RotSlotB, @@ -209,6 +211,59 @@ pub fn representative() -> Representative { // We deliberately provide no cabooses for sled3. + // Report some RoT pages. + + // We'll use the same RoT pages for most of these components, although + // that's not possible in a real system. We deliberately construct a new + // value each time to make sure the builder correctly normalizes it. + let common_rot_page_baseboards = [&sled1_bb, &sled3_bb, &switch1_bb]; + for bb in common_rot_page_baseboards { + for which in RotPageWhich::iter() { + assert!(!builder.found_rot_page_already(bb, which)); + builder + .found_rot_page(bb, which, "test suite", rot_page("1")) + .unwrap(); + assert!(builder.found_rot_page_already(bb, which)); + } + } + + // For the PSC, use different RoT page data for each kind of page, just to + // exercise that we correctly keep track of different data values. + builder + .found_rot_page( + &psc_bb, + RotPageWhich::Cmpa, + "test suite", + rot_page("psc cmpa"), + ) + .unwrap(); + builder + .found_rot_page( + &psc_bb, + RotPageWhich::CfpaActive, + "test suite", + rot_page("psc cfpa active"), + ) + .unwrap(); + builder + .found_rot_page( + &psc_bb, + RotPageWhich::CfpaInactive, + "test suite", + rot_page("psc cfpa inactive"), + ) + .unwrap(); + builder + .found_rot_page( + &psc_bb, + RotPageWhich::CfpaScratch, + "test suite", + rot_page("psc cfpa scratch"), + ) + .unwrap(); + + // We deliberately provide no RoT pages for sled2. 
+ Representative { builder, sleds: [sled1_bb, sled2_bb, sled3_bb], @@ -252,3 +307,10 @@ pub fn caboose(unique: &str) -> SpComponentCaboose { version: format!("version_{}", unique), } } + +pub fn rot_page(unique: &str) -> RotPage { + use base64::Engine; + RotPage { + data_base64: base64::engine::general_purpose::STANDARD.encode(unique), + } +} diff --git a/nexus/inventory/tests/output/collector_basic.txt b/nexus/inventory/tests/output/collector_basic.txt index 76b929bfba..b9894ff184 100644 --- a/nexus/inventory/tests/output/collector_basic.txt +++ b/nexus/inventory/tests/output/collector_basic.txt @@ -10,6 +10,16 @@ cabooses: board "SimRot" name "SimSidecar" version "0.0.1" git_commit "eeeeeeee" board "SimSidecarSp" name "SimSidecar" version "0.0.1" git_commit "ffffffff" +rot pages: + data_base64 "Z2ltbGV0LWNmcGEtYWN0aXZlAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + data_base64 "Z2ltbGV0LWNmcGEtaW5hY3RpdmUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + data_base64 "Z2ltbGV0LWNmcGEtc2NyYXRjaAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + data_base64 
"Z2ltbGV0LWNtcGEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + data_base64 "c2lkZWNhci1jZnBhLWFjdGl2ZQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + data_base64 "c2lkZWNhci1jZnBhLWluYWN0aXZlAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + data_base64 "c2lkZWNhci1jZnBhLXNjcmF0Y2gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + data_base64 "c2lkZWNhci1jbXBhAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + SPs: baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet00" 
baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet01" @@ -40,4 +50,22 @@ cabooses found: RotSlotB baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar0": board "SimRot" RotSlotB baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar1": board "SimRot" +rot pages found: + Cmpa baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet00": data_base64 "Z2ltbGV0LWNtcGEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + Cmpa baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet01": data_base64 "Z2ltbGV0LWNtcGEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + Cmpa baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar0": data_base64 "c2lkZWNhci1jbXBhAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + Cmpa baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar1": data_base64 "c2lkZWNhci1jbXBhAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaActive baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet00": data_base64 
"Z2ltbGV0LWNmcGEtYWN0aXZlAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaActive baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet01": data_base64 "Z2ltbGV0LWNmcGEtYWN0aXZlAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaActive baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar0": data_base64 "c2lkZWNhci1jZnBhLWFjdGl2ZQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaActive baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar1": data_base64 "c2lkZWNhci1jZnBhLWFjdGl2ZQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaInactive baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet00": data_base64 
"Z2ltbGV0LWNmcGEtaW5hY3RpdmUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaInactive baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet01": data_base64 "Z2ltbGV0LWNmcGEtaW5hY3RpdmUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaInactive baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar0": data_base64 "c2lkZWNhci1jZnBhLWluYWN0aXZlAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaInactive baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar1": data_base64 "c2lkZWNhci1jZnBhLWluYWN0aXZlAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaScratch baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet00": data_base64 
"Z2ltbGV0LWNmcGEtc2NyYXRjaAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaScratch baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet01": data_base64 "Z2ltbGV0LWNmcGEtc2NyYXRjaAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaScratch baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar0": data_base64 "c2lkZWNhci1jZnBhLXNjcmF0Y2gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaScratch baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar1": data_base64 "c2lkZWNhci1jZnBhLXNjcmF0Y2gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + errors: diff --git a/nexus/inventory/tests/output/collector_errors.txt b/nexus/inventory/tests/output/collector_errors.txt index c61d2e7c29..a50e24ca30 100644 --- a/nexus/inventory/tests/output/collector_errors.txt +++ b/nexus/inventory/tests/output/collector_errors.txt @@ -10,6 +10,16 @@ cabooses: board "SimRot" name "SimSidecar" version "0.0.1" git_commit "eeeeeeee" board "SimSidecarSp" name "SimSidecar" version "0.0.1" git_commit "ffffffff" +rot pages: + data_base64 
"Z2ltbGV0LWNmcGEtYWN0aXZlAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + data_base64 "Z2ltbGV0LWNmcGEtaW5hY3RpdmUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + data_base64 "Z2ltbGV0LWNmcGEtc2NyYXRjaAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + data_base64 "Z2ltbGV0LWNtcGEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + data_base64 "c2lkZWNhci1jZnBhLWFjdGl2ZQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + data_base64 
"c2lkZWNhci1jZnBhLWluYWN0aXZlAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + data_base64 "c2lkZWNhci1jZnBhLXNjcmF0Y2gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + data_base64 "c2lkZWNhci1jbXBhAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + SPs: baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet00" baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet01" @@ -40,5 +50,23 @@ cabooses found: RotSlotB baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar0": board "SimRot" RotSlotB baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar1": board "SimRot" +rot pages found: + Cmpa baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet00": data_base64 "Z2ltbGV0LWNtcGEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + Cmpa baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet01": data_base64 
"Z2ltbGV0LWNtcGEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + Cmpa baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar0": data_base64 "c2lkZWNhci1jbXBhAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + Cmpa baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar1": data_base64 "c2lkZWNhci1jbXBhAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaActive baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet00": data_base64 "Z2ltbGV0LWNmcGEtYWN0aXZlAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaActive baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet01": data_base64 
"Z2ltbGV0LWNmcGEtYWN0aXZlAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaActive baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar0": data_base64 "c2lkZWNhci1jZnBhLWFjdGl2ZQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaActive baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar1": data_base64 "c2lkZWNhci1jZnBhLWFjdGl2ZQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaInactive baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet00": data_base64 "Z2ltbGV0LWNmcGEtaW5hY3RpdmUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaInactive baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet01": data_base64 
"Z2ltbGV0LWNmcGEtaW5hY3RpdmUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaInactive baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar0": data_base64 "c2lkZWNhci1jZnBhLWluYWN0aXZlAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaInactive baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar1": data_base64 "c2lkZWNhci1jZnBhLWluYWN0aXZlAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaScratch baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet00": data_base64 "Z2ltbGV0LWNmcGEtc2NyYXRjaAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaScratch baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet01": data_base64 
"Z2ltbGV0LWNmcGEtc2NyYXRjaAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaScratch baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar0": data_base64 "c2lkZWNhci1jZnBhLXNjcmF0Y2gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaScratch baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar1": data_base64 "c2lkZWNhci1jZnBhLXNjcmF0Y2gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + errors: error: MGS "http://[100::1]:12345": listing ignition targets: Communication Error <> diff --git a/nexus/types/src/inventory.rs b/nexus/types/src/inventory.rs index 112eec3a65..19c323d894 100644 --- a/nexus/types/src/inventory.rs +++ b/nexus/types/src/inventory.rs @@ -56,6 +56,11 @@ pub struct Collection { /// /// In practice, these will be inserted into the `sw_caboose` table. pub cabooses: BTreeSet>, + /// unique root of trust page contents that were found in this collection + /// + /// In practice, these will be inserted into the `sw_root_of_trust_page` + /// table. + pub rot_pages: BTreeSet>, /// all service processors, keyed by baseboard id /// @@ -73,6 +78,14 @@ pub struct Collection { /// In practice, these will be inserted into the `inv_caboose` table. pub cabooses_found: BTreeMap, CabooseFound>>, + /// all root of trust page contents found, keyed first by the kind of page + /// (`RotPageWhich`), then the baseboard id of the sled where they were + /// found + /// + /// In practice, these will be inserted into the `inv_root_of_trust_page` + /// table. 
+ pub rot_pages_found: + BTreeMap, RotPageFound>>, } impl Collection { @@ -85,6 +98,16 @@ impl Collection { .get(&which) .and_then(|by_bb| by_bb.get(baseboard_id)) } + + pub fn rot_page_for( + &self, + which: RotPageWhich, + baseboard_id: &BaseboardId, + ) -> Option<&RotPageFound> { + self.rot_pages_found + .get(&which) + .and_then(|by_bb| by_bb.get(baseboard_id)) + } } /// A unique baseboard id found during a collection @@ -177,3 +200,57 @@ pub enum CabooseWhich { RotSlotA, RotSlotB, } + +/// Root of trust page contents found during a collection +/// +/// These are normalized in the database. Each distinct `RotPage` is assigned a +/// uuid and shared across many possible collections that reference it. +#[derive(Clone, Debug, Ord, Eq, PartialOrd, PartialEq)] +pub struct RotPage { + pub data_base64: String, +} + +/// Indicates that a particular `RotPage` was found (at a particular time from a +/// particular source, but these are only for debugging) +#[derive(Clone, Debug, Ord, Eq, PartialOrd, PartialEq)] +pub struct RotPageFound { + pub time_collected: DateTime, + pub source: String, + pub page: Arc, +} + +/// Describes which root of trust page this is +#[derive(Clone, Copy, Debug, EnumIter, PartialEq, Eq, PartialOrd, Ord)] +pub enum RotPageWhich { + Cmpa, + CfpaActive, + CfpaInactive, + CfpaScratch, +} + +/// Trait to convert between the two MGS root of trust page types and a tuple of +/// `([RotPageWhich], [RotPage])`. +/// +/// This cannot use the standard `From` trait due to orphan rules: we do not own +/// the `gateway_client` type, and tuples are always considered foreign. +pub trait IntoRotPage { + fn into_rot_page(self) -> (RotPageWhich, RotPage); +} + +impl IntoRotPage for gateway_client::types::RotCmpa { + fn into_rot_page(self) -> (RotPageWhich, RotPage) { + (RotPageWhich::Cmpa, RotPage { data_base64: self.base64_data }) + } +} + +impl IntoRotPage for gateway_client::types::RotCfpa { + fn into_rot_page(self) -> (RotPageWhich, RotPage) { + use gateway_client::types::RotCfpaSlot; + let which = match self.slot { + RotCfpaSlot::Active => RotPageWhich::CfpaActive, + RotCfpaSlot::Inactive => RotPageWhich::CfpaInactive, + RotCfpaSlot::Scratch => RotPageWhich::CfpaScratch, + }; + (which, RotPage { data_base64: self.base64_data }) + } +} diff --git a/schema/crdb/13.0.0/up1.sql b/schema/crdb/13.0.0/up1.sql new file mode 100644 index 0000000000..c6ca3bcb13 --- /dev/null +++ b/schema/crdb/13.0.0/up1.sql @@ -0,0 +1,4 @@ +CREATE TABLE IF NOT EXISTS omicron.public.sw_root_of_trust_page ( + id UUID PRIMARY KEY, + data_base64 TEXT NOT NULL +); diff --git a/schema/crdb/13.0.0/up2.sql b/schema/crdb/13.0.0/up2.sql new file mode 100644 index 0000000000..5d8e775038 --- /dev/null +++ b/schema/crdb/13.0.0/up2.sql @@ -0,0 +1,2 @@ +CREATE UNIQUE INDEX IF NOT EXISTS root_of_trust_page_properties + on omicron.public.sw_root_of_trust_page (data_base64); diff --git a/schema/crdb/13.0.0/up3.sql b/schema/crdb/13.0.0/up3.sql new file mode 100644 index 0000000000..9fb407e7b9 --- /dev/null +++ b/schema/crdb/13.0.0/up3.sql @@ -0,0 +1,6 @@ +CREATE TYPE IF NOT EXISTS omicron.public.root_of_trust_page_which AS ENUM ( + 'cmpa', + 'cfpa_active', + 'cfpa_inactive', + 'cfpa_scratch' +); diff --git a/schema/crdb/13.0.0/up4.sql b/schema/crdb/13.0.0/up4.sql new file mode 100644 index 0000000000..9d227c7427 --- /dev/null +++ b/schema/crdb/13.0.0/up4.sql @@ -0,0 +1,17 @@ +CREATE TABLE IF NOT EXISTS omicron.public.inv_root_of_trust_page ( + -- where this observation came from + -- (foreign key into `inv_collection` table) + 
inv_collection_id UUID NOT NULL, + -- which system this SP reports it is part of + -- (foreign key into `hw_baseboard_id` table) + hw_baseboard_id UUID NOT NULL, + -- when this observation was made + time_collected TIMESTAMPTZ NOT NULL, + -- which MGS instance reported this data + source TEXT NOT NULL, + + which omicron.public.root_of_trust_page_which NOT NULL, + sw_root_of_trust_page_id UUID NOT NULL, + + PRIMARY KEY (inv_collection_id, hw_baseboard_id, which) +); diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index 7bd83439e8..fc3bc37fd7 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -2627,13 +2627,20 @@ CREATE TABLE IF NOT EXISTS omicron.public.sw_caboose ( board TEXT NOT NULL, git_commit TEXT NOT NULL, name TEXT NOT NULL, - -- The MGS response that provides this field indicates that it can be NULL. - -- But that's only to support old software that we no longer support. version TEXT NOT NULL ); CREATE UNIQUE INDEX IF NOT EXISTS caboose_properties on omicron.public.sw_caboose (board, git_commit, name, version); +/* root of trust pages: this table assigns unique ids to distinct RoT CMPA + and CFPA page contents, each of which is a 512-byte blob */ +CREATE TABLE IF NOT EXISTS omicron.public.sw_root_of_trust_page ( + id UUID PRIMARY KEY, + data_base64 TEXT NOT NULL +); +CREATE UNIQUE INDEX IF NOT EXISTS root_of_trust_page_properties + on omicron.public.sw_root_of_trust_page (data_base64); + /* Inventory Collections */ -- list of all collections @@ -2741,6 +2748,32 @@ CREATE TABLE IF NOT EXISTS omicron.public.inv_caboose ( PRIMARY KEY (inv_collection_id, hw_baseboard_id, which) ); +CREATE TYPE IF NOT EXISTS omicron.public.root_of_trust_page_which AS ENUM ( + 'cmpa', + 'cfpa_active', + 'cfpa_inactive', + 'cfpa_scratch' +); + +-- root of trust key signing pages found +CREATE TABLE IF NOT EXISTS omicron.public.inv_root_of_trust_page ( + -- where this observation came from + -- (foreign key into `inv_collection` table) + inv_collection_id UUID NOT NULL, + -- which system this SP reports it is part of + -- (foreign key into `hw_baseboard_id` table) + hw_baseboard_id UUID NOT NULL, + -- when this observation was made + time_collected TIMESTAMPTZ NOT NULL, + -- which MGS instance reported this data + source TEXT NOT NULL, + + which omicron.public.root_of_trust_page_which NOT NULL, + sw_root_of_trust_page_id UUID NOT NULL, + + PRIMARY KEY (inv_collection_id, hw_baseboard_id, which) +); + /*******************************************************************/ /* @@ -2919,7 +2952,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - ( TRUE, NOW(), NOW(), '12.0.0', NULL) + ( TRUE, NOW(), NOW(), '13.0.0', NULL) ON CONFLICT DO NOTHING; COMMIT; diff --git a/sp-sim/src/gimlet.rs b/sp-sim/src/gimlet.rs index 0c753b62b5..635e8fde6b 100644 --- a/sp-sim/src/gimlet.rs +++ b/sp-sim/src/gimlet.rs @@ -22,8 +22,11 @@ use futures::Future; use gateway_messages::ignition::{self, LinkEvents}; use gateway_messages::sp_impl::SpHandler; use gateway_messages::sp_impl::{BoundsChecked, DeviceDescription}; +use gateway_messages::CfpaPage; use gateway_messages::ComponentAction; use gateway_messages::Header; +use gateway_messages::RotRequest; +use gateway_messages::RotResponse; use gateway_messages::RotSlotId; use gateway_messages::SpComponent; use gateway_messages::SpError; @@ -1371,10 +1374,18 @@ impl SpHandler for Handler { fn read_rot( &mut self, - _request: gateway_messages::RotRequest, - _buf: &mut [u8], - ) -> std::result::Result { - 
Err(SpError::RequestUnsupportedForSp) + request: RotRequest, + buf: &mut [u8], + ) -> std::result::Result { + let dummy_page = match request { + RotRequest::ReadCmpa => "gimlet-cmpa", + RotRequest::ReadCfpa(CfpaPage::Active) => "gimlet-cfpa-active", + RotRequest::ReadCfpa(CfpaPage::Inactive) => "gimlet-cfpa-inactive", + RotRequest::ReadCfpa(CfpaPage::Scratch) => "gimlet-cfpa-scratch", + }; + buf[..dummy_page.len()].copy_from_slice(dummy_page.as_bytes()); + buf[dummy_page.len()..].fill(0); + Ok(RotResponse::Ok) } } diff --git a/sp-sim/src/sidecar.rs b/sp-sim/src/sidecar.rs index 46fe8b5df7..19e84ffc64 100644 --- a/sp-sim/src/sidecar.rs +++ b/sp-sim/src/sidecar.rs @@ -27,6 +27,7 @@ use gateway_messages::ignition::LinkEvents; use gateway_messages::sp_impl::BoundsChecked; use gateway_messages::sp_impl::DeviceDescription; use gateway_messages::sp_impl::SpHandler; +use gateway_messages::CfpaPage; use gateway_messages::ComponentAction; use gateway_messages::ComponentDetails; use gateway_messages::DiscoverResponse; @@ -34,6 +35,8 @@ use gateway_messages::IgnitionCommand; use gateway_messages::IgnitionState; use gateway_messages::MgsError; use gateway_messages::PowerState; +use gateway_messages::RotRequest; +use gateway_messages::RotResponse; use gateway_messages::RotSlotId; use gateway_messages::SpComponent; use gateway_messages::SpError; @@ -1150,10 +1153,18 @@ impl SpHandler for Handler { fn read_rot( &mut self, - _request: gateway_messages::RotRequest, - _buf: &mut [u8], - ) -> std::result::Result { - Err(SpError::RequestUnsupportedForSp) + request: RotRequest, + buf: &mut [u8], + ) -> std::result::Result { + let dummy_page = match request { + RotRequest::ReadCmpa => "sidecar-cmpa", + RotRequest::ReadCfpa(CfpaPage::Active) => "sidecar-cfpa-active", + RotRequest::ReadCfpa(CfpaPage::Inactive) => "sidecar-cfpa-inactive", + RotRequest::ReadCfpa(CfpaPage::Scratch) => "sidecar-cfpa-scratch", + }; + buf[..dummy_page.len()].copy_from_slice(dummy_page.as_bytes()); + buf[dummy_page.len()..].fill(0); + Ok(RotResponse::Ok) } } From b07a8f593325efe97ddb526c2725d45d480bf7e6 Mon Sep 17 00:00:00 2001 From: Rain Date: Tue, 21 Nov 2023 14:59:21 -0800 Subject: [PATCH 38/56] [meta] a few changes to prevent duplicate dep builds (#4535) This PR has a few changes that make builds and test runs significantly faster: 1. Remove `xtask` from the list of default-members. This makes it so that `cargo nextest run` and `cargo nextest run -p ` use more dependency feature sets in common. 2. Move `opt-level` settings from `profile.test` to `profile.dev`. Again, this results in more cache hits. 3. Set `profile.dev.panic` to `unwind`. This is to unify build units across dev and test builds: tests are always built with `panic = "unwind"` so that proper backtraces can be printed out. Release builds stay as `abort`. 4. For a belt-and-suspenders approach, make the `crdb-seed` script use the `test` profile. If there are any divergences between `dev` and `test` in the future, then crdb-seed should share its build cache with the tests it was presumably invoked for. 5. Set `profile.dev.build-override.debug` to `line-tables-only`. This, along with 3, means that target (normal/dev) and build (host) dependencies are now unified. All of this comes together for a pretty sweet improvement. See #4392 for more details and how I investigated this issue. ## Impact With a fresh build on Linux with mold, I ran three commands in sequence: 1. `cargo nextest run --no-run` 2. `cargo nextest run -p nexus-db-queries` 3. 
`cargo build -p omicron-nexus` The results were: | **command** | **phase** | **before** | **before, cumul.** | **after** | **after, cumul.** | |-----------------------------------------|-------------------|-----------:|-------------------:|----------:|------------------:| | `cargo nextest run` | build | 173s | 173s | 158s | 158s | | `cargo nextest run -p nexus-db-queries` | build | 61s | 234s | 51s | 209s | | `cargo nextest run -p nexus-db-queries` | `crdb-seed` build | 21s | 255s | 1s | 210s | | `cargo build -p omicron-nexus` | build | 99s | 354s | 69s | 279s | So the cumulative time spent on these three commands went from 354s to 279s. That's a 1.26x speedup. And this should also make other commands better as well (omicron-nexus is a bit of a weird case because it takes a very long time to compile by itself, and that 69s in the "after" column is entirely building omicron-nexus). --- .config/nextest.toml | 4 +- Cargo.toml | 122 ++++++++++++++++++++++++------------------- 2 files changed, 70 insertions(+), 56 deletions(-) diff --git a/.config/nextest.toml b/.config/nextest.toml index 79774e3658..ef296d7ef8 100644 --- a/.config/nextest.toml +++ b/.config/nextest.toml @@ -17,7 +17,9 @@ setup = 'crdb-seed' fail-fast = false [script.crdb-seed] -command = 'cargo run -p crdb-seed' +# Use the test profile for this executable since that's how almost all +# invocations of nextest happen. +command = 'cargo run -p crdb-seed --profile test' # The ClickHouse cluster tests currently rely on a hard-coded set of ports for # the nodes in the cluster. We would like to relax this in the future, at which diff --git a/Cargo.toml b/Cargo.toml index fb220ba53d..f3da0381df 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -91,7 +91,9 @@ default-members = [ "dev-tools/omdb", "dev-tools/omicron-dev", "dev-tools/thing-flinger", - "dev-tools/xtask", + # Do not include xtask in the list of default members, because this causes + # hakari to not work as well and build times to be longer. + # See omicron#4392. "dns-server", "gateway-cli", "gateway-test-utils", @@ -391,13 +393,27 @@ zeroize = { version = "1.6.0", features = ["zeroize_derive", "std"] } zip = { version = "0.6.6", default-features = false, features = ["deflate","bzip2"] } zone = { version = "0.3", default-features = false, features = ["async"] } +# NOTE: The test profile inherits from the dev profile, so settings under +# profile.dev get inherited. AVOID setting anything under profile.test: that +# will cause dev and test builds to diverge, which will cause more Cargo build +# cache misses. + [profile.dev] -panic = "abort" +# Note: This used to be panic = "abort" earlier, but that caused a lot of +# duplicate dependency builds. Letting panic be "unwind" causes dependencies +# across `cargo test` and `cargo run` to be unified. See omicron#4392. +panic = "unwind" + # See https://github.com/oxidecomputer/omicron/issues/4009 for some background context here. # By reducing the debug level (though keeping enough to have meaningful # backtraces), we reduce incremental build time and binary size significantly. debug = "line-tables-only" +[profile.dev.build-override] +# Setting this to line-tables-only results in a large improvement in build +# times, because it allows target and host dependencies to be unified. +debug = "line-tables-only" + # `bindgen` is used by `samael`'s build script; building it with optimizations # makes that build script run ~5x faster, more than offsetting the additional # build time added to `bindgen` itself. 
@@ -428,112 +444,108 @@ panic = "abort" # proptest based test generation and shrinking is expensive. Let's optimize it. [profile.dev.package.proptest] opt-level = 3 -[profile.test.package.proptest] -opt-level = 3 [profile.dev.package.bootstore] opt-level = 3 -[profile.test.package.bootstore] -opt-level = 3 # Crypto stuff always needs optimizations -[profile.test.package.sha3] +[profile.dev.package.sha3] opt-level = 3 -[profile.test.package.sha2] +[profile.dev.package.sha2] opt-level = 3 -[profile.test.package.hkdf] +[profile.dev.package.hkdf] opt-level = 3 -[profile.test.package.chacha20poly1305] +[profile.dev.package.chacha20poly1305] opt-level = 3 -[profile.test.package.chacha20] +[profile.dev.package.chacha20] opt-level = 3 -[profile.test.package.vsss-rs] +[profile.dev.package.vsss-rs] opt-level = 3 -[profile.test.package.curve25519-dalek] +[profile.dev.package.curve25519-dalek] opt-level = 3 -[profile.test.package.aead] +[profile.dev.package.aead] opt-level = 3 -[profile.test.package.aes] +[profile.dev.package.aes] opt-level = 3 -[profile.test.package.aes-gcm] +[profile.dev.package.aes-gcm] opt-level = 3 -[profile.test.package.bcrypt-pbkdf] +[profile.dev.package.bcrypt-pbkdf] opt-level = 3 -[profile.test.package.blake2] +[profile.dev.package.blake2] opt-level = 3 -[profile.test.package.blake2b_simd] +[profile.dev.package.blake2b_simd] opt-level = 3 -[profile.test.package.block-buffer] +[profile.dev.package.block-buffer] opt-level = 3 -[profile.test.package.block-padding] +[profile.dev.package.block-padding] opt-level = 3 -[profile.test.package.blowfish] +[profile.dev.package.blowfish] opt-level = 3 -[profile.test.package.constant_time_eq] +[profile.dev.package.constant_time_eq] opt-level = 3 -[profile.test.package.crypto-bigint] +[profile.dev.package.crypto-bigint] opt-level = 3 -[profile.test.package.crypto-common] +[profile.dev.package.crypto-common] opt-level = 3 -[profile.test.package.ctr] +[profile.dev.package.ctr] opt-level = 3 -[profile.test.package.cbc] +[profile.dev.package.cbc] opt-level = 3 -[profile.test.package.digest] +[profile.dev.package.digest] opt-level = 3 -[profile.test.package.ed25519] +[profile.dev.package.ed25519] opt-level = 3 -[profile.test.package.ed25519-dalek] +[profile.dev.package.ed25519-dalek] opt-level = 3 -[profile.test.package.elliptic-curve] +[profile.dev.package.elliptic-curve] opt-level = 3 -[profile.test.package.generic-array] +[profile.dev.package.generic-array] opt-level = 3 -[profile.test.package.getrandom] +[profile.dev.package.getrandom] opt-level = 3 -[profile.test.package.hmac] +[profile.dev.package.hmac] opt-level = 3 -[profile.test.package.lpc55_sign] +[profile.dev.package.lpc55_sign] opt-level = 3 -[profile.test.package.md5] +[profile.dev.package.md5] opt-level = 3 -[profile.test.package.md-5] +[profile.dev.package.md-5] opt-level = 3 -[profile.test.package.num-bigint] +[profile.dev.package.num-bigint] opt-level = 3 -[profile.test.package.num-bigint-dig] +[profile.dev.package.num-bigint-dig] opt-level = 3 -[profile.test.package.rand] +[profile.dev.package.rand] opt-level = 3 -[profile.test.package.rand_chacha] +[profile.dev.package.rand_chacha] opt-level = 3 -[profile.test.package.rand_core] +[profile.dev.package.rand_core] opt-level = 3 -[profile.test.package.rand_hc] +[profile.dev.package.rand_hc] opt-level = 3 -[profile.test.package.rand_xorshift] +[profile.dev.package.rand_xorshift] opt-level = 3 -[profile.test.package.rsa] +[profile.dev.package.rsa] opt-level = 3 -[profile.test.package.salty] +[profile.dev.package.salty] opt-level = 
3 -[profile.test.package.signature] +[profile.dev.package.signature] opt-level = 3 -[profile.test.package.subtle] +[profile.dev.package.subtle] opt-level = 3 -[profile.test.package.tiny-keccak] +[profile.dev.package.tiny-keccak] opt-level = 3 -[profile.test.package.uuid] +[profile.dev.package.uuid] opt-level = 3 -[profile.test.package.cipher] +[profile.dev.package.cipher] opt-level = 3 -[profile.test.package.cpufeatures] +[profile.dev.package.cpufeatures] opt-level = 3 -[profile.test.package.poly1305] +[profile.dev.package.poly1305] opt-level = 3 -[profile.test.package.inout] +[profile.dev.package.inout] opt-level = 3 -[profile.test.package.keccak] +[profile.dev.package.keccak] opt-level = 3 # From c339fc7681bf1337325f5b2fb233f2d314d579ed Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Wed, 22 Nov 2023 02:00:31 +0000 Subject: [PATCH 39/56] Update Rust crate tokio-tungstenite to 0.20 (#4403) Co-authored-by: Rain --- Cargo.lock | 41 +++---------------- Cargo.toml | 2 +- gateway/src/serial_console.rs | 8 +++- .../tests/integration_tests/serial_console.rs | 3 +- 4 files changed, 14 insertions(+), 40 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3c9c31a2ac..07f804b03d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2443,7 +2443,7 @@ dependencies = [ "slog-term", "termios", "tokio", - "tokio-tungstenite 0.18.0", + "tokio-tungstenite", "uuid", ] @@ -4575,7 +4575,7 @@ dependencies = [ "thiserror", "tokio", "tokio-stream", - "tokio-tungstenite 0.18.0", + "tokio-tungstenite", "toml 0.8.8", "uuid", ] @@ -6122,7 +6122,7 @@ dependencies = [ "slog", "thiserror", "tokio", - "tokio-tungstenite 0.20.1", + "tokio-tungstenite", "uuid", ] @@ -6152,7 +6152,7 @@ dependencies = [ "slog-term", "thiserror", "tokio", - "tokio-tungstenite 0.20.1", + "tokio-tungstenite", "uuid", ] @@ -8522,18 +8522,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "tokio-tungstenite" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54319c93411147bced34cb5609a80e0a8e44c5999c93903a81cd866630ec0bfd" -dependencies = [ - "futures-util", - "log", - "tokio", - "tungstenite 0.18.0", -] - [[package]] name = "tokio-tungstenite" version = "0.20.1" @@ -8543,7 +8531,7 @@ dependencies = [ "futures-util", "log", "tokio", - "tungstenite 0.20.1", + "tungstenite", ] [[package]] @@ -8901,25 +8889,6 @@ dependencies = [ "unicode-width", ] -[[package]] -name = "tungstenite" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30ee6ab729cd4cf0fd55218530c4522ed30b7b6081752839b68fcec8d0960788" -dependencies = [ - "base64 0.13.1", - "byteorder", - "bytes", - "http", - "httparse", - "log", - "rand 0.8.5", - "sha1", - "thiserror", - "url", - "utf-8", -] - [[package]] name = "tungstenite" version = "0.20.1" diff --git a/Cargo.toml b/Cargo.toml index f3da0381df..e4588efbde 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -368,7 +368,7 @@ tofino = { git = "http://github.com/oxidecomputer/tofino", branch = "main" } tokio = "1.34.0" tokio-postgres = { version = "0.7", features = [ "with-chrono-0_4", "with-uuid-1" ] } tokio-stream = "0.1.14" -tokio-tungstenite = "0.18" +tokio-tungstenite = "0.20" tokio-util = { version = "0.7.10", features = ["io", "io-util"] } toml = "0.8.8" toml_edit = "0.21.0" diff --git a/gateway/src/serial_console.rs b/gateway/src/serial_console.rs index eb6183fdfb..3e49f8526a 100644 --- a/gateway/src/serial_console.rs +++ b/gateway/src/serial_console.rs @@ -48,8 +48,12 @@ 
pub(crate) async fn run( log: Logger, ) -> WebsocketChannelResult { let upgraded = conn.into_inner(); - let config = - WebSocketConfig { max_send_queue: Some(4096), ..Default::default() }; + let config = WebSocketConfig { + // Maintain a max write buffer size of 2 MB (this is only relevant if + // writes are failing). + max_write_buffer_size: 2 * 1024 * 1024, + ..Default::default() + }; let ws_stream = WebSocketStream::from_raw_socket(upgraded, Role::Server, Some(config)) .await; diff --git a/gateway/tests/integration_tests/serial_console.rs b/gateway/tests/integration_tests/serial_console.rs index 9ab26bef4a..11cb9674a7 100644 --- a/gateway/tests/integration_tests/serial_console.rs +++ b/gateway/tests/integration_tests/serial_console.rs @@ -100,11 +100,12 @@ async fn serial_console_detach() { } tungstenite::Error::ConnectionClosed | tungstenite::Error::AlreadyClosed + | tungstenite::Error::AttackAttempt | tungstenite::Error::Io(_) | tungstenite::Error::Tls(_) | tungstenite::Error::Capacity(_) | tungstenite::Error::Protocol(_) - | tungstenite::Error::SendQueueFull(_) + | tungstenite::Error::WriteBufferFull(_) | tungstenite::Error::Utf8 | tungstenite::Error::Url(_) | tungstenite::Error::HttpFormat(_) => panic!("unexpected error"), From 36f6abe06548eae0f954f1b968f98def846f7d93 Mon Sep 17 00:00:00 2001 From: Ryan Goodfellow Date: Wed, 22 Nov 2023 10:04:29 -0800 Subject: [PATCH 40/56] factor out a common port settings saga function missed in #4528 (#4549) --- .../app/sagas/switch_port_settings_apply.rs | 60 ++++--------------- .../app/sagas/switch_port_settings_clear.rs | 15 +++-- .../app/sagas/switch_port_settings_common.rs | 37 ++++++++++++ 3 files changed, 57 insertions(+), 55 deletions(-) diff --git a/nexus/src/app/sagas/switch_port_settings_apply.rs b/nexus/src/app/sagas/switch_port_settings_apply.rs index aba62b6937..0d6bb52421 100644 --- a/nexus/src/app/sagas/switch_port_settings_apply.rs +++ b/nexus/src/app/sagas/switch_port_settings_apply.rs @@ -6,8 +6,8 @@ use super::{NexusActionContext, NEXUS_DPD_TAG}; use crate::app::sagas::retry_until_known_result; use crate::app::sagas::switch_port_settings_common::{ api_to_dpd_port_settings, ensure_switch_port_bgp_settings, - ensure_switch_port_uplink, select_mg_client, switch_sled_agent, - write_bootstore_config, + ensure_switch_port_uplink, select_dendrite_client, select_mg_client, + switch_sled_agent, write_bootstore_config, }; use crate::app::sagas::{ declare_saga_actions, ActionRegistry, NexusSaga, SagaInitError, @@ -19,9 +19,7 @@ use nexus_db_model::NETWORK_KEY; use nexus_db_queries::db::datastore::UpdatePrecondition; use nexus_db_queries::{authn, db}; use omicron_common::api::external::{self, NameOrId}; -use omicron_common::api::internal::shared::{ - ParseSwitchLocationError, SwitchLocation, -}; +use omicron_common::api::internal::shared::SwitchLocation; use serde::{Deserialize, Serialize}; use std::net::IpAddr; use std::str::FromStr; @@ -160,6 +158,10 @@ async fn spa_ensure_switch_port_settings( ) -> Result<(), ActionError> { let params = sagactx.saga_params::()?; let log = sagactx.user_data().log(); + let opctx = crate::context::op_context_for_saga_action( + &sagactx, + ¶ms.serialized_authn, + ); let settings = sagactx .lookup::("switch_port_settings")?; @@ -170,7 +172,7 @@ async fn spa_ensure_switch_port_settings( })?; let dpd_client: Arc = - select_dendrite_client(&sagactx).await?; + select_dendrite_client(&sagactx, &opctx, params.switch_port_id).await?; let dpd_port_settings = api_to_dpd_port_settings(&settings).map_err(|e| { @@ 
-227,8 +229,8 @@ async fn spa_undo_ensure_switch_port_settings( .lookup::>("original_switch_port_settings_id") .map_err(|e| external::Error::internal_error(&e.to_string()))?; - let dpd_client: Arc = - select_dendrite_client(&sagactx).await?; + let dpd_client = + select_dendrite_client(&sagactx, &opctx, params.switch_port_id).await?; let id = match orig_port_settings_id { Some(id) => id, @@ -471,48 +473,6 @@ async fn spa_disassociate_switch_port( Ok(()) } -pub(crate) async fn select_dendrite_client( - sagactx: &NexusActionContext, -) -> Result, ActionError> { - let osagactx = sagactx.user_data(); - let params = sagactx.saga_params::()?; - let nexus = osagactx.nexus(); - let opctx = crate::context::op_context_for_saga_action( - &sagactx, - ¶ms.serialized_authn, - ); - - let switch_port = nexus - .get_switch_port(&opctx, params.switch_port_id) - .await - .map_err(|e| { - ActionError::action_failed(format!( - "get switch port for dendrite client selection {e}" - )) - })?; - - let switch_location: SwitchLocation = - switch_port.switch_location.parse().map_err( - |e: ParseSwitchLocationError| { - ActionError::action_failed(format!( - "get switch location for uplink: {e:?}", - )) - }, - )?; - - let dpd_client: Arc = osagactx - .nexus() - .dpd_clients - .get(&switch_location) - .ok_or_else(|| { - ActionError::action_failed(format!( - "requested switch not available: {switch_location}" - )) - })? - .clone(); - Ok(dpd_client) -} - async fn spa_ensure_switch_port_bgp_settings( sagactx: NexusActionContext, ) -> Result<(), ActionError> { diff --git a/nexus/src/app/sagas/switch_port_settings_clear.rs b/nexus/src/app/sagas/switch_port_settings_clear.rs index bcbd5bf894..0d876f8159 100644 --- a/nexus/src/app/sagas/switch_port_settings_clear.rs +++ b/nexus/src/app/sagas/switch_port_settings_clear.rs @@ -2,14 +2,13 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. 
-use super::switch_port_settings_apply::select_dendrite_client; use super::{NexusActionContext, NEXUS_DPD_TAG}; use crate::app::sagas::retry_until_known_result; use crate::app::sagas::switch_port_settings_common::{ api_to_dpd_port_settings, apply_bootstore_update, bootstore_update, ensure_switch_port_bgp_settings, ensure_switch_port_uplink, - read_bootstore_config, select_mg_client, switch_sled_agent, - write_bootstore_config, + read_bootstore_config, select_dendrite_client, select_mg_client, + switch_sled_agent, write_bootstore_config, }; use crate::app::sagas::{ declare_saga_actions, ActionRegistry, NexusSaga, SagaInitError, @@ -147,11 +146,16 @@ async fn spa_clear_switch_port_settings( ) -> Result<(), ActionError> { let params = sagactx.saga_params::()?; let log = sagactx.user_data().log(); + let opctx = crate::context::op_context_for_saga_action( + &sagactx, + ¶ms.serialized_authn, + ); let port_id: PortId = PortId::from_str(¶ms.port_name) .map_err(|e| ActionError::action_failed(e.to_string()))?; - let dpd_client = select_dendrite_client(&sagactx).await?; + let dpd_client = + select_dendrite_client(&sagactx, &opctx, params.switch_port_id).await?; retry_until_known_result(log, || async { dpd_client.port_settings_clear(&port_id, Some(NEXUS_DPD_TAG)).await @@ -191,7 +195,8 @@ async fn spa_undo_clear_switch_port_settings( .await .map_err(ActionError::action_failed)?; - let dpd_client = select_dendrite_client(&sagactx).await?; + let dpd_client = + select_dendrite_client(&sagactx, &opctx, params.switch_port_id).await?; let dpd_port_settings = api_to_dpd_port_settings(&settings) .map_err(ActionError::action_failed)?; diff --git a/nexus/src/app/sagas/switch_port_settings_common.rs b/nexus/src/app/sagas/switch_port_settings_common.rs index 8e66aa12f8..b328c6d1ac 100644 --- a/nexus/src/app/sagas/switch_port_settings_common.rs +++ b/nexus/src/app/sagas/switch_port_settings_common.rs @@ -575,3 +575,40 @@ pub struct EarlyNetworkPortUpdate { port: PortConfigV1, bgp_configs: Vec, } + +pub(crate) async fn select_dendrite_client( + sagactx: &NexusActionContext, + opctx: &OpContext, + switch_port_id: Uuid, +) -> Result, ActionError> { + let osagactx = sagactx.user_data(); + let nexus = osagactx.nexus(); + + let switch_port = + nexus.get_switch_port(&opctx, switch_port_id).await.map_err(|e| { + ActionError::action_failed(format!( + "get switch port for dendrite client selection {e}" + )) + })?; + + let switch_location: SwitchLocation = + switch_port.switch_location.parse().map_err( + |e: ParseSwitchLocationError| { + ActionError::action_failed(format!( + "get switch location for uplink: {e:?}", + )) + }, + )?; + + let dpd_client: Arc = osagactx + .nexus() + .dpd_clients + .get(&switch_location) + .ok_or_else(|| { + ActionError::action_failed(format!( + "requested switch not available: {switch_location}" + )) + })? + .clone(); + Ok(dpd_client) +} From 3f702ef442a2cb6522684c8b4028bc8a8b11ed6d Mon Sep 17 00:00:00 2001 From: Rain Date: Wed, 22 Nov 2023 17:26:19 -0800 Subject: [PATCH 41/56] [omicron-dev] increase test timeout to 30 seconds (#4557) On my machine (Ryzen 7950X) I saw that under load (32 tests running at the same time), the timeout would quite reliably be hit likely because cockroach was starved. Increasing it seems pretty harmless. 
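For context, here is a minimal sketch of the pattern such a constant typically gates: polling a readiness check until a deadline passes. The helper and the polled condition below are illustrative stand-ins under that assumption, not code from this test.

```rust
use std::time::Duration;
use tokio::time::{sleep, Instant};

/// Mirrors the bumped constant in the patch; everything else in this sketch
/// is a hypothetical stand-in, not omicron code.
const TIMEOUT: Duration = Duration::from_secs(30);

/// Poll a readiness check until it passes or `TIMEOUT` elapses. Under heavy
/// load (e.g. 32 tests sharing one machine) the check can take far longer
/// than it does on an idle box, which is why a generous deadline matters.
async fn wait_until_ready(
    mut is_ready: impl FnMut() -> bool,
) -> Result<(), &'static str> {
    let deadline = Instant::now() + TIMEOUT;
    while !is_ready() {
        if Instant::now() >= deadline {
            return Err("timed out waiting for readiness");
        }
        // Back off briefly between checks so a starved process gets CPU time.
        sleep(Duration::from_millis(100)).await;
    }
    Ok(())
}
```

Doubling the deadline only changes how long a starved check may keep polling; a healthy run still returns as soon as the check passes.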
--- dev-tools/omicron-dev/tests/test_omicron_dev.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/omicron-dev/tests/test_omicron_dev.rs b/dev-tools/omicron-dev/tests/test_omicron_dev.rs index f1e8177243..7e78e5dc5a 100644 --- a/dev-tools/omicron-dev/tests/test_omicron_dev.rs +++ b/dev-tools/omicron-dev/tests/test_omicron_dev.rs @@ -27,7 +27,7 @@ use subprocess::Redirection; const CMD_OMICRON_DEV: &str = env!("CARGO_BIN_EXE_omicron-dev"); /// timeout used for various things that should be pretty quick -const TIMEOUT: Duration = Duration::from_secs(15); +const TIMEOUT: Duration = Duration::from_secs(30); fn path_to_omicron_dev() -> PathBuf { path_to_executable(CMD_OMICRON_DEV) From 47968b8e17a1a16c1da605da0d418efd8fa6026e Mon Sep 17 00:00:00 2001 From: "Andrew J. Stone" Date: Sat, 25 Nov 2023 01:57:43 -0500 Subject: [PATCH 42/56] [Nexus] Add a sled to an initialized rack (#4545) This commit provides an external API for adding a sled to an already initialized rack. --- nexus/db-model/src/lib.rs | 2 + nexus/db-model/src/rack.rs | 19 +- nexus/db-model/src/schema.rs | 12 +- nexus/db-model/src/sled.rs | 4 +- .../src/sled_underlay_subnet_allocation.rs | 16 ++ nexus/db-model/src/switch.rs | 4 +- .../db-queries/src/db/datastore/inventory.rs | 73 ++++- nexus/db-queries/src/db/datastore/rack.rs | 254 ++++++++++++++++++ nexus/src/app/rack.rs | 139 ++++++++-- nexus/src/app/sled.rs | 3 + nexus/src/external_api/http_entrypoints.rs | 31 ++- nexus/tests/integration_tests/endpoints.rs | 17 +- nexus/tests/integration_tests/rack.rs | 66 +++++ nexus/tests/output/nexus_tags.txt | 1 + nexus/types/src/external_api/shared.rs | 36 +++ nexus/types/src/external_api/views.rs | 38 +-- nexus/types/src/internal_api/params.rs | 4 +- nexus/types/src/inventory.rs | 8 + openapi/nexus.json | 30 ++- schema/crdb/14.0.0/up1.sql | 37 +++ schema/crdb/14.0.0/up2.sql | 5 + schema/crdb/dbinit.sql | 47 +++- 22 files changed, 753 insertions(+), 93 deletions(-) create mode 100644 nexus/db-model/src/sled_underlay_subnet_allocation.rs create mode 100644 schema/crdb/14.0.0/up1.sql create mode 100644 schema/crdb/14.0.0/up2.sql diff --git a/nexus/db-model/src/lib.rs b/nexus/db-model/src/lib.rs index 6b65eb87ec..ac5bad26f8 100644 --- a/nexus/db-model/src/lib.rs +++ b/nexus/db-model/src/lib.rs @@ -72,6 +72,7 @@ mod sled; mod sled_instance; mod sled_resource; mod sled_resource_kind; +mod sled_underlay_subnet_allocation; mod snapshot; mod ssh_key; mod switch; @@ -153,6 +154,7 @@ pub use sled::*; pub use sled_instance::*; pub use sled_resource::*; pub use sled_resource_kind::*; +pub use sled_underlay_subnet_allocation::*; pub use snapshot::*; pub use ssh_key::*; pub use switch::*; diff --git a/nexus/db-model/src/rack.rs b/nexus/db-model/src/rack.rs index f2bc7528d2..580ec155b4 100644 --- a/nexus/db-model/src/rack.rs +++ b/nexus/db-model/src/rack.rs @@ -4,9 +4,8 @@ use crate::schema::rack; use db_macros::Asset; -use ipnetwork::{IpNetwork, Ipv6Network}; +use ipnetwork::IpNetwork; use nexus_types::{external_api::views, identity::Asset}; -use omicron_common::api; use uuid::Uuid; /// Information about a local rack. 
@@ -29,22 +28,6 @@ impl Rack { rack_subnet: None, } } - - pub fn subnet(&self) -> Result { - match self.rack_subnet { - Some(IpNetwork::V6(subnet)) => Ok(subnet), - Some(IpNetwork::V4(_)) => { - return Err(api::external::Error::InternalError { - internal_message: "rack subnet not IPv6".into(), - }) - } - None => { - return Err(api::external::Error::InternalError { - internal_message: "rack subnet not set".into(), - }) - } - } - } } impl From for views::Rack { diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 7f7dd57027..afeac5e6cd 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -755,6 +755,16 @@ table! { } } +table! { + sled_underlay_subnet_allocation (rack_id, sled_id) { + rack_id -> Uuid, + sled_id -> Uuid, + subnet_octet -> Int2, + hw_baseboard_id -> Uuid, + } +} +allow_tables_to_appear_in_same_query!(rack, sled_underlay_subnet_allocation); + table! { switch (id) { id -> Uuid, @@ -1289,7 +1299,7 @@ table! { /// /// This should be updated whenever the schema is changed. For more details, /// refer to: schema/crdb/README.adoc -pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(13, 0, 0); +pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(14, 0, 0); allow_tables_to_appear_in_same_query!( system_update, diff --git a/nexus/db-model/src/sled.rs b/nexus/db-model/src/sled.rs index ba572901c6..4c82aa5d23 100644 --- a/nexus/db-model/src/sled.rs +++ b/nexus/db-model/src/sled.rs @@ -8,7 +8,7 @@ use crate::ipv6; use crate::schema::{physical_disk, service, sled, zpool}; use chrono::{DateTime, Utc}; use db_macros::Asset; -use nexus_types::{external_api::views, identity::Asset}; +use nexus_types::{external_api::shared, external_api::views, identity::Asset}; use std::net::Ipv6Addr; use std::net::SocketAddrV6; use uuid::Uuid; @@ -88,7 +88,7 @@ impl From for views::Sled { Self { identity: sled.identity(), rack_id: sled.rack_id, - baseboard: views::Baseboard { + baseboard: shared::Baseboard { serial: sled.serial_number, part: sled.part_number, revision: sled.revision, diff --git a/nexus/db-model/src/sled_underlay_subnet_allocation.rs b/nexus/db-model/src/sled_underlay_subnet_allocation.rs new file mode 100644 index 0000000000..4da0bea669 --- /dev/null +++ b/nexus/db-model/src/sled_underlay_subnet_allocation.rs @@ -0,0 +1,16 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use crate::schema::sled_underlay_subnet_allocation; +use uuid::Uuid; + +/// Underlay allocation for a sled added to an initialized rack +#[derive(Queryable, Insertable, Debug, Clone, Selectable)] +#[diesel(table_name = sled_underlay_subnet_allocation)] +pub struct SledUnderlaySubnetAllocation { + pub rack_id: Uuid, + pub sled_id: Uuid, + pub subnet_octet: i16, + pub hw_baseboard_id: Uuid, +} diff --git a/nexus/db-model/src/switch.rs b/nexus/db-model/src/switch.rs index c9db100b0a..159888d91e 100644 --- a/nexus/db-model/src/switch.rs +++ b/nexus/db-model/src/switch.rs @@ -2,7 +2,7 @@ use super::Generation; use crate::schema::switch; use chrono::{DateTime, Utc}; use db_macros::Asset; -use nexus_types::{external_api::views, identity::Asset}; +use nexus_types::{external_api::shared, external_api::views, identity::Asset}; use uuid::Uuid; /// Baseboard information about a switch. 
@@ -57,7 +57,7 @@ impl From for views::Switch { Self { identity: switch.identity(), rack_id: switch.rack_id, - baseboard: views::Baseboard { + baseboard: shared::Baseboard { serial: switch.serial_number, part: switch.part_number, revision: switch.revision, diff --git a/nexus/db-queries/src/db/datastore/inventory.rs b/nexus/db-queries/src/db/datastore/inventory.rs index 28a438629e..31b24a7e75 100644 --- a/nexus/db-queries/src/db/datastore/inventory.rs +++ b/nexus/db-queries/src/db/datastore/inventory.rs @@ -7,6 +7,7 @@ use crate::authz; use crate::context::OpContext; use crate::db; use crate::db::error::public_error_from_diesel; +use crate::db::error::public_error_from_diesel_lookup; use crate::db::error::ErrorHandler; use crate::db::queries::ALLOW_FULL_TABLE_SCAN_SQL; use crate::db::TransactionError; @@ -21,6 +22,7 @@ use diesel::ExpressionMethods; use diesel::IntoSql; use diesel::JoinOnDsl; use diesel::NullableExpressionMethods; +use diesel::OptionalExtension; use diesel::QueryDsl; use diesel::Table; use futures::future::BoxFuture; @@ -42,9 +44,12 @@ use nexus_db_model::SpType; use nexus_db_model::SpTypeEnum; use nexus_db_model::SwCaboose; use nexus_db_model::SwRotPage; +use nexus_types::inventory::BaseboardId; use nexus_types::inventory::Collection; use omicron_common::api::external::Error; use omicron_common::api::external::InternalContext; +use omicron_common::api::external::LookupType; +use omicron_common::api::external::ResourceType; use omicron_common::bail_unless; use std::collections::BTreeMap; use std::collections::BTreeSet; @@ -910,30 +915,62 @@ impl DataStore { Ok(()) } + // Find the primary key for `hw_baseboard_id` given a `BaseboardId` + pub async fn find_hw_baseboard_id( + &self, + opctx: &OpContext, + baseboard_id: BaseboardId, + ) -> Result { + opctx.authorize(authz::Action::Read, &authz::INVENTORY).await?; + let conn = self.pool_connection_authorized(opctx).await?; + use db::schema::hw_baseboard_id::dsl; + dsl::hw_baseboard_id + .filter(dsl::serial_number.eq(baseboard_id.serial_number.clone())) + .filter(dsl::part_number.eq(baseboard_id.part_number.clone())) + .select(dsl::id) + .first_async::(&*conn) + .await + .map_err(|e| { + public_error_from_diesel_lookup( + e, + ResourceType::Sled, + &LookupType::ByCompositeId(format!("{baseboard_id:?}")), + ) + }) + } + /// Attempt to read the latest collection while limiting queries to `limit` /// records + /// + /// If there aren't any collections, return `Ok(None)`. 
pub async fn inventory_get_latest_collection( &self, opctx: &OpContext, limit: NonZeroU32, - ) -> Result { + ) -> Result, Error> { opctx.authorize(authz::Action::Read, &authz::INVENTORY).await?; let conn = self.pool_connection_authorized(opctx).await?; use db::schema::inv_collection::dsl; let collection_id = dsl::inv_collection .select(dsl::id) .order_by(dsl::time_started.desc()) - .limit(1) .first_async::(&*conn) .await + .optional() .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; - self.inventory_collection_read_all_or_nothing( - opctx, - collection_id, - limit, - ) - .await + let Some(collection_id) = collection_id else { + return Ok(None); + }; + + Ok(Some( + self.inventory_collection_read_all_or_nothing( + opctx, + collection_id, + limit, + ) + .await?, + )) } /// Attempt to read the given collection while limiting queries to `limit` @@ -1335,9 +1372,11 @@ mod test { use nexus_inventory::examples::Representative; use nexus_test_utils::db::test_setup_database; use nexus_test_utils::db::ALLOW_FULL_TABLE_SCAN_SQL; + use nexus_types::inventory::BaseboardId; use nexus_types::inventory::CabooseWhich; use nexus_types::inventory::Collection; use nexus_types::inventory::RotPageWhich; + use omicron_common::api::external::Error; use omicron_test_utils::dev; use std::num::NonZeroU32; use uuid::Uuid; @@ -1393,6 +1432,24 @@ mod test { } } + #[tokio::test] + async fn test_find_hw_baseboard_id_missing_returns_not_found() { + let logctx = dev::test_setup_log("inventory_insert"); + let mut db = test_setup_database(&logctx.log).await; + let (opctx, datastore) = datastore_test(&logctx, &db).await; + let baseboard_id = BaseboardId { + serial_number: "some-serial".into(), + part_number: "some-part".into(), + }; + let err = datastore + .find_hw_baseboard_id(&opctx, baseboard_id) + .await + .unwrap_err(); + assert!(matches!(err, Error::ObjectNotFound { .. })); + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } + /// Tests inserting several collections, reading them back, and making sure /// they look the same. 
#[tokio::test] diff --git a/nexus/db-queries/src/db/datastore/rack.rs b/nexus/db-queries/src/db/datastore/rack.rs index 2cc5880470..e11377f11a 100644 --- a/nexus/db-queries/src/db/datastore/rack.rs +++ b/nexus/db-queries/src/db/datastore/rack.rs @@ -41,6 +41,7 @@ use nexus_db_model::InitialDnsGroup; use nexus_db_model::PasswordHashString; use nexus_db_model::SiloUser; use nexus_db_model::SiloUserPasswordHash; +use nexus_db_model::SledUnderlaySubnetAllocation; use nexus_types::external_api::params as external_params; use nexus_types::external_api::shared; use nexus_types::external_api::shared::IdentityType; @@ -55,6 +56,7 @@ use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupType; use omicron_common::api::external::ResourceType; use omicron_common::api::external::UpdateResult; +use omicron_common::bail_unless; use std::net::IpAddr; use uuid::Uuid; @@ -214,6 +216,126 @@ impl DataStore { Ok(()) } + // Return the subnet for the rack + pub async fn rack_subnet( + &self, + opctx: &OpContext, + rack_id: Uuid, + ) -> Result { + opctx.authorize(authz::Action::Read, &authz::FLEET).await?; + let conn = self.pool_connection_authorized(opctx).await?; + use db::schema::rack::dsl; + // It's safe to unwrap the returned `rack_subnet` because + // we filter on `rack_subnet.is_not_null()` + let subnet = dsl::rack + .filter(dsl::id.eq(rack_id)) + .filter(dsl::rack_subnet.is_not_null()) + .select(dsl::rack_subnet) + .first_async::>(&*conn) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + match subnet { + Some(subnet) => Ok(subnet), + None => Err(Error::internal_error( + "DB Error(bug): returned a null subnet for {rack_id}", + )), + } + } + + /// Allocate a rack subnet octet to a given sled + /// + /// 1. Find the existing allocations + /// 2. Calculate the new allocation + /// 3. Save the new allocation, if there isn't one for the given + /// `hw_baseboard_id` + /// 4. Return the new allocation + /// + // TODO: This could all actually be done in SQL using a `next_item` query. + // See https://github.com/oxidecomputer/omicron/issues/4544 + pub async fn allocate_sled_underlay_subnet_octets( + &self, + opctx: &OpContext, + rack_id: Uuid, + hw_baseboard_id: Uuid, + ) -> Result { + // Fetch all the existing allocations via self.rack_id + let allocations = self.rack_subnet_allocations(opctx, rack_id).await?; + + // Calculate the allocation for the new sled by choosing the minimum + // octet. The returned allocations are ordered by octet, so we will know + // when we have a free one. However, if we already have an allocation + // for the given sled then reuse that one. + const MIN_SUBNET_OCTET: i16 = 33; + let mut new_allocation = SledUnderlaySubnetAllocation { + rack_id, + sled_id: Uuid::new_v4(), + subnet_octet: MIN_SUBNET_OCTET, + hw_baseboard_id, + }; + let mut allocation_already_exists = false; + for allocation in allocations { + if allocation.hw_baseboard_id == new_allocation.hw_baseboard_id { + // We already have an allocation for this sled. + new_allocation = allocation; + allocation_already_exists = true; + break; + } + if allocation.subnet_octet == new_allocation.subnet_octet { + bail_unless!( + new_allocation.subnet_octet < 255, + "Too many sled subnets allocated" + ); + new_allocation.subnet_octet += 1; + } + } + + // Write the new allocation row to CRDB. The UNIQUE constraint + // on `subnet_octet` will prevent dueling administrators reusing + // allocations when sleds are being added. 
We will need another + // mechanism ala generation numbers when we must interleave additions + // and removals of sleds. + if !allocation_already_exists { + self.sled_subnet_allocation_insert(opctx, &new_allocation).await?; + } + + Ok(new_allocation) + } + + /// Return all current underlay allocations for the rack. + /// + /// Order allocations by `subnet_octet` + pub async fn rack_subnet_allocations( + &self, + opctx: &OpContext, + rack_id: Uuid, + ) -> Result, Error> { + opctx.authorize(authz::Action::Read, &authz::FLEET).await?; + use db::schema::sled_underlay_subnet_allocation::dsl as subnet_dsl; + subnet_dsl::sled_underlay_subnet_allocation + .filter(subnet_dsl::rack_id.eq(rack_id)) + .select(SledUnderlaySubnetAllocation::as_select()) + .order_by(subnet_dsl::subnet_octet.asc()) + .load_async(&*self.pool_connection_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + + /// Store a new sled subnet allocation in the database + pub async fn sled_subnet_allocation_insert( + &self, + opctx: &OpContext, + allocation: &SledUnderlaySubnetAllocation, + ) -> Result<(), Error> { + opctx.authorize(authz::Action::Modify, &authz::FLEET).await?; + use db::schema::sled_underlay_subnet_allocation::dsl; + diesel::insert_into(dsl::sled_underlay_subnet_allocation) + .values(allocation.clone()) + .execute_async(&*self.pool_connection_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + Ok(()) + } + // The following methods which return a `TxnError` take a `conn` parameter // which comes from the transaction created in `rack_set_initialized`. @@ -1518,4 +1640,136 @@ mod test { db.cleanup().await.unwrap(); logctx.cleanup_successful(); } + + #[tokio::test] + async fn rack_sled_subnet_allocations() { + let logctx = dev::test_setup_log("rack_sled_subnet_allocations"); + let mut db = test_setup_database(&logctx.log).await; + let (opctx, datastore) = datastore_test(&logctx, &db).await; + + let rack_id = Uuid::new_v4(); + + // Ensure we get an empty list when there are no allocations + let allocations = + datastore.rack_subnet_allocations(&opctx, rack_id).await.unwrap(); + assert!(allocations.is_empty()); + + // Add 5 allocations + for i in 0..5i16 { + let allocation = SledUnderlaySubnetAllocation { + rack_id, + sled_id: Uuid::new_v4(), + subnet_octet: 33 + i, + hw_baseboard_id: Uuid::new_v4(), + }; + datastore + .sled_subnet_allocation_insert(&opctx, &allocation) + .await + .unwrap(); + } + + // List all 5 allocations + let allocations = + datastore.rack_subnet_allocations(&opctx, rack_id).await.unwrap(); + + assert_eq!(5, allocations.len()); + + // Try to add another allocation for the same octet, but with a distinct + // sled_id. Ensure we get an error due to a unique constraint. 
+ let mut should_fail_allocation = SledUnderlaySubnetAllocation { + rack_id, + sled_id: Uuid::new_v4(), + subnet_octet: 37, + hw_baseboard_id: Uuid::new_v4(), + }; + let _err = datastore + .sled_subnet_allocation_insert(&opctx, &should_fail_allocation) + .await + .unwrap_err(); + + // Adding an allocation for the same {rack_id, sled_id} pair fails + // the second time, even with a distinct subnet_epoch + let mut allocation = should_fail_allocation.clone(); + allocation.subnet_octet = 38; + datastore + .sled_subnet_allocation_insert(&opctx, &allocation) + .await + .unwrap(); + + should_fail_allocation.subnet_octet = 39; + should_fail_allocation.hw_baseboard_id = Uuid::new_v4(); + let _err = datastore + .sled_subnet_allocation_insert(&opctx, &should_fail_allocation) + .await + .unwrap_err(); + + // Allocations outside our expected range fail + let mut should_fail_allocation = SledUnderlaySubnetAllocation { + rack_id, + sled_id: Uuid::new_v4(), + subnet_octet: 32, + hw_baseboard_id: Uuid::new_v4(), + }; + let _err = datastore + .sled_subnet_allocation_insert(&opctx, &should_fail_allocation) + .await + .unwrap_err(); + should_fail_allocation.subnet_octet = 256; + let _err = datastore + .sled_subnet_allocation_insert(&opctx, &should_fail_allocation) + .await + .unwrap_err(); + + // We should have 6 allocations + let allocations = + datastore.rack_subnet_allocations(&opctx, rack_id).await.unwrap(); + + assert_eq!(6, allocations.len()); + assert_eq!( + vec![33, 34, 35, 36, 37, 38], + allocations.iter().map(|a| a.subnet_octet).collect::>() + ); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn allocate_sled_underlay_subnet_octets() { + let logctx = dev::test_setup_log("rack_sled_subnet_allocations"); + let mut db = test_setup_database(&logctx.log).await; + let (opctx, datastore) = datastore_test(&logctx, &db).await; + + let rack_id = Uuid::new_v4(); + + let mut allocated_octets = vec![]; + for _ in 0..5 { + allocated_octets.push( + datastore + .allocate_sled_underlay_subnet_octets( + &opctx, + rack_id, + Uuid::new_v4(), + ) + .await + .unwrap() + .subnet_octet, + ); + } + + let expected = vec![33, 34, 35, 36, 37]; + assert_eq!(expected, allocated_octets); + + // We should have 5 allocations in the DB, sorted appropriately + let allocations = + datastore.rack_subnet_allocations(&opctx, rack_id).await.unwrap(); + assert_eq!(5, allocations.len()); + assert_eq!( + expected, + allocations.iter().map(|a| a.subnet_octet).collect::>() + ); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } } diff --git a/nexus/src/app/rack.rs b/nexus/src/app/rack.rs index 1c2e49e260..984ece2d0c 100644 --- a/nexus/src/app/rack.rs +++ b/nexus/src/app/rack.rs @@ -10,7 +10,7 @@ use crate::external_api::params::CertificateCreate; use crate::external_api::shared::ServiceUsingCertificate; use crate::internal_api::params::RackInitializationRequest; use gateway_client::types::SpType; -use ipnetwork::IpNetwork; +use ipnetwork::{IpNetwork, Ipv6Network}; use nexus_db_model::DnsGroup; use nexus_db_model::InitialDnsGroup; use nexus_db_model::{SwitchLinkFec, SwitchLinkSpeed}; @@ -29,13 +29,14 @@ use nexus_types::external_api::params::{ AddressLotCreate, LoopbackAddressCreate, Route, SiloCreate, SwitchPortSettingsCreate, }; +use nexus_types::external_api::shared::Baseboard; use nexus_types::external_api::shared::FleetRole; use nexus_types::external_api::shared::SiloIdentityMode; use nexus_types::external_api::shared::SiloRole; +use 
nexus_types::external_api::shared::UninitializedSled; use nexus_types::external_api::views; -use nexus_types::external_api::views::Baseboard; -use nexus_types::external_api::views::UninitializedSled; use nexus_types::internal_api::params::DnsRecord; +use omicron_common::address::{get_64_subnet, Ipv6Subnet, RACK_PREFIX}; use omicron_common::api::external::AddressLotKind; use omicron_common::api::external::DataPageParams; use omicron_common::api::external::Error; @@ -45,7 +46,10 @@ use omicron_common::api::external::LookupResult; use omicron_common::api::external::Name; use omicron_common::api::external::NameOrId; use omicron_common::api::internal::shared::ExternalPortDiscovery; +use sled_agent_client::types::AddSledRequest; use sled_agent_client::types::EarlyNetworkConfigBody; +use sled_agent_client::types::StartSledAgentRequest; +use sled_agent_client::types::StartSledAgentRequestBody; use sled_agent_client::types::{ BgpConfig, BgpPeerConfig, EarlyNetworkConfig, PortConfigV1, RackNetworkConfigV1, RouteConfig as SledRouteConfig, @@ -584,20 +588,7 @@ impl super::Nexus { if rack.rack_subnet.is_some() { return Ok(()); } - let addr = self - .sled_list(opctx, &DataPageParams::max_page()) - .await? - .get(0) - .ok_or(Error::InternalError { - internal_message: "no sleds at time of bootstore sync".into(), - })? - .address(); - - let sa = sled_agent_client::Client::new( - &format!("http://{}", addr), - self.log.clone(), - ); - + let sa = self.get_any_sled_agent(opctx).await?; let result = sa .read_network_bootstore_config_cache() .await @@ -619,7 +610,7 @@ impl super::Nexus { opctx: &OpContext, ) -> Result { let rack = self.rack_lookup(opctx, &self.rack_id).await?; - let subnet = rack.subnet()?; + let subnet = rack_subnet(rack.rack_subnet)?; let db_ports = self.active_port_settings(opctx).await?; let mut ports = Vec::new(); @@ -726,18 +717,28 @@ impl super::Nexus { &self, opctx: &OpContext, ) -> ListResultVec { + debug!(self.log, "Getting latest collection"); // Grab the SPs from the last collection let limit = NonZeroU32::new(50).unwrap(); let collection = self .db_datastore .inventory_get_latest_collection(opctx, limit) .await?; + + // There can't be any uninitialized sleds we know about + // if there is no inventory. 
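        // (An uninitialized sled is one whose baseboard appears in the
        // inventory collection but has no corresponding row in the `sled`
        // table yet.)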
+ let Some(collection) = collection else { + return Ok(vec![]); + }; + let pagparams = DataPageParams { marker: None, direction: dropshot::PaginationOrder::Descending, // TODO: This limit is only suitable for a single sled cluster limit: NonZeroU32::new(32).unwrap(), }; + + debug!(self.log, "Listing sleds"); let sleds = self.db_datastore.sled_list(opctx, &pagparams).await?; let mut uninitialized_sleds: Vec = collection @@ -767,4 +768,106 @@ impl super::Nexus { uninitialized_sleds.retain(|s| !sled_baseboards.contains(&s.baseboard)); Ok(uninitialized_sleds) } + + /// Add a sled to an intialized rack + pub(crate) async fn add_sled_to_initialized_rack( + &self, + opctx: &OpContext, + sled: UninitializedSled, + ) -> Result<(), Error> { + let baseboard_id = sled.baseboard.clone().into(); + let hw_baseboard_id = + self.db_datastore.find_hw_baseboard_id(opctx, baseboard_id).await?; + + let subnet = self.db_datastore.rack_subnet(opctx, sled.rack_id).await?; + let rack_subnet = + Ipv6Subnet::::from(rack_subnet(Some(subnet))?); + + let allocation = self + .db_datastore + .allocate_sled_underlay_subnet_octets( + opctx, + sled.rack_id, + hw_baseboard_id, + ) + .await?; + + // Convert the baseboard as necessary + let baseboard = sled_agent_client::types::Baseboard::Gimlet { + identifier: sled.baseboard.serial.clone(), + model: sled.baseboard.part.clone(), + revision: sled.baseboard.revision, + }; + + // Make the call to sled-agent + let req = AddSledRequest { + sled_id: baseboard, + start_request: StartSledAgentRequest { + generation: 0, + schema_version: 1, + body: StartSledAgentRequestBody { + id: allocation.sled_id, + rack_id: allocation.rack_id, + use_trust_quorum: true, + is_lrtq_learner: true, + subnet: sled_agent_client::types::Ipv6Subnet { + net: get_64_subnet( + rack_subnet, + allocation.subnet_octet.try_into().unwrap(), + ) + .net() + .into(), + }, + }, + }, + }; + let sa = self.get_any_sled_agent(opctx).await?; + sa.add_sled_to_initialized_rack(&req).await.map_err(|e| { + Error::InternalError { + internal_message: format!( + "failed to add sled with baseboard {:?} to rack {}: {e}", + sled.baseboard, allocation.rack_id + ), + } + })?; + + Ok(()) + } + + async fn get_any_sled_agent( + &self, + opctx: &OpContext, + ) -> Result { + let addr = self + .sled_list(opctx, &DataPageParams::max_page()) + .await? + .get(0) + .ok_or(Error::InternalError { + internal_message: "no sled agents available".into(), + })? + .address(); + + Ok(sled_agent_client::Client::new( + &format!("http://{}", addr), + self.log.clone(), + )) + } +} + +pub fn rack_subnet( + rack_subnet: Option, +) -> Result { + match rack_subnet { + Some(IpNetwork::V6(subnet)) => Ok(subnet), + Some(IpNetwork::V4(_)) => { + return Err(Error::InternalError { + internal_message: "rack subnet not IPv6".into(), + }) + } + None => { + return Err(Error::InternalError { + internal_message: "rack subnet not set".into(), + }) + } + } } diff --git a/nexus/src/app/sled.rs b/nexus/src/app/sled.rs index 8189c0a93d..c2931f1441 100644 --- a/nexus/src/app/sled.rs +++ b/nexus/src/app/sled.rs @@ -38,6 +38,9 @@ impl super::Nexus { // TODO-robustness we should have a limit on how many sled agents there can // be (for graceful degradation at large scale). + // + // TODO-multisled: This should not use the rack_id for the given nexus, + // unless the DNS lookups at sled-agent are only for rack-local nexuses. 
pub(crate) async fn upsert_sled( &self, opctx: &OpContext, diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index 428632bcf5..78f675c28a 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -6,10 +6,11 @@ use super::{ console_api, device_auth, params, + shared::UninitializedSled, views::{ self, Certificate, Group, IdentityProvider, Image, IpPool, IpPoolRange, - PhysicalDisk, Project, Rack, Role, Silo, Sled, Snapshot, SshKey, - UninitializedSled, User, UserBuiltin, Vpc, VpcRouter, VpcSubnet, + PhysicalDisk, Project, Rack, Role, Silo, Sled, Snapshot, SshKey, User, + UserBuiltin, Vpc, VpcRouter, VpcSubnet, }, }; use crate::external_api::shared; @@ -223,6 +224,7 @@ pub(crate) fn external_api() -> NexusApiDescription { api.register(switch_list)?; api.register(switch_view)?; api.register(uninitialized_sled_list)?; + api.register(add_sled_to_initialized_rack)?; api.register(user_builtin_list)?; api.register(user_builtin_view)?; @@ -4402,6 +4404,31 @@ async fn uninitialized_sled_list( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } +/// Add a sled to an initialized rack +// +// TODO: In the future this should really be a PUT request, once we resolve +// https://github.com/oxidecomputer/omicron/issues/4494. It should also +// explicitly be tied to a rack via a `rack_id` path param. For now we assume +// we are only operating on single rack systems. +#[endpoint { + method = POST, + path = "/v1/system/hardware/sleds/", + tags = ["system/hardware"] +}] +async fn add_sled_to_initialized_rack( + rqctx: RequestContext>, + sled: TypedBody, +) -> Result { + let apictx = rqctx.context(); + let nexus = &apictx.nexus; + let handler = async { + let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + nexus.add_sled_to_initialized_rack(&opctx, sled.into_inner()).await?; + Ok(HttpResponseUpdatedNoContent()) + }; + apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await +} + // Sleds /// List sleds diff --git a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs index 64790c49c2..5dfdcc151d 100644 --- a/nexus/tests/integration_tests/endpoints.rs +++ b/nexus/tests/integration_tests/endpoints.rs @@ -21,8 +21,10 @@ use nexus_test_utils::SLED_AGENT_UUID; use nexus_test_utils::SWITCH_UUID; use nexus_types::external_api::params; use nexus_types::external_api::shared; +use nexus_types::external_api::shared::Baseboard; use nexus_types::external_api::shared::IpRange; use nexus_types::external_api::shared::Ipv4Range; +use nexus_types::external_api::shared::UninitializedSled; use omicron_common::api::external::AddressLotKind; use omicron_common::api::external::ByteCount; use omicron_common::api::external::IdentityMetadataCreateParams; @@ -39,6 +41,7 @@ use omicron_test_utils::certificates::CertificateChain; use std::net::IpAddr; use std::net::Ipv4Addr; use std::str::FromStr; +use uuid::Uuid; lazy_static! { pub static ref HARDWARE_RACK_URL: String = @@ -57,6 +60,16 @@ lazy_static! 
{ pub static ref SLED_INSTANCES_URL: String = format!("/v1/system/hardware/sleds/{}/instances", SLED_AGENT_UUID); + pub static ref DEMO_UNINITIALIZED_SLED: UninitializedSled = UninitializedSled { + baseboard: Baseboard { + serial: "demo-serial".to_string(), + part: "demo-part".to_string(), + revision: 6 + }, + rack_id: Uuid::new_v4(), + cubby: 1 + }; + // Global policy pub static ref SYSTEM_POLICY_URL: &'static str = "/v1/system/policy"; @@ -1577,7 +1590,9 @@ lazy_static! { url: "/v1/system/hardware/sleds", visibility: Visibility::Public, unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![AllowedMethod::Get], + allowed_methods: vec![AllowedMethod::Get, AllowedMethod::Post( + serde_json::to_value(&*DEMO_UNINITIALIZED_SLED).unwrap() + )], }, VerifyEndpoint { diff --git a/nexus/tests/integration_tests/rack.rs b/nexus/tests/integration_tests/rack.rs index 2c191f27ae..9f77223871 100644 --- a/nexus/tests/integration_tests/rack.rs +++ b/nexus/tests/integration_tests/rack.rs @@ -10,8 +10,14 @@ use nexus_test_utils::http_testing::RequestBuilder; use nexus_test_utils::TEST_SUITE_PASSWORD; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params; +use nexus_types::external_api::shared::UninitializedSled; use nexus_types::external_api::views::Rack; +use nexus_types::internal_api::params::Baseboard; +use nexus_types::internal_api::params::SledAgentStartupInfo; +use nexus_types::internal_api::params::SledRole; +use omicron_common::api::external::ByteCount; use omicron_nexus::TestInterfaces; +use uuid::Uuid; type ControlPlaneTestContext = nexus_test_utils::ControlPlaneTestContext; @@ -77,3 +83,63 @@ async fn test_rack_initialization(cptestctx: &ControlPlaneTestContext) { ) .await; } + +#[nexus_test] +async fn test_uninitialized_sled_list(cptestctx: &ControlPlaneTestContext) { + let internal_client = &cptestctx.internal_client; + let external_client = &cptestctx.external_client; + let list_url = "/v1/system/hardware/uninitialized-sleds"; + let mut uninitialized_sleds = + NexusRequest::object_get(external_client, &list_url) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect("failed to get uninitialized sleds") + .parsed_body::>() + .unwrap(); + debug!(cptestctx.logctx.log, "{:#?}", uninitialized_sleds); + + // There are currently two fake sim gimlets created in the latest inventory + // collection as part of test setup. + assert_eq!(2, uninitialized_sleds.len()); + + // Insert one of these fake sleds into the `sled` table. 
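    // (This mimics the startup notification a real sled agent sends to Nexus
    // once it has been initialized.)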
+ // Just pick some random fields other than `baseboard` + let baseboard = uninitialized_sleds.pop().unwrap().baseboard; + let sled_uuid = Uuid::new_v4(); + let sa = SledAgentStartupInfo { + sa_address: "[fd00:1122:3344:01::1]:8080".parse().unwrap(), + role: SledRole::Gimlet, + baseboard: Baseboard { + serial_number: baseboard.serial, + part_number: baseboard.part, + revision: baseboard.revision, + }, + usable_hardware_threads: 32, + usable_physical_ram: ByteCount::from_gibibytes_u32(100), + reservoir_size: ByteCount::from_mebibytes_u32(100), + }; + internal_client + .make_request( + Method::POST, + format!("/sled-agents/{sled_uuid}").as_str(), + Some(&sa), + StatusCode::NO_CONTENT, + ) + .await + .unwrap(); + + // Ensure there's only one unintialized sled remaining, and it's not + // the one that was just added into the `sled` table + let uninitialized_sleds_2 = + NexusRequest::object_get(external_client, &list_url) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect("failed to get uninitialized sleds") + .parsed_body::>() + .unwrap(); + debug!(cptestctx.logctx.log, "{:#?}", uninitialized_sleds); + assert_eq!(1, uninitialized_sleds_2.len()); + assert_eq!(uninitialized_sleds, uninitialized_sleds_2); +} diff --git a/nexus/tests/output/nexus_tags.txt b/nexus/tests/output/nexus_tags.txt index 7f0c30c471..dd387ab979 100644 --- a/nexus/tests/output/nexus_tags.txt +++ b/nexus/tests/output/nexus_tags.txt @@ -110,6 +110,7 @@ snapshot_view GET /v1/snapshots/{snapshot} API operations found with tag "system/hardware" OPERATION ID METHOD URL PATH +add_sled_to_initialized_rack POST /v1/system/hardware/sleds networking_switch_port_apply_settings POST /v1/system/hardware/switch-port/{port}/settings networking_switch_port_clear_settings DELETE /v1/system/hardware/switch-port/{port}/settings networking_switch_port_list GET /v1/system/hardware/switch-port diff --git a/nexus/types/src/external_api/shared.rs b/nexus/types/src/external_api/shared.rs index 48fbb9c10d..a4c5ae1e62 100644 --- a/nexus/types/src/external_api/shared.rs +++ b/nexus/types/src/external_api/shared.rs @@ -245,6 +245,42 @@ pub enum UpdateableComponentType { HostOmicron, } +/// Properties that uniquely identify an Oxide hardware component +#[derive( + Clone, + Debug, + Serialize, + Deserialize, + JsonSchema, + PartialOrd, + Ord, + PartialEq, + Eq, +)] +pub struct Baseboard { + pub serial: String, + pub part: String, + pub revision: i64, +} + +/// A sled that has not been added to an initialized rack yet +#[derive( + Clone, + Debug, + Serialize, + Deserialize, + JsonSchema, + PartialOrd, + Ord, + PartialEq, + Eq, +)] +pub struct UninitializedSled { + pub baseboard: Baseboard, + pub rack_id: Uuid, + pub cubby: u16, +} + #[cfg(test)] mod test { use super::Policy; diff --git a/nexus/types/src/external_api/views.rs b/nexus/types/src/external_api/views.rs index b34fc7a542..9dfe36d63b 100644 --- a/nexus/types/src/external_api/views.rs +++ b/nexus/types/src/external_api/views.rs @@ -5,7 +5,7 @@ //! Views are response bodies, most of which are public lenses onto DB models. 
use crate::external_api::shared::{ - self, IpKind, IpRange, ServiceUsingCertificate, + self, Baseboard, IpKind, IpRange, ServiceUsingCertificate, }; use crate::identity::AssetIdentityMetadata; use api_identity::ObjectIdentity; @@ -274,44 +274,8 @@ pub struct Rack { pub identity: AssetIdentityMetadata, } -/// View of a sled that has not been added to an initialized rack yet -#[derive( - Clone, - Debug, - Serialize, - Deserialize, - JsonSchema, - PartialOrd, - Ord, - PartialEq, - Eq, -)] -pub struct UninitializedSled { - pub baseboard: Baseboard, - pub rack_id: Uuid, - pub cubby: u16, -} - // FRUs -/// Properties that uniquely identify an Oxide hardware component -#[derive( - Clone, - Debug, - Serialize, - Deserialize, - JsonSchema, - PartialOrd, - Ord, - PartialEq, - Eq, -)] -pub struct Baseboard { - pub serial: String, - pub part: String, - pub revision: i64, -} - // SLEDS /// An operator's view of a Sled. diff --git a/nexus/types/src/internal_api/params.rs b/nexus/types/src/internal_api/params.rs index c0991ebb17..bc25e8d4bd 100644 --- a/nexus/types/src/internal_api/params.rs +++ b/nexus/types/src/internal_api/params.rs @@ -25,7 +25,7 @@ use uuid::Uuid; /// /// Note that this may change if the sled is physically moved /// within the rack. -#[derive(Serialize, Deserialize, JsonSchema)] +#[derive(Serialize, Deserialize, JsonSchema, Debug)] #[serde(rename_all = "snake_case")] pub enum SledRole { /// The sled is a general compute sled. @@ -45,7 +45,7 @@ pub struct Baseboard { } /// Sent by a sled agent on startup to Nexus to request further instruction -#[derive(Serialize, Deserialize, JsonSchema)] +#[derive(Serialize, Deserialize, Debug, JsonSchema)] pub struct SledAgentStartupInfo { /// The address of the sled agent's API endpoint pub sa_address: SocketAddrV6, diff --git a/nexus/types/src/inventory.rs b/nexus/types/src/inventory.rs index 19c323d894..9401727162 100644 --- a/nexus/types/src/inventory.rs +++ b/nexus/types/src/inventory.rs @@ -20,6 +20,8 @@ use std::sync::Arc; use strum::EnumIter; use uuid::Uuid; +use crate::external_api::shared::Baseboard; + /// Results of collecting hardware/software inventory from various Omicron /// components /// @@ -131,6 +133,12 @@ pub struct BaseboardId { pub serial_number: String, } +impl From for BaseboardId { + fn from(value: Baseboard) -> Self { + BaseboardId { part_number: value.part, serial_number: value.serial } + } +} + /// Caboose contents found during a collection /// /// These are normalized in the database. 
Each distinct `Caboose` is assigned a diff --git a/openapi/nexus.json b/openapi/nexus.json index 0d19e81d9a..704aa393db 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -3610,6 +3610,34 @@ "x-dropshot-pagination": { "required": [] } + }, + "post": { + "tags": [ + "system/hardware" + ], + "summary": "Add a sled to an initialized rack", + "operationId": "add_sled_to_initialized_rack", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UninitializedSled" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } } }, "/v1/system/hardware/sleds/{sled_id}": { @@ -13971,7 +13999,7 @@ ] }, "UninitializedSled": { - "description": "View of a sled that has not been added to an initialized rack yet", + "description": "A sled that has not been added to an initialized rack yet", "type": "object", "properties": { "baseboard": { diff --git a/schema/crdb/14.0.0/up1.sql b/schema/crdb/14.0.0/up1.sql new file mode 100644 index 0000000000..3bff831ceb --- /dev/null +++ b/schema/crdb/14.0.0/up1.sql @@ -0,0 +1,37 @@ +-- Table of all sled subnets allocated for sleds added to an already initialized +-- rack. The sleds in this table and their allocated subnets are created before +-- a sled is added to the `sled` table. Addition to the `sled` table occurs +-- after the sled is initialized and notifies Nexus about itself. +-- +-- For simplicity and space savings, this table doesn't actually contain the +-- full subnets for a given sled, but only the octet that extends a /56 rack +-- subnet to a /64 sled subnet. The rack subnet is maintained in the `rack` +-- table. +-- +-- This table does not include subnet octets allocated during RSS and therefore +-- all of the octets start at 33. This makes the data in this table purely additive +-- post-RSS, which also implies that we cannot re-use subnet octets if an original +-- sled that was part of RSS was removed from the cluster. +CREATE TABLE IF NOT EXISTS omicron.public.sled_underlay_subnet_allocation ( + -- The physical identity of the sled + -- (foreign key into `hw_baseboard_id` table) + hw_baseboard_id UUID PRIMARY KEY, + + -- The rack to which a sled is being added + -- (foreign key into `rack` table) + -- + -- We require this because the sled is not yet part of the sled table when + -- we first allocate a subnet for it. + rack_id UUID NOT NULL, + + -- The sled to which a subnet is being allocated + -- + -- Eventually will be a foreign key into the `sled` table when the sled notifies nexus + -- about itself after initialization. + sled_id UUID NOT NULL, + + -- The octet that extends a /56 rack subnet to a /64 sled subnet + -- + -- Always between 33 and 255 inclusive + subnet_octet INT2 NOT NULL UNIQUE CHECK (subnet_octet BETWEEN 33 AND 255) +); diff --git a/schema/crdb/14.0.0/up2.sql b/schema/crdb/14.0.0/up2.sql new file mode 100644 index 0000000000..c3e18fa166 --- /dev/null +++ b/schema/crdb/14.0.0/up2.sql @@ -0,0 +1,5 @@ +-- Add an index which allows pagination by {rack_id, sled_id} pairs. 
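-- Because the index is UNIQUE, it also guarantees at most one subnet
-- allocation per {rack_id, sled_id} pair.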
+CREATE UNIQUE INDEX IF NOT EXISTS lookup_subnet_allocation_by_rack_and_sled ON omicron.public.sled_underlay_subnet_allocation ( + rack_id, + sled_id +); diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index fc3bc37fd7..728b084982 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -158,6 +158,51 @@ CREATE UNIQUE INDEX IF NOT EXISTS lookup_resource_by_sled ON omicron.public.sled id ); + +-- Table of all sled subnets allocated for sleds added to an already initialized +-- rack. The sleds in this table and their allocated subnets are created before +-- a sled is added to the `sled` table. Addition to the `sled` table occurs +-- after the sled is initialized and notifies Nexus about itself. +-- +-- For simplicity and space savings, this table doesn't actually contain the +-- full subnets for a given sled, but only the octet that extends a /56 rack +-- subnet to a /64 sled subnet. The rack subnet is maintained in the `rack` +-- table. +-- +-- This table does not include subnet octets allocated during RSS and therefore +-- all of the octets start at 33. This makes the data in this table purely additive +-- post-RSS, which also implies that we cannot re-use subnet octets if an original +-- sled that was part of RSS was removed from the cluster. +CREATE TABLE IF NOT EXISTS omicron.public.sled_underlay_subnet_allocation ( + -- The physical identity of the sled + -- (foreign key into `hw_baseboard_id` table) + hw_baseboard_id UUID PRIMARY KEY, + + -- The rack to which a sled is being added + -- (foreign key into `rack` table) + -- + -- We require this because the sled is not yet part of the sled table when + -- we first allocate a subnet for it. + rack_id UUID NOT NULL, + + -- The sled to which a subnet is being allocated + -- + -- Eventually will be a foreign key into the `sled` table when the sled notifies nexus + -- about itself after initialization. + sled_id UUID NOT NULL, + + -- The octet that extends a /56 rack subnet to a /64 sled subnet + -- + -- Always between 33 and 255 inclusive + subnet_octet INT2 NOT NULL UNIQUE CHECK (subnet_octet BETWEEN 33 AND 255) +); + +-- Add an index which allows pagination by {rack_id, sled_id} pairs. 
+CREATE UNIQUE INDEX IF NOT EXISTS lookup_subnet_allocation_by_rack_and_sled ON omicron.public.sled_underlay_subnet_allocation ( + rack_id, + sled_id +); + /* * Switches */ @@ -2952,7 +2997,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - ( TRUE, NOW(), NOW(), '13.0.0', NULL) + ( TRUE, NOW(), NOW(), '14.0.0', NULL) ON CONFLICT DO NOTHING; COMMIT; From f03c7d5b460f149f626dd82bcf72cdc47d5a4552 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Tue, 28 Nov 2023 01:39:26 +0000 Subject: [PATCH 43/56] Update Rust to v1.74.0 (#4543) Co-authored-by: Rain --- bootstore/src/schemes/v0/request_manager.rs | 2 +- common/src/api/external/mod.rs | 2 +- illumos-utils/src/running_zone.rs | 5 +++-- nexus/db-queries/src/db/queries/volume.rs | 7 ++++--- nexus/db-queries/src/db/saga_recovery.rs | 3 +-- oximeter/instruments/src/kstat/link.rs | 4 ++-- rust-toolchain.toml | 2 +- wicket/src/ui/widgets/popup.rs | 2 +- 8 files changed, 14 insertions(+), 13 deletions(-) diff --git a/bootstore/src/schemes/v0/request_manager.rs b/bootstore/src/schemes/v0/request_manager.rs index 780213430c..90466fdc07 100644 --- a/bootstore/src/schemes/v0/request_manager.rs +++ b/bootstore/src/schemes/v0/request_manager.rs @@ -109,7 +109,7 @@ impl RequestManager { let expiry = now + self.config.rack_init_timeout; let mut acks = InitAcks::default(); acks.expected = - packages.keys().cloned().filter(|id| id != &self.id).collect(); + packages.keys().filter(|&id| id != &self.id).cloned().collect(); let req = TrackableRequest::InitRack { rack_uuid, packages: packages.clone(), diff --git a/common/src/api/external/mod.rs b/common/src/api/external/mod.rs index adf661516a..3e58d1d4d4 100644 --- a/common/src/api/external/mod.rs +++ b/common/src/api/external/mod.rs @@ -409,7 +409,7 @@ impl SemverVersion { /// This is the official ECMAScript-compatible validation regex for /// semver: /// - const VALIDATION_REGEX: &str = r"^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$"; + const VALIDATION_REGEX: &'static str = r"^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$"; } impl JsonSchema for SemverVersion { diff --git a/illumos-utils/src/running_zone.rs b/illumos-utils/src/running_zone.rs index bdf7ed0cbf..ba8cd009e8 100644 --- a/illumos-utils/src/running_zone.rs +++ b/illumos-utils/src/running_zone.rs @@ -214,7 +214,7 @@ mod zenter { // the contracts used for this come from templates that define becoming // empty as a critical event. pub fn contract_reaper(log: Logger) { - const EVENT_PATH: &[u8] = b"/system/contract/process/pbundle"; + const EVENT_PATH: &'static [u8] = b"/system/contract/process/pbundle"; const CT_PR_EV_EMPTY: u64 = 1; let cpath = CString::new(EVENT_PATH).unwrap(); @@ -327,7 +327,8 @@ mod zenter { } impl Template { - const TEMPLATE_PATH: &[u8] = b"/system/contract/process/template\0"; + const TEMPLATE_PATH: &'static [u8] = + b"/system/contract/process/template\0"; // Constants related to how the contract below is managed. 
See // `usr/src/uts/common/sys/contract/process.h` in the illumos sources diff --git a/nexus/db-queries/src/db/queries/volume.rs b/nexus/db-queries/src/db/queries/volume.rs index 31882dca89..2c1a9af19b 100644 --- a/nexus/db-queries/src/db/queries/volume.rs +++ b/nexus/db-queries/src/db/queries/volume.rs @@ -412,10 +412,11 @@ pub struct DecreaseCrucibleResourceCountAndSoftDeleteVolume { } impl DecreaseCrucibleResourceCountAndSoftDeleteVolume { - const UPDATED_REGION_SNAPSHOTS_TABLE: &str = "updated_region_snapshots"; - const REGION_SNAPSHOTS_TO_CLEAN_UP_TABLE: &str = + const UPDATED_REGION_SNAPSHOTS_TABLE: &'static str = + "updated_region_snapshots"; + const REGION_SNAPSHOTS_TO_CLEAN_UP_TABLE: &'static str = "region_snapshots_to_clean_up"; - const UPDATED_VOLUME_TABLE: &str = "updated_volume"; + const UPDATED_VOLUME_TABLE: &'static str = "updated_volume"; pub fn new(volume_id: Uuid, snapshot_addrs: Vec) -> Self { Self { diff --git a/nexus/db-queries/src/db/saga_recovery.rs b/nexus/db-queries/src/db/saga_recovery.rs index f3eada1645..802093b889 100644 --- a/nexus/db-queries/src/db/saga_recovery.rs +++ b/nexus/db-queries/src/db/saga_recovery.rs @@ -143,8 +143,7 @@ where .await }); - let mut completion_futures = vec![]; - completion_futures.reserve(recovery_futures.len()); + let mut completion_futures = Vec::with_capacity(recovery_futures.len()); // Loads and resumes all sagas in serial. for recovery_future in recovery_futures { let saga_complete_future = recovery_future.await?; diff --git a/oximeter/instruments/src/kstat/link.rs b/oximeter/instruments/src/kstat/link.rs index d22ac60378..03397c4108 100644 --- a/oximeter/instruments/src/kstat/link.rs +++ b/oximeter/instruments/src/kstat/link.rs @@ -268,8 +268,8 @@ mod tests { } impl TestEtherstub { - const PFEXEC: &str = "/usr/bin/pfexec"; - const DLADM: &str = "/usr/sbin/dladm"; + const PFEXEC: &'static str = "/usr/bin/pfexec"; + const DLADM: &'static str = "/usr/sbin/dladm"; fn new() -> Self { let name = format!( "kstest{}0", diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 804ff08cce..65ee8a9912 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -4,5 +4,5 @@ # # We choose a specific toolchain (rather than "stable") for repeatability. The # intent is to keep this up-to-date with recently-released stable Rust. -channel = "1.73.0" +channel = "1.74.0" profile = "default" diff --git a/wicket/src/ui/widgets/popup.rs b/wicket/src/ui/widgets/popup.rs index 19d7aa18b1..fb8c0f1f24 100644 --- a/wicket/src/ui/widgets/popup.rs +++ b/wicket/src/ui/widgets/popup.rs @@ -464,7 +464,7 @@ pub fn draw_buttons( let button_rects = Layout::default() .direction(Direction::Horizontal) .horizontal_margin(2) - .constraints(constraints.as_ref()) + .constraints(constraints) .split(rect); let block = Block::default() From b9d8b8f9c3e8f4b33cf11b546b96b5fe134906eb Mon Sep 17 00:00:00 2001 From: Rain Date: Mon, 27 Nov 2023 19:08:38 -0800 Subject: [PATCH 44/56] [update-engine] fix GroupDisplayStats to avoid integer underflow (#4561) This could happen if an empty `EventReport` is passed in -- in that case we'd transition to `Running` but return `NotStarted`. Fix this by not transitioning `self.kind` to `Running` if we're going to return `NotStarted`. This does bloat up the code a little but I think is clearer overall. Thanks to @jgallagher for all the help debugging this! Also clean up some related logic and add tests. Fixes #4507. 
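As a rough illustration of the invariant this change restores (a sketch with simplified stand-in types named `Tag` and `Stats`, not the real `SingleStateTag` or `GroupDisplayStats` definitions): each tracked report sits in exactly one counter bucket, and a (before, after) pair should only be applied when the stored state actually made that transition. Previously an empty report could flip the internal state to Running while the result still said NotStarted, so a later, non-empty report could end up decrementing a `running` bucket that had never been incremented.

// Simplified stand-ins for illustration only; these are not the real types.
#[derive(Clone, Copy, PartialEq)]
enum Tag {
    NotStarted,
    Running,
    Terminal,
}

#[derive(Default)]
struct Stats {
    not_started: usize,
    running: usize,
    terminal: usize,
}

impl Stats {
    // Move one report from its `before` bucket to its `after` bucket. A pair
    // that does not reflect a real state change must be a no-op; otherwise a
    // usize bucket can be driven below zero.
    fn apply(&mut self, before: Tag, after: Tag) {
        if before == after {
            return; // nothing moved, nothing to count
        }
        match before {
            Tag::NotStarted => self.not_started -= 1,
            Tag::Running => self.running -= 1,
            Tag::Terminal => self.terminal -= 1,
        }
        match after {
            Tag::NotStarted => self.not_started += 1,
            Tag::Running => self.running += 1,
            Tag::Terminal => self.terminal += 1,
        }
    }
}

fn main() {
    let mut stats = Stats { not_started: 1, ..Default::default() };
    // An empty report must not count as NotStarted -> Running...
    stats.apply(Tag::NotStarted, Tag::NotStarted);
    // ...so the first real transition still finds the report in
    // `not_started` instead of underflowing `running`.
    stats.apply(Tag::NotStarted, Tag::Running);
    assert_eq!((stats.not_started, stats.running), (0, 1));
}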
--- .../examples/update-engine-basic/display.rs | 1 + update-engine/src/buffer.rs | 228 +-------- update-engine/src/display/group_display.rs | 454 +++++++++++++++--- update-engine/src/test_utils.rs | 284 ++++++++++- wicket/src/cli/rack_update.rs | 1 + 5 files changed, 683 insertions(+), 285 deletions(-) diff --git a/update-engine/examples/update-engine-basic/display.rs b/update-engine/examples/update-engine-basic/display.rs index 122777211b..891bdce6d3 100644 --- a/update-engine/examples/update-engine-basic/display.rs +++ b/update-engine/examples/update-engine-basic/display.rs @@ -88,6 +88,7 @@ async fn display_group( slog::info!(log, "setting up display"); let mut display = GroupDisplay::new( + log, [ (GroupDisplayKey::Example, "example"), (GroupDisplayKey::Other, "other"), diff --git a/update-engine/src/buffer.rs b/update-engine/src/buffer.rs index 6e0e66d6d0..36a0626963 100644 --- a/update-engine/src/buffer.rs +++ b/update-engine/src/buffer.rs @@ -1627,6 +1627,16 @@ pub enum TerminalKind { Aborted, } +impl fmt::Display for TerminalKind { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Completed => write!(f, "completed"), + Self::Failed => write!(f, "failed"), + Self::Aborted => write!(f, "aborted"), + } + } +} + impl ExecutionStatus { /// Returns the terminal status and the total amount of time elapsed, or /// None if the execution has not reached a terminal state. @@ -1671,17 +1681,13 @@ mod tests { use std::collections::HashSet; use anyhow::{bail, ensure, Context}; - use futures::StreamExt; use indexmap::IndexSet; use omicron_test_utils::dev::test_setup_log; use serde::{de::IntoDeserializer, Deserialize}; - use tokio::sync::mpsc; - use tokio_stream::wrappers::ReceiverStream; use crate::{ - events::{ProgressCounter, ProgressUnits, StepProgress}, - test_utils::TestSpec, - StepContext, StepSuccess, UpdateEngine, + events::ProgressCounter, + test_utils::{generate_test_events, GenerateTestEventsKind, TestSpec}, }; use super::*; @@ -1689,108 +1695,11 @@ mod tests { #[tokio::test] async fn test_buffer() { let logctx = test_setup_log("test_buffer"); - // The channel is big enough to contain all possible events. 
- let (sender, receiver) = mpsc::channel(512); - let engine: UpdateEngine = - UpdateEngine::new(&logctx.log, sender); - - engine - .new_step("foo".to_owned(), 1, "Step 1", move |_cx| async move { - StepSuccess::new(()).into() - }) - .register(); - - engine - .new_step("bar".to_owned(), 2, "Step 2", move |cx| async move { - for _ in 0..20 { - cx.send_progress(StepProgress::with_current_and_total( - 5, - 20, - ProgressUnits::BYTES, - Default::default(), - )) - .await; - - cx.send_progress(StepProgress::reset( - Default::default(), - "reset step 2", - )) - .await; - - cx.send_progress(StepProgress::retry("retry step 2")).await; - } - StepSuccess::new(()).into() - }) - .register(); - - engine - .new_step( - "nested".to_owned(), - 3, - "Step 3 (this is nested)", - move |parent_cx| async move { - parent_cx - .with_nested_engine(|engine| { - define_nested_engine(&parent_cx, engine); - Ok(()) - }) - .await - .expect_err("this is expected to fail"); - - StepSuccess::new(()).into() - }, - ) - .register(); - - let log = logctx.log.clone(); - engine - .new_step( - "remote-nested".to_owned(), - 20, - "Step 4 (remote nested)", - move |cx| async move { - let (sender, mut receiver) = mpsc::channel(16); - let mut engine = UpdateEngine::new(&log, sender); - define_remote_nested_engine(&mut engine, 20); - - let mut buffer = EventBuffer::default(); - - let mut execute_fut = std::pin::pin!(engine.execute()); - let mut execute_done = false; - loop { - tokio::select! { - res = &mut execute_fut, if !execute_done => { - res.expect("remote nested engine completed successfully"); - execute_done = true; - } - Some(event) = receiver.recv() => { - // Generate complete reports to ensure deduping - // happens within StepContexts. - buffer.add_event(event); - cx.send_nested_report(buffer.generate_report()).await?; - } - else => { - break; - } - } - } - - StepSuccess::new(()).into() - }, - ) - .register(); - - // The step index here (100) is large enough to be higher than all nested - // steps. - engine - .new_step("baz".to_owned(), 100, "Step 5", move |_cx| async move { - StepSuccess::new(()).into() - }) - .register(); - - engine.execute().await.expect("execution successful"); - let generated_events: Vec<_> = - ReceiverStream::new(receiver).collect().await; + let generated_events = generate_test_events( + &logctx.log, + GenerateTestEventsKind::Completed, + ) + .await; let test_cx = BufferTestContext::new(generated_events); @@ -2417,71 +2326,6 @@ mod tests { } } - fn define_nested_engine<'a>( - parent_cx: &'a StepContext, - engine: &mut UpdateEngine<'a, TestSpec>, - ) { - engine - .new_step( - "nested-foo".to_owned(), - 4, - "Nested step 1", - move |cx| async move { - parent_cx - .send_progress(StepProgress::with_current_and_total( - 1, - 3, - "steps", - Default::default(), - )) - .await; - cx.send_progress( - StepProgress::progress(Default::default()), - ) - .await; - StepSuccess::new(()).into() - }, - ) - .register(); - - engine - .new_step::<_, _, ()>( - "nested-bar".to_owned(), - 5, - "Nested step 2 (fails)", - move |cx| async move { - // This is used by NestedProgressCheck below. 
- parent_cx - .send_progress(StepProgress::with_current_and_total( - 2, - 3, - "steps", - Default::default(), - )) - .await; - - cx.send_progress(StepProgress::with_current( - 50, - "units", - Default::default(), - )) - .await; - - parent_cx - .send_progress(StepProgress::with_current_and_total( - 3, - 3, - "steps", - Default::default(), - )) - .await; - - bail!("failing step") - }, - ) - .register(); - } - #[derive(Clone, Copy, Debug, PartialEq, Eq)] enum NestedProgressCheck { Initial, @@ -2530,42 +2374,4 @@ mod tests { ); } } - - fn define_remote_nested_engine( - engine: &mut UpdateEngine<'_, TestSpec>, - start_id: usize, - ) { - engine - .new_step( - "nested-foo".to_owned(), - start_id + 1, - "Nested step 1", - move |cx| async move { - cx.send_progress( - StepProgress::progress(Default::default()), - ) - .await; - StepSuccess::new(()).into() - }, - ) - .register(); - - engine - .new_step::<_, _, ()>( - "nested-bar".to_owned(), - start_id + 2, - "Nested step 2", - move |cx| async move { - cx.send_progress(StepProgress::with_current( - 20, - "units", - Default::default(), - )) - .await; - - StepSuccess::new(()).into() - }, - ) - .register(); - } } diff --git a/update-engine/src/display/group_display.rs b/update-engine/src/display/group_display.rs index 0d50489a9f..cfd37aac16 100644 --- a/update-engine/src/display/group_display.rs +++ b/update-engine/src/display/group_display.rs @@ -30,6 +30,7 @@ use super::{ pub struct GroupDisplay { // We don't need to add any buffering here because we already write data to // the writer in a line-buffered fashion (see Self::write_events). + log: slog::Logger, writer: W, max_width: usize, // This is set to the highest value of root_total_elapsed seen from any event reports. @@ -45,6 +46,7 @@ impl GroupDisplay { /// /// The function passed in is expected to create a writer. pub fn new( + log: &slog::Logger, keys_and_prefixes: impl IntoIterator, writer: W, ) -> Self @@ -70,6 +72,7 @@ impl GroupDisplay { let not_started = single_states.len(); Self { + log: log.new(slog::o!("component" => "GroupDisplay")), writer, max_width, // This creates the stopwatch in the stopped state with duration 0 -- i.e. a minimal @@ -84,6 +87,7 @@ impl GroupDisplay { /// Creates a new `GroupDisplay` with the provided report keys, using the /// `Display` impl to obtain the respective prefixes. pub fn new_with_display( + log: &slog::Logger, keys: impl IntoIterator, writer: W, ) -> Self @@ -91,6 +95,7 @@ impl GroupDisplay { K: fmt::Display, { Self::new( + log, keys.into_iter().map(|k| { let prefix = k.to_string(); (k, prefix) @@ -144,7 +149,30 @@ impl GroupDisplay { TokioSw::with_elapsed_started(root_total_elapsed); } } + self.stats.apply_result(result); + + if result.before != result.after { + slog::info!( + self.log, + "add_event_report caused state transition"; + "prefix" => &state.prefix, + "before" => %result.before, + "after" => %result.after, + "current_stats" => ?self.stats, + "root_total_elapsed" => ?result.root_total_elapsed, + ); + } else { + slog::trace!( + self.log, + "add_event_report called, state did not change"; + "prefix" => &state.prefix, + "state" => %result.before, + "current_stats" => ?self.stats, + "root_total_elapsed" => ?result.root_total_elapsed, + ); + } + Ok(()) } else { Err(UnknownReportKey {}) @@ -179,7 +207,7 @@ impl GroupDisplay { } } -#[derive(Clone, Copy, Debug)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] pub struct GroupDisplayStats { /// The total number of reports. 
pub total: usize, @@ -236,18 +264,9 @@ impl GroupDisplayStats { } fn apply_result(&mut self, result: AddEventReportResult) { - // Process result.after first to avoid integer underflow. - match result.after { - SingleStateTag::NotStarted => self.not_started += 1, - SingleStateTag::Running => self.running += 1, - SingleStateTag::Terminal(TerminalKind::Completed) => { - self.completed += 1 - } - SingleStateTag::Terminal(TerminalKind::Failed) => self.failed += 1, - SingleStateTag::Terminal(TerminalKind::Aborted) => { - self.aborted += 1 - } - SingleStateTag::Overwritten => self.overwritten += 1, + if result.before == result.after { + // Nothing to do. + return; } match result.before { @@ -262,6 +281,19 @@ impl GroupDisplayStats { } SingleStateTag::Overwritten => self.overwritten -= 1, } + + match result.after { + SingleStateTag::NotStarted => self.not_started += 1, + SingleStateTag::Running => self.running += 1, + SingleStateTag::Terminal(TerminalKind::Completed) => { + self.completed += 1 + } + SingleStateTag::Terminal(TerminalKind::Failed) => self.failed += 1, + SingleStateTag::Terminal(TerminalKind::Aborted) => { + self.aborted += 1 + } + SingleStateTag::Overwritten => self.overwritten += 1, + } } fn format_line( @@ -336,92 +368,139 @@ impl SingleState { &mut self, event_report: EventReport, ) -> AddEventReportResult { - let before = match &self.kind { + match &mut self.kind { SingleStateKind::NotStarted { .. } => { - self.kind = SingleStateKind::Running { - event_buffer: EventBuffer::new(8), + // We're starting a new update. + let before = SingleStateTag::NotStarted; + let mut event_buffer = EventBuffer::default(); + let (after, root_total_elapsed) = + match Self::apply_report(&mut event_buffer, event_report) { + ApplyReportResult::NotStarted => { + // This means that the event report was empty. Don't + // update `self.kind`. + (SingleStateTag::NotStarted, None) + } + ApplyReportResult::Running(root_total_elapsed) => { + self.kind = + SingleStateKind::Running { event_buffer }; + (SingleStateTag::Running, Some(root_total_elapsed)) + } + ApplyReportResult::Terminal(info) => { + let terminal_kind = info.kind; + let root_total_elapsed = info.root_total_elapsed; + + self.kind = SingleStateKind::Terminal { + info, + pending_event_buffer: Some(event_buffer), + }; + ( + SingleStateTag::Terminal(terminal_kind), + root_total_elapsed, + ) + } + ApplyReportResult::Overwritten => { + self.kind = SingleStateKind::Overwritten { + displayed: false, + }; + (SingleStateTag::Overwritten, None) + } + }; + + AddEventReportResult { before, after, root_total_elapsed } + } + SingleStateKind::Running { event_buffer } => { + // We're in the middle of an update. + let before = SingleStateTag::Running; + let (after, root_total_elapsed) = match Self::apply_report( + event_buffer, + event_report, + ) { + ApplyReportResult::NotStarted => { + // This is an illegal state transition: once a + // non-empty event report has been received, the + // event buffer never goes back to the NotStarted + // state. + unreachable!("illegal state transition from Running to NotStarted") + } + ApplyReportResult::Running(root_total_elapsed) => { + (SingleStateTag::Running, Some(root_total_elapsed)) + } + ApplyReportResult::Terminal(info) => { + let terminal_kind = info.kind; + let root_total_elapsed = info.root_total_elapsed; + + // Grab the event buffer so we can store it in the + // Terminal state below. 
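                        // (A fresh placeholder buffer is left in the Running
                        // variant, which is overwritten by the Terminal
                        // variant immediately below.)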
+ let event_buffer = std::mem::replace( + event_buffer, + EventBuffer::new(0), + ); + + self.kind = SingleStateKind::Terminal { + info, + pending_event_buffer: Some(event_buffer), + }; + ( + SingleStateTag::Terminal(terminal_kind), + root_total_elapsed, + ) + } + ApplyReportResult::Overwritten => { + self.kind = + SingleStateKind::Overwritten { displayed: false }; + (SingleStateTag::Overwritten, None) + } }; - SingleStateTag::NotStarted + AddEventReportResult { before, after, root_total_elapsed } } - SingleStateKind::Running { .. } => SingleStateTag::Running, - SingleStateKind::Terminal { info, .. } => { // Once we've reached a terminal state, we don't record any more // events. - return AddEventReportResult::unchanged( + AddEventReportResult::unchanged( SingleStateTag::Terminal(info.kind), info.root_total_elapsed, - ); + ) } SingleStateKind::Overwritten { .. } => { // This update has already completed -- assume that the event // buffer is for a new update, which we don't show. - return AddEventReportResult::unchanged( + AddEventReportResult::unchanged( SingleStateTag::Overwritten, None, - ); + ) } - }; - - let SingleStateKind::Running { event_buffer } = &mut self.kind else { - unreachable!("other branches were handled above"); - }; + } + } + /// The internal logic used by [`Self::add_event_report`]. + fn apply_report( + event_buffer: &mut EventBuffer, + event_report: EventReport, + ) -> ApplyReportResult { if let Some(root_execution_id) = event_buffer.root_execution_id() { if event_report.root_execution_id != Some(root_execution_id) { // The report is for a different execution ID -- assume that // this event is completed and mark our current execution as // completed. - self.kind = SingleStateKind::Overwritten { displayed: false }; - return AddEventReportResult { - before, - after: SingleStateTag::Overwritten, - root_total_elapsed: None, - }; + return ApplyReportResult::Overwritten; } } event_buffer.add_event_report(event_report); - let (after, max_total_elapsed) = - match event_buffer.root_execution_summary() { - Some(summary) => { - match summary.execution_status { - ExecutionStatus::NotStarted => { - (SingleStateTag::NotStarted, None) - } - ExecutionStatus::Running { - root_total_elapsed: max_total_elapsed, - .. - } => (SingleStateTag::Running, Some(max_total_elapsed)), - ExecutionStatus::Terminal(info) => { - // Grab the event buffer to store it in the terminal state. - let event_buffer = std::mem::replace( - event_buffer, - EventBuffer::new(0), - ); - let terminal_kind = info.kind; - let root_total_elapsed = info.root_total_elapsed; - self.kind = SingleStateKind::Terminal { - info, - pending_event_buffer: Some(event_buffer), - }; - ( - SingleStateTag::Terminal(terminal_kind), - root_total_elapsed, - ) - } - } + match event_buffer.root_execution_summary() { + Some(summary) => match summary.execution_status { + ExecutionStatus::NotStarted => ApplyReportResult::NotStarted, + ExecutionStatus::Running { root_total_elapsed, .. } => { + ApplyReportResult::Running(root_total_elapsed) } - None => { - // We don't have a summary yet. - (SingleStateTag::NotStarted, None) + ExecutionStatus::Terminal(info) => { + ApplyReportResult::Terminal(info) } - }; - - AddEventReportResult { - before, - after, - root_total_elapsed: max_total_elapsed, + }, + None => { + // We don't have a summary yet. 
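                // Report NotStarted so the caller leaves its state unchanged.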
+ ApplyReportResult::NotStarted + } } } @@ -488,6 +567,7 @@ enum SingleStateKind { }, } +#[derive(Clone, Copy, Debug, Eq, PartialEq)] struct AddEventReportResult { before: SingleStateTag, after: SingleStateTag, @@ -503,10 +583,238 @@ impl AddEventReportResult { } } -#[derive(Copy, Clone, Debug)] +#[derive(Copy, Clone, Debug, Eq, PartialEq)] enum SingleStateTag { NotStarted, Running, Terminal(TerminalKind), Overwritten, } + +impl fmt::Display for SingleStateTag { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::NotStarted => write!(f, "not started"), + Self::Running => write!(f, "running"), + Self::Terminal(kind) => write!(f, "{kind}"), + Self::Overwritten => write!(f, "overwritten"), + } + } +} + +#[derive(Clone, Debug)] +enum ApplyReportResult { + NotStarted, + Running(Duration), + Terminal(ExecutionTerminalInfo), + Overwritten, +} + +#[cfg(test)] +mod tests { + use omicron_test_utils::dev::test_setup_log; + + use super::*; + + use crate::test_utils::{generate_test_events, GenerateTestEventsKind}; + + #[tokio::test] + async fn test_stats() { + let logctx = test_setup_log("test_stats"); + // Generate three sets of events, one for each kind. + let generated_completed = generate_test_events( + &logctx.log, + GenerateTestEventsKind::Completed, + ) + .await; + let generated_failed = + generate_test_events(&logctx.log, GenerateTestEventsKind::Failed) + .await; + let generated_aborted = + generate_test_events(&logctx.log, GenerateTestEventsKind::Aborted) + .await; + + // Set up a `GroupDisplay` with three keys. + let mut group_display = GroupDisplay::new_with_display( + &logctx.log, + vec![ + GroupDisplayKey::Completed, + GroupDisplayKey::Failed, + GroupDisplayKey::Aborted, + GroupDisplayKey::Overwritten, + ], + std::io::stdout(), + ); + + let mut expected_stats = GroupDisplayStats { + total: 4, + not_started: 4, + running: 0, + completed: 0, + failed: 0, + aborted: 0, + overwritten: 0, + }; + assert_eq!(group_display.stats(), &expected_stats); + assert!(!expected_stats.is_terminal()); + assert!(!expected_stats.has_failures()); + + // Pass in an empty EventReport -- ensure that this doesn't move it to + // a Running state. + + group_display + .add_event_report( + &GroupDisplayKey::Completed, + EventReport::default(), + ) + .unwrap(); + assert_eq!(group_display.stats(), &expected_stats); + + // Pass in events one by one -- ensure that we're always in the running + // state until we've completed. + { + expected_stats.not_started -= 1; + expected_stats.running += 1; + + let n = generated_completed.len(); + + let mut buffer = EventBuffer::default(); + let mut last_seen = None; + + for (i, event) in + generated_completed.clone().into_iter().enumerate() + { + buffer.add_event(event); + let report = buffer.generate_report_since(&mut last_seen); + group_display + .add_event_report(&GroupDisplayKey::Completed, report) + .unwrap(); + if i == n - 1 { + // The last event should have moved us to the completed + // state. + expected_stats.running -= 1; + expected_stats.completed += 1; + } else { + // We should still be in the running state. + } + assert_eq!(group_display.stats(), &expected_stats); + assert!(!expected_stats.is_terminal()); + assert!(!expected_stats.has_failures()); + } + } + + // Pass in failed events, this time using buffer.generate_report() + // rather than buffer.generate_report_since(). 
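        // (i.e. a full cumulative report each time rather than only the
        // events after the `last_seen` cursor.)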
+ { + expected_stats.not_started -= 1; + expected_stats.running += 1; + + let n = generated_failed.len(); + + let mut buffer = EventBuffer::default(); + for (i, event) in generated_failed.clone().into_iter().enumerate() { + buffer.add_event(event); + let report = buffer.generate_report(); + group_display + .add_event_report(&GroupDisplayKey::Failed, report) + .unwrap(); + if i == n - 1 { + // The last event should have moved us to the failed state. + expected_stats.running -= 1; + expected_stats.failed += 1; + assert!(expected_stats.has_failures()); + } else { + // We should still be in the running state. + assert!(!expected_stats.has_failures()); + } + assert_eq!(group_display.stats(), &expected_stats); + } + } + + // Pass in aborted events all at once. + { + expected_stats.not_started -= 1; + expected_stats.running += 1; + + let mut buffer = EventBuffer::default(); + for event in generated_aborted { + buffer.add_event(event); + } + let report = buffer.generate_report(); + group_display + .add_event_report(&GroupDisplayKey::Aborted, report) + .unwrap(); + // The aborted events should have moved us to the aborted state. + expected_stats.running -= 1; + expected_stats.aborted += 1; + assert_eq!(group_display.stats(), &expected_stats); + + // Try passing in one of the events that, if we were running, would + // cause us to move to an overwritten state. Ensure that that does + // not happen (i.e. expected_stats stays the same) + let mut buffer = EventBuffer::default(); + buffer.add_event(generated_failed.first().unwrap().clone()); + let report = buffer.generate_report(); + group_display + .add_event_report(&GroupDisplayKey::Aborted, report) + .unwrap(); + assert_eq!(group_display.stats(), &expected_stats); + } + + // For the overwritten state, pass in half of the completed events, and + // then pass in all of the failed events. + + { + expected_stats.not_started -= 1; + expected_stats.running += 1; + + let mut buffer = EventBuffer::default(); + let n = generated_completed.len() / 2; + for event in generated_completed.into_iter().take(n) { + buffer.add_event(event); + } + let report = buffer.generate_report(); + group_display + .add_event_report(&GroupDisplayKey::Overwritten, report) + .unwrap(); + assert_eq!(group_display.stats(), &expected_stats); + + // Now pass in a single failed event, which has a different + // execution ID. + let mut buffer = EventBuffer::default(); + buffer.add_event(generated_failed.first().unwrap().clone()); + let report = buffer.generate_report(); + group_display + .add_event_report(&GroupDisplayKey::Overwritten, report) + .unwrap(); + // The overwritten event should have moved us to the overwritten + // state. 
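            // (Its root execution ID differs from the one already recorded in
            // the buffer, which is what triggers the Overwritten transition.)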
+ expected_stats.running -= 1; + expected_stats.overwritten += 1; + } + + assert!(expected_stats.has_failures()); + assert!(expected_stats.is_terminal()); + + logctx.cleanup_successful(); + } + + #[derive(Debug, Eq, PartialEq, Ord, PartialOrd)] + enum GroupDisplayKey { + Completed, + Failed, + Aborted, + Overwritten, + } + + impl fmt::Display for GroupDisplayKey { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Completed => write!(f, "completed"), + Self::Failed => write!(f, "failed"), + Self::Aborted => write!(f, "aborted"), + Self::Overwritten => write!(f, "overwritten"), + } + } + } +} diff --git a/update-engine/src/test_utils.rs b/update-engine/src/test_utils.rs index 0bacfbeb8d..b943d1ddfe 100644 --- a/update-engine/src/test_utils.rs +++ b/update-engine/src/test_utils.rs @@ -4,9 +4,16 @@ // Copyright 2023 Oxide Computer Company +use anyhow::bail; +use futures::StreamExt; use schemars::JsonSchema; +use tokio::sync::{mpsc, oneshot}; +use tokio_stream::wrappers::ReceiverStream; -use crate::{ExecutionId, StepSpec}; +use crate::{ + events::{Event, ProgressUnits, StepProgress}, + EventBuffer, ExecutionId, StepContext, StepSpec, StepSuccess, UpdateEngine, +}; #[derive(JsonSchema)] pub(crate) enum TestSpec {} @@ -27,3 +34,278 @@ pub(crate) static TEST_EXECUTION_UUID: &str = pub fn test_execution_id() -> ExecutionId { ExecutionId(TEST_EXECUTION_UUID.parse().expect("valid UUID")) } + +#[derive(Copy, Clone, Debug)] +pub(crate) enum GenerateTestEventsKind { + Completed, + Failed, + Aborted, +} + +pub(crate) async fn generate_test_events( + log: &slog::Logger, + kind: GenerateTestEventsKind, +) -> Vec> { + // The channel is big enough to contain all possible events. + let (sender, receiver) = mpsc::channel(512); + let engine = UpdateEngine::new(log, sender); + + match kind { + GenerateTestEventsKind::Completed => { + define_test_steps(log, &engine, LastStepOutcome::Completed); + engine.execute().await.expect("execution successful"); + } + GenerateTestEventsKind::Failed => { + define_test_steps(log, &engine, LastStepOutcome::Failed); + engine.execute().await.expect_err("execution failed"); + } + GenerateTestEventsKind::Aborted => { + // In this case, the last step signals that it has been reached via + // sending a message over this channel, and then waits forever. We + // abort execution by calling into the AbortHandle. + let (sender, receiver) = oneshot::channel(); + define_test_steps(log, &engine, LastStepOutcome::Aborted(sender)); + let abort_handle = engine.abort_handle(); + let mut execute_fut = std::pin::pin!(engine.execute()); + let mut receiver = std::pin::pin!(receiver); + let mut receiver_done = false; + loop { + tokio::select! 
{ + res = &mut execute_fut => { + res.expect_err("execution should have been aborted, but completed successfully"); + break; + } + _ = &mut receiver, if !receiver_done => { + receiver_done = true; + abort_handle + .abort("test engine deliberately aborted") + .expect("engine should still be alive"); + } + } + } + } + } + + ReceiverStream::new(receiver).collect().await +} + +#[derive(Debug)] +enum LastStepOutcome { + Completed, + Failed, + Aborted(oneshot::Sender<()>), +} + +#[derive(Debug)] +enum Never {} + +fn define_test_steps( + log: &slog::Logger, + engine: &UpdateEngine, + last_step_outcome: LastStepOutcome, +) { + engine + .new_step("foo".to_owned(), 1, "Step 1", move |_cx| async move { + StepSuccess::new(()).into() + }) + .register(); + + engine + .new_step("bar".to_owned(), 2, "Step 2", move |cx| async move { + for _ in 0..20 { + cx.send_progress(StepProgress::with_current_and_total( + 5, + 20, + ProgressUnits::BYTES, + Default::default(), + )) + .await; + + cx.send_progress(StepProgress::reset( + Default::default(), + "reset step 2", + )) + .await; + + cx.send_progress(StepProgress::retry("retry step 2")).await; + } + StepSuccess::new(()).into() + }) + .register(); + + engine + .new_step( + "nested".to_owned(), + 3, + "Step 3 (this is nested)", + move |parent_cx| async move { + parent_cx + .with_nested_engine(|engine| { + define_nested_engine(&parent_cx, engine); + Ok(()) + }) + .await + .expect_err("this is expected to fail"); + + StepSuccess::new(()).into() + }, + ) + .register(); + + let log = log.clone(); + engine + .new_step( + "remote-nested".to_owned(), + 20, + "Step 4 (remote nested)", + move |cx| async move { + let (sender, mut receiver) = mpsc::channel(16); + let mut engine = UpdateEngine::new(&log, sender); + define_remote_nested_engine(&mut engine, 20); + + let mut buffer = EventBuffer::default(); + + let mut execute_fut = std::pin::pin!(engine.execute()); + let mut execute_done = false; + loop { + tokio::select! { + res = &mut execute_fut, if !execute_done => { + res.expect("remote nested engine completed successfully"); + execute_done = true; + } + Some(event) = receiver.recv() => { + // Generate complete reports to ensure deduping + // happens within StepContexts. + buffer.add_event(event); + cx.send_nested_report(buffer.generate_report()).await?; + } + else => { + break; + } + } + } + + StepSuccess::new(()).into() + }, + ) + .register(); + + // The step index here (100) is large enough to be higher than all nested + // steps. + engine + .new_step("baz".to_owned(), 100, "Step 5", move |_cx| async move { + match last_step_outcome { + LastStepOutcome::Completed => StepSuccess::new(()).into(), + LastStepOutcome::Failed => { + bail!("last step failed") + } + LastStepOutcome::Aborted(sender) => { + sender.send(()).expect("receiver should be alive"); + // The driver of the engine is responsible for aborting it + // at this point. 
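                    // Park this step forever; the abort arrives from outside
                    // via the engine's abort handle.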
+ std::future::pending::().await; + unreachable!("pending future can never resolve"); + } + } + }) + .register(); +} + +fn define_nested_engine<'a>( + parent_cx: &'a StepContext, + engine: &mut UpdateEngine<'a, TestSpec>, +) { + engine + .new_step( + "nested-foo".to_owned(), + 4, + "Nested step 1", + move |cx| async move { + parent_cx + .send_progress(StepProgress::with_current_and_total( + 1, + 3, + "steps", + Default::default(), + )) + .await; + cx.send_progress(StepProgress::progress(Default::default())) + .await; + StepSuccess::new(()).into() + }, + ) + .register(); + + engine + .new_step::<_, _, ()>( + "nested-bar".to_owned(), + 5, + "Nested step 2 (fails)", + move |cx| async move { + // This is used by NestedProgressCheck below. + parent_cx + .send_progress(StepProgress::with_current_and_total( + 2, + 3, + "steps", + Default::default(), + )) + .await; + + cx.send_progress(StepProgress::with_current( + 50, + "units", + Default::default(), + )) + .await; + + parent_cx + .send_progress(StepProgress::with_current_and_total( + 3, + 3, + "steps", + Default::default(), + )) + .await; + + bail!("failing step") + }, + ) + .register(); +} + +fn define_remote_nested_engine( + engine: &mut UpdateEngine<'_, TestSpec>, + start_id: usize, +) { + engine + .new_step( + "nested-foo".to_owned(), + start_id + 1, + "Nested step 1", + move |cx| async move { + cx.send_progress(StepProgress::progress(Default::default())) + .await; + StepSuccess::new(()).into() + }, + ) + .register(); + + engine + .new_step::<_, _, ()>( + "nested-bar".to_owned(), + start_id + 2, + "Nested step 2", + move |cx| async move { + cx.send_progress(StepProgress::with_current( + 20, + "units", + Default::default(), + )) + .await; + + StepSuccess::new(()).into() + }, + ) + .register(); +} diff --git a/wicket/src/cli/rack_update.rs b/wicket/src/cli/rack_update.rs index fa41fa7b8c..cac0f09ee5 100644 --- a/wicket/src/cli/rack_update.rs +++ b/wicket/src/cli/rack_update.rs @@ -174,6 +174,7 @@ async fn do_attach_to_updates( output: CommandOutput<'_>, ) -> Result<()> { let mut display = GroupDisplay::new_with_display( + &log, update_ids.iter().copied(), output.stderr, ); From 9dcc32d98ec9a9bc2c137c6b4ac77730ebe38c8f Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Tue, 28 Nov 2023 05:24:00 +0000 Subject: [PATCH 45/56] Update taiki-e/install-action digest to c1dd9c9 (#4562) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [taiki-e/install-action](https://togithub.com/taiki-e/install-action) | action | digest | [`8f354f3` -> `c1dd9c9`](https://togithub.com/taiki-e/install-action/compare/8f354f3...c1dd9c9) | --- ### Configuration 📅 **Schedule**: Branch creation - "after 8pm,before 6am" in timezone America/Los_Angeles, Automerge - "after 8pm,before 6am" in timezone America/Los_Angeles. 🚦 **Automerge**: Enabled. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [Renovate Bot](https://togithub.com/renovatebot/renovate). 
Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- .github/workflows/hakari.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/hakari.yml b/.github/workflows/hakari.yml index d79c836fba..c006a41f35 100644 --- a/.github/workflows/hakari.yml +++ b/.github/workflows/hakari.yml @@ -24,7 +24,7 @@ jobs: with: toolchain: stable - name: Install cargo-hakari - uses: taiki-e/install-action@8f354f35e51028c902e8ab954045e37739acf562 # v2 + uses: taiki-e/install-action@c1dd9c9e59427252db32b9ece987f4eebc3a021a # v2 with: tool: cargo-hakari - name: Check workspace-hack Cargo.toml is up-to-date From 19a01c20253044b73e1cb8846fd8b6d77543fdf4 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Tue, 28 Nov 2023 06:43:07 +0000 Subject: [PATCH 46/56] Update Rust crate percent-encoding to 2.3.1 (#4563) --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index e4588efbde..04d7a1374d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -282,7 +282,7 @@ p256 = "0.13" parse-display = "0.8.2" partial-io = { version = "0.5.4", features = ["proptest1", "tokio1"] } paste = "1.0.14" -percent-encoding = "2.3.0" +percent-encoding = "2.3.1" pem = "1.1" petgraph = "0.6.4" postgres-protocol = "0.6.6" From 55b39533cfe9a3f2fc1185adaa9c2118efaee6bf Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Tue, 28 Nov 2023 10:01:57 -0800 Subject: [PATCH 47/56] Update Rust crate camino-tempfile to 1.1.1 (#4565) --- Cargo.lock | 40 ++++++++++++++++++++++++--------------- Cargo.toml | 2 +- workspace-hack/Cargo.toml | 22 +++++++++++++-------- 3 files changed, 40 insertions(+), 24 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 07f804b03d..76107c8f4e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -324,7 +324,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f4d45f362125ed144544e57b0ec6de8fd6a296d41a6252fc4a20c0cf12e9ed3a" dependencies = [ - "rustix 0.38.9", + "rustix 0.38.25", "tempfile", "windows-sys 0.48.0", ] @@ -754,9 +754,9 @@ dependencies = [ [[package]] name = "camino-tempfile" -version = "1.0.2" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2ab15a83d13f75dbd86f082bdefd160b628476ef58d3b900a0ef74e001bb097" +checksum = "cb905055fa81e4d427f919b2cd0d76a998267de7d225ea767a1894743a5263c2" dependencies = [ "camino", "tempfile", @@ -2151,7 +2151,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef033ed5e9bad94e55838ca0ca906db0e043f517adda0c8b79c7a8c66c93c1b5" dependencies = [ "cfg-if 1.0.0", - "rustix 0.38.9", + "rustix 0.38.25", "windows-sys 0.48.0", ] @@ -3383,7 +3383,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi 0.3.2", - "rustix 0.38.9", + "rustix 0.38.25", "windows-sys 0.48.0", ] @@ -3636,9 +3636,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "linux-raw-sys" -version = "0.4.5" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57bcfdad1b858c2db7c38303a6d2ad4dfaf5eb53dfeb0910128b2c26d6158503" +checksum = "969488b55f8ac402214f3f5fd243ebb7206cf82de60d3172994707a4bcc2b829" [[package]] name = "lock_api" @@ -4935,6 +4935,7 @@ 
dependencies = [ "diesel", "digest", "either", + "errno", "flate2", "futures", "futures-channel", @@ -4979,7 +4980,7 @@ dependencies = [ "regex-syntax 0.8.2", "reqwest", "ring 0.16.20", - "rustix 0.38.9", + "rustix 0.38.25", "schemars", "semver 1.0.20", "serde", @@ -6421,6 +6422,15 @@ dependencies = [ "bitflags 1.3.2", ] +[[package]] +name = "redox_syscall" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +dependencies = [ + "bitflags 1.3.2", +] + [[package]] name = "redox_users" version = "0.4.3" @@ -6872,14 +6882,14 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.9" +version = "0.38.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bfe0f2582b4931a45d1fa608f8a8722e8b3c7ac54dd6d5f3b3212791fedef49" +checksum = "dc99bc2d4f1fed22595588a013687477aedf3cdcfb26558c559edb67b4d9b22e" dependencies = [ "bitflags 2.4.0", "errno", "libc", - "linux-raw-sys 0.4.5", + "linux-raw-sys 0.4.11", "windows-sys 0.48.0", ] @@ -8170,14 +8180,14 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.8.0" +version = "3.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" +checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" dependencies = [ "cfg-if 1.0.0", "fastrand", - "redox_syscall 0.3.5", - "rustix 0.38.9", + "redox_syscall 0.4.1", + "rustix 0.38.25", "windows-sys 0.48.0", ] diff --git a/Cargo.toml b/Cargo.toml index 04d7a1374d..239fb453dc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -160,7 +160,7 @@ byteorder = "1.5.0" bytes = "1.5.0" bytesize = "1.3.0" camino = "1.1" -camino-tempfile = "1.0.2" +camino-tempfile = "1.1.1" cancel-safe-futures = "0.1.5" chacha20poly1305 = "0.10.1" ciborium = "0.2.1" diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index 1a289bd0cb..7757b4ad8b 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -209,58 +209,64 @@ bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-f hyper-rustls = { version = "0.24.2" } mio = { version = "0.8.9", features = ["net", "os-ext"] } once_cell = { version = "1.18.0", features = ["unstable"] } -rustix = { version = "0.38.9", features = ["fs", "termios"] } +rustix = { version = "0.38.25", features = ["fs", "termios"] } [target.x86_64-unknown-linux-gnu.build-dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } hyper-rustls = { version = "0.24.2" } mio = { version = "0.8.9", features = ["net", "os-ext"] } once_cell = { version = "1.18.0", features = ["unstable"] } -rustix = { version = "0.38.9", features = ["fs", "termios"] } +rustix = { version = "0.38.25", features = ["fs", "termios"] } [target.x86_64-apple-darwin.dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } +errno = { version = "0.3.2", default-features = false, features = ["std"] } hyper-rustls = { version = "0.24.2" } mio = { version = "0.8.9", features = ["net", "os-ext"] } once_cell = { version = "1.18.0", features = ["unstable"] } -rustix = { version = "0.38.9", features = ["fs", "termios"] } +rustix = { version = "0.38.25", features = ["fs", "termios"] } [target.x86_64-apple-darwin.build-dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", 
default-features = false, features = ["std"] } +errno = { version = "0.3.2", default-features = false, features = ["std"] } hyper-rustls = { version = "0.24.2" } mio = { version = "0.8.9", features = ["net", "os-ext"] } once_cell = { version = "1.18.0", features = ["unstable"] } -rustix = { version = "0.38.9", features = ["fs", "termios"] } +rustix = { version = "0.38.25", features = ["fs", "termios"] } [target.aarch64-apple-darwin.dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } +errno = { version = "0.3.2", default-features = false, features = ["std"] } hyper-rustls = { version = "0.24.2" } mio = { version = "0.8.9", features = ["net", "os-ext"] } once_cell = { version = "1.18.0", features = ["unstable"] } -rustix = { version = "0.38.9", features = ["fs", "termios"] } +rustix = { version = "0.38.25", features = ["fs", "termios"] } [target.aarch64-apple-darwin.build-dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } +errno = { version = "0.3.2", default-features = false, features = ["std"] } hyper-rustls = { version = "0.24.2" } mio = { version = "0.8.9", features = ["net", "os-ext"] } once_cell = { version = "1.18.0", features = ["unstable"] } -rustix = { version = "0.38.9", features = ["fs", "termios"] } +rustix = { version = "0.38.25", features = ["fs", "termios"] } [target.x86_64-unknown-illumos.dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } +errno = { version = "0.3.2", default-features = false, features = ["std"] } hyper-rustls = { version = "0.24.2" } mio = { version = "0.8.9", features = ["net", "os-ext"] } once_cell = { version = "1.18.0", features = ["unstable"] } -rustix = { version = "0.38.9", features = ["fs", "termios"] } +rustix = { version = "0.38.25", features = ["fs", "termios"] } toml_datetime = { version = "0.6.5", default-features = false, features = ["serde"] } toml_edit-cdcf2f9584511fe6 = { package = "toml_edit", version = "0.19.15", features = ["serde"] } [target.x86_64-unknown-illumos.build-dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } +errno = { version = "0.3.2", default-features = false, features = ["std"] } hyper-rustls = { version = "0.24.2" } mio = { version = "0.8.9", features = ["net", "os-ext"] } once_cell = { version = "1.18.0", features = ["unstable"] } -rustix = { version = "0.38.9", features = ["fs", "termios"] } +rustix = { version = "0.38.25", features = ["fs", "termios"] } toml_datetime = { version = "0.6.5", default-features = false, features = ["serde"] } toml_edit-cdcf2f9584511fe6 = { package = "toml_edit", version = "0.19.15", features = ["serde"] } From 30d41911f3682e21f34ec041a651c3f206600894 Mon Sep 17 00:00:00 2001 From: liffy <629075+lifning@users.noreply.github.com> Date: Tue, 28 Nov 2023 12:02:08 -0800 Subject: [PATCH 48/56] Refactor InstalledZone::install to use a builder pattern, per TODO. (#4325) Additionally, make a builder-factory with an option to create fake builders, in service of refactoring some things to enable some unit tests being written. 
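
As a rough illustration (not part of this change itself), a call site that previously passed the long positional argument list to InstalledZone::install can now chain the builder's setters; identifiers such as `log` and `vnic_allocator` below are stand-ins for values the caller already has, and the fake factory is intended only for unit tests:

    use camino::Utf8Path;
    use illumos_utils::running_zone::ZoneBuilderFactory;

    // ZoneBuilderFactory::fake() skips real zone creation so unit tests can
    // exercise zone-handling code paths; production code keeps using
    // ZoneBuilderFactory::default().
    let installed_zone = ZoneBuilderFactory::fake()
        .builder()
        .with_log(log.clone())
        .with_underlay_vnic_allocator(&vnic_allocator)
        .with_zone_root_path(Utf8Path::new("/zones"))
        .with_zone_image_paths(&["/opt/oxide".into()])
        .with_zone_type("propolis-server")
        .with_opte_ports(vec![])
        .with_links(vec![])
        .with_limit_priv(vec![])
        .install()
        .await
        .unwrap();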
--- Cargo.lock | 1 + illumos-utils/Cargo.toml | 1 + illumos-utils/src/running_zone.rs | 238 +++++++++++++++++++++++++---- sled-agent/src/instance.rs | 48 +++--- sled-agent/src/instance_manager.rs | 9 ++ sled-agent/src/services.rs | 41 ++--- sled-agent/src/sled_agent.rs | 2 + 7 files changed, 272 insertions(+), 68 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 76107c8f4e..108c8b182d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3072,6 +3072,7 @@ dependencies = [ "bhyve_api", "byteorder", "camino", + "camino-tempfile", "cfg-if 1.0.0", "crucible-smf", "futures", diff --git a/illumos-utils/Cargo.toml b/illumos-utils/Cargo.toml index 497454e047..8296eace5c 100644 --- a/illumos-utils/Cargo.toml +++ b/illumos-utils/Cargo.toml @@ -11,6 +11,7 @@ async-trait.workspace = true bhyve_api.workspace = true byteorder.workspace = true camino.workspace = true +camino-tempfile.workspace = true cfg-if.workspace = true crucible-smf.workspace = true futures.workspace = true diff --git a/illumos-utils/src/running_zone.rs b/illumos-utils/src/running_zone.rs index ba8cd009e8..ea80a6d34b 100644 --- a/illumos-utils/src/running_zone.rs +++ b/illumos-utils/src/running_zone.rs @@ -11,10 +11,12 @@ use crate::opte::{Port, PortTicket}; use crate::svc::wait_for_service; use crate::zone::{AddressRequest, IPADM, ZONE_PREFIX}; use camino::{Utf8Path, Utf8PathBuf}; +use camino_tempfile::Utf8TempDir; use ipnetwork::IpNetwork; use omicron_common::backoff; use slog::{error, info, o, warn, Logger}; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; +use std::sync::Arc; #[cfg(target_os = "illumos")] use std::sync::OnceLock; #[cfg(target_os = "illumos")] @@ -1043,7 +1045,7 @@ pub struct ServiceProcess { pub log_file: Utf8PathBuf, } -/// Errors returned from [`InstalledZone::install`]. +/// Errors returned from [`ZoneBuilder::install`]. #[derive(thiserror::Error, Debug)] pub enum InstallZoneError { #[error("Cannot create '{zone}': failed to create control VNIC: {err}")] @@ -1063,6 +1065,9 @@ pub enum InstallZoneError { #[error("Failed to find zone image '{image}' from {paths:?}")] ImageNotFound { image: String, paths: Vec }, + + #[error("Attempted to call install() on underspecified ZoneBuilder")] + IncompleteBuilder, } pub struct InstalledZone { @@ -1119,24 +1124,208 @@ impl InstalledZone { &self.zonepath } - // TODO: This would benefit from a "builder-pattern" interface. - #[allow(clippy::too_many_arguments)] - pub async fn install( - log: &Logger, - underlay_vnic_allocator: &VnicAllocator, - zone_root_path: &Utf8Path, - zone_image_paths: &[Utf8PathBuf], - zone_type: &str, - unique_name: Option, - datasets: &[zone::Dataset], - filesystems: &[zone::Fs], - data_links: &[String], - devices: &[zone::Device], - opte_ports: Vec<(Port, PortTicket)>, - bootstrap_vnic: Option, - links: Vec, - limit_priv: Vec, - ) -> Result { + pub fn site_profile_xml_path(&self) -> Utf8PathBuf { + let mut path: Utf8PathBuf = self.zonepath().into(); + path.push("root/var/svc/profile/site.xml"); + path + } +} + +#[derive(Clone)] +pub struct FakeZoneBuilderConfig { + temp_dir: Arc, +} + +#[derive(Clone, Default)] +pub struct ZoneBuilderFactory { + // Why this is part of this builder/factory and not some separate builder + // type: At time of writing, to the best of my knowledge: + // - If we want builder pattern, we need to return some type of `Self`. + // - If we have a trait that returns `Self` type, we can't turn it into a + // trait object (i.e. Box). 
+ // - Plumbing concrete types as generics through every other type that + // needs to construct zones (and anything else with a lot of parameters) + // seems like a worse idea. + fake_cfg: Option, +} + +impl ZoneBuilderFactory { + /// For use in unit tests that don't require actual zone creation to occur. + pub fn fake() -> Self { + Self { + fake_cfg: Some(FakeZoneBuilderConfig { + temp_dir: Arc::new(Utf8TempDir::new().unwrap()), + }), + } + } + + /// Create a [ZoneBuilder] that inherits this factory's fakeness. + pub fn builder<'a>(&self) -> ZoneBuilder<'a> { + ZoneBuilder { fake_cfg: self.fake_cfg.clone(), ..Default::default() } + } +} + +/// Builder-pattern construct for creating an [InstalledZone]. +/// Created by [ZoneBuilderFactory]. +#[derive(Default)] +pub struct ZoneBuilder<'a> { + log: Option, + underlay_vnic_allocator: Option<&'a VnicAllocator>, + zone_root_path: Option<&'a Utf8Path>, + zone_image_paths: Option<&'a [Utf8PathBuf]>, + zone_type: Option<&'a str>, + unique_name: Option, // actually optional + datasets: Option<&'a [zone::Dataset]>, + filesystems: Option<&'a [zone::Fs]>, + data_links: Option<&'a [String]>, + devices: Option<&'a [zone::Device]>, + opte_ports: Option>, + bootstrap_vnic: Option, // actually optional + links: Option>, + limit_priv: Option>, + fake_cfg: Option, +} + +impl<'a> ZoneBuilder<'a> { + pub fn with_log(mut self, log: Logger) -> Self { + self.log = Some(log); + self + } + + pub fn with_underlay_vnic_allocator( + mut self, + vnic_allocator: &'a VnicAllocator, + ) -> Self { + self.underlay_vnic_allocator = Some(vnic_allocator); + self + } + + pub fn with_zone_root_path(mut self, root_path: &'a Utf8Path) -> Self { + self.zone_root_path = Some(root_path); + self + } + + pub fn with_zone_image_paths( + mut self, + image_paths: &'a [Utf8PathBuf], + ) -> Self { + self.zone_image_paths = Some(image_paths); + self + } + + pub fn with_zone_type(mut self, zone_type: &'a str) -> Self { + self.zone_type = Some(zone_type); + self + } + + pub fn with_unique_name(mut self, uuid: Uuid) -> Self { + self.unique_name = Some(uuid); + self + } + + pub fn with_datasets(mut self, datasets: &'a [zone::Dataset]) -> Self { + self.datasets = Some(datasets); + self + } + + pub fn with_filesystems(mut self, filesystems: &'a [zone::Fs]) -> Self { + self.filesystems = Some(filesystems); + self + } + + pub fn with_data_links(mut self, links: &'a [String]) -> Self { + self.data_links = Some(links); + self + } + + pub fn with_devices(mut self, devices: &'a [zone::Device]) -> Self { + self.devices = Some(devices); + self + } + + pub fn with_opte_ports(mut self, ports: Vec<(Port, PortTicket)>) -> Self { + self.opte_ports = Some(ports); + self + } + + pub fn with_bootstrap_vnic(mut self, vnic: Link) -> Self { + self.bootstrap_vnic = Some(vnic); + self + } + + pub fn with_links(mut self, links: Vec) -> Self { + self.links = Some(links); + self + } + + pub fn with_limit_priv(mut self, limit_priv: Vec) -> Self { + self.limit_priv = Some(limit_priv); + self + } + + fn fake_install(self) -> Result { + let zone = self + .zone_type + .ok_or(InstallZoneError::IncompleteBuilder)? + .to_string(); + let control_vnic = self + .underlay_vnic_allocator + .ok_or(InstallZoneError::IncompleteBuilder)? 
+ .new_control(None) + .map_err(move |err| InstallZoneError::CreateVnic { zone, err })?; + let fake_cfg = self.fake_cfg.unwrap(); + let temp_dir = fake_cfg.temp_dir.path().to_path_buf(); + (|| { + let full_zone_name = InstalledZone::get_zone_name( + self.zone_type?, + self.unique_name, + ); + let zonepath = temp_dir + .join(self.zone_root_path?.strip_prefix("/").unwrap()) + .join(&full_zone_name); + let iz = InstalledZone { + log: self.log?, + zonepath, + name: full_zone_name, + control_vnic, + bootstrap_vnic: self.bootstrap_vnic, + opte_ports: self.opte_ports?, + links: self.links?, + }; + let xml_path = iz.site_profile_xml_path().parent()?.to_path_buf(); + std::fs::create_dir_all(&xml_path) + .unwrap_or_else(|_| panic!("ZoneBuilder::fake_install couldn't create site profile xml path {:?}", xml_path)); + Some(iz) + })() + .ok_or(InstallZoneError::IncompleteBuilder) + } + + pub async fn install(self) -> Result { + if self.fake_cfg.is_some() { + return self.fake_install(); + } + + let Self { + log: Some(log), + underlay_vnic_allocator: Some(underlay_vnic_allocator), + zone_root_path: Some(zone_root_path), + zone_image_paths: Some(zone_image_paths), + zone_type: Some(zone_type), + unique_name, + datasets: Some(datasets), + filesystems: Some(filesystems), + data_links: Some(data_links), + devices: Some(devices), + opte_ports: Some(opte_ports), + bootstrap_vnic, + links: Some(links), + limit_priv: Some(limit_priv), + .. + } = self + else { + return Err(InstallZoneError::IncompleteBuilder); + }; + let control_vnic = underlay_vnic_allocator.new_control(None).map_err(|err| { InstallZoneError::CreateVnic { @@ -1145,7 +1334,8 @@ impl InstalledZone { } })?; - let full_zone_name = Self::get_zone_name(zone_type, unique_name); + let full_zone_name = + InstalledZone::get_zone_name(zone_type, unique_name); // Looks for the image within `zone_image_path`, in order. let image = format!("{}.tar.gz", zone_type); @@ -1183,7 +1373,7 @@ impl InstalledZone { net_device_names.dedup(); Zones::install_omicron_zone( - log, + &log, &zone_root_path, &full_zone_name, &zone_image_path, @@ -1210,12 +1400,6 @@ impl InstalledZone { links, }) } - - pub fn site_profile_xml_path(&self) -> Utf8PathBuf { - let mut path: Utf8PathBuf = self.zonepath().into(); - path.push("root/var/svc/profile/site.xml"); - path - } } /// Return true if the service with the given FMRI appears to be an diff --git a/sled-agent/src/instance.rs b/sled-agent/src/instance.rs index a6f022f5f2..c37f0ffde6 100644 --- a/sled-agent/src/instance.rs +++ b/sled-agent/src/instance.rs @@ -26,7 +26,7 @@ use futures::lock::{Mutex, MutexGuard}; use illumos_utils::dladm::Etherstub; use illumos_utils::link::VnicAllocator; use illumos_utils::opte::{DhcpCfg, PortManager}; -use illumos_utils::running_zone::{InstalledZone, RunningZone}; +use illumos_utils::running_zone::{RunningZone, ZoneBuilderFactory}; use illumos_utils::svc::wait_for_service; use illumos_utils::zone::Zones; use illumos_utils::zone::PROPOLIS_ZONE_PREFIX; @@ -226,6 +226,9 @@ struct InstanceInner { // Storage resources storage: StorageHandle, + // Used to create propolis zones + zone_builder_factory: ZoneBuilderFactory, + // Object used to collect zone bundles from this instance when terminated. 
zone_bundler: ZoneBundler, @@ -611,6 +614,7 @@ impl Instance { port_manager, storage, zone_bundler, + zone_builder_factory, } = services; let mut dhcp_config = DhcpCfg { @@ -678,6 +682,7 @@ impl Instance { running_state: None, nexus_client, storage, + zone_builder_factory, zone_bundler, instance_ticket: ticket, }; @@ -904,31 +909,28 @@ impl Instance { .choose(&mut rng) .ok_or_else(|| Error::U2NotFound)? .clone(); - let installed_zone = InstalledZone::install( - &inner.log, - &inner.vnic_allocator, - &root, - &["/opt/oxide".into()], - "propolis-server", - Some(*inner.propolis_id()), - // dataset= - &[], - // filesystems= - &[], - // data_links= - &[], - &[ + let installed_zone = inner + .zone_builder_factory + .builder() + .with_log(inner.log.clone()) + .with_underlay_vnic_allocator(&inner.vnic_allocator) + .with_zone_root_path(&root) + .with_zone_image_paths(&["/opt/oxide".into()]) + .with_zone_type("propolis-server") + .with_unique_name(*inner.propolis_id()) + .with_datasets(&[]) + .with_filesystems(&[]) + .with_data_links(&[]) + .with_devices(&[ zone::Device { name: "/dev/vmm/*".to_string() }, zone::Device { name: "/dev/vmmctl".to_string() }, zone::Device { name: "/dev/viona".to_string() }, - ], - opte_ports, - // physical_nic= - None, - vec![], - vec![], - ) - .await?; + ]) + .with_opte_ports(opte_ports) + .with_links(vec![]) + .with_limit_priv(vec![]) + .install() + .await?; let gateway = inner.port_manager.underlay_ip(); diff --git a/sled-agent/src/instance_manager.rs b/sled-agent/src/instance_manager.rs index fa40a876f0..c1b7e402a4 100644 --- a/sled-agent/src/instance_manager.rs +++ b/sled-agent/src/instance_manager.rs @@ -17,6 +17,7 @@ use crate::zone_bundle::ZoneBundler; use illumos_utils::dladm::Etherstub; use illumos_utils::link::VnicAllocator; use illumos_utils::opte::PortManager; +use illumos_utils::running_zone::ZoneBuilderFactory; use illumos_utils::vmm_reservoir; use omicron_common::api::external::ByteCount; use omicron_common::api::internal::nexus::InstanceRuntimeState; @@ -76,6 +77,7 @@ struct InstanceManagerInternal { port_manager: PortManager, storage: StorageHandle, zone_bundler: ZoneBundler, + zone_builder_factory: ZoneBuilderFactory, } pub(crate) struct InstanceManagerServices { @@ -84,6 +86,7 @@ pub(crate) struct InstanceManagerServices { pub port_manager: PortManager, pub storage: StorageHandle, pub zone_bundler: ZoneBundler, + pub zone_builder_factory: ZoneBuilderFactory, } /// All instances currently running on the sled. 
@@ -100,6 +103,7 @@ impl InstanceManager { port_manager: PortManager, storage: StorageHandle, zone_bundler: ZoneBundler, + zone_builder_factory: ZoneBuilderFactory, ) -> Result { Ok(InstanceManager { inner: Arc::new(InstanceManagerInternal { @@ -113,6 +117,7 @@ impl InstanceManager { port_manager, storage, zone_bundler, + zone_builder_factory, }), }) } @@ -266,6 +271,10 @@ impl InstanceManager { port_manager: self.inner.port_manager.clone(), storage: self.inner.storage.clone(), zone_bundler: self.inner.zone_bundler.clone(), + zone_builder_factory: self + .inner + .zone_builder_factory + .clone(), }; let state = crate::instance::InstanceInitialState { diff --git a/sled-agent/src/services.rs b/sled-agent/src/services.rs index b87c91768b..2caa640e22 100644 --- a/sled-agent/src/services.rs +++ b/sled-agent/src/services.rs @@ -53,7 +53,7 @@ use illumos_utils::dladm::{ use illumos_utils::link::{Link, VnicAllocator}; use illumos_utils::opte::{DhcpCfg, Port, PortManager, PortTicket}; use illumos_utils::running_zone::{ - InstalledZone, RunCommandError, RunningZone, + InstalledZone, RunCommandError, RunningZone, ZoneBuilderFactory, }; use illumos_utils::zfs::ZONE_ZFS_RAMDISK_DATASET_MOUNTPOINT; use illumos_utils::zone::AddressRequest; @@ -1103,23 +1103,28 @@ impl ServiceManager { .push(boot_zpool.dataset_mountpoint(INSTALL_DATASET)); } - let installed_zone = InstalledZone::install( - &self.inner.log, - &self.inner.underlay_vnic_allocator, - &request.root, - zone_image_paths.as_slice(), - &request.zone.zone_type.to_string(), - unique_name, - datasets.as_slice(), - &filesystems, - &data_links, - &devices, - opte_ports, - bootstrap_vnic, - links, - limit_priv, - ) - .await?; + let mut zone_builder = ZoneBuilderFactory::default().builder(); + if let Some(uuid) = unique_name { + zone_builder = zone_builder.with_unique_name(uuid); + } + if let Some(vnic) = bootstrap_vnic { + zone_builder = zone_builder.with_bootstrap_vnic(vnic); + } + let installed_zone = zone_builder + .with_log(self.inner.log.clone()) + .with_underlay_vnic_allocator(&self.inner.underlay_vnic_allocator) + .with_zone_root_path(&request.root) + .with_zone_image_paths(zone_image_paths.as_slice()) + .with_zone_type(&request.zone.zone_type.to_string()) + .with_datasets(datasets.as_slice()) + .with_filesystems(&filesystems) + .with_data_links(&data_links) + .with_devices(&devices) + .with_opte_ports(opte_ports) + .with_links(links) + .with_limit_priv(limit_priv) + .install() + .await?; // TODO(https://github.com/oxidecomputer/omicron/issues/1898): // diff --git a/sled-agent/src/sled_agent.rs b/sled-agent/src/sled_agent.rs index cfa8c5d7ca..f5b71106cd 100644 --- a/sled-agent/src/sled_agent.rs +++ b/sled-agent/src/sled_agent.rs @@ -68,6 +68,7 @@ use std::sync::Arc; use tokio::sync::oneshot; use uuid::Uuid; +use illumos_utils::running_zone::ZoneBuilderFactory; #[cfg(not(test))] use illumos_utils::{dladm::Dladm, zone::Zones}; #[cfg(test)] @@ -382,6 +383,7 @@ impl SledAgent { port_manager.clone(), storage_manager.clone(), long_running_task_handles.zone_bundler.clone(), + ZoneBuilderFactory::default(), )?; // Configure the VMM reservoir as either a percentage of DRAM or as an From 91b0261ec2446ef74bb7934536784fe65a40ce2c Mon Sep 17 00:00:00 2001 From: "oxide-reflector-bot[bot]" <130185838+oxide-reflector-bot[bot]@users.noreply.github.com> Date: Tue, 28 Nov 2023 13:35:57 -0800 Subject: [PATCH 49/56] Update maghemite to 579592b (#4567) --- package-manifest.toml | 8 ++++---- tools/maghemite_ddm_openapi_version | 2 +- tools/maghemite_mg_openapi_version 
| 2 +- tools/maghemite_mgd_checksums | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/package-manifest.toml b/package-manifest.toml index ca96341f2a..26c45f0ff7 100644 --- a/package-manifest.toml +++ b/package-manifest.toml @@ -425,7 +425,7 @@ source.repo = "maghemite" # `tools/maghemite_openapi_version`. Failing to do so will cause a failure when # building `ddm-admin-client` (which will instruct you to update # `tools/maghemite_openapi_version`). -source.commit = "12b392be94ff93abc3017bf2610a3b18e2174a2d" +source.commit = "579592bf474ec4b86805ada60c1b920b3beef5a7" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/maghemite/image//maghemite.sha256.txt source.sha256 = "38851c79c85d53e997db748520fb27c82299ce7e58a550e35646a548498f1271" @@ -441,7 +441,7 @@ source.repo = "maghemite" # `tools/maghemite_openapi_version`. Failing to do so will cause a failure when # building `ddm-admin-client` (which will instruct you to update # `tools/maghemite_openapi_version`). -source.commit = "12b392be94ff93abc3017bf2610a3b18e2174a2d" +source.commit = "579592bf474ec4b86805ada60c1b920b3beef5a7" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/maghemite/image//mg-ddm.sha256.txt source.sha256 = "8cd94e9a6f6175081ce78f0281085a08a5306cde453d8e21deb28050945b1d88" @@ -456,10 +456,10 @@ source.repo = "maghemite" # `tools/maghemite_openapi_version`. Failing to do so will cause a failure when # building `ddm-admin-client` (which will instruct you to update # `tools/maghemite_openapi_version`). -source.commit = "12b392be94ff93abc3017bf2610a3b18e2174a2d" +source.commit = "579592bf474ec4b86805ada60c1b920b3beef5a7" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/maghemite/image//mg-ddm.sha256.txt -source.sha256 = "c4a7a626c84a28de3d2c6bfd85592bda2abad8cf5b41b2ce90b9c03904ccd3df" +source.sha256 = "82aa1ca1d7701b2221c442d58f912be59798258d574effcb866ffab22753cf38" output.type = "zone" output.intermediate_only = true diff --git a/tools/maghemite_ddm_openapi_version b/tools/maghemite_ddm_openapi_version index 76bdb9ca92..f60ea76380 100644 --- a/tools/maghemite_ddm_openapi_version +++ b/tools/maghemite_ddm_openapi_version @@ -1,2 +1,2 @@ -COMMIT="12b392be94ff93abc3017bf2610a3b18e2174a2d" +COMMIT="579592bf474ec4b86805ada60c1b920b3beef5a7" SHA2="9737906555a60911636532f00f1dc2866dc7cd6553beb106e9e57beabad41cdf" diff --git a/tools/maghemite_mg_openapi_version b/tools/maghemite_mg_openapi_version index d6d1788cbc..649db53f6e 100644 --- a/tools/maghemite_mg_openapi_version +++ b/tools/maghemite_mg_openapi_version @@ -1,2 +1,2 @@ -COMMIT="12b392be94ff93abc3017bf2610a3b18e2174a2d" +COMMIT="579592bf474ec4b86805ada60c1b920b3beef5a7" SHA2="6c1fab8d5028b52a161d8bf02aae47844699cdc5f7b28e1ac519fc4ec1ab3971" diff --git a/tools/maghemite_mgd_checksums b/tools/maghemite_mgd_checksums index 9657147159..08b04d6b67 100644 --- a/tools/maghemite_mgd_checksums +++ b/tools/maghemite_mgd_checksums @@ -1,2 +1,2 @@ -CIDL_SHA256="c4a7a626c84a28de3d2c6bfd85592bda2abad8cf5b41b2ce90b9c03904ccd3df" +CIDL_SHA256="82aa1ca1d7701b2221c442d58f912be59798258d574effcb866ffab22753cf38" MGD_LINUX_SHA256="81231b30872fa1c581aa22c101f32d11f33f335758ac1fd2653436fbc7aab93f" \ No newline at end of file From 0a6966cbfc0bafe5f93a26c480e6223390d4451d Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Wed, 
29 Nov 2023 05:21:06 +0000 Subject: [PATCH 50/56] Update taiki-e/install-action digest to f7c663c (#4574) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [taiki-e/install-action](https://togithub.com/taiki-e/install-action) | action | digest | [`c1dd9c9` -> `f7c663c`](https://togithub.com/taiki-e/install-action/compare/c1dd9c9...f7c663c) | --- ### Configuration 📅 **Schedule**: Branch creation - "after 8pm,before 6am" in timezone America/Los_Angeles, Automerge - "after 8pm,before 6am" in timezone America/Los_Angeles. 🚦 **Automerge**: Enabled. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [Renovate Bot](https://togithub.com/renovatebot/renovate). Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- .github/workflows/hakari.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/hakari.yml b/.github/workflows/hakari.yml index c006a41f35..1805da8ad8 100644 --- a/.github/workflows/hakari.yml +++ b/.github/workflows/hakari.yml @@ -24,7 +24,7 @@ jobs: with: toolchain: stable - name: Install cargo-hakari - uses: taiki-e/install-action@c1dd9c9e59427252db32b9ece987f4eebc3a021a # v2 + uses: taiki-e/install-action@f7c663c03b51ed0d93e9cec22a575d3f02175989 # v2 with: tool: cargo-hakari - name: Check workspace-hack Cargo.toml is up-to-date From 67cd482cd4f6f15ed3a9b42ba7eed10c57199b84 Mon Sep 17 00:00:00 2001 From: Rain Date: Tue, 28 Nov 2023 23:57:21 -0800 Subject: [PATCH 51/56] [nexus] add sled provision state (#4520) Add the notion of a sled provision state to Nexus. Currently, we will only use this to prevent new resources and regions from being provisioned to sleds. This PR includes: 1. Database updates and schema migrations. 2. Database APIs in `nexus-db-queries`. 3. An HTTP API. 4. Tests for resource and region allocation. 
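
As a rough sketch of the new datastore API (item 2 above), not a verbatim excerpt, marking a sled non-provisionable looks roughly like this; `opctx`, `datastore`, and `sled_id` are stand-ins for values the caller already has:

    use nexus_db_queries::authz;
    use nexus_db_queries::db::lookup::LookupPath;
    use nexus_db_queries::db::model::SledProvisionState;

    // Look up the sled and check that the caller may modify it, then flip its
    // provision state; the previous state is returned so callers can report it.
    let (authz_sled, _) = LookupPath::new(&opctx, &datastore)
        .sled_id(sled_id)
        .fetch_for(authz::Action::Modify)
        .await?;
    let old_state = datastore
        .sled_set_provision_state(
            &opctx,
            &authz_sled,
            SledProvisionState::NonProvisionable,
        )
        .await?;

The corresponding external route is `PUT /v1/system/hardware/sleds/{sled_id}/provision-state`, which takes a `SledProvisionStateParams` body and returns both the old and new states.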
--- Cargo.lock | 6 +- nexus/db-model/Cargo.toml | 1 + nexus/db-model/src/lib.rs | 9 +- .../db-model/src/queries/region_allocation.rs | 2 + nexus/db-model/src/schema.rs | 3 +- nexus/db-model/src/sled.rs | 11 +- nexus/db-model/src/sled_provision_state.rs | 58 ++++++ nexus/db-queries/src/db/datastore/mod.rs | 86 ++++++++- nexus/db-queries/src/db/datastore/sled.rs | 171 ++++++++++++++++-- .../src/db/queries/region_allocation.rs | 10 +- nexus/src/app/sled.rs | 15 ++ nexus/src/external_api/http_entrypoints.rs | 42 +++++ nexus/tests/integration_tests/endpoints.rs | 15 ++ nexus/tests/integration_tests/schema.rs | 12 +- nexus/tests/output/nexus_tags.txt | 1 + nexus/types/Cargo.toml | 1 + nexus/types/src/external_api/params.rs | 17 ++ nexus/types/src/external_api/views.rs | 27 +++ openapi/nexus.json | 127 +++++++++++++ schema/crdb/15.0.0/up1.sql | 6 + schema/crdb/15.0.0/up2.sql | 3 + schema/crdb/15.0.0/up3.sql | 5 + schema/crdb/dbinit.sql | 12 +- 23 files changed, 607 insertions(+), 33 deletions(-) create mode 100644 nexus/db-model/src/sled_provision_state.rs create mode 100644 schema/crdb/15.0.0/up1.sql create mode 100644 schema/crdb/15.0.0/up2.sql create mode 100644 schema/crdb/15.0.0/up3.sql diff --git a/Cargo.lock b/Cargo.lock index 108c8b182d..532fcde59f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1671,9 +1671,9 @@ dependencies = [ [[package]] name = "diesel_derives" -version = "2.1.1" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e054665eaf6d97d1e7125512bb2d35d07c73ac86cc6920174cb42d1ab697a554" +checksum = "ef8337737574f55a468005a83499da720f20c65586241ffea339db9ecdfd2b44" dependencies = [ "diesel_table_macro_syntax", "proc-macro2", @@ -3993,6 +3993,7 @@ dependencies = [ "sled-agent-client", "steno", "strum", + "thiserror", "uuid", ] @@ -4178,6 +4179,7 @@ dependencies = [ "schemars", "serde", "serde_json", + "serde_with", "steno", "strum", "uuid", diff --git a/nexus/db-model/Cargo.toml b/nexus/db-model/Cargo.toml index b7514c4806..477ce7d11f 100644 --- a/nexus/db-model/Cargo.toml +++ b/nexus/db-model/Cargo.toml @@ -26,6 +26,7 @@ serde.workspace = true serde_json.workspace = true steno.workspace = true strum.workspace = true +thiserror.workspace = true uuid.workspace = true db-macros.workspace = true diff --git a/nexus/db-model/src/lib.rs b/nexus/db-model/src/lib.rs index ac5bad26f8..43bf83fd34 100644 --- a/nexus/db-model/src/lib.rs +++ b/nexus/db-model/src/lib.rs @@ -70,6 +70,7 @@ mod silo_user; mod silo_user_password_hash; mod sled; mod sled_instance; +mod sled_provision_state; mod sled_resource; mod sled_resource_kind; mod sled_underlay_subnet_allocation; @@ -152,6 +153,7 @@ pub use silo_user::*; pub use silo_user_password_hash::*; pub use sled::*; pub use sled_instance::*; +pub use sled_provision_state::*; pub use sled_resource::*; pub use sled_resource_kind::*; pub use sled_underlay_subnet_allocation::*; @@ -287,10 +289,9 @@ macro_rules! 
impl_enum_type { Ok($model_type::$enum_item) } )* - _ => { - Err(concat!("Unrecognized enum variant for ", - stringify!{$model_type}) - .into()) + other => { + let s = concat!("Unrecognized enum variant for ", stringify!{$model_type}); + Err(format!("{}: (raw bytes: {:?})", s, other).into()) } } } diff --git a/nexus/db-model/src/queries/region_allocation.rs b/nexus/db-model/src/queries/region_allocation.rs index 2025e79fb8..a1b9e0373a 100644 --- a/nexus/db-model/src/queries/region_allocation.rs +++ b/nexus/db-model/src/queries/region_allocation.rs @@ -23,6 +23,7 @@ // a CTE (where we want the alias name to come first). use crate::schema::dataset; +use crate::schema::sled; use crate::schema::zpool; table! { @@ -157,6 +158,7 @@ diesel::allow_tables_to_appear_in_same_query!( diesel::allow_tables_to_appear_in_same_query!( old_zpool_usage, zpool, + sled, proposed_dataset_changes, ); diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index afeac5e6cd..6527da3637 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -741,6 +741,7 @@ table! { ip -> Inet, port -> Int4, last_used_address -> Inet, + provision_state -> crate::SledProvisionStateEnum, } } @@ -1299,7 +1300,7 @@ table! { /// /// This should be updated whenever the schema is changed. For more details, /// refer to: schema/crdb/README.adoc -pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(14, 0, 0); +pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(15, 0, 0); allow_tables_to_appear_in_same_query!( system_update, diff --git a/nexus/db-model/src/sled.rs b/nexus/db-model/src/sled.rs index 4c82aa5d23..0f6d1b911e 100644 --- a/nexus/db-model/src/sled.rs +++ b/nexus/db-model/src/sled.rs @@ -4,8 +4,8 @@ use super::{ByteCount, Generation, SqlU16, SqlU32}; use crate::collection::DatastoreCollectionConfig; -use crate::ipv6; use crate::schema::{physical_disk, service, sled, zpool}; +use crate::{ipv6, SledProvisionState}; use chrono::{DateTime, Utc}; use db_macros::Asset; use nexus_types::{external_api::shared, external_api::views, identity::Asset}; @@ -59,6 +59,8 @@ pub struct Sled { /// The last IP address provided to an Oxide service on this sled pub last_used_address: ipv6::Ipv6Addr, + + provision_state: SledProvisionState, } impl Sled { @@ -81,6 +83,10 @@ impl Sled { pub fn serial_number(&self) -> &str { &self.serial_number } + + pub fn provision_state(&self) -> SledProvisionState { + self.provision_state + } } impl From for views::Sled { @@ -93,6 +99,7 @@ impl From for views::Sled { part: sled.part_number, revision: sled.revision, }, + provision_state: sled.provision_state.into(), usable_hardware_threads: sled.usable_hardware_threads.0, usable_physical_ram: *sled.usable_physical_ram, } @@ -188,6 +195,8 @@ impl SledUpdate { serial_number: self.serial_number, part_number: self.part_number, revision: self.revision, + // By default, sleds start as provisionable. + provision_state: SledProvisionState::Provisionable, usable_hardware_threads: self.usable_hardware_threads, usable_physical_ram: self.usable_physical_ram, reservoir_size: self.reservoir_size, diff --git a/nexus/db-model/src/sled_provision_state.rs b/nexus/db-model/src/sled_provision_state.rs new file mode 100644 index 0000000000..6cf81b9c70 --- /dev/null +++ b/nexus/db-model/src/sled_provision_state.rs @@ -0,0 +1,58 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+ +use super::impl_enum_type; +use nexus_types::external_api::views; +use serde::{Deserialize, Serialize}; +use thiserror::Error; + +impl_enum_type!( + #[derive(Clone, SqlType, Debug, QueryId)] + #[diesel(postgres_type(name = "sled_provision_state"))] + pub struct SledProvisionStateEnum; + + #[derive(Clone, Copy, Debug, AsExpression, FromSqlRow, Serialize, Deserialize, PartialEq)] + #[diesel(sql_type = SledProvisionStateEnum)] + pub enum SledProvisionState; + + // Enum values + Provisionable => b"provisionable" + NonProvisionable => b"non_provisionable" +); + +impl From for views::SledProvisionState { + fn from(state: SledProvisionState) -> Self { + match state { + SledProvisionState::Provisionable => { + views::SledProvisionState::Provisionable + } + SledProvisionState::NonProvisionable => { + views::SledProvisionState::NonProvisionable + } + } + } +} + +impl TryFrom for SledProvisionState { + type Error = UnknownSledProvisionState; + + fn try_from(state: views::SledProvisionState) -> Result { + match state { + views::SledProvisionState::Provisionable => { + Ok(SledProvisionState::Provisionable) + } + views::SledProvisionState::NonProvisionable => { + Ok(SledProvisionState::NonProvisionable) + } + views::SledProvisionState::Unknown => { + Err(UnknownSledProvisionState) + } + } + } +} + +/// An unknown [`views::SledProvisionState`] was encountered. +#[derive(Clone, Debug, Error)] +#[error("Unknown SledProvisionState")] +pub struct UnknownSledProvisionState; diff --git a/nexus/db-queries/src/db/datastore/mod.rs b/nexus/db-queries/src/db/datastore/mod.rs index 0612b960c9..44cd7a95b7 100644 --- a/nexus/db-queries/src/db/datastore/mod.rs +++ b/nexus/db-queries/src/db/datastore/mod.rs @@ -372,8 +372,8 @@ mod test { BlockSize, ComponentUpdate, ComponentUpdateIdentity, ConsoleSession, Dataset, DatasetKind, ExternalIp, PhysicalDisk, PhysicalDiskKind, Project, Rack, Region, Service, ServiceKind, SiloUser, SledBaseboard, - SledSystemHardware, SledUpdate, SshKey, SystemUpdate, - UpdateableComponentType, VpcSubnet, Zpool, + SledProvisionState, SledSystemHardware, SledUpdate, SshKey, + SystemUpdate, UpdateableComponentType, VpcSubnet, Zpool, }; use crate::db::queries::vpc_subnet::FilterConflictingVpcSubnetRangesQuery; use assert_matches::assert_matches; @@ -610,6 +610,35 @@ mod test { sled_id } + // Marks a sled as non-provisionable. + async fn mark_sled_non_provisionable( + datastore: &DataStore, + opctx: &OpContext, + sled_id: Uuid, + ) { + let (authz_sled, sled) = LookupPath::new(opctx, datastore) + .sled_id(sled_id) + .fetch_for(authz::Action::Modify) + .await + .unwrap(); + println!("sled: {:?}", sled); + let old_state = datastore + .sled_set_provision_state( + &opctx, + &authz_sled, + SledProvisionState::NonProvisionable, + ) + .await + .unwrap_or_else(|error| { + panic!( + "error marking sled {sled_id} as non-provisionable: {error}" + ) + }); + // The old state should always be provisionable since that's where we + // start. 
+ assert_eq!(old_state, SledProvisionState::Provisionable); + } + fn test_zpool_size() -> ByteCount { ByteCount::from_gibibytes_u32(100) } @@ -770,13 +799,24 @@ mod test { let logctx = dev::test_setup_log("test_region_allocation_strat_random"); let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; - create_test_datasets_for_region_allocation( + let test_datasets = create_test_datasets_for_region_allocation( &opctx, datastore.clone(), + // Even though we're going to mark one sled as non-provisionable to + // test that logic, we aren't forcing the datasets to be on + // distinct sleds, so REGION_REDUNDANCY_THRESHOLD is enough. REGION_REDUNDANCY_THRESHOLD, ) .await; + let non_provisionable_dataset_id = test_datasets[0].dataset_id; + mark_sled_non_provisionable( + &datastore, + &opctx, + test_datasets[0].sled_id, + ) + .await; + // Allocate regions from the datasets for this disk. Do it a few times // for good measure. for alloc_seed in 0..10 { @@ -809,6 +849,9 @@ mod test { // Must be 3 unique datasets assert!(disk_datasets.insert(dataset.id())); + // Dataset must not be non-provisionable. + assert_ne!(dataset.id(), non_provisionable_dataset_id); + // Must be 3 unique zpools assert!(disk_zpools.insert(dataset.pool_id)); @@ -837,12 +880,23 @@ mod test { let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; - // Create a rack without enough sleds for a successful allocation when - // we require 3 distinct sleds. + // Create a rack with enough sleds for a successful allocation when we + // require 3 distinct provisionable sleds. let test_datasets = create_test_datasets_for_region_allocation( &opctx, datastore.clone(), - REGION_REDUNDANCY_THRESHOLD, + // We're going to mark one sled as non-provisionable to test that + // logic, and we *are* forcing the datasets to be on distinct + // sleds: hence threshold + 1. + REGION_REDUNDANCY_THRESHOLD + 1, + ) + .await; + + let non_provisionable_dataset_id = test_datasets[0].dataset_id; + mark_sled_non_provisionable( + &datastore, + &opctx, + test_datasets[0].sled_id, ) .await; @@ -884,6 +938,9 @@ mod test { // Must be 3 unique datasets assert!(disk_datasets.insert(dataset.id())); + // Dataset must not be non-provisionable. + assert_ne!(dataset.id(), non_provisionable_dataset_id); + // Must be 3 unique zpools assert!(disk_zpools.insert(dataset.pool_id)); @@ -916,11 +973,22 @@ mod test { let (opctx, datastore) = datastore_test(&logctx, &db).await; // Create a rack without enough sleds for a successful allocation when - // we require 3 distinct sleds. - create_test_datasets_for_region_allocation( + // we require 3 distinct provisionable sleds. + let test_datasets = create_test_datasets_for_region_allocation( &opctx, datastore.clone(), - REGION_REDUNDANCY_THRESHOLD - 1, + // Here, we need to have REGION_REDUNDANCY_THRESHOLD - 1 + // provisionable sleds to test this failure condition. We're going + // to mark one sled as non-provisionable to test that logic, so we + // need to add 1 to that number. 
+ REGION_REDUNDANCY_THRESHOLD, + ) + .await; + + mark_sled_non_provisionable( + &datastore, + &opctx, + test_datasets[0].sled_id, ) .await; diff --git a/nexus/db-queries/src/db/datastore/sled.rs b/nexus/db-queries/src/db/datastore/sled.rs index 130c36b496..406119a636 100644 --- a/nexus/db-queries/src/db/datastore/sled.rs +++ b/nexus/db-queries/src/db/datastore/sled.rs @@ -15,6 +15,7 @@ use crate::db::model::Sled; use crate::db::model::SledResource; use crate::db::model::SledUpdate; use crate::db::pagination::paginated; +use crate::db::update_and_check::UpdateAndCheck; use async_bb8_diesel::AsyncConnection; use async_bb8_diesel::AsyncRunQueryDsl; use chrono::Utc; @@ -153,6 +154,11 @@ impl DataStore { .and(sled_has_space_in_reservoir), ) .filter(sled_dsl::time_deleted.is_null()) + // Filter out sleds that are not provisionable. + .filter( + sled_dsl::provision_state + .eq(db::model::SledProvisionState::Provisionable), + ) .select(sled_dsl::id) .into_boxed(); @@ -217,6 +223,37 @@ impl DataStore { .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; Ok(()) } + + /// Sets the provision state for this sled. + /// + /// Returns the previous state. + pub async fn sled_set_provision_state( + &self, + opctx: &OpContext, + authz_sled: &authz::Sled, + state: db::model::SledProvisionState, + ) -> Result { + use db::schema::sled::dsl; + + opctx.authorize(authz::Action::Modify, authz_sled).await?; + + let sled_id = authz_sled.id(); + let query = diesel::update(dsl::sled) + .filter(dsl::time_deleted.is_null()) + .filter(dsl::id.eq(sled_id)) + .filter(dsl::provision_state.ne(state)) + .set(( + dsl::provision_state.eq(state), + dsl::time_modified.eq(Utc::now()), + )) + .check_if_exists::(sled_id); + let result = query + .execute_and_check(&*self.pool_connection_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + Ok(result.found.provision_state()) + } } #[cfg(test)] @@ -226,12 +263,15 @@ mod test { use crate::db::datastore::test::{ sled_baseboard_for_test, sled_system_hardware_for_test, }; + use crate::db::lookup::LookupPath; use crate::db::model::ByteCount; use crate::db::model::SqlU32; use nexus_test_utils::db::test_setup_database; + use nexus_types::identity::Asset; use omicron_common::api::external; use omicron_test_utils::dev; use std::net::{Ipv6Addr, SocketAddrV6}; + use std::num::NonZeroU32; fn rack_id() -> Uuid { Uuid::parse_str(nexus_test_utils::RACK_UUID).unwrap() @@ -243,19 +283,9 @@ mod test { let mut db = test_setup_database(&logctx.log).await; let (_opctx, datastore) = datastore_test(&logctx, &db).await; - let sled_id = Uuid::new_v4(); - let addr = SocketAddrV6::new(Ipv6Addr::LOCALHOST, 0, 0, 0); - let mut sled_update = SledUpdate::new( - sled_id, - addr, - sled_baseboard_for_test(), - sled_system_hardware_for_test(), - rack_id(), - ); - let observed_sled = datastore - .sled_upsert(sled_update.clone()) - .await - .expect("Could not upsert sled during test prep"); + let mut sled_update = test_new_sled_update(); + let observed_sled = + datastore.sled_upsert(sled_update.clone()).await.unwrap(); assert_eq!( observed_sled.usable_hardware_threads, sled_update.usable_hardware_threads @@ -301,4 +331,119 @@ mod test { db.cleanup().await.unwrap(); logctx.cleanup_successful(); } + + /// Test that new reservations aren't created on non-provisionable sleds. 
+ #[tokio::test] + async fn sled_reservation_create_non_provisionable() { + let logctx = + dev::test_setup_log("sled_reservation_create_non_provisionable"); + let mut db = test_setup_database(&logctx.log).await; + let (opctx, datastore) = datastore_test(&logctx, &db).await; + + let sled_update = test_new_sled_update(); + let non_provisionable_sled = + datastore.sled_upsert(sled_update.clone()).await.unwrap(); + + let (authz_sled, _) = LookupPath::new(&opctx, &datastore) + .sled_id(non_provisionable_sled.id()) + .fetch_for(authz::Action::Modify) + .await + .unwrap(); + + let old_state = datastore + .sled_set_provision_state( + &opctx, + &authz_sled, + db::model::SledProvisionState::NonProvisionable, + ) + .await + .unwrap(); + assert_eq!( + old_state, + db::model::SledProvisionState::Provisionable, + "a newly created sled starts as provisionable" + ); + + // This should be an error since there are no provisionable sleds. + let resources = db::model::Resources::new( + 1, + // Just require the bare non-zero amount of RAM. + ByteCount::try_from(1024).unwrap(), + ByteCount::try_from(1024).unwrap(), + ); + let constraints = db::model::SledReservationConstraints::none(); + let error = datastore + .sled_reservation_create( + &opctx, + Uuid::new_v4(), + db::model::SledResourceKind::Instance, + resources.clone(), + constraints, + ) + .await + .unwrap_err(); + assert!(matches!(error, external::Error::ServiceUnavailable { .. })); + + // Now add a provisionable sled and try again. + let sled_update = test_new_sled_update(); + let provisionable_sled = + datastore.sled_upsert(sled_update.clone()).await.unwrap(); + + let sleds = datastore + .sled_list(&opctx, &first_page(NonZeroU32::new(10).unwrap())) + .await + .unwrap(); + println!("sleds: {:?}", sleds); + + // Try a few times to ensure that resources never get allocated to the + // non-provisionable sled. + for _ in 0..10 { + let constraints = db::model::SledReservationConstraints::none(); + let resource = datastore + .sled_reservation_create( + &opctx, + Uuid::new_v4(), + db::model::SledResourceKind::Instance, + resources.clone(), + constraints, + ) + .await + .unwrap(); + assert_eq!( + resource.sled_id, + provisionable_sled.id(), + "resource is always allocated to the provisionable sled" + ); + + datastore + .sled_reservation_delete(&opctx, resource.id) + .await + .unwrap(); + } + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } + + fn test_new_sled_update() -> SledUpdate { + let sled_id = Uuid::new_v4(); + let addr = SocketAddrV6::new(Ipv6Addr::LOCALHOST, 0, 0, 0); + SledUpdate::new( + sled_id, + addr, + sled_baseboard_for_test(), + sled_system_hardware_for_test(), + rack_id(), + ) + } + + /// Returns pagination parameters to fetch the first page of results for a + /// paginated endpoint + fn first_page<'a, T>(limit: NonZeroU32) -> DataPageParams<'a, T> { + DataPageParams { + marker: None, + direction: dropshot::PaginationOrder::Ascending, + limit, + } + } } diff --git a/nexus/db-queries/src/db/queries/region_allocation.rs b/nexus/db-queries/src/db/queries/region_allocation.rs index a080af4c37..031be92c08 100644 --- a/nexus/db-queries/src/db/queries/region_allocation.rs +++ b/nexus/db-queries/src/db/queries/region_allocation.rs @@ -290,6 +290,7 @@ impl CandidateZpools { seed: u128, distinct_sleds: bool, ) -> Self { + use schema::sled::dsl as sled_dsl; use schema::zpool::dsl as zpool_dsl; // Why are we using raw `diesel::dsl::sql` here? 
@@ -310,13 +311,20 @@ impl CandidateZpools { + diesel::dsl::sql(&zpool_size_delta.to_string())) .le(diesel::dsl::sql(zpool_dsl::total_size::NAME)); + // We need to join on the sled table to access provision_state. + let with_sled = sled_dsl::sled.on(zpool_dsl::sled_id.eq(sled_dsl::id)); let with_zpool = zpool_dsl::zpool - .on(zpool_dsl::id.eq(old_zpool_usage::dsl::pool_id)); + .on(zpool_dsl::id.eq(old_zpool_usage::dsl::pool_id)) + .inner_join(with_sled); + + let sled_is_provisionable = sled_dsl::provision_state + .eq(crate::db::model::SledProvisionState::Provisionable); let base_query = old_zpool_usage .query_source() .inner_join(with_zpool) .filter(it_will_fit) + .filter(sled_is_provisionable) .select((old_zpool_usage::dsl::pool_id,)); let query = if distinct_sleds { diff --git a/nexus/src/app/sled.rs b/nexus/src/app/sled.rs index c2931f1441..44efc2934e 100644 --- a/nexus/src/app/sled.rs +++ b/nexus/src/app/sled.rs @@ -8,6 +8,7 @@ use crate::internal_api::params::{ PhysicalDiskDeleteRequest, PhysicalDiskPutRequest, SledAgentStartupInfo, SledRole, ZpoolPutRequest, }; +use nexus_db_queries::authz; use nexus_db_queries::context::OpContext; use nexus_db_queries::db; use nexus_db_queries::db::lookup; @@ -142,6 +143,20 @@ impl super::Nexus { .await } + /// Returns the old state. + pub(crate) async fn sled_set_provision_state( + &self, + opctx: &OpContext, + sled_lookup: &lookup::Sled<'_>, + state: db::model::SledProvisionState, + ) -> Result { + let (authz_sled,) = + sled_lookup.lookup_for(authz::Action::Modify).await?; + self.db_datastore + .sled_set_provision_state(opctx, &authz_sled, state) + .await + } + // Physical disks pub(crate) async fn sled_list_physical_disks( diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index 78f675c28a..f1302f4a73 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -218,6 +218,7 @@ pub(crate) fn external_api() -> NexusApiDescription { api.register(rack_view)?; api.register(sled_list)?; api.register(sled_view)?; + api.register(sled_set_provision_state)?; api.register(sled_instance_list)?; api.register(sled_physical_disk_list)?; api.register(physical_disk_list)?; @@ -4483,6 +4484,47 @@ async fn sled_view( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } +/// Set the sled's provision state. +#[endpoint { + method = PUT, + path = "/v1/system/hardware/sleds/{sled_id}/provision-state", + tags = ["system/hardware"], +}] +async fn sled_set_provision_state( + rqctx: RequestContext>, + path_params: Path, + new_provision_state: TypedBody, +) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.nexus; + + let path = path_params.into_inner(); + let provision_state = new_provision_state.into_inner().state; + + let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + // Convert the external `SledProvisionState` into our internal data model. 
+ let new_state = + db::model::SledProvisionState::try_from(provision_state).map_err( + |error| HttpError::for_bad_request(None, format!("{error}")), + )?; + + let sled_lookup = nexus.sled_lookup(&opctx, &path.sled_id)?; + + let old_state = nexus + .sled_set_provision_state(&opctx, &sled_lookup, new_state) + .await?; + + let response = params::SledProvisionStateResponse { + old_state: old_state.into(), + new_state: new_state.into(), + }; + + Ok(HttpResponseOk(response)) + }; + apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await +} + /// List instances running on a given sled #[endpoint { method = GET, diff --git a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs index 5dfdcc151d..536b96f7ae 100644 --- a/nexus/tests/integration_tests/endpoints.rs +++ b/nexus/tests/integration_tests/endpoints.rs @@ -50,6 +50,12 @@ lazy_static! { format!("/v1/system/hardware/uninitialized-sleds"); pub static ref HARDWARE_SLED_URL: String = format!("/v1/system/hardware/sleds/{}", SLED_AGENT_UUID); + pub static ref HARDWARE_SLED_PROVISION_STATE_URL: String = + format!("/v1/system/hardware/sleds/{}/provision-state", SLED_AGENT_UUID); + pub static ref DEMO_SLED_PROVISION_STATE: params::SledProvisionStateParams = + params::SledProvisionStateParams { + state: nexus_types::external_api::views::SledProvisionState::NonProvisionable, + }; pub static ref HARDWARE_SWITCH_URL: String = format!("/v1/system/hardware/switches/{}", SWITCH_UUID); pub static ref HARDWARE_DISK_URL: String = @@ -1609,6 +1615,15 @@ lazy_static! { allowed_methods: vec![AllowedMethod::Get], }, + VerifyEndpoint { + url: &HARDWARE_SLED_PROVISION_STATE_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Put( + serde_json::to_value(&*DEMO_SLED_PROVISION_STATE).unwrap() + )], + }, + VerifyEndpoint { url: "/v1/system/hardware/switches", visibility: Visibility::Public, diff --git a/nexus/tests/integration_tests/schema.rs b/nexus/tests/integration_tests/schema.rs index 213e7f9e4f..6feafe415d 100644 --- a/nexus/tests/integration_tests/schema.rs +++ b/nexus/tests/integration_tests/schema.rs @@ -629,7 +629,17 @@ impl InformationSchema { self.referential_constraints, other.referential_constraints ); - similar_asserts::assert_eq!(self.statistics, other.statistics); + similar_asserts::assert_eq!( + self.statistics, + other.statistics, + "Statistics did not match. This often means that in dbinit.sql, a new \ + column was added into the middle of a table rather than to the end. \ + If that is the case:\n\n \ + \ + * Change dbinit.sql to add the column to the end of the table.\n\ + * Update nexus/db-model/src/schema.rs and the corresponding \ + Queryable/Insertable struct with the new column ordering." 
+ ); similar_asserts::assert_eq!(self.sequences, other.sequences); similar_asserts::assert_eq!(self.pg_indexes, other.pg_indexes); } diff --git a/nexus/tests/output/nexus_tags.txt b/nexus/tests/output/nexus_tags.txt index dd387ab979..7e57d00df2 100644 --- a/nexus/tests/output/nexus_tags.txt +++ b/nexus/tests/output/nexus_tags.txt @@ -120,6 +120,7 @@ rack_view GET /v1/system/hardware/racks/{rac sled_instance_list GET /v1/system/hardware/sleds/{sled_id}/instances sled_list GET /v1/system/hardware/sleds sled_physical_disk_list GET /v1/system/hardware/sleds/{sled_id}/disks +sled_set_provision_state PUT /v1/system/hardware/sleds/{sled_id}/provision-state sled_view GET /v1/system/hardware/sleds/{sled_id} switch_list GET /v1/system/hardware/switches switch_view GET /v1/system/hardware/switches/{switch_id} diff --git a/nexus/types/Cargo.toml b/nexus/types/Cargo.toml index 9cb94a8484..8cbbd8626c 100644 --- a/nexus/types/Cargo.toml +++ b/nexus/types/Cargo.toml @@ -14,6 +14,7 @@ parse-display.workspace = true schemars = { workspace = true, features = ["chrono", "uuid1"] } serde.workspace = true serde_json.workspace = true +serde_with.workspace = true steno.workspace = true strum.workspace = true uuid.workspace = true diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index a0169ae777..a5f1f3f874 100644 --- a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -75,6 +75,23 @@ pub struct SledSelector { pub sled: Uuid, } +/// Parameters for `sled_set_provision_state`. +#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq)] +pub struct SledProvisionStateParams { + /// The provision state. + pub state: super::views::SledProvisionState, +} + +/// Response to `sled_set_provision_state`. +#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq)] +pub struct SledProvisionStateResponse { + /// The old provision state. + pub old_state: super::views::SledProvisionState, + + /// The new provision state. + pub new_state: super::views::SledProvisionState, +} + pub struct SwitchSelector { /// ID of the switch pub switch: Uuid, diff --git a/nexus/types/src/external_api/views.rs b/nexus/types/src/external_api/views.rs index 9dfe36d63b..6d02623f34 100644 --- a/nexus/types/src/external_api/views.rs +++ b/nexus/types/src/external_api/views.rs @@ -17,6 +17,7 @@ use omicron_common::api::external::{ }; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; +use serde_with::rust::deserialize_ignore_any; use std::collections::BTreeMap; use std::collections::BTreeSet; use std::net::IpAddr; @@ -286,12 +287,38 @@ pub struct Sled { pub baseboard: Baseboard, /// The rack to which this Sled is currently attached pub rack_id: Uuid, + /// The provision state of the sled. + pub provision_state: SledProvisionState, /// The number of hardware threads which can execute on this sled pub usable_hardware_threads: u32, /// Amount of RAM which may be used by the Sled's OS pub usable_physical_ram: ByteCount, } +/// The provision state of a sled. +/// +/// This controls whether new resources are going to be provisioned on this +/// sled. +#[derive( + Copy, Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq, +)] +#[serde(rename_all = "snake_case")] +pub enum SledProvisionState { + /// New resources will be provisioned on this sled. + Provisionable, + + /// New resources will not be provisioned on this sled. However, existing + /// resources will continue to be on this sled unless manually migrated + /// off. 
+ NonProvisionable, + + /// This is a state that isn't known yet. + /// + /// This is defined to avoid API breakage. + #[serde(other, deserialize_with = "deserialize_ignore_any")] + Unknown, +} + /// An operator's view of an instance running on a given sled #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct SledInstance { diff --git a/openapi/nexus.json b/openapi/nexus.json index 704aa393db..08e6cd7149 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -3817,6 +3817,55 @@ } } }, + "/v1/system/hardware/sleds/{sled_id}/provision-state": { + "put": { + "tags": [ + "system/hardware" + ], + "summary": "Set the sled's provision state.", + "operationId": "sled_set_provision_state", + "parameters": [ + { + "in": "path", + "name": "sled_id", + "description": "ID of the sled", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SledProvisionStateParams" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SledProvisionStateResponse" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, "/v1/system/hardware/switch-port": { "get": { "tags": [ @@ -12976,6 +13025,14 @@ "type": "string", "format": "uuid" }, + "provision_state": { + "description": "The provision state of the sled.", + "allOf": [ + { + "$ref": "#/components/schemas/SledProvisionState" + } + ] + }, "rack_id": { "description": "The rack to which this Sled is currently attached", "type": "string", @@ -13009,6 +13066,7 @@ "required": [ "baseboard", "id", + "provision_state", "rack_id", "time_created", "time_modified", @@ -13099,6 +13157,75 @@ "items" ] }, + "SledProvisionState": { + "description": "The provision state of a sled.\n\nThis controls whether new resources are going to be provisioned on this sled.", + "oneOf": [ + { + "description": "New resources will be provisioned on this sled.", + "type": "string", + "enum": [ + "provisionable" + ] + }, + { + "description": "New resources will not be provisioned on this sled. 
However, existing resources will continue to be on this sled unless manually migrated off.", + "type": "string", + "enum": [ + "non_provisionable" + ] + }, + { + "description": "This is a state that isn't known yet.\n\nThis is defined to avoid API breakage.", + "type": "string", + "enum": [ + "unknown" + ] + } + ] + }, + "SledProvisionStateParams": { + "description": "Parameters for `sled_set_provision_state`.", + "type": "object", + "properties": { + "state": { + "description": "The provision state.", + "allOf": [ + { + "$ref": "#/components/schemas/SledProvisionState" + } + ] + } + }, + "required": [ + "state" + ] + }, + "SledProvisionStateResponse": { + "description": "Response to `sled_set_provision_state`.", + "type": "object", + "properties": { + "new_state": { + "description": "The new provision state.", + "allOf": [ + { + "$ref": "#/components/schemas/SledProvisionState" + } + ] + }, + "old_state": { + "description": "The old provision state.", + "allOf": [ + { + "$ref": "#/components/schemas/SledProvisionState" + } + ] + } + }, + "required": [ + "new_state", + "old_state" + ] + }, "SledResultsPage": { "description": "A single page of results", "type": "object", diff --git a/schema/crdb/15.0.0/up1.sql b/schema/crdb/15.0.0/up1.sql new file mode 100644 index 0000000000..04baa76370 --- /dev/null +++ b/schema/crdb/15.0.0/up1.sql @@ -0,0 +1,6 @@ +CREATE TYPE IF NOT EXISTS omicron.public.sled_provision_state AS ENUM ( + -- New resources can be provisioned onto the sled + 'provisionable', + -- New resources must not be provisioned onto the sled + 'non_provisionable' +); diff --git a/schema/crdb/15.0.0/up2.sql b/schema/crdb/15.0.0/up2.sql new file mode 100644 index 0000000000..e3ea2ba11c --- /dev/null +++ b/schema/crdb/15.0.0/up2.sql @@ -0,0 +1,3 @@ +ALTER TABLE omicron.public.sled + ADD COLUMN IF NOT EXISTS provision_state omicron.public.sled_provision_state + NOT NULL DEFAULT 'provisionable'; diff --git a/schema/crdb/15.0.0/up3.sql b/schema/crdb/15.0.0/up3.sql new file mode 100644 index 0000000000..aaa3feac20 --- /dev/null +++ b/schema/crdb/15.0.0/up3.sql @@ -0,0 +1,5 @@ +-- Drop the default column value for provision_state -- it should always be set +-- by Nexus. +ALTER TABLE omicron.public.sled + ALTER COLUMN provision_state + DROP DEFAULT; diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index 728b084982..178c7af913 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -73,6 +73,13 @@ CREATE TABLE IF NOT EXISTS omicron.public.rack ( * Sleds */ +CREATE TYPE IF NOT EXISTS omicron.public.sled_provision_state AS ENUM ( + -- New resources can be provisioned onto the sled + 'provisionable', + -- New resources must not be provisioned onto the sled + 'non_provisionable' +); + CREATE TABLE IF NOT EXISTS omicron.public.sled ( /* Identity metadata (asset) */ id UUID PRIMARY KEY, @@ -104,6 +111,9 @@ CREATE TABLE IF NOT EXISTS omicron.public.sled ( /* The last address allocated to an Oxide service on this sled. */ last_used_address INET NOT NULL, + /* The state of whether resources should be provisioned onto the sled */ + provision_state omicron.public.sled_provision_state NOT NULL, + -- This constraint should be upheld, even for deleted disks -- in the fleet. 
CONSTRAINT serial_part_revision_unique UNIQUE ( @@ -2997,7 +3007,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - ( TRUE, NOW(), NOW(), '14.0.0', NULL) + ( TRUE, NOW(), NOW(), '15.0.0', NULL) ON CONFLICT DO NOTHING; COMMIT; From 7f8b82e5ec266d94c5a94e0aa987f4edc81b3116 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Wed, 29 Nov 2023 00:25:00 -0800 Subject: [PATCH 52/56] Update Rust crate zeroize to 1.7.0 (#4542) --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- workspace-hack/Cargo.toml | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 532fcde59f..a0e8361d79 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9895,9 +9895,9 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" +checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" dependencies = [ "zeroize_derive", ] diff --git a/Cargo.toml b/Cargo.toml index 239fb453dc..78abe273e0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -389,7 +389,7 @@ walkdir = "2.4" wicket = { path = "wicket" } wicket-common = { path = "wicket-common" } wicketd-client = { path = "clients/wicketd-client" } -zeroize = { version = "1.6.0", features = ["zeroize_derive", "std"] } +zeroize = { version = "1.7.0", features = ["zeroize_derive", "std"] } zip = { version = "0.6.6", default-features = false, features = ["deflate","bzip2"] } zone = { version = "0.3", default-features = false, features = ["async"] } diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index 7757b4ad8b..fe7c3bdc81 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -105,7 +105,7 @@ unicode-normalization = { version = "0.1.22" } usdt = { version = "0.3.5" } uuid = { version = "1.6.1", features = ["serde", "v4"] } yasna = { version = "0.5.2", features = ["bit-vec", "num-bigint", "std", "time"] } -zeroize = { version = "1.6.0", features = ["std", "zeroize_derive"] } +zeroize = { version = "1.7.0", features = ["std", "zeroize_derive"] } zip = { version = "0.6.6", default-features = false, features = ["bzip2", "deflate"] } [build-dependencies] @@ -201,7 +201,7 @@ unicode-normalization = { version = "0.1.22" } usdt = { version = "0.3.5" } uuid = { version = "1.6.1", features = ["serde", "v4"] } yasna = { version = "0.5.2", features = ["bit-vec", "num-bigint", "std", "time"] } -zeroize = { version = "1.6.0", features = ["std", "zeroize_derive"] } +zeroize = { version = "1.7.0", features = ["std", "zeroize_derive"] } zip = { version = "0.6.6", default-features = false, features = ["bzip2", "deflate"] } [target.x86_64-unknown-linux-gnu.dependencies] From bb7ee841d38318a3316c5749babae3112ed074a2 Mon Sep 17 00:00:00 2001 From: "oxide-renovate[bot]" <146848827+oxide-renovate[bot]@users.noreply.github.com> Date: Wed, 29 Nov 2023 10:15:50 -0800 Subject: [PATCH 53/56] Update Rust crate pretty-hex to 0.4.0 (#4576) Co-authored-by: oxide-renovate[bot] <146848827+oxide-renovate[bot]@users.noreply.github.com> --- Cargo.lock | 6 +++--- Cargo.toml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a0e8361d79..6580e1de55 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1791,7 +1791,7 @@ dependencies = [ "omicron-workspace-hack", "openapi-lint", "openapiv3 1.0.3", - "pretty-hex 0.3.0", + "pretty-hex 0.4.0", 
"schemars", "serde", "serde_json", @@ -5978,9 +5978,9 @@ checksum = "bc5c99d529f0d30937f6f4b8a86d988047327bb88d04d2c4afc356de74722131" [[package]] name = "pretty-hex" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6fa0831dd7cc608c38a5e323422a0077678fa5744aa2be4ad91c4ece8eec8d5" +checksum = "23c6b968ed37d62e35b4febaba13bfa231b0b7929d68b8a94e65445a17e2d35f" [[package]] name = "pretty_assertions" diff --git a/Cargo.toml b/Cargo.toml index 78abe273e0..694cd2c8dc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -288,7 +288,7 @@ petgraph = "0.6.4" postgres-protocol = "0.6.6" predicates = "3.0.4" pretty_assertions = "1.4.0" -pretty-hex = "0.3.0" +pretty-hex = "0.4.0" proc-macro2 = "1.0" progenitor = { git = "https://github.com/oxidecomputer/progenitor", branch = "main" } progenitor-client = { git = "https://github.com/oxidecomputer/progenitor", branch = "main" } From a4e12168c6c418317f980c16dea7801660781d7c Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 29 Nov 2023 11:57:01 -0800 Subject: [PATCH 54/56] [nexus] Make 'update_and_check' CTE explicitly request columns (#4572) Related to https://github.com/oxidecomputer/omicron/issues/4570 , but not a direct fix for it This PR removes a usage of ".\*" from a SQL query. Using ".\*" in sql queries is somewhat risky -- it makes an implicit dependency on order, and can make backwards compatibility difficult in certain circumstances. Instead, this PR provides a `ColumnWalker`, for converting a tuple of columns to an iterator, and requests the expected columns explicitly. --- nexus/db-queries/src/db/column_walker.rs | 112 ++++++++++++++++++++ nexus/db-queries/src/db/mod.rs | 1 + nexus/db-queries/src/db/update_and_check.rs | 48 +++++---- 3 files changed, 141 insertions(+), 20 deletions(-) create mode 100644 nexus/db-queries/src/db/column_walker.rs diff --git a/nexus/db-queries/src/db/column_walker.rs b/nexus/db-queries/src/db/column_walker.rs new file mode 100644 index 0000000000..64c3b450c8 --- /dev/null +++ b/nexus/db-queries/src/db/column_walker.rs @@ -0,0 +1,112 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! CTE utility for iterating over all columns in a table. + +use diesel::prelude::*; +use std::marker::PhantomData; + +/// Used to iterate over a tuple of columns ("T"). +/// +/// Diesel exposes "AllColumns" as a tuple, which is difficult to iterate over +/// -- after all, all the types are distinct. However, each of these types +/// implements "Column", so we can use a macro to provide a +/// "convertion-to-iterator" implemenation for our expected tuples. +pub(crate) struct ColumnWalker { + remaining: PhantomData, +} + +impl ColumnWalker { + pub fn new() -> Self { + Self { remaining: PhantomData } + } +} + +macro_rules! impl_column_walker { + ( $len:literal $($column:ident)+ ) => ( + impl<$($column: Column),+> IntoIterator for ColumnWalker<($($column,)+)> { + type Item = &'static str; + type IntoIter = std::array::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + [$($column::NAME,)+].into_iter() + } + } + ); +} + +// implementations for 1 - 32 columns +impl_column_walker! { 1 A } +impl_column_walker! { 2 A B } +impl_column_walker! { 3 A B C } +impl_column_walker! { 4 A B C D } +impl_column_walker! { 5 A B C D E } +impl_column_walker! { 6 A B C D E F } +impl_column_walker! { 7 A B C D E F G } +impl_column_walker! 
{ 8 A B C D E F G H } +impl_column_walker! { 9 A B C D E F G H I } +impl_column_walker! { 10 A B C D E F G H I J } +impl_column_walker! { 11 A B C D E F G H I J K } +impl_column_walker! { 12 A B C D E F G H I J K L } +impl_column_walker! { 13 A B C D E F G H I J K L M } +impl_column_walker! { 14 A B C D E F G H I J K L M N } +impl_column_walker! { 15 A B C D E F G H I J K L M N O } +impl_column_walker! { 16 A B C D E F G H I J K L M N O P } +impl_column_walker! { 17 A B C D E F G H I J K L M N O P Q } +impl_column_walker! { 18 A B C D E F G H I J K L M N O P Q R } +impl_column_walker! { 19 A B C D E F G H I J K L M N O P Q R S } +impl_column_walker! { 20 A B C D E F G H I J K L M N O P Q R S T } +impl_column_walker! { 21 A B C D E F G H I J K L M N O P Q R S T U } +impl_column_walker! { 22 A B C D E F G H I J K L M N O P Q R S T U V } +impl_column_walker! { 23 A B C D E F G H I J K L M N O P Q R S T U V W } +impl_column_walker! { 24 A B C D E F G H I J K L M N O P Q R S T U V W X } +impl_column_walker! { 25 A B C D E F G H I J K L M N O P Q R S T U V W X Y } +impl_column_walker! { 26 A B C D E F G H I J K L M N O P Q R S T U V W X Y Z } +impl_column_walker! { 27 A B C D E F G H I J K L M N O P Q R S T U V W X Y Z A1 } +impl_column_walker! { 28 A B C D E F G H I J K L M N O P Q R S T U V W X Y Z A1 B1 } +impl_column_walker! { 29 A B C D E F G H I J K L M N O P Q R S T U V W X Y Z A1 B1 C1 } +impl_column_walker! { 30 A B C D E F G H I J K L M N O P Q R S T U V W X Y Z A1 B1 C1 D1 } +impl_column_walker! { 31 A B C D E F G H I J K L M N O P Q R S T U V W X Y Z A1 B1 C1 D1 E1 } +impl_column_walker! { 32 A B C D E F G H I J K L M N O P Q R S T U V W X Y Z A1 B1 C1 D1 E1 F1 } + +#[cfg(test)] +mod test { + use super::*; + + table! { + test_schema.test_table (id) { + id -> Uuid, + value -> Int4, + time_deleted -> Nullable, + } + } + + // We can convert all a tables columns into an iteratable format. + #[test] + fn test_walk_table() { + let all_columns = + ColumnWalker::<::AllColumns>::new(); + + let mut iter = all_columns.into_iter(); + assert_eq!(iter.next(), Some("id")); + assert_eq!(iter.next(), Some("value")); + assert_eq!(iter.next(), Some("time_deleted")); + assert_eq!(iter.next(), None); + } + + // We can, if we want to, also make a ColumnWalker out of an arbitrary tuple + // of columns. + #[test] + fn test_walk_columns() { + let all_columns = ColumnWalker::<( + test_table::columns::id, + test_table::columns::value, + )>::new(); + + let mut iter = all_columns.into_iter(); + assert_eq!(iter.next(), Some("id")); + assert_eq!(iter.next(), Some("value")); + assert_eq!(iter.next(), None); + } +} diff --git a/nexus/db-queries/src/db/mod.rs b/nexus/db-queries/src/db/mod.rs index 8b7424a056..b7c7079b54 100644 --- a/nexus/db-queries/src/db/mod.rs +++ b/nexus/db-queries/src/db/mod.rs @@ -12,6 +12,7 @@ pub mod collection_attach; pub mod collection_detach; pub mod collection_detach_many; pub mod collection_insert; +mod column_walker; mod config; mod cte_utils; // This is marked public for use by the integration tests diff --git a/nexus/db-queries/src/db/update_and_check.rs b/nexus/db-queries/src/db/update_and_check.rs index d6bf14c083..fed79d5254 100644 --- a/nexus/db-queries/src/db/update_and_check.rs +++ b/nexus/db-queries/src/db/update_and_check.rs @@ -4,6 +4,7 @@ //! CTE implementation for "UPDATE with extended return status". 
+use super::column_walker::ColumnWalker; use super::pool::DbConnection; use async_bb8_diesel::AsyncRunQueryDsl; use diesel::associations::HasTable; @@ -21,7 +22,7 @@ use std::marker::PhantomData; /// allows referencing generics with names (and extending usage /// without re-stating those generic parameters everywhere). pub trait UpdateStatementExt { - type Table: QuerySource; + type Table: Table + QuerySource; type WhereClause; type Changeset; @@ -32,7 +33,7 @@ pub trait UpdateStatementExt { impl UpdateStatementExt for UpdateStatement where - T: QuerySource, + T: Table + QuerySource, { type Table = T; type WhereClause = U; @@ -201,11 +202,11 @@ where /// /// ```text /// // WITH found AS (SELECT FROM T WHERE ) -/// // updated AS (UPDATE T SET RETURNING *) +/// // updated AS (UPDATE T SET RETURNING ) /// // SELECT /// // found. /// // updated. -/// // found.* +/// // found. /// // FROM /// // found /// // LEFT JOIN @@ -217,41 +218,48 @@ impl QueryFragment for UpdateAndQueryStatement where US: UpdateStatementExt, US::Table: HasTable + Table, + ColumnWalker<<::Table as Table>::AllColumns>: + IntoIterator, PrimaryKey: diesel::Column, UpdateStatement: QueryFragment, { fn walk_ast<'b>(&'b self, mut out: AstPass<'_, 'b, Pg>) -> QueryResult<()> { + let primary_key = as Column>::NAME; + out.push_sql("WITH found AS ("); self.find_subquery.walk_ast(out.reborrow())?; out.push_sql("), updated AS ("); self.update_statement.walk_ast(out.reborrow())?; - // TODO: Only need primary? Or would we actually want - // to pass the returned rows back through the result? - out.push_sql(" RETURNING *) "); + out.push_sql(" RETURNING "); + out.push_identifier(primary_key)?; + out.push_sql(") "); out.push_sql("SELECT"); - let name = as Column>::NAME; out.push_sql(" found."); - out.push_identifier(name)?; + out.push_identifier(primary_key)?; out.push_sql(", updated."); - out.push_identifier(name)?; - // TODO: I'd prefer to list all columns explicitly. But how? - // The types exist within Table::AllColumns, and each one - // has a name as "::Name". - // But Table::AllColumns is a tuple, which makes iteration - // a pain. - // - // TODO: Technically, we're repeating the PK here. - out.push_sql(", found.*"); + out.push_identifier(primary_key)?; + + // List all the "found" columns explicitly. + // This admittedly repeats the primary key, but that keeps the query + // "simple" since it returns all columns in the same order as + // AllColumns. + let all_columns = ColumnWalker::< + <::Table as Table>::AllColumns, + >::new(); + for column in all_columns.into_iter() { + out.push_sql(", found."); + out.push_identifier(column)?; + } out.push_sql(" FROM found LEFT JOIN updated ON"); out.push_sql(" found."); - out.push_identifier(name)?; + out.push_identifier(primary_key)?; out.push_sql(" = "); out.push_sql("updated."); - out.push_identifier(name)?; + out.push_identifier(primary_key)?; Ok(()) } From 22a70e489db5c91f1215535463abed10aa0e9db2 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 29 Nov 2023 11:58:22 -0800 Subject: [PATCH 55/56] Stop panicking when our accounting is wrong (#4568) Prefer to return a 500 error instead of panicking. Since this function is already called from a transactional context, we can rely on the rollback mechanism to "undo" the deletion. 
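For illustration, the change boils down to the pattern below -- a self-contained sketch with stand-in types (the CollectionUsage and Error defined here are simplified placeholders, not the real nexus-db-queries models). Because the caller invokes this check from within a database transaction, returning an error causes the transaction to roll back, undoing the DELETE instead of panicking the whole Nexus process; the actual change appears in the diff that follows.

    // Sketch only: stand-in types, not the actual Omicron datastore code.
    #[derive(Debug)]
    struct CollectionUsage {
        cpus_provisioned: i64,
    }

    impl CollectionUsage {
        fn is_empty(&self) -> bool {
            self.cpus_provisioned == 0
        }
    }

    #[derive(Debug)]
    enum Error {
        InternalError(String),
    }

    impl Error {
        fn internal_error(msg: &str) -> Self {
            Error::InternalError(msg.to_string())
        }
    }

    // Called from inside a transaction: returning Err here rolls back the
    // preceding DELETE rather than aborting the process on a bad invariant.
    fn check_collection_empty(collection: &CollectionUsage) -> Result<(), Error> {
        if !collection.is_empty() {
            return Err(Error::internal_error(&format!(
                "Collection deleted while non-empty: {collection:?}"
            )));
        }
        Ok(())
    }
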
Fixes https://github.com/oxidecomputer/omicron/issues/3870 --- .../db/datastore/virtual_provisioning_collection.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/nexus/db-queries/src/db/datastore/virtual_provisioning_collection.rs b/nexus/db-queries/src/db/datastore/virtual_provisioning_collection.rs index 83856e10c7..c5c2751723 100644 --- a/nexus/db-queries/src/db/datastore/virtual_provisioning_collection.rs +++ b/nexus/db-queries/src/db/datastore/virtual_provisioning_collection.rs @@ -124,10 +124,12 @@ impl DataStore { .get_result_async(conn) .await .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; - assert!( - collection.is_empty(), - "Collection deleted while non-empty: {collection:?}" - ); + + if !collection.is_empty() { + return Err(Error::internal_error(&format!( + "Collection deleted while non-empty: {collection:?}" + ))); + } Ok(()) } From f24447b0d93d339e70904fccb2f0a2c421db01e0 Mon Sep 17 00:00:00 2001 From: bnaecker Date: Wed, 29 Nov 2023 12:03:48 -0800 Subject: [PATCH 56/56] Improve oximeter self-stat tests (#4577) Reduces the tick interval in calls to `tokio::time::advance()` to ensure all timers complete reliably. See #4566 for context. --- oximeter/collector/src/agent.rs | 51 +++++++++++++++++++-------------- 1 file changed, 30 insertions(+), 21 deletions(-) diff --git a/oximeter/collector/src/agent.rs b/oximeter/collector/src/agent.rs index f6da172909..365527ef08 100644 --- a/oximeter/collector/src/agent.rs +++ b/oximeter/collector/src/agent.rs @@ -659,6 +659,24 @@ mod tests { use tokio::time::Instant; use uuid::Uuid; + // Interval on which oximeter collects from producers in these tests. + const COLLECTION_INTERVAL: Duration = Duration::from_secs(1); + + // Interval in calls to `tokio::time::advance`. This must be sufficiently + // small relative to `COLLECTION_INTERVAL` to ensure all ticks of internal + // timers complete as expected. + const TICK_INTERVAL: Duration = Duration::from_millis(10); + + // Total number of collection attempts. + const N_COLLECTIONS: u64 = 5; + + // Period these tests wait using `tokio::time::advance()` before checking + // their test conditions. + const TEST_WAIT_PERIOD: Duration = Duration::from_millis( + COLLECTION_INTERVAL.as_millis() as u64 * N_COLLECTIONS + + COLLECTION_INTERVAL.as_millis() as u64 / 2, + ); + // Test that we count successful collections from a target correctly. #[tokio::test] async fn test_self_stat_collection_count() { @@ -692,13 +710,12 @@ mod tests { let _task = tokio::task::spawn(server); // Register the dummy producer. - let interval = Duration::from_secs(1); let endpoint = ProducerEndpoint { id: Uuid::new_v4(), kind: Some(ProducerKind::Service), address, base_route: String::from("/"), - interval, + interval: COLLECTION_INTERVAL, }; collector .register_producer(endpoint) @@ -708,10 +725,8 @@ mod tests { // Step time until there has been exactly `N_COLLECTIONS` collections. tokio::time::pause(); let now = Instant::now(); - const N_COLLECTIONS: usize = 5; - let wait_for = interval * N_COLLECTIONS as u32 + interval / 2; - while now.elapsed() < wait_for { - tokio::time::advance(interval / 10).await; + while now.elapsed() < TEST_WAIT_PERIOD { + tokio::time::advance(TICK_INTERVAL).await; } // Request the statistics from the task itself. 
@@ -729,7 +744,7 @@ mod tests { .await .expect("failed to request statistics from task"); let stats = rx.await.expect("failed to receive statistics from task"); - assert_eq!(stats.collections.datum.value(), N_COLLECTIONS as u64); + assert_eq!(stats.collections.datum.value(), N_COLLECTIONS); assert!(stats.failed_collections.is_empty()); logctx.cleanup_successful(); } @@ -751,7 +766,6 @@ mod tests { // Register a bogus producer, which is equivalent to a producer that is // unreachable. - let interval = Duration::from_secs(1); let endpoint = ProducerEndpoint { id: Uuid::new_v4(), kind: Some(ProducerKind::Service), @@ -762,7 +776,7 @@ mod tests { 0, )), base_route: String::from("/"), - interval, + interval: COLLECTION_INTERVAL, }; collector .register_producer(endpoint) @@ -772,10 +786,8 @@ mod tests { // Step time until there has been exactly `N_COLLECTIONS` collections. tokio::time::pause(); let now = Instant::now(); - const N_COLLECTIONS: usize = 5; - let wait_for = interval * N_COLLECTIONS as u32 + interval / 2; - while now.elapsed() < wait_for { - tokio::time::advance(interval / 10).await; + while now.elapsed() < TEST_WAIT_PERIOD { + tokio::time::advance(TICK_INTERVAL).await; } // Request the statistics from the task itself. @@ -801,7 +813,7 @@ mod tests { .unwrap() .datum .value(), - N_COLLECTIONS as u64 + N_COLLECTIONS, ); assert_eq!(stats.failed_collections.len(), 1); logctx.cleanup_successful(); @@ -840,13 +852,12 @@ mod tests { let _task = tokio::task::spawn(server); // Register the rather flaky producer. - let interval = Duration::from_secs(1); let endpoint = ProducerEndpoint { id: Uuid::new_v4(), kind: Some(ProducerKind::Service), address, base_route: String::from("/"), - interval, + interval: COLLECTION_INTERVAL, }; collector .register_producer(endpoint) @@ -856,10 +867,8 @@ mod tests { // Step time until there has been exactly `N_COLLECTIONS` collections. tokio::time::pause(); let now = Instant::now(); - const N_COLLECTIONS: usize = 5; - let wait_for = interval * N_COLLECTIONS as u32 + interval / 2; - while now.elapsed() < wait_for { - tokio::time::advance(interval / 10).await; + while now.elapsed() < TEST_WAIT_PERIOD { + tokio::time::advance(TICK_INTERVAL).await; } // Request the statistics from the task itself. @@ -885,7 +894,7 @@ mod tests { .unwrap() .datum .value(), - N_COLLECTIONS as u64 + N_COLLECTIONS, ); assert_eq!(stats.failed_collections.len(), 1); logctx.cleanup_successful();