diff --git a/.github/buildomat/jobs/build-and-test-linux.sh b/.github/buildomat/jobs/build-and-test-linux.sh index f33d1a8cfa..715effd080 100755 --- a/.github/buildomat/jobs/build-and-test-linux.sh +++ b/.github/buildomat/jobs/build-and-test-linux.sh @@ -1,8 +1,8 @@ #!/bin/bash #: -#: name = "build-and-test (ubuntu-20.04)" +#: name = "build-and-test (ubuntu-22.04)" #: variety = "basic" -#: target = "ubuntu-20.04" +#: target = "ubuntu-22.04" #: rust_toolchain = "1.72.1" #: output_rules = [ #: "/var/tmp/omicron_tmp/*", diff --git a/.github/buildomat/jobs/clippy.sh b/.github/buildomat/jobs/clippy.sh index dba1021919..5fd31adb76 100755 --- a/.github/buildomat/jobs/clippy.sh +++ b/.github/buildomat/jobs/clippy.sh @@ -29,3 +29,4 @@ ptime -m bash ./tools/install_builder_prerequisites.sh -y banner clippy ptime -m cargo xtask clippy +ptime -m cargo doc diff --git a/.github/buildomat/jobs/deploy.sh b/.github/buildomat/jobs/deploy.sh index c2579d98ea..da8fddc75f 100755 --- a/.github/buildomat/jobs/deploy.sh +++ b/.github/buildomat/jobs/deploy.sh @@ -232,11 +232,11 @@ infra_ip_first = \"$UPLINK_IP\" /^infra_ip_last/c\\ infra_ip_last = \"$UPLINK_IP\" } - /^\\[\\[rack_network_config.uplinks/,/^\$/ { - /^gateway_ip/c\\ -gateway_ip = \"$GATEWAY_IP\" - /^uplink_cidr/c\\ -uplink_cidr = \"$UPLINK_IP/32\" + /^\\[\\[rack_network_config.ports/,/^\$/ { + /^routes/c\\ +routes = \\[{nexthop = \"$GATEWAY_IP\", destination = \"0.0.0.0/0\"}\\] + /^addresses/c\\ +addresses = \\[\"$UPLINK_IP/32\"\\] } " pkg/config-rss.toml diff -u pkg/config-rss.toml{~,} || true diff --git a/.github/buildomat/jobs/package.sh b/.github/buildomat/jobs/package.sh index 64c087524e..c1cb04124d 100755 --- a/.github/buildomat/jobs/package.sh +++ b/.github/buildomat/jobs/package.sh @@ -71,7 +71,7 @@ tarball_src_dir="$(pwd)/out/versioned" stamp_packages() { for package in "$@"; do # TODO: remove once https://github.com/oxidecomputer/omicron-package/pull/54 lands - if [[ $package == maghemite ]]; then + if [[ $package == mg-ddm-gz ]]; then echo "0.0.0" > VERSION tar rvf "out/$package.tar" VERSION rm VERSION @@ -90,7 +90,7 @@ ptime -m cargo run --locked --release --bin omicron-package -- \ -t host target create -i standard -m gimlet -s asic -r multi-sled ptime -m cargo run --locked --release --bin omicron-package -- \ -t host package -stamp_packages omicron-sled-agent maghemite propolis-server overlay +stamp_packages omicron-sled-agent mg-ddm-gz propolis-server overlay # Create global zone package @ /work/global-zone-packages.tar.gz ptime -m ./tools/build-global-zone-packages.sh "$tarball_src_dir" /work @@ -135,7 +135,7 @@ ptime -m cargo run --locked --release --bin omicron-package -- \ -t recovery target create -i trampoline ptime -m cargo run --locked --release --bin omicron-package -- \ -t recovery package -stamp_packages installinator maghemite +stamp_packages installinator mg-ddm-gz # Create trampoline global zone package @ /work/trampoline-global-zone-packages.tar.gz ptime -m ./tools/build-trampoline-global-zone-packages.sh "$tarball_src_dir" /work diff --git a/.gitignore b/.gitignore index 574e867c02..1d7177320f 100644 --- a/.gitignore +++ b/.gitignore @@ -13,3 +13,5 @@ core *.vdev debug.out rusty-tags.vi +*.sw* +tags diff --git a/Cargo.lock b/Cargo.lock index 05cad9a020..9ca83afe54 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4166,6 +4166,29 @@ dependencies = [ "autocfg", ] +[[package]] +name = "mg-admin-client" +version = "0.1.0" +dependencies = [ + "anyhow", + "either", + "omicron-common 0.1.0", + "omicron-workspace-hack", + 
"omicron-zone-package", + "progenitor", + "progenitor-client", + "quote", + "reqwest", + "rustfmt-wrapper", + "serde", + "serde_json", + "sled-hardware", + "slog", + "thiserror", + "tokio", + "toml 0.7.8", +] + [[package]] name = "mime" version = "0.3.17" @@ -5082,6 +5105,7 @@ dependencies = [ "itertools 0.11.0", "lazy_static", "macaddr", + "mg-admin-client", "mime_guess", "newtype_derive", "nexus-db-model", @@ -10133,6 +10157,7 @@ dependencies = [ "installinator-artifact-client", "installinator-artifactd", "installinator-common", + "ipnetwork", "itertools 0.11.0", "omicron-certificates", "omicron-common 0.1.0", diff --git a/Cargo.toml b/Cargo.toml index 96a6cdbacf..57d27ff128 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,6 +10,7 @@ members = [ "clients/dpd-client", "clients/gateway-client", "clients/installinator-artifact-client", + "clients/mg-admin-client", "clients/nexus-client", "clients/oxide-client", "clients/oximeter-client", @@ -82,6 +83,7 @@ default-members = [ "clients/oximeter-client", "clients/sled-agent-client", "clients/wicketd-client", + "clients/mg-admin-client", "common", "dev-tools/crdb-seed", "dev-tools/omdb", @@ -227,6 +229,7 @@ macaddr = { version = "1.0.1", features = ["serde_std"] } mime_guess = "2.0.4" mockall = "0.11" newtype_derive = "0.1.6" +mg-admin-client = { path = "clients/mg-admin-client" } nexus-client = { path = "clients/nexus-client" } nexus-db-model = { path = "nexus/db-model" } nexus-db-queries = { path = "nexus/db-queries" } diff --git a/bootstore/src/schemes/v0/storage.rs b/bootstore/src/schemes/v0/storage.rs index ee31d24f05..327acc6058 100644 --- a/bootstore/src/schemes/v0/storage.rs +++ b/bootstore/src/schemes/v0/storage.rs @@ -5,9 +5,9 @@ //! Storage for the v0 bootstore scheme //! //! We write two pieces of data to M.2 devices in production via -//! [`omicron_common::Ledger`]: +//! [`omicron_common::ledger::Ledger`]: //! -//! 1. [`super::Fsm::State`] for bootstore state itself +//! 1. [`super::State`] for bootstore state itself //! 2. A network config blob required for pre-rack-unlock configuration //! diff --git a/clients/bootstrap-agent-client/src/lib.rs b/clients/bootstrap-agent-client/src/lib.rs index 3f8b20e1f5..19ecb599f3 100644 --- a/clients/bootstrap-agent-client/src/lib.rs +++ b/clients/bootstrap-agent-client/src/lib.rs @@ -20,6 +20,8 @@ progenitor::generate_api!( derives = [schemars::JsonSchema], replace = { Ipv4Network = ipnetwork::Ipv4Network, + Ipv6Network = ipnetwork::Ipv6Network, + IpNetwork = ipnetwork::IpNetwork, } ); diff --git a/clients/ddm-admin-client/build.rs b/clients/ddm-admin-client/build.rs index e3c1345eda..da74ee9962 100644 --- a/clients/ddm-admin-client/build.rs +++ b/clients/ddm-admin-client/build.rs @@ -21,20 +21,21 @@ fn main() -> Result<()> { println!("cargo:rerun-if-changed=../../package-manifest.toml"); let config: Config = toml::from_str(&manifest) - .context("failed to parse ../../package-manifest.toml")?; - let maghemite = config + .context("failed to parse ../package-manifest.toml")?; + + let ddm = config .packages - .get("maghemite") - .context("missing maghemite package in ../../package-manifest.toml")?; + .get("mg-ddm-gz") + .context("missing mg-ddm-gz package in ../package-manifest.toml")?; - let local_path = match &maghemite.source { + let local_path = match &ddm.source { PackageSource::Prebuilt { commit, .. } => { // Report a relatively verbose error if we haven't downloaded the requisite // openapi spec. 
let local_path = format!("../../out/downloads/ddm-admin-{commit}.json"); if !Path::new(&local_path).exists() { - bail!("{local_path} doesn't exist; rerun `tools/ci_download_maghemite_openapi` (after updating `tools/maghemite_openapi_version` if the maghemite commit in package-manifest.toml has changed)"); + bail!("{local_path} doesn't exist; rerun `tools/ci_download_maghemite_openapi` (after updating `tools/maghemite_ddm_openapi_version` if the maghemite commit in package-manifest.toml has changed)"); } println!("cargo:rerun-if-changed={local_path}"); local_path @@ -51,7 +52,9 @@ fn main() -> Result<()> { } _ => { - bail!("maghemite external package must have type `prebuilt` or `manual`") + bail!( + "mg-ddm external package must have type `prebuilt` or `manual`" + ) } }; diff --git a/clients/mg-admin-client/Cargo.toml b/clients/mg-admin-client/Cargo.toml new file mode 100644 index 0000000000..c444fee32f --- /dev/null +++ b/clients/mg-admin-client/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "mg-admin-client" +version = "0.1.0" +edition = "2021" +license = "MPL-2.0" + +[dependencies] +either.workspace = true +progenitor-client.workspace = true +reqwest = { workspace = true, features = ["json", "stream", "rustls-tls"] } +serde.workspace = true +slog.workspace = true +thiserror.workspace = true +tokio.workspace = true +omicron-common.workspace = true +sled-hardware.workspace = true +omicron-workspace-hack.workspace = true + +[build-dependencies] +anyhow.workspace = true +omicron-zone-package.workspace = true +progenitor.workspace = true +quote.workspace = true +rustfmt-wrapper.workspace = true +serde_json.workspace = true +toml.workspace = true diff --git a/clients/mg-admin-client/build.rs b/clients/mg-admin-client/build.rs new file mode 100644 index 0000000000..dcc7ae61cb --- /dev/null +++ b/clients/mg-admin-client/build.rs @@ -0,0 +1,102 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +// Copyright 2022 Oxide Computer Company + +use anyhow::bail; +use anyhow::Context; +use anyhow::Result; +use omicron_zone_package::config::Config; +use omicron_zone_package::package::PackageSource; +use quote::quote; +use std::env; +use std::fs; +use std::path::Path; + +fn main() -> Result<()> { + // Find the current maghemite repo commit from our package manifest. + let manifest = fs::read_to_string("../../package-manifest.toml") + .context("failed to read ../../package-manifest.toml")?; + println!("cargo:rerun-if-changed=../../package-manifest.toml"); + + let config: Config = toml::from_str(&manifest) + .context("failed to parse ../../package-manifest.toml")?; + let mg = config + .packages + .get("mgd") + .context("missing mgd package in ../../package-manifest.toml")?; + + let local_path = match &mg.source { + PackageSource::Prebuilt { commit, .. } => { + // Report a relatively verbose error if we haven't downloaded the requisite + // openapi spec. 
+ let local_path = + format!("../../out/downloads/mg-admin-{commit}.json"); + if !Path::new(&local_path).exists() { + bail!("{local_path} doesn't exist; rerun `tools/ci_download_maghemite_openapi` (after updating `tools/maghemite_mg_openapi_version` if the maghemite commit in package-manifest.toml has changed)"); + } + println!("cargo:rerun-if-changed={local_path}"); + local_path + } + + PackageSource::Manual => { + let local_path = + "../../out/downloads/mg-admin-manual.json".to_string(); + if !Path::new(&local_path).exists() { + bail!("{local_path} doesn't exist, please copy manually built mg-admin.json there!"); + } + println!("cargo:rerun-if-changed={local_path}"); + local_path + } + + _ => { + bail!("mgd external package must have type `prebuilt` or `manual`") + } + }; + + let spec = { + let bytes = fs::read(&local_path) + .with_context(|| format!("failed to read {local_path}"))?; + serde_json::from_slice(&bytes).with_context(|| { + format!("failed to parse {local_path} as openapi spec") + })? + }; + + let code = progenitor::Generator::new( + progenitor::GenerationSettings::new() + .with_inner_type(quote!(slog::Logger)) + .with_pre_hook(quote! { + |log: &slog::Logger, request: &reqwest::Request| { + slog::debug!(log, "client request"; + "method" => %request.method(), + "uri" => %request.url(), + "body" => ?&request.body(), + ); + } + }) + .with_post_hook(quote! { + |log: &slog::Logger, result: &Result<_, _>| { + slog::debug!(log, "client response"; "result" => ?result); + } + }), + ) + .generate_tokens(&spec) + .with_context(|| { + format!("failed to generate progenitor client from {local_path}") + })?; + + let content = rustfmt_wrapper::rustfmt(code).with_context(|| { + format!("rustfmt failed on progenitor code from {local_path}") + })?; + + let out_file = + Path::new(&env::var("OUT_DIR").expect("OUT_DIR env var not set")) + .join("mg-admin-client.rs"); + + fs::write(&out_file, content).with_context(|| { + format!("failed to write client to {}", out_file.display()) + })?; + + Ok(()) +} diff --git a/clients/mg-admin-client/src/lib.rs b/clients/mg-admin-client/src/lib.rs new file mode 100644 index 0000000000..bb1d925c73 --- /dev/null +++ b/clients/mg-admin-client/src/lib.rs @@ -0,0 +1,83 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +// Copyright 2023 Oxide Computer Company + +#![allow(clippy::redundant_closure_call)] +#![allow(clippy::needless_lifetimes)] +#![allow(clippy::match_single_binding)] +#![allow(clippy::clone_on_copy)] +#![allow(rustdoc::broken_intra_doc_links)] +#![allow(rustdoc::invalid_html_tags)] + +#[allow(dead_code)] +mod inner { + include!(concat!(env!("OUT_DIR"), "/mg-admin-client.rs")); +} + +pub use inner::types; +pub use inner::Error; + +use inner::Client as InnerClient; +use omicron_common::api::external::BgpPeerState; +use slog::Logger; +use std::net::Ipv6Addr; +use std::net::SocketAddr; +use thiserror::Error; + +// TODO-cleanup Is it okay to hardcode this port number here? 
+const MGD_PORT: u16 = 4676; + +#[derive(Debug, Error)] +pub enum MgError { + #[error("Failed to construct an HTTP client: {0}")] + HttpClient(#[from] reqwest::Error), + + #[error("Failed making HTTP request to mgd: {0}")] + MgApi(#[from] Error), +} + +impl From<inner::types::FsmStateKind> for BgpPeerState { + fn from(s: inner::types::FsmStateKind) -> BgpPeerState { + use inner::types::FsmStateKind; + match s { + FsmStateKind::Idle => BgpPeerState::Idle, + FsmStateKind::Connect => BgpPeerState::Connect, + FsmStateKind::Active => BgpPeerState::Active, + FsmStateKind::OpenSent => BgpPeerState::OpenSent, + FsmStateKind::OpenConfirm => BgpPeerState::OpenConfirm, + FsmStateKind::SessionSetup => BgpPeerState::SessionSetup, + FsmStateKind::Established => BgpPeerState::Established, + } + } +} + +#[derive(Debug, Clone)] +pub struct Client { + pub inner: InnerClient, + pub log: Logger, +} + +impl Client { + /// Creates a new [`Client`] that points to localhost + pub fn localhost(log: &Logger) -> Result<Self, MgError> { + Self::new(log, SocketAddr::new(Ipv6Addr::LOCALHOST.into(), MGD_PORT)) + } + + pub fn new(log: &Logger, mgd_addr: SocketAddr) -> Result<Self, MgError> { + let dur = std::time::Duration::from_secs(60); + let log = log.new(slog::o!("MgAdminClient" => mgd_addr)); + + let inner = reqwest::ClientBuilder::new() + .connect_timeout(dur) + .timeout(dur) + .build()?; + let inner = InnerClient::new_with_client( + &format!("http://{mgd_addr}"), + inner, + log.clone(), + ); + Ok(Self { inner, log }) + } +} diff --git a/clients/nexus-client/src/lib.rs b/clients/nexus-client/src/lib.rs index 33a68cb3ce..23ceb114fc 100644 --- a/clients/nexus-client/src/lib.rs +++ b/clients/nexus-client/src/lib.rs @@ -23,6 +23,8 @@ progenitor::generate_api!( }), replace = { Ipv4Network = ipnetwork::Ipv4Network, + Ipv6Network = ipnetwork::Ipv6Network, + IpNetwork = ipnetwork::IpNetwork, MacAddr = omicron_common::api::external::MacAddr, Name = omicron_common::api::external::Name, NewPasswordHash = omicron_passwords::NewPasswordHash, diff --git a/clients/sled-agent-client/src/lib.rs b/clients/sled-agent-client/src/lib.rs index 3daac7dd60..0df21d894e 100644 --- a/clients/sled-agent-client/src/lib.rs +++ b/clients/sled-agent-client/src/lib.rs @@ -5,11 +5,33 @@ //! Interface for making API requests to a Sled Agent use async_trait::async_trait; -use omicron_common::generate_logging_api; use std::convert::TryFrom; use uuid::Uuid; -generate_logging_api!("../../openapi/sled-agent.json"); +progenitor::generate_api!( + spec = "../../openapi/sled-agent.json", + inner_type = slog::Logger, + pre_hook = (|log: &slog::Logger, request: &reqwest::Request| { + slog::debug!(log, "client request"; + "method" => %request.method(), + "uri" => %request.url(), + "body" => ?&request.body(), + ); + }), + post_hook = (|log: &slog::Logger, result: &Result<_, _>| { + slog::debug!(log, "client response"; "result" => ?result); + }), + //TODO trade the manual transformations later in this file for the + // replace directives below?
+ replace = { + //Ipv4Network = ipnetwork::Ipv4Network, + SwitchLocation = omicron_common::api::external::SwitchLocation, + Ipv6Network = ipnetwork::Ipv6Network, + IpNetwork = ipnetwork::IpNetwork, + PortFec = omicron_common::api::internal::shared::PortFec, + PortSpeed = omicron_common::api::internal::shared::PortSpeed, + } +); impl omicron_common::api::external::ClientError for types::Error { fn message(&self) -> String { @@ -269,6 +291,12 @@ impl From for types::Ipv4Net { } } +impl From for types::Ipv4Network { + fn from(n: ipnetwork::Ipv4Network) -> Self { + Self::try_from(n.to_string()).unwrap_or_else(|e| panic!("{}: {}", n, e)) + } +} + impl From for types::Ipv6Net { fn from(n: ipnetwork::Ipv6Network) -> Self { Self::try_from(n.to_string()).unwrap_or_else(|e| panic!("{}: {}", n, e)) diff --git a/clients/wicketd-client/src/lib.rs b/clients/wicketd-client/src/lib.rs index ff45232520..e4325bdb69 100644 --- a/clients/wicketd-client/src/lib.rs +++ b/clients/wicketd-client/src/lib.rs @@ -44,6 +44,10 @@ progenitor::generate_api!( RackOperationStatus = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize ] }, RackNetworkConfig = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize ] }, UplinkConfig = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize ] }, + PortConfigV1 = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize ] }, + BgpPeerConfig = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize ] }, + BgpConfig = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize ] }, + RouteConfig = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize ] }, CurrentRssUserConfigInsensitive = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize ] }, CurrentRssUserConfigSensitive = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize ] }, CurrentRssUserConfig = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize ] }, @@ -52,6 +56,8 @@ progenitor::generate_api!( replace = { Duration = std::time::Duration, Ipv4Network = ipnetwork::Ipv4Network, + Ipv6Network = ipnetwork::Ipv6Network, + IpNetwork = ipnetwork::IpNetwork, PutRssUserConfigInsensitive = wicket_common::rack_setup::PutRssUserConfigInsensitive, EventReportForWicketdEngineSpec = wicket_common::update_events::EventReport, StepEventForWicketdEngineSpec = wicket_common::update_events::StepEvent, diff --git a/common/src/address.rs b/common/src/address.rs index 0358787258..cfdc599064 100644 --- a/common/src/address.rs +++ b/common/src/address.rs @@ -39,6 +39,7 @@ pub const CLICKHOUSE_PORT: u16 = 8123; pub const CLICKHOUSE_KEEPER_PORT: u16 = 9181; pub const OXIMETER_PORT: u16 = 12223; pub const DENDRITE_PORT: u16 = 12224; +pub const MGD_PORT: u16 = 4676; pub const DDMD_PORT: u16 = 8000; pub const MGS_PORT: u16 = 12225; pub const WICKETD_PORT: u16 = 12226; diff --git a/common/src/api/external/mod.rs b/common/src/api/external/mod.rs index 53512408af..ef6faa0e55 100644 --- a/common/src/api/external/mod.rs +++ b/common/src/api/external/mod.rs @@ -12,6 +12,7 @@ pub mod http_pagination; use dropshot::HttpError; pub use error::*; +pub use crate::api::internal::shared::SwitchLocation; use anyhow::anyhow; use anyhow::Context; use api_identity::ObjectIdentity; @@ -98,6 +99,13 @@ pub struct DataPageParams<'a, NameType> { } impl<'a, NameType> DataPageParams<'a, NameType> { + pub fn max_page() -> Self { + Self { + marker: None, + direction: dropshot::PaginationOrder::Ascending, + limit: 
NonZeroU32::new(u32::MAX).unwrap(), + } + } /// Maps the marker type to a new type. /// /// Equivalent to [std::option::Option::map], because that's what it calls. @@ -400,7 +408,7 @@ impl SemverVersion { /// This is the official ECMAScript-compatible validation regex for /// semver: - /// https://semver.org/#is-there-a-suggested-regular-expression-regex-to-check-a-semver-string + /// <https://semver.org/#is-there-a-suggested-regular-expression-regex-to-check-a-semver-string> const VALIDATION_REGEX: &str = r"^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$"; } @@ -690,6 +698,8 @@ pub enum ResourceType { AddressLot, AddressLotBlock, BackgroundTask, + BgpConfig, + BgpAnnounceSet, Fleet, Silo, SiloUser, @@ -2476,7 +2486,9 @@ pub struct SwitchPortBgpPeerConfig { } /// A base BGP configuration. -#[derive(Clone, Debug, Deserialize, JsonSchema, Serialize, PartialEq)] +#[derive( + ObjectIdentity, Clone, Debug, Deserialize, JsonSchema, Serialize, PartialEq, +)] pub struct BgpConfig { #[serde(flatten)] pub identity: IdentityMetadata, @@ -2528,6 +2540,72 @@ pub struct SwitchPortAddressConfig { pub interface_name: String, } +/// The current state of a BGP peer. +#[derive(Clone, Debug, Deserialize, JsonSchema, Serialize, PartialEq)] +#[serde(rename_all = "snake_case")] +pub enum BgpPeerState { + /// Initial state. Refuse all incoming BGP connections. No resources + /// allocated to peer. + Idle, + + /// Waiting for the TCP connection to be completed. + Connect, + + /// Trying to acquire peer by listening for and accepting a TCP connection. + Active, + + /// Waiting for open message from peer. + OpenSent, + + /// Waiting for keepalive or notification from peer. + OpenConfirm, + + /// Synchronizing with peer. + SessionSetup, + + /// Session established. Able to exchange update, notification and keepalive + /// messages with peers. + Established, +} + +/// The current status of a BGP peer. +#[derive(Clone, Debug, Deserialize, JsonSchema, Serialize, PartialEq)] +pub struct BgpPeerStatus { + /// IP address of the peer. + pub addr: IpAddr, + + /// Local autonomous system number. + pub local_asn: u32, + + /// Remote autonomous system number. + pub remote_asn: u32, + + /// State of the peer. + pub state: BgpPeerState, + + /// Time of last state change. + pub state_duration_millis: u64, + + /// Switch with the peer session. + pub switch: SwitchLocation, +} + +/// A route imported from a BGP peer. +#[derive(Clone, Debug, Deserialize, JsonSchema, Serialize, PartialEq)] +pub struct BgpImportedRouteIpv4 { + /// The destination network prefix. + pub prefix: Ipv4Net, + + /// The nexthop the prefix is reachable through. + pub nexthop: Ipv4Addr, + + /// BGP identifier of the originating router. + pub id: u32, + + /// Switch the route is imported into. + pub switch: SwitchLocation, +} + #[cfg(test)] mod test { use serde::Deserialize; diff --git a/common/src/api/internal/shared.rs b/common/src/api/internal/shared.rs index 9e3f3ec1f6..1300a8d5ff 100644 --- a/common/src/api/internal/shared.rs +++ b/common/src/api/internal/shared.rs @@ -5,7 +5,7 @@ //! Types shared between Nexus and Sled Agent.
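
The new `BgpPeerState`, `BgpPeerStatus`, and `BgpImportedRouteIpv4` types above are plain serde/JsonSchema types, so their external wire shape follows directly from the derives. A minimal sketch of that shape, assuming `omicron_common` and `serde_json` are available to the calling crate; the literal values are placeholders:

```rust
// Illustrative only: serialize one of the new external API types to show the
// wire shape. Values are placeholders; assumes serde_json is a dependency.
use omicron_common::api::external::{BgpPeerState, BgpPeerStatus, SwitchLocation};
use std::net::{IpAddr, Ipv4Addr};

fn main() {
    let status = BgpPeerStatus {
        addr: IpAddr::V4(Ipv4Addr::new(203, 0, 113, 1)),
        local_asn: 65001,
        remote_asn: 65002,
        state: BgpPeerState::OpenSent,
        state_duration_millis: 12_345,
        switch: SwitchLocation::Switch0,
    };
    // Both enums are tagged `rename_all = "snake_case"`, so this prints
    // "state":"open_sent" and "switch":"switch0".
    println!("{}", serde_json::to_string(&status).unwrap());
}
```

Because `SwitchLocation` is re-exported from `api::external` in this change, callers that only deal with the external API can name the switch without reaching into `api::internal::shared`.
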
use crate::api::external::{self, Name}; -use ipnetwork::Ipv4Network; +use ipnetwork::{IpNetwork, Ipv4Network}; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use std::{ @@ -77,9 +77,57 @@ pub struct RackNetworkConfig { /// Last ip address to be used for configuring network infrastructure pub infra_ip_last: Ipv4Addr, /// Uplinks for connecting the rack to external networks - pub uplinks: Vec<UplinkConfig>, + pub ports: Vec<PortConfigV1>, + /// BGP configurations for connecting the rack to external networks + pub bgp: Vec<BgpConfig>, } +#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, JsonSchema)] +pub struct BgpConfig { + /// The autonomous system number for the BGP configuration. + pub asn: u32, + /// The set of prefixes for the BGP router to originate. + pub originate: Vec<Ipv4Network>, +} + +#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, JsonSchema)] +pub struct BgpPeerConfig { + /// The autonomous system number of the peer. + pub asn: u32, + /// Switch port the peer is reachable on. + pub port: String, + /// Address of the peer. + pub addr: Ipv4Addr, +} + +#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, JsonSchema)] +pub struct RouteConfig { + /// The destination of the route. + pub destination: IpNetwork, + /// The nexthop/gateway address. + pub nexthop: IpAddr, +} + +#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, JsonSchema)] +pub struct PortConfigV1 { + /// The set of routes associated with this port. + pub routes: Vec<RouteConfig>, + /// This port's addresses. + pub addresses: Vec<IpNetwork>, + /// Switch the port belongs to. + pub switch: SwitchLocation, + /// Name of the port this config applies to. + pub port: String, + /// Port speed. + pub uplink_port_speed: PortSpeed, + /// Port forward error correction type. + pub uplink_port_fec: PortFec, + /// BGP peers on this port. + pub bgp_peers: Vec<BgpPeerConfig>, +} + +/// Deprecated, use PortConfigV1 instead. Cannot actually deprecate due to + #[derive(Clone, Debug, Deserialize, Serialize, PartialEq, JsonSchema)] pub struct UplinkConfig { /// Gateway address @@ -99,9 +147,41 @@ pub struct UplinkConfig { pub uplink_vid: Option<u16>, } +/// A set of switch uplinks. +#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema)] +pub struct SwitchPorts { + pub uplinks: Vec<HostPortConfig>, +} + +#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, JsonSchema)] +pub struct HostPortConfig { + /// Switchport to use for external connectivity + pub port: String, + + /// IP Address and prefix (e.g., `192.168.0.1/16`) to apply to switchport + /// (must be in infra_ip pool) + pub addrs: Vec<IpNetwork>, +} + +impl From<PortConfigV1> for HostPortConfig { + fn from(x: PortConfigV1) -> Self { + Self { port: x.port, addrs: x.addresses } + } +} + /// Identifies switch physical location #[derive( - Clone, Copy, Debug, Deserialize, Serialize, PartialEq, JsonSchema, Hash, Eq, + Clone, + Copy, + Debug, + Deserialize, + Serialize, + PartialEq, + JsonSchema, + Hash, + Eq, + PartialOrd, + Ord, )] #[serde(rename_all = "snake_case")] pub enum SwitchLocation { diff --git a/common/src/nexus_config.rs b/common/src/nexus_config.rs index ad62c34f92..a43dc1837f 100644 --- a/common/src/nexus_config.rs +++ b/common/src/nexus_config.rs @@ -223,6 +223,12 @@ pub struct DpdConfig { pub address: SocketAddr, } +/// Configuration for the `mgd` maghemite daemon. +#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] +pub struct MgdConfig { + pub address: SocketAddr, +} + // A deserializable type that does no validation on the tunable parameters.
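
The `routes` and `addresses` lists that the deploy.sh change earlier in this diff now writes into `config-rss.toml` deserialize into the `RouteConfig` and `PortConfigV1` types defined above. A small sketch of that round trip for a single route entry, assuming the workspace `toml` crate and `ipnetwork`'s serde support; the addresses are placeholders:

```rust
// Illustrative only: parse a route entry of the shape written by
// .github/buildomat/jobs/deploy.sh into the new RouteConfig type.
use omicron_common::api::internal::shared::RouteConfig;

fn main() {
    // Same data as `routes = [{nexthop = "...", destination = "0.0.0.0/0"}]`
    // in config-rss.toml, written here as a standalone TOML document.
    let entry = r#"
        nexthop = "192.168.1.199"
        destination = "0.0.0.0/0"
    "#;
    let route: RouteConfig = toml::from_str(entry).expect("valid route entry");
    assert_eq!(route.nexthop.to_string(), "192.168.1.199");
    assert_eq!(route.destination.to_string(), "0.0.0.0/0");
}
```

Moving from the single `gateway_ip`/`uplink_cidr` fields of `UplinkConfig` to lists of routes and addresses is what lets one `PortConfigV1` carry a default route plus several prefixes and BGP peers.
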
#[derive(Clone, Debug, Deserialize, PartialEq)] struct UnvalidatedTunables { @@ -370,6 +376,9 @@ pub struct PackageConfig { /// `Dendrite` dataplane daemon configuration #[serde(default)] pub dendrite: HashMap, + /// Maghemite mgd daemon configuration + #[serde(default)] + pub mgd: HashMap, /// Background task configuration pub background_tasks: BackgroundTaskConfig, /// Default Crucible region allocation strategy @@ -450,7 +459,7 @@ mod test { use crate::nexus_config::{ BackgroundTaskConfig, ConfigDropshotWithTls, Database, DeploymentConfig, DnsTasksConfig, DpdConfig, ExternalEndpointsConfig, - InternalDns, LoadErrorKind, + InternalDns, LoadErrorKind, MgdConfig, }; use dropshot::ConfigDropshot; use dropshot::ConfigLogging; @@ -586,6 +595,8 @@ mod test { type = "from_dns" [dendrite.switch0] address = "[::1]:12224" + [mgd.switch0] + address = "[::1]:4676" [background_tasks] dns_internal.period_secs_config = 1 dns_internal.period_secs_servers = 2 @@ -665,6 +676,13 @@ mod test { .unwrap(), } )]), + mgd: HashMap::from([( + SwitchLocation::Switch0, + MgdConfig { + address: SocketAddr::from_str("[::1]:4676") + .unwrap(), + } + )]), background_tasks: BackgroundTaskConfig { dns_internal: DnsTasksConfig { period_secs_config: Duration::from_secs(1), diff --git a/dev-tools/omdb/tests/env.out b/dev-tools/omdb/tests/env.out index 8e345b78d1..5d9c263166 100644 --- a/dev-tools/omdb/tests/env.out +++ b/dev-tools/omdb/tests/env.out @@ -7,7 +7,7 @@ sim-b6d65341 [::1]:REDACTED_PORT - REDACTED_UUID_REDACTED_UUID_REDACTED --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (7.0.0) +note: database schema version matches expected (8.0.0) ============================================= EXECUTING COMMAND: omdb ["db", "--db-url", "junk", "sleds"] termination: Exited(2) @@ -172,7 +172,7 @@ stderr: note: database URL not specified. Will search DNS. note: (override with --db-url or OMDB_DB_URL) note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (7.0.0) +note: database schema version matches expected (8.0.0) ============================================= EXECUTING COMMAND: omdb ["--dns-server", "[::1]:REDACTED_PORT", "db", "sleds"] termination: Exited(0) @@ -185,5 +185,5 @@ stderr: note: database URL not specified. Will search DNS. 
note: (override with --db-url or OMDB_DB_URL) note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (7.0.0) +note: database schema version matches expected (8.0.0) ============================================= diff --git a/dev-tools/omdb/tests/successes.out b/dev-tools/omdb/tests/successes.out index 6fd84c5eb3..33c4364c36 100644 --- a/dev-tools/omdb/tests/successes.out +++ b/dev-tools/omdb/tests/successes.out @@ -8,7 +8,7 @@ external oxide-dev.test 2 create silo: "tes --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (7.0.0) +note: database schema version matches expected (8.0.0) ============================================= EXECUTING COMMAND: omdb ["db", "dns", "diff", "external", "2"] termination: Exited(0) @@ -24,7 +24,7 @@ changes: names added: 1, names removed: 0 --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (7.0.0) +note: database schema version matches expected (8.0.0) ============================================= EXECUTING COMMAND: omdb ["db", "dns", "names", "external", "2"] termination: Exited(0) @@ -36,7 +36,7 @@ External zone: oxide-dev.test --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (7.0.0) +note: database schema version matches expected (8.0.0) ============================================= EXECUTING COMMAND: omdb ["db", "services", "list-instances"] termination: Exited(0) @@ -49,10 +49,12 @@ Dendrite REDACTED_UUID_REDACTED_UUID_REDACTED [::1]:REDACTED_PORT ExternalDns REDACTED_UUID_REDACTED_UUID_REDACTED [::1]:REDACTED_PORT sim-b6d65341 InternalDns REDACTED_UUID_REDACTED_UUID_REDACTED [::1]:REDACTED_PORT sim-b6d65341 Nexus REDACTED_UUID_REDACTED_UUID_REDACTED [::ffff:127.0.0.1]:REDACTED_PORT sim-b6d65341 +Mgd REDACTED_UUID_REDACTED_UUID_REDACTED [::1]:REDACTED_PORT sim-b6d65341 +Mgd REDACTED_UUID_REDACTED_UUID_REDACTED [::1]:REDACTED_PORT sim-b6d65341 --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (7.0.0) +note: database schema version matches expected (8.0.0) ============================================= EXECUTING COMMAND: omdb ["db", "services", "list-by-sled"] termination: Exited(0) @@ -67,11 +69,13 @@ sled: sim-b6d65341 (id REDACTED_UUID_REDACTED_UUID_REDACTED) ExternalDns REDACTED_UUID_REDACTED_UUID_REDACTED [::1]:REDACTED_PORT InternalDns REDACTED_UUID_REDACTED_UUID_REDACTED [::1]:REDACTED_PORT Nexus REDACTED_UUID_REDACTED_UUID_REDACTED [::ffff:127.0.0.1]:REDACTED_PORT + Mgd REDACTED_UUID_REDACTED_UUID_REDACTED [::1]:REDACTED_PORT + Mgd REDACTED_UUID_REDACTED_UUID_REDACTED [::1]:REDACTED_PORT --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (7.0.0) +note: database schema version matches expected (8.0.0) ============================================= EXECUTING COMMAND: omdb ["db", "sleds"] termination: Exited(0) @@ -82,7 +86,7 @@ sim-b6d65341 [::1]:REDACTED_PORT - 
REDACTED_UUID_REDACTED_UUID_REDACTED --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (7.0.0) +note: database schema version matches expected (8.0.0) ============================================= EXECUTING COMMAND: omdb ["mgs", "inventory"] termination: Exited(0) diff --git a/env.sh b/env.sh index 5b1e2b34ac..483a89f597 100644 --- a/env.sh +++ b/env.sh @@ -9,5 +9,6 @@ OMICRON_WS="$(cd $(dirname "${BASH_SOURCE[0]}") && echo $PWD)" export PATH="$OMICRON_WS/out/cockroachdb/bin:$PATH" export PATH="$OMICRON_WS/out/clickhouse:$PATH" export PATH="$OMICRON_WS/out/dendrite-stub/bin:$PATH" +export PATH="$OMICRON_WS/out/mgd/root/opt/oxide/mgd/bin:$PATH" unset OMICRON_WS set +o xtrace diff --git a/illumos-utils/src/destructor.rs b/illumos-utils/src/destructor.rs index e019f2562f..ccc5b15486 100644 --- a/illumos-utils/src/destructor.rs +++ b/illumos-utils/src/destructor.rs @@ -21,7 +21,7 @@ use tokio::sync::mpsc; type SharedBoxFuture = Shared + Send>>>; -/// Future stored within [Destructor]. +/// Future stored within [`Destructor`]. struct ShutdownWaitFuture(SharedBoxFuture>); impl Future for ShutdownWaitFuture { diff --git a/installinator/src/bootstrap.rs b/installinator/src/bootstrap.rs index 2854293d8a..71c76809db 100644 --- a/installinator/src/bootstrap.rs +++ b/installinator/src/bootstrap.rs @@ -20,7 +20,7 @@ use sled_hardware::underlay::BootstrapInterface; use slog::info; use slog::Logger; -const MG_DDM_SERVICE_FMRI: &str = "svc:/system/illumos/mg-ddm"; +const MG_DDM_SERVICE_FMRI: &str = "svc:/oxide/mg-ddm"; const MG_DDM_MANIFEST_PATH: &str = "/opt/oxide/mg-ddm/pkg/ddm/manifest.xml"; // TODO-cleanup The implementation of this function is heavily derived from diff --git a/installinator/src/dispatch.rs b/installinator/src/dispatch.rs index 9c06aeac77..9bec14664c 100644 --- a/installinator/src/dispatch.rs +++ b/installinator/src/dispatch.rs @@ -104,7 +104,7 @@ impl DebugDiscoverOpts { /// Options shared by both [`DebugDiscoverOpts`] and [`InstallOpts`]. #[derive(Debug, Args)] struct DiscoverOpts { - /// The mechanism by which to discover peers: bootstrap or list:[::1]:8000 + /// The mechanism by which to discover peers: bootstrap or `list:[::1]:8000` #[clap(long, default_value_t = DiscoveryMechanism::Bootstrap)] mechanism: DiscoveryMechanism, } diff --git a/internal-dns/src/config.rs b/internal-dns/src/config.rs index e5272cd23a..86dd6e802e 100644 --- a/internal-dns/src/config.rs +++ b/internal-dns/src/config.rs @@ -63,8 +63,9 @@ use crate::names::{ServiceName, DNS_ZONE}; use anyhow::{anyhow, ensure}; use dns_service_client::types::{DnsConfigParams, DnsConfigZone, DnsRecord}; +use omicron_common::api::internal::shared::SwitchLocation; use std::collections::BTreeMap; -use std::net::Ipv6Addr; +use std::net::{Ipv6Addr, SocketAddrV6}; use uuid::Uuid; /// Zones that can be referenced within the internal DNS system. 
@@ -136,6 +137,8 @@ pub struct DnsConfigBuilder { /// network sleds: BTreeMap, + scrimlets: BTreeMap, + /// set of hosts of type "zone" that have been configured so far, mapping /// each zone's unique uuid to its sole IPv6 address on the control plane /// network @@ -175,6 +178,7 @@ impl DnsConfigBuilder { DnsConfigBuilder { sleds: BTreeMap::new(), zones: BTreeMap::new(), + scrimlets: BTreeMap::new(), service_instances_zones: BTreeMap::new(), service_instances_sleds: BTreeMap::new(), } @@ -205,6 +209,15 @@ impl DnsConfigBuilder { } } + pub fn host_scrimlet( + &mut self, + switch_location: SwitchLocation, + addr: SocketAddrV6, + ) -> anyhow::Result<()> { + self.scrimlets.insert(switch_location, addr); + Ok(()) + } + /// Add a new dendrite host of type "zone" to the configuration /// /// Returns a [`Zone`] that can be used with [`Self::service_backend_zone()`] to @@ -351,6 +364,23 @@ impl DnsConfigBuilder { (zone.dns_name(), vec![DnsRecord::Aaaa(zone_ip)]) }); + let scrimlet_srv_records = + self.scrimlets.clone().into_iter().map(|(location, addr)| { + let srv = DnsRecord::Srv(dns_service_client::types::Srv { + prio: 0, + weight: 0, + port: addr.port(), + target: format!("{location}.scrimlet.{}", DNS_ZONE), + }); + (ServiceName::Scrimlet(location).dns_name(), vec![srv]) + }); + + let scrimlet_aaaa_records = + self.scrimlets.into_iter().map(|(location, addr)| { + let aaaa = DnsRecord::Aaaa(*addr.ip()); + (format!("{location}.scrimlet"), vec![aaaa]) + }); + // Assemble the set of SRV records, which implicitly point back at // zones' AAAA records. let srv_records_zones = self.service_instances_zones.into_iter().map( @@ -399,6 +429,8 @@ impl DnsConfigBuilder { .chain(zone_records) .chain(srv_records_sleds) .chain(srv_records_zones) + .chain(scrimlet_aaaa_records) + .chain(scrimlet_srv_records) .collect(); DnsConfigParams { diff --git a/internal-dns/src/names.rs b/internal-dns/src/names.rs index 44ed9228e2..e0c9b79555 100644 --- a/internal-dns/src/names.rs +++ b/internal-dns/src/names.rs @@ -4,6 +4,7 @@ //! Well-known DNS names and related types for internal DNS (see RFD 248) +use omicron_common::api::internal::shared::SwitchLocation; use uuid::Uuid; /// Name for the control plane DNS zone @@ -32,7 +33,9 @@ pub enum ServiceName { Crucible(Uuid), BoundaryNtp, InternalNtp, - Maghemite, + Maghemite, //TODO change to Dpd - maghemite has several services. 
+ Mgd, + Scrimlet(SwitchLocation), } impl ServiceName { @@ -55,6 +58,8 @@ impl ServiceName { ServiceName::BoundaryNtp => "boundary-ntp", ServiceName::InternalNtp => "internal-ntp", ServiceName::Maghemite => "maghemite", + ServiceName::Mgd => "mgd", + ServiceName::Scrimlet(_) => "scrimlet", } } @@ -76,7 +81,8 @@ impl ServiceName { | ServiceName::CruciblePantry | ServiceName::BoundaryNtp | ServiceName::InternalNtp - | ServiceName::Maghemite => { + | ServiceName::Maghemite + | ServiceName::Mgd => { format!("_{}._tcp", self.service_kind()) } ServiceName::SledAgent(id) => { @@ -85,6 +91,9 @@ impl ServiceName { ServiceName::Crucible(id) => { format!("_{}._tcp.{}", self.service_kind(), id) } + ServiceName::Scrimlet(location) => { + format!("_{location}._scrimlet._tcp") + } } } diff --git a/nexus/Cargo.toml b/nexus/Cargo.toml index 3de6dac7c0..323386ba25 100644 --- a/nexus/Cargo.toml +++ b/nexus/Cargo.toml @@ -22,6 +22,7 @@ crucible-agent-client.workspace = true crucible-pantry-client.workspace = true dns-service-client.workspace = true dpd-client.workspace = true +mg-admin-client.workspace = true dropshot.workspace = true fatfs.workspace = true futures.workspace = true diff --git a/nexus/db-macros/src/lookup.rs b/nexus/db-macros/src/lookup.rs index 38cab15e30..f2362f5bc5 100644 --- a/nexus/db-macros/src/lookup.rs +++ b/nexus/db-macros/src/lookup.rs @@ -15,7 +15,7 @@ use std::ops::Deref; // INPUT (arguments to the macro) // -/// Arguments for [`lookup_resource!`] +/// Arguments for [`super::lookup_resource!`] // NOTE: this is only "pub" for the `cargo doc` link on [`lookup_resource!`]. #[derive(serde::Deserialize)] pub struct Input { @@ -167,7 +167,7 @@ impl Resource { // MACRO IMPLEMENTATION // -/// Implementation of [`lookup_resource!`] +/// Implementation of [`super::lookup_resource!`] pub fn lookup_resource( raw_input: TokenStream, ) -> Result { diff --git a/nexus/db-model/src/bgp.rs b/nexus/db-model/src/bgp.rs index 532b9cce36..cd0932beb8 100644 --- a/nexus/db-model/src/bgp.rs +++ b/nexus/db-model/src/bgp.rs @@ -6,8 +6,10 @@ use crate::schema::{bgp_announce_set, bgp_announcement, bgp_config}; use crate::SqlU32; use db_macros::Resource; use ipnetwork::IpNetwork; +use nexus_types::external_api::params; use nexus_types::identity::Resource; use omicron_common::api::external; +use omicron_common::api::external::IdentityMetadataCreateParams; use serde::{Deserialize, Serialize}; use uuid::Uuid; @@ -39,6 +41,22 @@ impl Into for BgpConfig { } } +impl From for BgpConfig { + fn from(c: params::BgpConfigCreate) -> BgpConfig { + BgpConfig { + identity: BgpConfigIdentity::new( + Uuid::new_v4(), + IdentityMetadataCreateParams { + name: c.identity.name.clone(), + description: c.identity.description.clone(), + }, + ), + asn: c.asn.into(), + vrf: c.vrf.map(|x| x.to_string()), + } + } +} + #[derive( Queryable, Insertable, @@ -55,6 +73,20 @@ pub struct BgpAnnounceSet { pub identity: BgpAnnounceSetIdentity, } +impl From for BgpAnnounceSet { + fn from(x: params::BgpAnnounceSetCreate) -> BgpAnnounceSet { + BgpAnnounceSet { + identity: BgpAnnounceSetIdentity::new( + Uuid::new_v4(), + IdentityMetadataCreateParams { + name: x.identity.name.clone(), + description: x.identity.description.clone(), + }, + ), + } + } +} + impl Into for BgpAnnounceSet { fn into(self) -> external::BgpAnnounceSet { external::BgpAnnounceSet { identity: self.identity() } diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 61a05754c6..f16b2f3609 100644 --- a/nexus/db-model/src/schema.rs +++ 
b/nexus/db-model/src/schema.rs @@ -188,7 +188,7 @@ table! { } table! { - switch_port_settings_route_config (port_settings_id, interface_name, dst, gw, vid) { + switch_port_settings_route_config (port_settings_id, interface_name, dst, gw) { port_settings_id -> Uuid, interface_name -> Text, dst -> Inet, @@ -1142,7 +1142,7 @@ table! { /// /// This should be updated whenever the schema is changed. For more details, /// refer to: schema/crdb/README.adoc -pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(7, 0, 0); +pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(8, 0, 0); allow_tables_to_appear_in_same_query!( system_update, diff --git a/nexus/db-model/src/service_kind.rs b/nexus/db-model/src/service_kind.rs index c2598434d5..4210c3ee20 100644 --- a/nexus/db-model/src/service_kind.rs +++ b/nexus/db-model/src/service_kind.rs @@ -30,6 +30,7 @@ impl_enum_type!( Oximeter => b"oximeter" Tfport => b"tfport" Ntp => b"ntp" + Mgd => b"mgd" ); impl TryFrom for ServiceUsingCertificate { @@ -88,6 +89,7 @@ impl From for ServiceKind { | internal_api::params::ServiceKind::InternalNtp => { ServiceKind::Ntp } + internal_api::params::ServiceKind::Mgd => ServiceKind::Mgd, } } } diff --git a/nexus/db-model/src/switch_interface.rs b/nexus/db-model/src/switch_interface.rs index 9ac7e4323a..f0c4b91de6 100644 --- a/nexus/db-model/src/switch_interface.rs +++ b/nexus/db-model/src/switch_interface.rs @@ -64,7 +64,14 @@ impl Into for DbSwitchInterfaceKind { } #[derive( - Queryable, Insertable, Selectable, Clone, Debug, Serialize, Deserialize, + Queryable, + Insertable, + Selectable, + Clone, + Debug, + Serialize, + Deserialize, + AsChangeset, )] #[diesel(table_name = switch_vlan_interface_config)] pub struct SwitchVlanInterfaceConfig { diff --git a/nexus/db-model/src/switch_port.rs b/nexus/db-model/src/switch_port.rs index e9c0697450..8f81883301 100644 --- a/nexus/db-model/src/switch_port.rs +++ b/nexus/db-model/src/switch_port.rs @@ -12,6 +12,7 @@ use crate::schema::{ }; use crate::SqlU16; use db_macros::Resource; +use diesel::AsChangeset; use ipnetwork::IpNetwork; use nexus_types::external_api::params; use nexus_types::identity::Resource; @@ -225,7 +226,14 @@ impl Into for SwitchPortConfig { } #[derive( - Queryable, Insertable, Selectable, Clone, Debug, Serialize, Deserialize, + Queryable, + Insertable, + Selectable, + Clone, + Debug, + Serialize, + Deserialize, + AsChangeset, )] #[diesel(table_name = switch_port_settings_link_config)] pub struct SwitchPortLinkConfig { @@ -263,7 +271,14 @@ impl Into for SwitchPortLinkConfig { } #[derive( - Queryable, Insertable, Selectable, Clone, Debug, Serialize, Deserialize, + Queryable, + Insertable, + Selectable, + Clone, + Debug, + Serialize, + Deserialize, + AsChangeset, )] #[diesel(table_name = lldp_service_config)] pub struct LldpServiceConfig { @@ -321,7 +336,14 @@ impl Into for LldpConfig { } #[derive( - Queryable, Insertable, Selectable, Clone, Debug, Serialize, Deserialize, + Queryable, + Insertable, + Selectable, + Clone, + Debug, + Serialize, + Deserialize, + AsChangeset, )] #[diesel(table_name = switch_port_settings_interface_config)] pub struct SwitchInterfaceConfig { @@ -362,7 +384,14 @@ impl Into for SwitchInterfaceConfig { } #[derive( - Queryable, Insertable, Selectable, Clone, Debug, Serialize, Deserialize, + Queryable, + Insertable, + Selectable, + Clone, + Debug, + Serialize, + Deserialize, + AsChangeset, )] #[diesel(table_name = switch_port_settings_route_config)] pub struct SwitchPortRouteConfig { @@ -398,12 +427,23 @@ impl Into for 
SwitchPortRouteConfig { } #[derive( - Queryable, Insertable, Selectable, Clone, Debug, Serialize, Deserialize, + Queryable, + Insertable, + Selectable, + Clone, + Debug, + Serialize, + Deserialize, + AsChangeset, )] #[diesel(table_name = switch_port_settings_bgp_peer_config)] pub struct SwitchPortBgpPeerConfig { pub port_settings_id: Uuid, + + //TODO(ry) this should be associated with the BGP configuration + // not an individual peer. pub bgp_announce_set_id: Uuid, + pub bgp_config_id: Uuid, pub interface_name: String, pub addr: IpNetwork, @@ -440,7 +480,14 @@ impl Into for SwitchPortBgpPeerConfig { } #[derive( - Queryable, Insertable, Selectable, Clone, Debug, Serialize, Deserialize, + Queryable, + Insertable, + Selectable, + Clone, + Debug, + Serialize, + Deserialize, + AsChangeset, )] #[diesel(table_name = switch_port_settings_address_config)] pub struct SwitchPortAddressConfig { diff --git a/nexus/db-queries/src/db/datastore/bgp.rs b/nexus/db-queries/src/db/datastore/bgp.rs new file mode 100644 index 0000000000..898545b678 --- /dev/null +++ b/nexus/db-queries/src/db/datastore/bgp.rs @@ -0,0 +1,342 @@ +use super::DataStore; +use crate::context::OpContext; +use crate::db; +use crate::db::error::public_error_from_diesel; +use crate::db::error::ErrorHandler; +use crate::db::error::TransactionError; +use crate::db::model::Name; +use crate::db::model::{BgpAnnounceSet, BgpAnnouncement, BgpConfig}; +use crate::db::pagination::paginated; +use async_bb8_diesel::{AsyncConnection, AsyncRunQueryDsl}; +use chrono::Utc; +use diesel::{ExpressionMethods, QueryDsl, SelectableHelper}; +use nexus_types::external_api::params; +use nexus_types::identity::Resource; +use omicron_common::api::external::http_pagination::PaginatedBy; +use omicron_common::api::external::{ + CreateResult, DeleteResult, Error, ListResultVec, LookupResult, NameOrId, + ResourceType, +}; +use ref_cast::RefCast; +use uuid::Uuid; + +impl DataStore { + pub async fn bgp_config_set( + &self, + opctx: &OpContext, + config: ¶ms::BgpConfigCreate, + ) -> CreateResult { + use db::schema::bgp_config::dsl; + let pool = self.pool_connection_authorized(opctx).await?; + + let config: BgpConfig = config.clone().into(); + + let result = diesel::insert_into(dsl::bgp_config) + .values(config.clone()) + .returning(BgpConfig::as_returning()) + .get_result_async(&*pool) + .await + .map_err(|e| { + public_error_from_diesel( + e, + ErrorHandler::Conflict( + ResourceType::BgpConfig, + &config.id().to_string(), + ), + ) + })?; + + Ok(result) + } + + pub async fn bgp_config_delete( + &self, + opctx: &OpContext, + sel: ¶ms::BgpConfigSelector, + ) -> DeleteResult { + use db::schema::bgp_config; + use db::schema::bgp_config::dsl as bgp_config_dsl; + + use db::schema::switch_port_settings_bgp_peer_config as sps_bgp_peer_config; + use db::schema::switch_port_settings_bgp_peer_config::dsl as sps_bgp_peer_config_dsl; + + #[derive(Debug)] + enum BgpConfigDeleteError { + ConfigInUse, + } + type TxnError = TransactionError; + + let pool = self.pool_connection_authorized(opctx).await?; + pool.transaction_async(|conn| async move { + let name_or_id = sel.name_or_id.clone(); + + let id: Uuid = match name_or_id { + NameOrId::Id(id) => id, + NameOrId::Name(name) => { + bgp_config_dsl::bgp_config + .filter(bgp_config::name.eq(name.to_string())) + .select(bgp_config::id) + .limit(1) + .first_async::(&conn) + .await? 
+ } + }; + + let count = + sps_bgp_peer_config_dsl::switch_port_settings_bgp_peer_config + .filter(sps_bgp_peer_config::bgp_config_id.eq(id)) + .count() + .execute_async(&conn) + .await?; + + if count > 0 { + return Err(TxnError::CustomError( + BgpConfigDeleteError::ConfigInUse, + )); + } + + diesel::update(bgp_config_dsl::bgp_config) + .filter(bgp_config_dsl::id.eq(id)) + .set(bgp_config_dsl::time_deleted.eq(Utc::now())) + .execute_async(&conn) + .await?; + + Ok(()) + }) + .await + .map_err(|e| match e { + TxnError::CustomError(BgpConfigDeleteError::ConfigInUse) => { + Error::invalid_request("BGP config in use") + } + TxnError::Database(e) => { + public_error_from_diesel(e, ErrorHandler::Server) + } + }) + } + + pub async fn bgp_config_get( + &self, + opctx: &OpContext, + name_or_id: &NameOrId, + ) -> LookupResult { + use db::schema::bgp_config; + use db::schema::bgp_config::dsl; + let pool = self.pool_connection_authorized(opctx).await?; + + let name_or_id = name_or_id.clone(); + + let config = match name_or_id { + NameOrId::Name(name) => dsl::bgp_config + .filter(bgp_config::name.eq(name.to_string())) + .select(BgpConfig::as_select()) + .limit(1) + .first_async::(&*pool) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)), + NameOrId::Id(id) => dsl::bgp_config + .filter(bgp_config::id.eq(id)) + .select(BgpConfig::as_select()) + .limit(1) + .first_async::(&*pool) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)), + }?; + + Ok(config) + } + + pub async fn bgp_config_list( + &self, + opctx: &OpContext, + pagparams: &PaginatedBy<'_>, + ) -> ListResultVec { + use db::schema::bgp_config::dsl; + + let pool = self.pool_connection_authorized(opctx).await?; + + match pagparams { + PaginatedBy::Id(pagparams) => { + paginated(dsl::bgp_config, dsl::id, &pagparams) + } + PaginatedBy::Name(pagparams) => paginated( + dsl::bgp_config, + dsl::name, + &pagparams.map_name(|n| Name::ref_cast(n)), + ), + } + .filter(dsl::time_deleted.is_null()) + .select(BgpConfig::as_select()) + .load_async(&*pool) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + + pub async fn bgp_announce_list( + &self, + opctx: &OpContext, + sel: ¶ms::BgpAnnounceSetSelector, + ) -> ListResultVec { + use db::schema::{ + bgp_announce_set, bgp_announce_set::dsl as announce_set_dsl, + bgp_announcement::dsl as announce_dsl, + }; + + #[derive(Debug)] + enum BgpAnnounceListError { + AnnounceSetNotFound(Name), + } + type TxnError = TransactionError; + + let pool = self.pool_connection_authorized(opctx).await?; + pool.transaction_async(|conn| async move { + let name_or_id = sel.name_or_id.clone(); + + let announce_id: Uuid = match name_or_id { + NameOrId::Id(id) => id, + NameOrId::Name(name) => announce_set_dsl::bgp_announce_set + .filter(bgp_announce_set::time_deleted.is_null()) + .filter(bgp_announce_set::name.eq(name.to_string())) + .select(bgp_announce_set::id) + .limit(1) + .first_async::(&conn) + .await + .map_err(|_| { + TxnError::CustomError( + BgpAnnounceListError::AnnounceSetNotFound( + Name::from(name.clone()), + ), + ) + })?, + }; + + let result = announce_dsl::bgp_announcement + .filter(announce_dsl::announce_set_id.eq(announce_id)) + .select(BgpAnnouncement::as_select()) + .load_async(&conn) + .await?; + + Ok(result) + }) + .await + .map_err(|e| match e { + TxnError::CustomError( + BgpAnnounceListError::AnnounceSetNotFound(name), + ) => Error::not_found_by_name(ResourceType::BgpAnnounceSet, &name), + TxnError::Database(e) => { + 
public_error_from_diesel(e, ErrorHandler::Server) + } + }) + } + + pub async fn bgp_create_announce_set( + &self, + opctx: &OpContext, + announce: ¶ms::BgpAnnounceSetCreate, + ) -> CreateResult<(BgpAnnounceSet, Vec)> { + use db::schema::bgp_announce_set::dsl as announce_set_dsl; + use db::schema::bgp_announcement::dsl as bgp_announcement_dsl; + + let pool = self.pool_connection_authorized(opctx).await?; + pool.transaction_async(|conn| async move { + let bas: BgpAnnounceSet = announce.clone().into(); + + let db_as: BgpAnnounceSet = + diesel::insert_into(announce_set_dsl::bgp_announce_set) + .values(bas.clone()) + .returning(BgpAnnounceSet::as_returning()) + .get_result_async::(&conn) + .await?; + + let mut db_annoucements = Vec::new(); + for a in &announce.announcement { + let an = BgpAnnouncement { + announce_set_id: db_as.id(), + address_lot_block_id: bas.identity.id, + network: a.network.into(), + }; + let an = + diesel::insert_into(bgp_announcement_dsl::bgp_announcement) + .values(an.clone()) + .returning(BgpAnnouncement::as_returning()) + .get_result_async::(&conn) + .await?; + db_annoucements.push(an); + } + + Ok((db_as, db_annoucements)) + }) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + + pub async fn bgp_delete_announce_set( + &self, + opctx: &OpContext, + sel: ¶ms::BgpAnnounceSetSelector, + ) -> DeleteResult { + use db::schema::bgp_announce_set; + use db::schema::bgp_announce_set::dsl as announce_set_dsl; + use db::schema::bgp_announcement::dsl as bgp_announcement_dsl; + + use db::schema::switch_port_settings_bgp_peer_config as sps_bgp_peer_config; + use db::schema::switch_port_settings_bgp_peer_config::dsl as sps_bgp_peer_config_dsl; + + #[derive(Debug)] + enum BgpAnnounceSetDeleteError { + AnnounceSetInUse, + } + type TxnError = TransactionError; + + let pool = self.pool_connection_authorized(opctx).await?; + let name_or_id = sel.name_or_id.clone(); + + pool.transaction_async(|conn| async move { + let id: Uuid = match name_or_id { + NameOrId::Name(name) => { + announce_set_dsl::bgp_announce_set + .filter(bgp_announce_set::name.eq(name.to_string())) + .select(bgp_announce_set::id) + .limit(1) + .first_async::(&conn) + .await? 
+ } + NameOrId::Id(id) => id, + }; + + let count = + sps_bgp_peer_config_dsl::switch_port_settings_bgp_peer_config + .filter(sps_bgp_peer_config::bgp_announce_set_id.eq(id)) + .count() + .execute_async(&conn) + .await?; + + if count > 0 { + return Err(TxnError::CustomError( + BgpAnnounceSetDeleteError::AnnounceSetInUse, + )); + } + + diesel::update(announce_set_dsl::bgp_announce_set) + .filter(announce_set_dsl::id.eq(id)) + .set(announce_set_dsl::time_deleted.eq(Utc::now())) + .execute_async(&conn) + .await?; + + diesel::delete(bgp_announcement_dsl::bgp_announcement) + .filter(bgp_announcement_dsl::announce_set_id.eq(id)) + .execute_async(&conn) + .await?; + + Ok(()) + }) + .await + .map_err(|e| match e { + TxnError::CustomError( + BgpAnnounceSetDeleteError::AnnounceSetInUse, + ) => Error::invalid_request("BGP announce set in use"), + TxnError::Database(e) => { + public_error_from_diesel(e, ErrorHandler::Server) + } + }) + } +} diff --git a/nexus/db-queries/src/db/datastore/mod.rs b/nexus/db-queries/src/db/datastore/mod.rs index a77e20647a..109bc3fb27 100644 --- a/nexus/db-queries/src/db/datastore/mod.rs +++ b/nexus/db-queries/src/db/datastore/mod.rs @@ -48,6 +48,7 @@ use std::sync::Arc; use uuid::Uuid; mod address_lot; +mod bgp; mod certificate; mod console_session; mod dataset; diff --git a/nexus/db-queries/src/db/datastore/switch_port.rs b/nexus/db-queries/src/db/datastore/switch_port.rs index 45be594be6..bedd8c19d9 100644 --- a/nexus/db-queries/src/db/datastore/switch_port.rs +++ b/nexus/db-queries/src/db/datastore/switch_port.rs @@ -97,25 +97,63 @@ pub struct SwitchPortSettingsGroupCreateResult { } impl DataStore { - // port settings + pub async fn switch_port_settings_exist( + &self, + opctx: &OpContext, + name: Name, + ) -> LookupResult { + use db::schema::switch_port_settings::{ + self, dsl as port_settings_dsl, + }; + + let pool = self.pool_connection_authorized(opctx).await?; + + port_settings_dsl::switch_port_settings + .filter(switch_port_settings::time_deleted.is_null()) + .filter(switch_port_settings::name.eq(name)) + .select(switch_port_settings::id) + .limit(1) + .first_async::(&*pool) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + + pub async fn switch_ports_using_settings( + &self, + opctx: &OpContext, + switch_port_settings_id: Uuid, + ) -> LookupResult> { + use db::schema::switch_port::{self, dsl}; + + let pool = self.pool_connection_authorized(opctx).await?; + + dsl::switch_port + .filter(switch_port::port_settings_id.eq(switch_port_settings_id)) + .select((switch_port::id, switch_port::port_name)) + .load_async(&*pool) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } pub async fn switch_port_settings_create( &self, opctx: &OpContext, params: ¶ms::SwitchPortSettingsCreate, ) -> CreateResult { - use db::schema::address_lot::dsl as address_lot_dsl; - use db::schema::bgp_announce_set::dsl as bgp_announce_set_dsl; - use db::schema::bgp_config::dsl as bgp_config_dsl; - use db::schema::lldp_service_config::dsl as lldp_config_dsl; - use db::schema::switch_port_settings::dsl as port_settings_dsl; - use db::schema::switch_port_settings_address_config::dsl as address_config_dsl; - use db::schema::switch_port_settings_bgp_peer_config::dsl as bgp_peer_dsl; - use db::schema::switch_port_settings_interface_config::dsl as interface_config_dsl; - use db::schema::switch_port_settings_link_config::dsl as link_config_dsl; - use db::schema::switch_port_settings_port_config::dsl as port_config_dsl; - use 
db::schema::switch_port_settings_route_config::dsl as route_config_dsl; - use db::schema::switch_vlan_interface_config::dsl as vlan_config_dsl; + use db::schema::{ + address_lot::dsl as address_lot_dsl, + bgp_announce_set::dsl as bgp_announce_set_dsl, + bgp_config::dsl as bgp_config_dsl, + lldp_service_config::dsl as lldp_config_dsl, + switch_port_settings::dsl as port_settings_dsl, + switch_port_settings_address_config::dsl as address_config_dsl, + switch_port_settings_bgp_peer_config::dsl as bgp_peer_dsl, + switch_port_settings_interface_config::dsl as interface_config_dsl, + switch_port_settings_link_config::dsl as link_config_dsl, + switch_port_settings_port_config::dsl as port_config_dsl, + switch_port_settings_route_config::dsl as route_config_dsl, + switch_vlan_interface_config::dsl as vlan_config_dsl, + }; #[derive(Debug)] enum SwitchPortSettingsCreateError { @@ -125,13 +163,13 @@ impl DataStore { ReserveBlock(ReserveBlockError), } type TxnError = TransactionError; + type SpsCreateError = SwitchPortSettingsCreateError; let conn = self.pool_connection_authorized(opctx).await?; // TODO https://github.com/oxidecomputer/omicron/issues/2811 // Audit external networking database transaction usage conn.transaction_async(|conn| async move { - // create the top level port settings object let port_settings = SwitchPortSettings::new(¶ms.identity); let db_port_settings: SwitchPortSettings = @@ -260,13 +298,6 @@ impl DataStore { let mut bgp_peer_config = Vec::new(); for (interface_name, p) in ¶ms.bgp_peers { - - // add the bgp peer - // TODO this requires pluming in the API to create - // - bgp configs - // - announce sets - // - announcements - use db::schema::bgp_announce_set; let announce_set_id = match &p.bgp_announce_set { NameOrId::Id(id) => *id, @@ -389,16 +420,13 @@ impl DataStore { }) .await .map_err(|e| match e { - TxnError::CustomError( - SwitchPortSettingsCreateError::BgpAnnounceSetNotFound) => { + TxnError::CustomError(SpsCreateError::BgpAnnounceSetNotFound) => { Error::invalid_request("BGP announce set not found") } - TxnError::CustomError( - SwitchPortSettingsCreateError::AddressLotNotFound) => { + TxnError::CustomError(SpsCreateError::AddressLotNotFound) => { Error::invalid_request("AddressLot not found") } - TxnError::CustomError( - SwitchPortSettingsCreateError::BgpConfigNotFound) => { + TxnError::CustomError(SpsCreateError::BgpConfigNotFound) => { Error::invalid_request("BGP config not found") } TxnError::CustomError( @@ -475,30 +503,31 @@ impl DataStore { .await?; // delete the port config object - use db::schema::switch_port_settings_port_config; - use db::schema::switch_port_settings_port_config::dsl as port_config_dsl; + use db::schema::switch_port_settings_port_config::{ + self as sps_port_config, dsl as port_config_dsl, + }; diesel::delete(port_config_dsl::switch_port_settings_port_config) - .filter(switch_port_settings_port_config::port_settings_id.eq(id)) + .filter(sps_port_config::port_settings_id.eq(id)) .execute_async(&conn) .await?; // delete the link configs - use db::schema::switch_port_settings_link_config; - use db::schema::switch_port_settings_link_config::dsl as link_config_dsl; + use db::schema::switch_port_settings_link_config::{ + self as sps_link_config, dsl as link_config_dsl, + }; let links: Vec = diesel::delete( link_config_dsl::switch_port_settings_link_config ) .filter( - switch_port_settings_link_config::port_settings_id.eq(id) + sps_link_config::port_settings_id.eq(id) ) .returning(SwitchPortLinkConfig::as_returning()) .get_results_async(&conn) 
.await?; // delete lldp configs - use db::schema::lldp_service_config; - use db::schema::lldp_service_config::dsl as lldp_config_dsl; + use db::schema::lldp_service_config::{self, dsl as lldp_config_dsl}; let lldp_svc_ids: Vec = links .iter() .map(|link| link.lldp_service_config_id) @@ -509,26 +538,25 @@ impl DataStore { .await?; // delete interface configs - use db::schema::switch_port_settings_interface_config; - use db::schema::switch_port_settings_interface_config::dsl - as interface_config_dsl; + use db::schema::switch_port_settings_interface_config::{ + self as sps_interface_config, dsl as interface_config_dsl, + }; let interfaces: Vec = diesel::delete( interface_config_dsl::switch_port_settings_interface_config ) .filter( - switch_port_settings_interface_config::port_settings_id.eq( - id - ) + sps_interface_config::port_settings_id.eq(id) ) .returning(SwitchInterfaceConfig::as_returning()) .get_results_async(&conn) .await?; // delete any vlan interfaces - use db::schema::switch_vlan_interface_config; - use db::schema::switch_vlan_interface_config::dsl as vlan_config_dsl; + use db::schema::switch_vlan_interface_config::{ + self, dsl as vlan_config_dsl, + }; let interface_ids: Vec = interfaces .iter() .map(|interface| interface.id) @@ -566,22 +594,26 @@ impl DataStore { .await?; // delete address configs - use db::schema::switch_port_settings_address_config as address_config; - use db::schema::switch_port_settings_address_config::dsl - as address_config_dsl; + use db::schema::switch_port_settings_address_config::{ + self as address_config, dsl as address_config_dsl, + }; - let ps = diesel::delete(address_config_dsl::switch_port_settings_address_config) - .filter(address_config::port_settings_id.eq(id)) - .returning(SwitchPortAddressConfig::as_returning()) - .get_result_async(&conn) - .await?; + let port_settings_addrs = diesel::delete( + address_config_dsl::switch_port_settings_address_config, + ) + .filter(address_config::port_settings_id.eq(id)) + .returning(SwitchPortAddressConfig::as_returning()) + .get_results_async(&conn) + .await?; use db::schema::address_lot_rsvd_block::dsl as rsvd_block_dsl; - diesel::delete(rsvd_block_dsl::address_lot_rsvd_block) - .filter(rsvd_block_dsl::id.eq(ps.rsvd_address_lot_block_id)) - .execute_async(&conn) - .await?; + for ps in &port_settings_addrs { + diesel::delete(rsvd_block_dsl::address_lot_rsvd_block) + .filter(rsvd_block_dsl::id.eq(ps.rsvd_address_lot_block_id)) + .execute_async(&conn) + .await?; + } Ok(()) }) @@ -650,10 +682,10 @@ impl DataStore { // TODO https://github.com/oxidecomputer/omicron/issues/2811 // Audit external networking database transaction usage conn.transaction_async(|conn| async move { - // get the top level port settings object - use db::schema::switch_port_settings::dsl as port_settings_dsl; - use db::schema::switch_port_settings; + use db::schema::switch_port_settings::{ + self, dsl as port_settings_dsl, + }; let id = match name_or_id { NameOrId::Id(id) => *id, @@ -668,23 +700,27 @@ impl DataStore { .await .map_err(|_| { TxnError::CustomError( - SwitchPortSettingsGetError::NotFound(name.clone()) + SwitchPortSettingsGetError::NotFound( + name.clone(), + ), ) })? 
} }; - let settings: SwitchPortSettings = port_settings_dsl::switch_port_settings - .filter(switch_port_settings::time_deleted.is_null()) - .filter(switch_port_settings::id.eq(id)) - .select(SwitchPortSettings::as_select()) - .limit(1) - .first_async::(&conn) - .await?; + let settings: SwitchPortSettings = + port_settings_dsl::switch_port_settings + .filter(switch_port_settings::time_deleted.is_null()) + .filter(switch_port_settings::id.eq(id)) + .select(SwitchPortSettings::as_select()) + .limit(1) + .first_async::(&conn) + .await?; // get the port config - use db::schema::switch_port_settings_port_config::dsl as port_config_dsl; - use db::schema::switch_port_settings_port_config as port_config; + use db::schema::switch_port_settings_port_config::{ + self as port_config, dsl as port_config_dsl, + }; let port: SwitchPortConfig = port_config_dsl::switch_port_settings_port_config .filter(port_config::port_settings_id.eq(id)) @@ -694,11 +730,13 @@ impl DataStore { .await?; // initialize result - let mut result = SwitchPortSettingsCombinedResult::new(settings, port); + let mut result = + SwitchPortSettingsCombinedResult::new(settings, port); // get the link configs - use db::schema::switch_port_settings_link_config::dsl as link_config_dsl; - use db::schema::switch_port_settings_link_config as link_config; + use db::schema::switch_port_settings_link_config::{ + self as link_config, dsl as link_config_dsl, + }; result.links = link_config_dsl::switch_port_settings_link_config .filter(link_config::port_settings_id.eq(id)) @@ -706,25 +744,25 @@ impl DataStore { .load_async::(&conn) .await?; - let lldp_svc_ids: Vec = result.links + let lldp_svc_ids: Vec = result + .links .iter() .map(|link| link.lldp_service_config_id) .collect(); - use db::schema::lldp_service_config::dsl as lldp_dsl; use db::schema::lldp_service_config as lldp_config; - result.link_lldp = - lldp_dsl::lldp_service_config - .filter(lldp_config::id.eq_any(lldp_svc_ids)) - .select(LldpServiceConfig::as_select()) - .limit(1) - .load_async::(&conn) - .await?; + use db::schema::lldp_service_config::dsl as lldp_dsl; + result.link_lldp = lldp_dsl::lldp_service_config + .filter(lldp_config::id.eq_any(lldp_svc_ids)) + .select(LldpServiceConfig::as_select()) + .limit(1) + .load_async::(&conn) + .await?; // get the interface configs - use db::schema::switch_port_settings_interface_config::dsl - as interface_config_dsl; - use db::schema::switch_port_settings_interface_config as interface_config; + use db::schema::switch_port_settings_interface_config::{ + self as interface_config, dsl as interface_config_dsl, + }; result.interfaces = interface_config_dsl::switch_port_settings_interface_config @@ -733,37 +771,35 @@ impl DataStore { .load_async::(&conn) .await?; - use db::schema::switch_vlan_interface_config::dsl as vlan_dsl; use db::schema::switch_vlan_interface_config as vlan_config; - let interface_ids: Vec = result.interfaces + use db::schema::switch_vlan_interface_config::dsl as vlan_dsl; + let interface_ids: Vec = result + .interfaces .iter() .map(|interface| interface.id) .collect(); - result.vlan_interfaces = - vlan_dsl::switch_vlan_interface_config - .filter( - vlan_config::interface_config_id.eq_any(interface_ids) - ) + result.vlan_interfaces = vlan_dsl::switch_vlan_interface_config + .filter(vlan_config::interface_config_id.eq_any(interface_ids)) .select(SwitchVlanInterfaceConfig::as_select()) .load_async::(&conn) .await?; - // get the route configs - use db::schema::switch_port_settings_route_config::dsl as route_config_dsl; - use 
db::schema::switch_port_settings_route_config as route_config; + use db::schema::switch_port_settings_route_config::{ + self as route_config, dsl as route_config_dsl, + }; - result.routes = - route_config_dsl::switch_port_settings_route_config - .filter(route_config::port_settings_id.eq(id)) - .select(SwitchPortRouteConfig::as_select()) - .load_async::(&conn) - .await?; + result.routes = route_config_dsl::switch_port_settings_route_config + .filter(route_config::port_settings_id.eq(id)) + .select(SwitchPortRouteConfig::as_select()) + .load_async::(&conn) + .await?; // get the bgp peer configs - use db::schema::switch_port_settings_bgp_peer_config::dsl as bgp_peer_dsl; - use db::schema::switch_port_settings_bgp_peer_config as bgp_peer; + use db::schema::switch_port_settings_bgp_peer_config::{ + self as bgp_peer, dsl as bgp_peer_dsl, + }; result.bgp_peers = bgp_peer_dsl::switch_port_settings_bgp_peer_config @@ -773,9 +809,9 @@ impl DataStore { .await?; // get the address configs - use db::schema::switch_port_settings_address_config::dsl - as address_config_dsl; - use db::schema::switch_port_settings_address_config as address_config; + use db::schema::switch_port_settings_address_config::{ + self as address_config, dsl as address_config_dsl, + }; result.addresses = address_config_dsl::switch_port_settings_address_config @@ -785,14 +821,15 @@ impl DataStore { .await?; Ok(result) - }) .await .map_err(|e| match e { - TxnError::CustomError( - SwitchPortSettingsGetError::NotFound(name)) => { - Error::not_found_by_name(ResourceType::SwitchPortSettings, &name) - } + TxnError::CustomError(SwitchPortSettingsGetError::NotFound( + name, + )) => Error::not_found_by_name( + ResourceType::SwitchPortSettings, + &name, + ), TxnError::Database(e) => match e { DieselError::DatabaseError(_, _) => { let name = name_or_id.to_string(); @@ -803,7 +840,7 @@ impl DataStore { &name, ), ) - }, + } _ => public_error_from_diesel(e, ErrorHandler::Server), }, }) @@ -1083,8 +1120,10 @@ impl DataStore { &self, opctx: &OpContext, ) -> ListResultVec { - use db::schema::switch_port::dsl as switch_port_dsl; - use db::schema::switch_port_settings_route_config::dsl as route_config_dsl; + use db::schema::{ + switch_port::dsl as switch_port_dsl, + switch_port_settings_route_config::dsl as route_config_dsl, + }; switch_port_dsl::switch_port .filter(switch_port_dsl::port_settings_id.is_not_null()) diff --git a/nexus/src/app/bgp.rs b/nexus/src/app/bgp.rs new file mode 100644 index 0000000000..e800d72bdd --- /dev/null +++ b/nexus/src/app/bgp.rs @@ -0,0 +1,162 @@ +use crate::app::authz; +use crate::external_api::params; +use nexus_db_model::{BgpAnnounceSet, BgpAnnouncement, BgpConfig}; +use nexus_db_queries::context::OpContext; +use omicron_common::api::external::http_pagination::PaginatedBy; +use omicron_common::api::external::{ + BgpImportedRouteIpv4, BgpPeerStatus, CreateResult, DeleteResult, Ipv4Net, + ListResultVec, LookupResult, NameOrId, +}; + +impl super::Nexus { + pub async fn bgp_config_set( + &self, + opctx: &OpContext, + config: ¶ms::BgpConfigCreate, + ) -> CreateResult { + opctx.authorize(authz::Action::Modify, &authz::FLEET).await?; + let result = self.db_datastore.bgp_config_set(opctx, config).await?; + Ok(result) + } + + pub async fn bgp_config_get( + &self, + opctx: &OpContext, + name_or_id: NameOrId, + ) -> LookupResult { + opctx.authorize(authz::Action::Read, &authz::FLEET).await?; + self.db_datastore.bgp_config_get(opctx, &name_or_id).await + } + + pub async fn bgp_config_list( + &self, + opctx: &OpContext, + 
pagparams: &PaginatedBy<'_>, + ) -> ListResultVec { + opctx.authorize(authz::Action::Read, &authz::FLEET).await?; + self.db_datastore.bgp_config_list(opctx, pagparams).await + } + + pub async fn bgp_config_delete( + &self, + opctx: &OpContext, + sel: ¶ms::BgpConfigSelector, + ) -> DeleteResult { + opctx.authorize(authz::Action::Modify, &authz::FLEET).await?; + let result = self.db_datastore.bgp_config_delete(opctx, sel).await?; + Ok(result) + } + + pub async fn bgp_create_announce_set( + &self, + opctx: &OpContext, + announce: ¶ms::BgpAnnounceSetCreate, + ) -> CreateResult<(BgpAnnounceSet, Vec)> { + opctx.authorize(authz::Action::Modify, &authz::FLEET).await?; + let result = + self.db_datastore.bgp_create_announce_set(opctx, announce).await?; + Ok(result) + } + + pub async fn bgp_announce_list( + &self, + opctx: &OpContext, + sel: ¶ms::BgpAnnounceSetSelector, + ) -> ListResultVec { + opctx.authorize(authz::Action::Read, &authz::FLEET).await?; + self.db_datastore.bgp_announce_list(opctx, sel).await + } + + pub async fn bgp_delete_announce_set( + &self, + opctx: &OpContext, + sel: ¶ms::BgpAnnounceSetSelector, + ) -> DeleteResult { + opctx.authorize(authz::Action::Modify, &authz::FLEET).await?; + let result = + self.db_datastore.bgp_delete_announce_set(opctx, sel).await?; + Ok(result) + } + + pub async fn bgp_peer_status( + &self, + opctx: &OpContext, + ) -> ListResultVec { + opctx.authorize(authz::Action::Read, &authz::FLEET).await?; + let mut result = Vec::new(); + for (switch, client) in &self.mg_clients { + let router_info = match client.inner.get_routers().await { + Ok(result) => result.into_inner(), + Err(e) => { + error!( + self.log, + "failed to get routers from {switch}: {e}" + ); + continue; + } + }; + + for r in &router_info { + for (addr, info) in &r.peers { + let Ok(addr) = addr.parse() else { + continue; + }; + result.push(BgpPeerStatus { + switch: *switch, + addr, + local_asn: r.asn, + remote_asn: info.asn.unwrap_or(0), + state: info.state.into(), + state_duration_millis: info.duration_millis, + }); + } + } + } + Ok(result) + } + + pub async fn bgp_imported_routes_ipv4( + &self, + opctx: &OpContext, + sel: ¶ms::BgpRouteSelector, + ) -> ListResultVec { + opctx.authorize(authz::Action::Read, &authz::FLEET).await?; + let mut result = Vec::new(); + for (switch, client) in &self.mg_clients { + let imported: Vec = match client + .inner + .get_imported4(&mg_admin_client::types::GetImported4Request { + asn: sel.asn, + }) + .await + { + Ok(result) => result + .into_inner() + .into_iter() + .map(|x| BgpImportedRouteIpv4 { + switch: *switch, + prefix: Ipv4Net( + ipnetwork::Ipv4Network::new( + x.prefix.value, + x.prefix.length, + ) + .unwrap(), + ), + nexthop: x.nexthop, + id: x.id, + }) + .collect(), + Err(e) => { + error!( + self.log, + "failed to get BGP imported from {switch}: {e}" + ); + continue; + } + }; + + result.extend_from_slice(&imported); + } + Ok(result) + } +} diff --git a/nexus/src/app/mod.rs b/nexus/src/app/mod.rs index 354df0ead3..ee13a7deae 100644 --- a/nexus/src/app/mod.rs +++ b/nexus/src/app/mod.rs @@ -20,6 +20,7 @@ use nexus_db_queries::authz; use nexus_db_queries::context::OpContext; use nexus_db_queries::db; use omicron_common::address::DENDRITE_PORT; +use omicron_common::address::MGD_PORT; use omicron_common::address::MGS_PORT; use omicron_common::api::external::Error; use omicron_common::api::internal::shared::SwitchLocation; @@ -34,6 +35,7 @@ use uuid::Uuid; // by resource. 
mod address_lot; pub(crate) mod background; +mod bgp; mod certificate; mod device_auth; mod disk; @@ -152,6 +154,9 @@ pub struct Nexus { /// Mapping of SwitchLocations to their respective Dendrite Clients dpd_clients: HashMap>, + /// Map switch location to maghemite admin clients. + mg_clients: HashMap>, + /// Background tasks background_tasks: background::BackgroundTasks, @@ -206,7 +211,13 @@ impl Nexus { let mut dpd_clients: HashMap> = HashMap::new(); - // Currently static dpd configuration mappings are still required for testing + let mut mg_clients: HashMap< + SwitchLocation, + Arc, + > = HashMap::new(); + + // Currently static dpd configuration mappings are still required for + // testing for (location, config) in &config.pkg.dendrite { let address = config.address.ip().to_string(); let port = config.address.port(); @@ -216,6 +227,11 @@ impl Nexus { ); dpd_clients.insert(*location, Arc::new(dpd_client)); } + for (location, config) in &config.pkg.mgd { + let mg_client = mg_admin_client::Client::new(&log, config.address) + .map_err(|e| format!("mg admin client: {e}"))?; + mg_clients.insert(*location, Arc::new(mg_client)); + } if config.pkg.dendrite.is_empty() { loop { let result = resolver @@ -249,6 +265,38 @@ impl Nexus { } } } + if config.pkg.mgd.is_empty() { + loop { + let result = resolver + .lookup_all_ipv6(ServiceName::Mgd) + .await + .map_err(|e| format!("Cannot lookup mgd addresses: {e}")); + match result { + Ok(addrs) => { + let mappings = map_switch_zone_addrs( + &log.new(o!("component" => "Nexus")), + addrs, + ) + .await; + for (location, addr) in &mappings { + let port = MGD_PORT; + let mgd_client = mg_admin_client::Client::new( + &log, + std::net::SocketAddr::new((*addr).into(), port), + ) + .map_err(|e| format!("mg admin client: {e}"))?; + mg_clients.insert(*location, Arc::new(mgd_client)); + } + break; + } + Err(e) => { + warn!(log, "Failed to lookup mgd address: {e}"); + tokio::time::sleep(std::time::Duration::from_secs(1)) + .await; + } + } + } + } // Connect to clickhouse - but do so lazily. // Clickhouse may not be executing when Nexus starts. 
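The hunk above wires maghemite admin (mgd) clients into Nexus the same way the dpd clients already are: any statically configured addresses from `config.pkg.mgd` are used as-is, and only when that map is empty does Nexus fall back to internal DNS, retrying the lookup until the service resolves. Below is a rough, self-contained sketch of that shape; `SwitchLocation`, `MgdClient`, `lookup_mgd_addrs`, and the port numbers are placeholders for illustration, not the real omicron types or values.

// Sketch only: stand-ins for the real omicron types.
use std::collections::HashMap;
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
enum SwitchLocation {
    Switch0,
    Switch1,
}

// Stand-in for mg_admin_client::Client.
#[derive(Debug)]
struct MgdClient {
    addr: SocketAddr,
}

// Stand-in for the internal-DNS lookup of the mgd service. In this sketch it
// never resolves, so the retry loop below only runs when no static config is
// supplied.
async fn lookup_mgd_addrs() -> Result<Vec<(SwitchLocation, SocketAddr)>, String> {
    Err("mgd not registered in DNS yet".to_string())
}

async fn build_mgd_clients(
    static_config: &[(SwitchLocation, SocketAddr)],
) -> HashMap<SwitchLocation, Arc<MgdClient>> {
    // Static mappings (e.g. from a test config) win outright.
    let mut clients: HashMap<SwitchLocation, Arc<MgdClient>> = static_config
        .iter()
        .map(|(loc, addr)| (*loc, Arc::new(MgdClient { addr: *addr })))
        .collect();

    // Otherwise poll DNS until the service shows up, then build one client
    // per switch location.
    if clients.is_empty() {
        loop {
            match lookup_mgd_addrs().await {
                Ok(mappings) => {
                    for (loc, addr) in mappings {
                        clients.insert(loc, Arc::new(MgdClient { addr }));
                    }
                    break;
                }
                Err(e) => {
                    eprintln!("mgd lookup failed, retrying: {e}");
                    tokio::time::sleep(Duration::from_secs(1)).await;
                }
            }
        }
    }
    clients
}

#[tokio::main]
async fn main() {
    // Placeholder addresses and ports, purely for the example.
    let static_config: Vec<(SwitchLocation, SocketAddr)> = vec![
        (SwitchLocation::Switch0, "[::1]:4676".parse().unwrap()),
        (SwitchLocation::Switch1, "[::1]:4677".parse().unwrap()),
    ];
    let clients = build_mgd_clients(&static_config).await;
    println!("built mgd clients: {clients:?}");
}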
@@ -328,6 +376,7 @@ impl Nexus { internal_resolver: resolver, external_resolver, dpd_clients, + mg_clients, background_tasks, default_region_allocation_strategy: config .pkg diff --git a/nexus/src/app/rack.rs b/nexus/src/app/rack.rs index 3ac4b9063d..3faae7f065 100644 --- a/nexus/src/app/rack.rs +++ b/nexus/src/app/rack.rs @@ -33,8 +33,6 @@ use omicron_common::api::external::AddressLotKind; use omicron_common::api::external::DataPageParams; use omicron_common::api::external::Error; use omicron_common::api::external::IdentityMetadataCreateParams; -use omicron_common::api::external::IpNet; -use omicron_common::api::external::Ipv4Net; use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupResult; use omicron_common::api::external::Name; @@ -380,7 +378,7 @@ impl super::Nexus { })?; for (idx, uplink_config) in - rack_network_config.uplinks.iter().enumerate() + rack_network_config.ports.iter().enumerate() { let switch = uplink_config.switch.to_string(); let switch_location = Name::from_str(&switch).map_err(|e| { @@ -449,31 +447,32 @@ impl super::Nexus { addresses: HashMap::new(), }; - let uplink_address = - IpNet::V4(Ipv4Net(uplink_config.uplink_cidr)); - let address = Address { - address_lot: NameOrId::Name(address_lot_name.clone()), - address: uplink_address, - }; - port_settings_params.addresses.insert( - "phy0".to_string(), - AddressConfig { addresses: vec![address] }, - ); - - let dst = IpNet::from_str("0.0.0.0/0").map_err(|e| { - Error::internal_error(&format!( - "failed to parse provided default route CIDR: {e}" - )) - })?; - - let gw = IpAddr::V4(uplink_config.gateway_ip); - let vid = uplink_config.uplink_vid; - let route = Route { dst, gw, vid }; - - port_settings_params.routes.insert( - "phy0".to_string(), - RouteConfig { routes: vec![route] }, - ); + let addresses: Vec
= uplink_config + .addresses + .iter() + .map(|a| Address { + address_lot: NameOrId::Name(address_lot_name.clone()), + address: (*a).into(), + }) + .collect(); + + port_settings_params + .addresses + .insert("phy0".to_string(), AddressConfig { addresses }); + + let routes: Vec = uplink_config + .routes + .iter() + .map(|r| Route { + dst: r.destination.into(), + gw: r.nexthop, + vid: None, + }) + .collect(); + + port_settings_params + .routes + .insert("phy0".to_string(), RouteConfig { routes }); match self .db_datastore @@ -498,9 +497,7 @@ impl super::Nexus { opctx, rack_id, switch_location.into(), - Name::from_str(&uplink_config.uplink_port) - .unwrap() - .into(), + Name::from_str(&uplink_config.port).unwrap().into(), ) .await?; diff --git a/nexus/src/app/sagas/mod.rs b/nexus/src/app/sagas/mod.rs index 88778e3573..f45da89637 100644 --- a/nexus/src/app/sagas/mod.rs +++ b/nexus/src/app/sagas/mod.rs @@ -35,7 +35,6 @@ pub mod snapshot_create; pub mod snapshot_delete; pub mod switch_port_settings_apply; pub mod switch_port_settings_clear; -pub mod switch_port_settings_update; pub mod test_saga; pub mod volume_delete; pub mod volume_remove_rop; diff --git a/nexus/src/app/sagas/switch_port_settings_apply.rs b/nexus/src/app/sagas/switch_port_settings_apply.rs index 687613f0cc..da596b17ce 100644 --- a/nexus/src/app/sagas/switch_port_settings_apply.rs +++ b/nexus/src/app/sagas/switch_port_settings_apply.rs @@ -7,6 +7,7 @@ use crate::app::sagas::retry_until_known_result; use crate::app::sagas::{ declare_saga_actions, ActionRegistry, NexusSaga, SagaInitError, }; +use crate::Nexus; use anyhow::Error; use db::datastore::SwitchPortSettingsCombinedResult; use dpd_client::types::{ @@ -14,14 +15,29 @@ use dpd_client::types::{ RouteSettingsV4, RouteSettingsV6, }; use dpd_client::{Ipv4Cidr, Ipv6Cidr}; +use internal_dns::ServiceName; use ipnetwork::IpNetwork; +use mg_admin_client::types::Prefix4; +use mg_admin_client::types::{ApplyRequest, BgpPeerConfig, BgpRoute}; +use nexus_db_queries::context::OpContext; use nexus_db_queries::db::datastore::UpdatePrecondition; use nexus_db_queries::{authn, db}; +use nexus_types::external_api::params; use omicron_common::api::external::{self, NameOrId}; -use omicron_common::api::internal::shared::SwitchLocation; +use omicron_common::api::internal::shared::{ + ParseSwitchLocationError, PortFec as OmicronPortFec, + PortSpeed as OmicronPortSpeed, SwitchLocation, +}; use serde::{Deserialize, Serialize}; +use sled_agent_client::types::PortConfigV1; +use sled_agent_client::types::RouteConfig; +use sled_agent_client::types::{BgpConfig, EarlyNetworkConfig}; +use sled_agent_client::types::{ + BgpPeerConfig as OmicronBgpPeerConfig, HostPortConfig, +}; use std::collections::HashMap; use std::net::IpAddr; +use std::net::SocketAddrV6; use std::str::FromStr; use std::sync::Arc; use steno::ActionError; @@ -52,6 +68,18 @@ declare_saga_actions! 
{ + spa_ensure_switch_port_settings - spa_undo_ensure_switch_port_settings } + ENSURE_SWITCH_PORT_UPLINK -> "ensure_switch_port_uplink" { + + spa_ensure_switch_port_uplink + - spa_undo_ensure_switch_port_uplink + } + ENSURE_SWITCH_PORT_BGP_SETTINGS -> "ensure_switch_port_bgp_settings" { + + spa_ensure_switch_port_bgp_settings + - spa_undo_ensure_switch_port_bgp_settings + } + ENSURE_SWITCH_PORT_BOOTSTORE_NETWORK_SETTINGS -> "ensure_switch_port_bootstore_network_settings" { + + spa_ensure_switch_port_bootstore_network_settings + - spa_undo_ensure_switch_port_bootstore_network_settings + } } // switch port settings apply saga: definition @@ -74,6 +102,8 @@ impl NexusSaga for SagaSwitchPortSettingsApply { builder.append(associate_switch_port_action()); builder.append(get_switch_port_settings_action()); builder.append(ensure_switch_port_settings_action()); + builder.append(ensure_switch_port_uplink_action()); + builder.append(ensure_switch_port_bgp_settings_action()); Ok(builder.build()?) } } @@ -91,10 +121,10 @@ async fn spa_associate_switch_port( ); // first get the current association so we fall back to this on failure - let port = nexus - .get_switch_port(&opctx, params.switch_port_id) - .await - .map_err(ActionError::action_failed)?; + let port = + nexus.get_switch_port(&opctx, params.switch_port_id).await.map_err( + |e| ActionError::action_failed(format!("get switch port: {e}")), + )?; // update the switch port settings association nexus @@ -105,7 +135,11 @@ async fn spa_associate_switch_port( UpdatePrecondition::DontCare, ) .await - .map_err(ActionError::action_failed)?; + .map_err(|e| { + ActionError::action_failed(format!( + "set switch port settings id {e}" + )) + })?; Ok(port.port_settings_id) } @@ -127,7 +161,9 @@ async fn spa_get_switch_port_settings( &NameOrId::Id(params.switch_port_settings_id), ) .await - .map_err(ActionError::action_failed)?; + .map_err(|e| { + ActionError::action_failed(format!("get switch port settings: {e}")) + })?; Ok(port_settings) } @@ -214,20 +250,28 @@ async fn spa_ensure_switch_port_settings( let settings = sagactx .lookup::("switch_port_settings")?; - let port_id: PortId = PortId::from_str(¶ms.switch_port_name) - .map_err(|e| ActionError::action_failed(e.to_string()))?; + let port_id: PortId = + PortId::from_str(¶ms.switch_port_name).map_err(|e| { + ActionError::action_failed(format!("parse port id: {e}")) + })?; let dpd_client: Arc = select_dendrite_client(&sagactx).await?; - let dpd_port_settings = api_to_dpd_port_settings(&settings) - .map_err(ActionError::action_failed)?; + let dpd_port_settings = + api_to_dpd_port_settings(&settings).map_err(|e| { + ActionError::action_failed(format!( + "translate api port settings to dpd port settings: {e}", + )) + })?; retry_until_known_result(log, || async { dpd_client.port_settings_apply(&port_id, &dpd_port_settings).await }) .await - .map_err(|e| ActionError::action_failed(e.to_string()))?; + .map_err(|e| { + ActionError::action_failed(format!("dpd port settings apply {e}")) + })?; Ok(()) } @@ -270,10 +314,16 @@ async fn spa_undo_ensure_switch_port_settings( let settings = nexus .switch_port_settings_get(&opctx, &NameOrId::Id(id)) .await - .map_err(ActionError::action_failed)?; + .map_err(|e| { + ActionError::action_failed(format!("switch port settings get: {e}")) + })?; - let dpd_port_settings = api_to_dpd_port_settings(&settings) - .map_err(ActionError::action_failed)?; + let dpd_port_settings = + api_to_dpd_port_settings(&settings).map_err(|e| { + ActionError::action_failed(format!( + "translate api to 
dpd port settings {e}" + )) + })?; retry_until_known_result(log, || async { dpd_client.port_settings_apply(&port_id, &dpd_port_settings).await @@ -284,6 +334,341 @@ async fn spa_undo_ensure_switch_port_settings( Ok(()) } +async fn spa_ensure_switch_port_bgp_settings( + sagactx: NexusActionContext, +) -> Result<(), ActionError> { + let settings = sagactx + .lookup::("switch_port_settings") + .map_err(|e| { + ActionError::action_failed(format!( + "lookup switch port settings: {e}" + )) + })?; + + ensure_switch_port_bgp_settings(sagactx, settings).await +} + +pub(crate) async fn ensure_switch_port_bgp_settings( + sagactx: NexusActionContext, + settings: SwitchPortSettingsCombinedResult, +) -> Result<(), ActionError> { + let osagactx = sagactx.user_data(); + let nexus = osagactx.nexus(); + let params = sagactx.saga_params::()?; + + let opctx = crate::context::op_context_for_saga_action( + &sagactx, + ¶ms.serialized_authn, + ); + let mg_client: Arc = + select_mg_client(&sagactx).await.map_err(|e| { + ActionError::action_failed(format!("select mg client: {e}")) + })?; + + let mut bgp_peer_configs = Vec::new(); + + for peer in settings.bgp_peers { + let config = nexus + .bgp_config_get(&opctx, peer.bgp_config_id.into()) + .await + .map_err(|e| { + ActionError::action_failed(format!("get bgp config: {e}")) + })?; + + let announcements = nexus + .bgp_announce_list( + &opctx, + ¶ms::BgpAnnounceSetSelector { + name_or_id: NameOrId::Id(peer.bgp_announce_set_id), + }, + ) + .await + .map_err(|e| { + ActionError::action_failed(format!( + "get bgp announcements: {e}" + )) + })?; + + // TODO picking the first configured address by default, but this needs + // to be something that can be specified in the API. + let nexthop = match settings.addresses.get(0) { + Some(switch_port_addr) => Ok(switch_port_addr.address.ip()), + None => Err(ActionError::action_failed( + "at least one address required for bgp peering".to_string(), + )), + }?; + + let nexthop = match nexthop { + IpAddr::V4(nexthop) => Ok(nexthop), + IpAddr::V6(_) => Err(ActionError::action_failed( + "IPv6 nexthop not yet supported".to_string(), + )), + }?; + + let mut prefixes = Vec::new(); + for a in &announcements { + let value = match a.network.ip() { + IpAddr::V4(value) => Ok(value), + IpAddr::V6(_) => Err(ActionError::action_failed( + "IPv6 announcement not yet supported".to_string(), + )), + }?; + prefixes.push(Prefix4 { value, length: a.network.prefix() }); + } + + let bpc = BgpPeerConfig { + asn: *config.asn, + name: format!("{}", peer.addr.ip()), //TODO(ry)(user defined name) + host: format!("{}:179", peer.addr.ip()), + hold_time: 6, //TODO(ry)(hardocde) + idle_hold_time: 6, //TODO(ry)(hardocde) + delay_open: 0, //TODO(ry)(hardocde) + connect_retry: 0, //TODO(ry)(hardcode) + keepalive: 3, //TODO(ry)(hardcode) + resolution: 100, //TODO(ry)(hardcode) + routes: vec![BgpRoute { nexthop, prefixes }], + }; + + bgp_peer_configs.push(bpc); + } + + mg_client + .inner + .bgp_apply(&ApplyRequest { + peer_group: params.switch_port_name.clone(), + peers: bgp_peer_configs, + }) + .await + .map_err(|e| { + ActionError::action_failed(format!("apply bgp settings: {e}")) + })?; + + Ok(()) +} +async fn spa_undo_ensure_switch_port_bgp_settings( + sagactx: NexusActionContext, +) -> Result<(), Error> { + use mg_admin_client::types::DeleteNeighborRequest; + + let osagactx = sagactx.user_data(); + let nexus = osagactx.nexus(); + let params = sagactx.saga_params::()?; + let opctx = crate::context::op_context_for_saga_action( + &sagactx, + ¶ms.serialized_authn, + 
);
+
+    let settings = sagactx
+        .lookup::<SwitchPortSettingsCombinedResult>("switch_port_settings")
+        .map_err(|e| {
+            ActionError::action_failed(format!(
+                "lookup switch port settings (bgp undo): {e}"
+            ))
+        })?;
+
+    let mg_client: Arc<mg_admin_client::Client> =
+        select_mg_client(&sagactx).await.map_err(|e| {
+            ActionError::action_failed(format!("select mg client (undo): {e}"))
+        })?;
+
+    for peer in settings.bgp_peers {
+        let config = nexus
+            .bgp_config_get(&opctx, peer.bgp_config_id.into())
+            .await
+            .map_err(|e| {
+                ActionError::action_failed(format!("delete bgp config: {e}"))
+            })?;
+
+        mg_client
+            .inner
+            .delete_neighbor(&DeleteNeighborRequest {
+                asn: *config.asn,
+                addr: peer.addr.ip(),
+            })
+            .await
+            .map_err(|e| {
+                ActionError::action_failed(format!("delete neighbor: {e}"))
+            })?;
+    }
+
+    Ok(())
+}
+
+async fn spa_ensure_switch_port_bootstore_network_settings(
+    sagactx: NexusActionContext,
+) -> Result<(), ActionError> {
+    let osagactx = sagactx.user_data();
+    let nexus = osagactx.nexus();
+    let params = sagactx.saga_params::<Params>()?;
+    let opctx = crate::context::op_context_for_saga_action(
+        &sagactx,
+        &params.serialized_authn,
+    );
+
+    let settings = sagactx
+        .lookup::<SwitchPortSettingsCombinedResult>("switch_port_settings")
+        .map_err(|e| {
+            ActionError::action_failed(format!(
+                "lookup switch port settings (bgp undo): {e}"
+            ))
+        })?;
+
+    // Just choosing the sled agent associated with switch0 for no reason.
+    let sa = switch_sled_agent(SwitchLocation::Switch0, &sagactx).await?;
+
+    // Read the current bootstore config, perform the update and write it back.
+    let mut config = read_bootstore_config(&sa).await?;
+    let update = bootstore_update(
+        &nexus,
+        &opctx,
+        params.switch_port_id,
+        &params.switch_port_name,
+        &settings,
+    )
+    .await?;
+    apply_bootstore_update(&mut config, &update)?;
+    write_bootstore_config(&sa, &config).await?;
+
+    Ok(())
+}
+
+async fn spa_undo_ensure_switch_port_bootstore_network_settings(
+    sagactx: NexusActionContext,
+) -> Result<(), Error> {
+    // The overall saga update failed but the bootstore update succeeded.
+    // Between now and then other updates may have happened which prevent us
+    // from simply undoing the changes we did before, as we may inadvertently
+    // roll back changes at the intersection of this failed update and other
+    // successful updates. The only thing we can really do here is attempt a
+    // complete update of the bootstore network settings based on the current
+    // state in the Nexus database, which we assume to be consistent at any
+    // point in time.
+
+    let nexus = sagactx.user_data().nexus();
+    let params = sagactx.saga_params::<Params>()?;
+    let opctx = crate::context::op_context_for_saga_action(
+        &sagactx,
+        &params.serialized_authn,
+    );
+
+    // Just choosing the sled agent associated with switch0 for no reason.
+    let sa = switch_sled_agent(SwitchLocation::Switch0, &sagactx).await?;
+
+    // Read the current bootstore network config.
+    let bs_config = read_bootstore_config(&sa).await?;
+
+    // Compute the total network config from the nexus database.
+    let mut nexus_config = nexus
+        .compute_bootstore_network_config(&opctx, &bs_config)
+        .await
+        .map_err(|e| {
+            ActionError::action_failed(format!(
+                "read nexus bootstore network config: {e}"
+            ))
+        })?;
+
+    // Set the correct generation number and send the update.
+ nexus_config.generation = bs_config.generation; + write_bootstore_config(&sa, &nexus_config).await?; + + Ok(()) +} + +async fn spa_ensure_switch_port_uplink( + sagactx: NexusActionContext, +) -> Result<(), ActionError> { + ensure_switch_port_uplink(sagactx, false, None).await +} + +async fn spa_undo_ensure_switch_port_uplink( + sagactx: NexusActionContext, +) -> Result<(), Error> { + Ok(ensure_switch_port_uplink(sagactx, true, None).await?) +} + +pub(crate) async fn ensure_switch_port_uplink( + sagactx: NexusActionContext, + skip_self: bool, + inject: Option, +) -> Result<(), ActionError> { + let params = sagactx.saga_params::()?; + + let opctx = crate::context::op_context_for_saga_action( + &sagactx, + ¶ms.serialized_authn, + ); + let osagactx = sagactx.user_data(); + let nexus = osagactx.nexus(); + + let switch_port = nexus + .get_switch_port(&opctx, params.switch_port_id) + .await + .map_err(|e| { + ActionError::action_failed(format!( + "get switch port for uplink: {e}" + )) + })?; + + let switch_location: SwitchLocation = + switch_port.switch_location.parse().map_err(|e| { + ActionError::action_failed(format!( + "get switch location for uplink: {e:?}", + )) + })?; + + let mut uplinks: Vec = Vec::new(); + + // The sled agent uplinks interface is an all or nothing interface, so we + // need to get all the uplink configs for all the ports. + let active_ports = + nexus.active_port_settings(&opctx).await.map_err(|e| { + ActionError::action_failed(format!( + "get active switch port settings: {e}" + )) + })?; + + for (port, info) in &active_ports { + // Since we are undoing establishing uplinks for the settings + // associated with this port we skip adding this ports uplinks + // to the list - effectively removing them. + if skip_self && port.id == switch_port.id { + continue; + } + uplinks.push(HostPortConfig { + port: port.port_name.clone(), + addrs: info.addresses.iter().map(|a| a.address).collect(), + }) + } + + if let Some(id) = inject { + let opctx = crate::context::op_context_for_saga_action( + &sagactx, + ¶ms.serialized_authn, + ); + let settings = nexus + .switch_port_settings_get(&opctx, &id.into()) + .await + .map_err(|e| { + ActionError::action_failed(format!( + "get switch port settings for injection: {e}" + )) + })?; + uplinks.push(HostPortConfig { + port: params.switch_port_name.clone(), + addrs: settings.addresses.iter().map(|a| a.address).collect(), + }) + } + + let sc = switch_sled_agent(switch_location, &sagactx).await?; + sc.uplink_ensure(&sled_agent_client::types::SwitchPorts { uplinks }) + .await + .map_err(|e| { + ActionError::action_failed(format!("ensure uplink: {e}")) + })?; + + Ok(()) +} + // a common route representation for dendrite and port settings #[derive(Debug, Clone, Eq, Hash, PartialEq, Serialize, Deserialize)] pub(crate) struct Route { @@ -316,7 +701,11 @@ async fn spa_disassociate_switch_port( UpdatePrecondition::Value(params.switch_port_settings_id), ) .await - .map_err(ActionError::action_failed)?; + .map_err(|e| { + ActionError::action_failed(format!( + "set switch port settings id for disassociate: {e}" + )) + })?; Ok(()) } @@ -335,12 +724,21 @@ pub(crate) async fn select_dendrite_client( let switch_port = nexus .get_switch_port(&opctx, params.switch_port_id) .await - .map_err(ActionError::action_failed)?; + .map_err(|e| { + ActionError::action_failed(format!( + "get switch port for dendrite client selection {e}" + )) + })?; + let switch_location: SwitchLocation = - switch_port - .switch_location - .parse() - .map_err(ActionError::action_failed)?; + 
switch_port.switch_location.parse().map_err( + |e: ParseSwitchLocationError| { + ActionError::action_failed(format!( + "get switch location for uplink: {e:?}", + )) + }, + )?; + let dpd_client: Arc = osagactx .nexus() .dpd_clients @@ -353,3 +751,253 @@ pub(crate) async fn select_dendrite_client( .clone(); Ok(dpd_client) } + +pub(crate) async fn select_mg_client( + sagactx: &NexusActionContext, +) -> Result, ActionError> { + let osagactx = sagactx.user_data(); + let params = sagactx.saga_params::()?; + let nexus = osagactx.nexus(); + let opctx = crate::context::op_context_for_saga_action( + &sagactx, + ¶ms.serialized_authn, + ); + + let switch_port = nexus + .get_switch_port(&opctx, params.switch_port_id) + .await + .map_err(|e| { + ActionError::action_failed(format!( + "get switch port for mg client selection: {e}" + )) + })?; + + let switch_location: SwitchLocation = + switch_port.switch_location.parse().map_err( + |e: ParseSwitchLocationError| { + ActionError::action_failed(format!( + "get switch location for uplink: {e:?}", + )) + }, + )?; + + let mg_client: Arc = osagactx + .nexus() + .mg_clients + .get(&switch_location) + .ok_or_else(|| { + ActionError::action_failed(format!( + "requested switch not available: {switch_location}" + )) + })? + .clone(); + Ok(mg_client) +} + +pub(crate) async fn get_scrimlet_address( + location: SwitchLocation, + nexus: &Arc, +) -> Result { + nexus + .resolver() + .await + .lookup_socket_v6(ServiceName::Scrimlet(location)) + .await + .map_err(|e| e.to_string()) + .map_err(|e| { + ActionError::action_failed(format!( + "scrimlet dns lookup failed {e}", + )) + }) +} + +#[derive(Clone, Debug)] +pub struct EarlyNetworkPortUpdate { + port: PortConfigV1, + bgp_configs: Vec, +} + +pub(crate) async fn bootstore_update( + nexus: &Arc, + opctx: &OpContext, + switch_port_id: Uuid, + switch_port_name: &str, + settings: &SwitchPortSettingsCombinedResult, +) -> Result { + let switch_port = + nexus.get_switch_port(&opctx, switch_port_id).await.map_err(|e| { + ActionError::action_failed(format!( + "get switch port for uplink: {e}" + )) + })?; + + let switch_location: SwitchLocation = + switch_port.switch_location.parse().map_err( + |e: ParseSwitchLocationError| { + ActionError::action_failed(format!( + "get switch location for uplink: {e:?}", + )) + }, + )?; + + let mut peer_info = Vec::new(); + let mut bgp_configs = Vec::new(); + for p in &settings.bgp_peers { + let bgp_config = nexus + .bgp_config_get(&opctx, p.bgp_config_id.into()) + .await + .map_err(|e| { + ActionError::action_failed(format!("get bgp config: {e}")) + })?; + + let announcements = nexus + .bgp_announce_list( + &opctx, + ¶ms::BgpAnnounceSetSelector { + name_or_id: NameOrId::Id(p.bgp_announce_set_id), + }, + ) + .await + .map_err(|e| { + ActionError::action_failed(format!( + "get bgp announcements: {e}" + )) + })?; + + peer_info.push((p, bgp_config.asn.0)); + bgp_configs.push(BgpConfig { + asn: bgp_config.asn.0, + originate: announcements + .iter() + .filter_map(|a| match a.network { + IpNetwork::V4(net) => Some(net.into()), + //TODO v6 + _ => None, + }) + .collect(), + }); + } + + let update = EarlyNetworkPortUpdate { + port: PortConfigV1 { + routes: settings + .routes + .iter() + .map(|r| RouteConfig { destination: r.dst, nexthop: r.gw.ip() }) + .collect(), + addresses: settings.addresses.iter().map(|a| a.address).collect(), + switch: switch_location, + port: switch_port_name.into(), + uplink_port_fec: OmicronPortFec::None, //TODO hardcode + uplink_port_speed: OmicronPortSpeed::Speed100G, //TODO 
hardcode + bgp_peers: peer_info + .iter() + .filter_map(|(p, asn)| { + //TODO v6 + if let IpAddr::V4(addr) = p.addr.ip() { + Some(OmicronBgpPeerConfig { + asn: *asn, + port: switch_port_name.into(), + addr, + }) + } else { + None + } + }) + .collect(), + }, + bgp_configs, + }; + + Ok(update) +} + +pub(crate) async fn read_bootstore_config( + sa: &sled_agent_client::Client, +) -> Result { + Ok(sa + .read_network_bootstore_config() + .await + .map_err(|e| { + ActionError::action_failed(format!( + "read bootstore network config: {e}" + )) + })? + .into_inner()) +} + +pub(crate) async fn write_bootstore_config( + sa: &sled_agent_client::Client, + config: &EarlyNetworkConfig, +) -> Result<(), ActionError> { + sa.write_network_bootstore_config(config).await.map_err(|e| { + ActionError::action_failed(format!( + "write bootstore network config: {e}" + )) + })?; + Ok(()) +} + +#[derive(Clone, Debug, Default)] +pub(crate) struct BootstoreNetworkPortChange { + previous_port_config: Option, + changed_bgp_configs: Vec, + added_bgp_configs: Vec, +} + +pub(crate) fn apply_bootstore_update( + config: &mut EarlyNetworkConfig, + update: &EarlyNetworkPortUpdate, +) -> Result { + let mut change = BootstoreNetworkPortChange::default(); + + let rack_net_config = match &mut config.rack_network_config { + Some(cfg) => cfg, + None => { + return Err(ActionError::action_failed( + "rack network config not yet initialized".to_string(), + )) + } + }; + + for port in &mut rack_net_config.ports { + if port.port == update.port.port { + change.previous_port_config = Some(port.clone()); + *port = update.port.clone(); + break; + } + } + if change.previous_port_config.is_none() { + rack_net_config.ports.push(update.port.clone()); + } + + for updated_bgp in &update.bgp_configs { + let mut exists = false; + for resident_bgp in &mut rack_net_config.bgp { + if resident_bgp.asn == updated_bgp.asn { + change.changed_bgp_configs.push(resident_bgp.clone()); + *resident_bgp = updated_bgp.clone(); + exists = true; + break; + } + } + if !exists { + change.added_bgp_configs.push(updated_bgp.clone()); + } + } + rack_net_config.bgp.extend_from_slice(&change.added_bgp_configs); + + Ok(change) +} + +pub(crate) async fn switch_sled_agent( + location: SwitchLocation, + sagactx: &NexusActionContext, +) -> Result { + let nexus = sagactx.user_data().nexus(); + let sled_agent_addr = get_scrimlet_address(location, nexus).await?; + Ok(sled_agent_client::Client::new( + &format!("http://{}", sled_agent_addr), + sagactx.user_data().log().clone(), + )) +} diff --git a/nexus/src/app/sagas/switch_port_settings_clear.rs b/nexus/src/app/sagas/switch_port_settings_clear.rs index 0c0f4ec01b..2836d5a2e2 100644 --- a/nexus/src/app/sagas/switch_port_settings_clear.rs +++ b/nexus/src/app/sagas/switch_port_settings_clear.rs @@ -5,17 +5,24 @@ use super::switch_port_settings_apply::select_dendrite_client; use super::NexusActionContext; use crate::app::sagas::retry_until_known_result; -use crate::app::sagas::switch_port_settings_apply::api_to_dpd_port_settings; +use crate::app::sagas::switch_port_settings_apply::{ + api_to_dpd_port_settings, apply_bootstore_update, bootstore_update, + ensure_switch_port_bgp_settings, ensure_switch_port_uplink, + read_bootstore_config, select_mg_client, switch_sled_agent, + write_bootstore_config, +}; use crate::app::sagas::{ declare_saga_actions, ActionRegistry, NexusSaga, SagaInitError, }; use anyhow::Error; use dpd_client::types::PortId; +use mg_admin_client::types::DeleteNeighborRequest; use nexus_db_queries::authn; use 
nexus_db_queries::db::datastore::UpdatePrecondition; -use omicron_common::api::external::{self, NameOrId}; +use omicron_common::api::external::{self, NameOrId, SwitchLocation}; use serde::{Deserialize, Serialize}; use std::str::FromStr; +use std::sync::Arc; use steno::ActionError; use uuid::Uuid; @@ -36,6 +43,18 @@ declare_saga_actions! { + spa_clear_switch_port_settings - spa_undo_clear_switch_port_settings } + CLEAR_SWITCH_PORT_UPLINK -> "clear_switch_port_uplink" { + + spa_clear_switch_port_uplink + - spa_undo_clear_switch_port_uplink + } + CLEAR_SWITCH_PORT_BGP_SETTINGS -> "clear_switch_port_bgp_settings" { + + spa_clear_switch_port_bgp_settings + - spa_undo_clear_switch_port_bgp_settings + } + CLEAR_SWITCH_PORT_BOOTSTORE_NETWORK_SETTINGS -> "clear_switch_port_bootstore_network_settings" { + + spa_clear_switch_port_bootstore_network_settings + - spa_undo_clear_switch_port_bootstore_network_settings + } } #[derive(Debug)] @@ -54,6 +73,9 @@ impl NexusSaga for SagaSwitchPortSettingsClear { ) -> Result { builder.append(disassociate_switch_port_action()); builder.append(clear_switch_port_settings_action()); + builder.append(clear_switch_port_uplink_action()); + builder.append(clear_switch_port_bgp_settings_action()); + builder.append(clear_switch_port_bootstore_network_settings_action()); Ok(builder.build()?) } } @@ -181,3 +203,182 @@ async fn spa_undo_clear_switch_port_settings( Ok(()) } + +async fn spa_clear_switch_port_uplink( + sagactx: NexusActionContext, +) -> Result<(), ActionError> { + ensure_switch_port_uplink(sagactx, true, None).await +} + +async fn spa_undo_clear_switch_port_uplink( + sagactx: NexusActionContext, +) -> Result<(), Error> { + let id = sagactx + .lookup::>("original_switch_port_settings_id") + .map_err(|e| external::Error::internal_error(&e.to_string()))?; + + Ok(ensure_switch_port_uplink(sagactx, false, id).await?) 
+} + +async fn spa_clear_switch_port_bgp_settings( + sagactx: NexusActionContext, +) -> Result<(), ActionError> { + let osagactx = sagactx.user_data(); + let params = sagactx.saga_params::()?; + let nexus = osagactx.nexus(); + let opctx = crate::context::op_context_for_saga_action( + &sagactx, + ¶ms.serialized_authn, + ); + + let orig_port_settings_id = sagactx + .lookup::>("original_switch_port_settings_id") + .map_err(|e| { + ActionError::action_failed(format!( + "original port settings id lookup: {e}" + )) + })?; + + let id = match orig_port_settings_id { + Some(id) => id, + None => return Ok(()), + }; + + let settings = nexus + .switch_port_settings_get(&opctx, &NameOrId::Id(id)) + .await + .map_err(ActionError::action_failed)?; + + let mg_client: Arc = + select_mg_client(&sagactx).await.map_err(|e| { + ActionError::action_failed(format!("select mg client (undo): {e}")) + })?; + + for peer in settings.bgp_peers { + let config = nexus + .bgp_config_get(&opctx, peer.bgp_config_id.into()) + .await + .map_err(|e| { + ActionError::action_failed(format!("delete bgp config: {e}")) + })?; + + mg_client + .inner + .delete_neighbor(&DeleteNeighborRequest { + asn: *config.asn, + addr: peer.addr.ip(), + }) + .await + .map_err(|e| { + ActionError::action_failed(format!("delete neighbor: {e}")) + })?; + } + + Ok(()) +} + +async fn spa_undo_clear_switch_port_bgp_settings( + sagactx: NexusActionContext, +) -> Result<(), Error> { + let osagactx = sagactx.user_data(); + let params = sagactx.saga_params::()?; + let nexus = osagactx.nexus(); + let opctx = crate::context::op_context_for_saga_action( + &sagactx, + ¶ms.serialized_authn, + ); + + let orig_port_settings_id = + sagactx.lookup::>("original_switch_port_settings_id")?; + + let id = match orig_port_settings_id { + Some(id) => id, + None => return Ok(()), + }; + + let settings = + nexus.switch_port_settings_get(&opctx, &NameOrId::Id(id)).await?; + + Ok(ensure_switch_port_bgp_settings(sagactx, settings).await?) +} + +async fn spa_clear_switch_port_bootstore_network_settings( + sagactx: NexusActionContext, +) -> Result<(), ActionError> { + let nexus = sagactx.user_data().nexus(); + let params = sagactx.saga_params::()?; + let opctx = crate::context::op_context_for_saga_action( + &sagactx, + ¶ms.serialized_authn, + ); + + // Just choosing the sled agent associated with switch0 for no reason. + let sa = switch_sled_agent(SwitchLocation::Switch0, &sagactx).await?; + + // Read the current bootstore network config. + let bs_config = read_bootstore_config(&sa).await?; + + // Compute the total network config from the nexus database. + let mut nexus_config = nexus + .compute_bootstore_network_config(&opctx, &bs_config) + .await + .map_err(|e| { + ActionError::action_failed(format!( + "read nexus bootstore network config: {e}" + )) + })?; + + // Set the correct generation number and send the update. 
+ nexus_config.generation = bs_config.generation; + write_bootstore_config(&sa, &nexus_config).await?; + + Ok(()) +} + +async fn spa_undo_clear_switch_port_bootstore_network_settings( + sagactx: NexusActionContext, +) -> Result<(), Error> { + let osagactx = sagactx.user_data(); + let params = sagactx.saga_params::()?; + let nexus = osagactx.nexus(); + let opctx = crate::context::op_context_for_saga_action( + &sagactx, + ¶ms.serialized_authn, + ); + + let orig_port_settings_id = sagactx + .lookup::>("original_switch_port_settings_id") + .map_err(|e| { + ActionError::action_failed(format!( + "original port settings id lookup: {e}" + )) + })?; + + let id = match orig_port_settings_id { + Some(id) => id, + None => return Ok(()), + }; + + let settings = nexus + .switch_port_settings_get(&opctx, &NameOrId::Id(id)) + .await + .map_err(ActionError::action_failed)?; + + // Just choosing the sled agent associated with switch0 for no reason. + let sa = switch_sled_agent(SwitchLocation::Switch0, &sagactx).await?; + + // Read the current bootstore config, perform the update and write it back. + let mut config = read_bootstore_config(&sa).await?; + let update = bootstore_update( + &nexus, + &opctx, + params.switch_port_id, + ¶ms.port_name, + &settings, + ) + .await?; + apply_bootstore_update(&mut config, &update)?; + write_bootstore_config(&sa, &config).await?; + + Ok(()) +} diff --git a/nexus/src/app/sagas/switch_port_settings_update.rs b/nexus/src/app/sagas/switch_port_settings_update.rs deleted file mode 100644 index 23120bdbf4..0000000000 --- a/nexus/src/app/sagas/switch_port_settings_update.rs +++ /dev/null @@ -1,5 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -// TODO https://github.com/oxidecomputer/omicron/issues/3002 diff --git a/nexus/src/app/switch_port.rs b/nexus/src/app/switch_port.rs index 996290b684..3911bfcaa3 100644 --- a/nexus/src/app/switch_port.rs +++ b/nexus/src/app/switch_port.rs @@ -5,30 +5,107 @@ use crate::app::sagas; use crate::external_api::params; use db::datastore::SwitchPortSettingsCombinedResult; +use ipnetwork::IpNetwork; use nexus_db_queries::authn; use nexus_db_queries::authz; use nexus_db_queries::context::OpContext; use nexus_db_queries::db; use nexus_db_queries::db::datastore::UpdatePrecondition; use nexus_db_queries::db::model::{SwitchPort, SwitchPortSettings}; +use nexus_types::identity::Resource; use omicron_common::api::external::http_pagination::PaginatedBy; use omicron_common::api::external::{ self, CreateResult, DataPageParams, DeleteResult, ListResultVec, LookupResult, Name, NameOrId, UpdateResult, }; +use sled_agent_client::types::BgpConfig; +use sled_agent_client::types::BgpPeerConfig; +use sled_agent_client::types::{ + EarlyNetworkConfig, PortConfigV1, RackNetworkConfig, RouteConfig, +}; use std::sync::Arc; use uuid::Uuid; impl super::Nexus { - pub(crate) async fn switch_port_settings_create( - &self, + pub(crate) async fn switch_port_settings_post( + self: &Arc, opctx: &OpContext, params: params::SwitchPortSettingsCreate, ) -> CreateResult { opctx.authorize(authz::Action::Modify, &authz::FLEET).await?; + + //TODO(ry) race conditions on exists check versus update/create. + // Normally I would use a DB lock here, but not sure what + // the Omicron way of doing things here is. 
+ + match self + .db_datastore + .switch_port_settings_exist( + opctx, + params.identity.name.clone().into(), + ) + .await + { + Ok(id) => self.switch_port_settings_update(opctx, id, params).await, + Err(_) => self.switch_port_settings_create(opctx, params).await, + } + } + + pub async fn switch_port_settings_create( + self: &Arc, + opctx: &OpContext, + params: params::SwitchPortSettingsCreate, + ) -> CreateResult { self.db_datastore.switch_port_settings_create(opctx, ¶ms).await } + pub(crate) async fn switch_port_settings_update( + self: &Arc, + opctx: &OpContext, + switch_port_settings_id: Uuid, + new_settings: params::SwitchPortSettingsCreate, + ) -> CreateResult { + // delete old settings + self.switch_port_settings_delete( + opctx, + ¶ms::SwitchPortSettingsSelector { + port_settings: Some(NameOrId::Id(switch_port_settings_id)), + }, + ) + .await?; + + // create new settings + let result = self + .switch_port_settings_create(opctx, new_settings.clone()) + .await?; + + // run the port settings apply saga for each port referencing the + // updated settings + + let ports = self + .db_datastore + .switch_ports_using_settings(opctx, switch_port_settings_id) + .await?; + + for (switch_port_id, switch_port_name) in ports.into_iter() { + let saga_params = sagas::switch_port_settings_apply::Params { + serialized_authn: authn::saga::Serialized::for_opctx(opctx), + switch_port_id, + switch_port_settings_id: result.settings.id(), + switch_port_name: switch_port_name.to_string(), + }; + + self.execute_saga::< + sagas::switch_port_settings_apply::SagaSwitchPortSettingsApply + >( + saga_params, + ) + .await?; + } + + Ok(result) + } + pub(crate) async fn switch_port_settings_delete( &self, opctx: &OpContext, @@ -151,7 +228,9 @@ impl super::Nexus { switch_port_name: port.to_string(), }; - self.execute_saga::( + self.execute_saga::< + sagas::switch_port_settings_apply::SagaSwitchPortSettingsApply + >( saga_params, ) .await?; @@ -215,4 +294,124 @@ impl super::Nexus { Ok(()) } + + // TODO it would likely be better to do this as a one shot db query. 
+ pub(crate) async fn active_port_settings( + &self, + opctx: &OpContext, + ) -> LookupResult> { + let mut ports = Vec::new(); + let port_list = + self.switch_port_list(opctx, &DataPageParams::max_page()).await?; + + for p in port_list { + if let Some(id) = p.port_settings_id { + ports.push(( + p.clone(), + self.switch_port_settings_get(opctx, &id.into()).await?, + )); + } + } + + LookupResult::Ok(ports) + } + + pub(crate) async fn compute_bootstore_network_config( + &self, + opctx: &OpContext, + current: &EarlyNetworkConfig, + ) -> LookupResult { + let mut rack_net_config = match ¤t.rack_network_config { + Some(cfg) => { + RackNetworkConfig { + infra_ip_first: cfg.infra_ip_first, + infra_ip_last: cfg.infra_ip_last, + ports: Vec::new(), // To be filled in from db + bgp: Vec::new(), // To be filled in from db + } + } + None => { + return LookupResult::Err( + external::Error::ServiceUnavailable { + internal_message: + "bootstore network config not initialized yet" + .to_string(), + }, + ); + } + }; + + let db_ports = self.active_port_settings(opctx).await?; + + for (port, info) in &db_ports { + let mut peer_info = Vec::new(); + for p in &info.bgp_peers { + let bgp_config = + self.bgp_config_get(&opctx, p.bgp_config_id.into()).await?; + let announcements = self + .bgp_announce_list( + &opctx, + ¶ms::BgpAnnounceSetSelector { + name_or_id: p.bgp_announce_set_id.into(), + }, + ) + .await?; + let addr = match p.addr { + ipnetwork::IpNetwork::V4(addr) => addr, + ipnetwork::IpNetwork::V6(_) => continue, //TODO v6 + }; + peer_info.push((p, bgp_config.asn.0, addr.ip())); + rack_net_config.bgp.push(BgpConfig { + asn: bgp_config.asn.0, + originate: announcements + .iter() + .filter_map(|a| match a.network { + IpNetwork::V4(net) => Some(net.into()), + //TODO v6 + _ => None, + }) + .collect(), + }); + } + + let p = PortConfigV1 { + routes: info + .routes + .iter() + .map(|r| RouteConfig { + destination: r.dst, + nexthop: r.gw.ip(), + }) + .collect(), + addresses: info.addresses.iter().map(|a| a.address).collect(), + bgp_peers: peer_info + .iter() + .map(|(_p, asn, addr)| BgpPeerConfig { + addr: *addr, + asn: *asn, + port: port.port_name.clone(), + }) + .collect(), + switch: port.switch_location.parse().unwrap(), + port: port.port_name.clone(), + //TODO hardcode + uplink_port_fec: + omicron_common::api::internal::shared::PortFec::None, + //TODO hardcode + uplink_port_speed: + omicron_common::api::internal::shared::PortSpeed::Speed100G, + }; + + rack_net_config.ports.push(p); + } + + let result = EarlyNetworkConfig { + generation: current.generation, + rack_subnet: current.rack_subnet, + ntp_servers: current.ntp_servers.clone(), //TODO update from db + rack_network_config: Some(rack_net_config), + }; + + LookupResult::Ok(result) + } } diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index 1fddfba85b..990704904a 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -63,6 +63,11 @@ use omicron_common::api::external::http_pagination::ScanParams; use omicron_common::api::external::AddressLot; use omicron_common::api::external::AddressLotBlock; use omicron_common::api::external::AddressLotCreateResponse; +use omicron_common::api::external::BgpAnnounceSet; +use omicron_common::api::external::BgpAnnouncement; +use omicron_common::api::external::BgpConfig; +use omicron_common::api::external::BgpImportedRouteIpv4; +use omicron_common::api::external::BgpPeerStatus; use 
omicron_common::api::external::DataPageParams; use omicron_common::api::external::Disk; use omicron_common::api::external::Error; @@ -250,6 +255,15 @@ pub(crate) fn external_api() -> NexusApiDescription { api.register(networking_switch_port_apply_settings)?; api.register(networking_switch_port_clear_settings)?; + api.register(networking_bgp_config_create)?; + api.register(networking_bgp_config_list)?; + api.register(networking_bgp_status)?; + api.register(networking_bgp_imported_routes_ipv4)?; + api.register(networking_bgp_config_delete)?; + api.register(networking_bgp_announce_set_create)?; + api.register(networking_bgp_announce_set_list)?; + api.register(networking_bgp_announce_set_delete)?; + // Fleet-wide API operations api.register(silo_list)?; api.register(silo_create)?; @@ -2642,7 +2656,7 @@ async fn networking_switch_port_settings_create( let nexus = &apictx.nexus; let params = new_settings.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let result = nexus.switch_port_settings_create(&opctx, params).await?; + let result = nexus.switch_port_settings_post(&opctx, params).await?; let settings: SwitchPortSettingsView = result.into(); Ok(HttpResponseCreated(settings)) @@ -2810,6 +2824,193 @@ async fn networking_switch_port_clear_settings( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } +/// Create a new BGP configuration. +#[endpoint { + method = POST, + path = "/v1/system/networking/bgp", + tags = ["system/networking"], +}] +async fn networking_bgp_config_create( + rqctx: RequestContext>, + config: TypedBody, +) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.nexus; + let config = config.into_inner(); + let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + let result = nexus.bgp_config_set(&opctx, &config).await?; + Ok(HttpResponseCreated::(result.into())) + }; + apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await +} + +/// Get BGP configurations. +#[endpoint { + method = GET, + path = "/v1/system/networking/bgp", + tags = ["system/networking"], +}] +async fn networking_bgp_config_list( + rqctx: RequestContext>, + query_params: Query>, +) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.nexus; + let query = query_params.into_inner(); + let pag_params = data_page_params_for(&rqctx, &query)?; + let scan_params = ScanByNameOrId::from_query(&query)?; + let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; + let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + let configs = nexus + .bgp_config_list(&opctx, &paginated_by) + .await? + .into_iter() + .map(|p| p.into()) + .collect(); + + Ok(HttpResponseOk(ScanByNameOrId::results_page( + &query, + configs, + &marker_for_name_or_id, + )?)) + }; + apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await +} + +//TODO pagination? 
the normal by-name/by-id stuff does not work here +/// Get BGP peer status +#[endpoint { + method = GET, + path = "/v1/system/networking/bgp-status", + tags = ["system/networking"], +}] +async fn networking_bgp_status( + rqctx: RequestContext>, +) -> Result>, HttpError> { + let apictx = rqctx.context(); + let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + let handler = async { + let nexus = &apictx.nexus; + let result = nexus.bgp_peer_status(&opctx).await?; + Ok(HttpResponseOk(result)) + }; + apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await +} + +//TODO pagination? the normal by-name/by-id stuff does not work here +/// Get imported IPv4 BGP routes. +#[endpoint { + method = GET, + path = "/v1/system/networking/bgp-routes-ipv4", + tags = ["system/networking"], +}] +async fn networking_bgp_imported_routes_ipv4( + rqctx: RequestContext>, + query_params: Query, +) -> Result>, HttpError> { + let apictx = rqctx.context(); + let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + let handler = async { + let nexus = &apictx.nexus; + let sel = query_params.into_inner(); + let result = nexus.bgp_imported_routes_ipv4(&opctx, &sel).await?; + Ok(HttpResponseOk(result)) + }; + apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await +} + +/// Delete a BGP configuration. +#[endpoint { + method = DELETE, + path = "/v1/system/networking/bgp", + tags = ["system/networking"], +}] +async fn networking_bgp_config_delete( + rqctx: RequestContext>, + sel: Query, +) -> Result { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.nexus; + let sel = sel.into_inner(); + let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + nexus.bgp_config_delete(&opctx, &sel).await?; + Ok(HttpResponseUpdatedNoContent {}) + }; + apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await +} + +/// Create a new BGP announce set. +#[endpoint { + method = POST, + path = "/v1/system/networking/bgp-announce", + tags = ["system/networking"], +}] +async fn networking_bgp_announce_set_create( + rqctx: RequestContext>, + config: TypedBody, +) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.nexus; + let config = config.into_inner(); + let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + let result = nexus.bgp_create_announce_set(&opctx, &config).await?; + Ok(HttpResponseCreated::(result.0.into())) + }; + apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await +} + +//TODO pagination? the normal by-name/by-id stuff does not work here +/// Get originated routes for a given BGP configuration. +#[endpoint { + method = GET, + path = "/v1/system/networking/bgp-announce", + tags = ["system/networking"], +}] +async fn networking_bgp_announce_set_list( + rqctx: RequestContext>, + query_params: Query, +) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.nexus; + let sel = query_params.into_inner(); + let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + let result = nexus + .bgp_announce_list(&opctx, &sel) + .await? + .into_iter() + .map(|p| p.into()) + .collect(); + Ok(HttpResponseOk(result)) + }; + apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await +} + +/// Delete a BGP announce set. 
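For reference, the BGP endpoints added above are exercised like any other versioned Nexus API route. The sketch below shows what a client call might look like; the base URL is a placeholder, authentication and error handling are omitted, and it assumes reqwest with its blocking and json features plus serde_json as dependencies.

use serde_json::json;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Hypothetical base URL for an external Nexus API; auth is not shown here.
    let base = "https://nexus.example.com";
    let client = reqwest::blocking::Client::new();

    // Create a BGP configuration (POST /v1/system/networking/bgp).
    let body = json!({
        "name": "as47",
        "description": "BGP config for AS47",
        "asn": 47,
        "vrf": null,
    });
    let resp = client
        .post(format!("{base}/v1/system/networking/bgp"))
        .json(&body)
        .send()?;
    println!("create: {}", resp.status());

    // Fetch peer status (GET /v1/system/networking/bgp-status).
    let status: serde_json::Value = client
        .get(format!("{base}/v1/system/networking/bgp-status"))
        .send()?
        .json()?;
    println!("peers: {status}");

    // Delete the configuration by name (DELETE /v1/system/networking/bgp?name_or_id=as47).
    client
        .delete(format!("{base}/v1/system/networking/bgp?name_or_id=as47"))
        .send()?;
    Ok(())
}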
+#[endpoint { + method = DELETE, + path = "/v1/system/networking/bgp-announce", + tags = ["system/networking"], +}] +async fn networking_bgp_announce_set_delete( + rqctx: RequestContext>, + selector: Query, +) -> Result { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.nexus; + let sel = selector.into_inner(); + let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + nexus.bgp_delete_announce_set(&opctx, &sel).await?; + Ok(HttpResponseUpdatedNoContent {}) + }; + apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await +} + // Images /// List images diff --git a/nexus/test-utils/src/lib.rs b/nexus/test-utils/src/lib.rs index 34c218b3e2..2875363111 100644 --- a/nexus/test-utils/src/lib.rs +++ b/nexus/test-utils/src/lib.rs @@ -86,6 +86,7 @@ pub struct ControlPlaneTestContext { pub oximeter: Oximeter, pub producer: ProducerServer, pub dendrite: HashMap, + pub mgd: HashMap, pub external_dns_zone_name: String, pub external_dns: dns_server::TransientServer, pub internal_dns: dns_server::TransientServer, @@ -108,6 +109,9 @@ impl ControlPlaneTestContext { for (_, mut dendrite) in self.dendrite { dendrite.cleanup().await.unwrap(); } + for (_, mut mgd) in self.mgd { + mgd.cleanup().await.unwrap(); + } self.logctx.cleanup_successful(); } } @@ -237,6 +241,7 @@ pub struct ControlPlaneTestContextBuilder<'a, N: NexusServer> { pub oximeter: Option, pub producer: Option, pub dendrite: HashMap, + pub mgd: HashMap, // NOTE: Only exists after starting Nexus, until external Nexus is // initialized. @@ -274,6 +279,7 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { oximeter: None, producer: None, dendrite: HashMap::new(), + mgd: HashMap::new(), nexus_internal: None, nexus_internal_addr: None, external_dns_zone_name: None, @@ -398,6 +404,32 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { ); } + pub async fn start_mgd(&mut self, switch_location: SwitchLocation) { + let log = &self.logctx.log; + debug!(log, "Starting mgd for {switch_location}"); + + // Set up an instance of mgd + let mgd = dev::maghemite::MgdInstance::start(0).await.unwrap(); + let port = mgd.port; + self.mgd.insert(switch_location, mgd); + let address = SocketAddrV6::new(Ipv6Addr::LOCALHOST, port, 0, 0); + + debug!(log, "mgd port is {port}"); + + let config = omicron_common::nexus_config::MgdConfig { + address: std::net::SocketAddr::V6(address), + }; + self.config.pkg.mgd.insert(switch_location, config); + + let sled_id = Uuid::parse_str(SLED_AGENT_UUID).unwrap(); + self.rack_init_builder.add_service( + address, + ServiceKind::Mgd, + internal_dns::ServiceName::Mgd, + sled_id, + ); + } + pub async fn start_oximeter(&mut self) { let log = &self.logctx.log; debug!(log, "Starting Oximeter"); @@ -528,8 +560,11 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { &format!("http://{}", internal_dns_address), log.clone(), ); + let dns_config = self.rack_init_builder.internal_dns_config.clone().build(); + + slog::info!(log, "DNS population: {:#?}", dns_config); dns_config_client.dns_config_put(&dns_config).await.expect( "Failed to send initial DNS records to internal DNS server", ); @@ -669,6 +704,25 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { ); } + pub async fn scrimlet_dns_setup(&mut self) { + let sled_agent = self + .sled_agent + .as_ref() + .expect("Cannot set up scrimlet DNS without sled agent"); + + let sa = match sled_agent.http_server.local_addr() { + SocketAddr::V6(sa) => sa, + SocketAddr::V4(_) => 
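start_mgd above launches a transient mgd instance with MgdInstance::start(0) and then records the port it was actually given. Passing 0 presumably asks the OS for an ephemeral port, the same trick shown in this small std-only sketch (hypothetical; the real MgdInstance plumbing lives in the dev tools and is not reproduced here).

use std::net::TcpListener;

fn main() -> std::io::Result<()> {
    // Binding to port 0 asks the OS for any free port; the chosen port is then
    // read back from the listener, much like the test harness reads mgd.port
    // after starting the daemon on port 0.
    let listener = TcpListener::bind("[::1]:0")?;
    let port = listener.local_addr()?.port();
    println!("daemon would listen on port {port}");
    Ok(())
}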
panic!("expected SocketAddrV6 for sled agent"), + }; + + for loc in [SwitchLocation::Switch0, SwitchLocation::Switch1] { + self.rack_init_builder + .internal_dns_config + .host_scrimlet(loc, sa) + .expect("add switch0 scrimlet dns entry"); + } + } + // Set up an external DNS server. pub async fn start_external_dns(&mut self) { let log = self.logctx.log.new(o!("component" => "external_dns_server")); @@ -742,6 +796,7 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { producer: self.producer.unwrap(), logctx: self.logctx, dendrite: self.dendrite, + mgd: self.mgd, external_dns_zone_name: self.external_dns_zone_name.unwrap(), external_dns: self.external_dns.unwrap(), internal_dns: self.internal_dns.unwrap(), @@ -772,6 +827,9 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { for (_, mut dendrite) in self.dendrite { dendrite.cleanup().await.unwrap(); } + for (_, mut mgd) in self.mgd { + mgd.cleanup().await.unwrap(); + } self.logctx.cleanup_successful(); } } @@ -862,11 +920,14 @@ async fn setup_with_config_impl( builder.start_clickhouse().await; builder.start_dendrite(SwitchLocation::Switch0).await; builder.start_dendrite(SwitchLocation::Switch1).await; + builder.start_mgd(SwitchLocation::Switch0).await; + builder.start_mgd(SwitchLocation::Switch1).await; builder.start_internal_dns().await; builder.start_external_dns().await; builder.start_nexus_internal().await; builder.start_sled(sim_mode).await; builder.start_crucible_pantry().await; + builder.scrimlet_dns_setup().await; // Give Nexus necessary information to find the Crucible Pantry let dns_config = builder.populate_internal_dns().await; diff --git a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs index e9ae11c21f..27cb30c24b 100644 --- a/nexus/tests/integration_tests/endpoints.rs +++ b/nexus/tests/integration_tests/endpoints.rs @@ -420,6 +420,39 @@ lazy_static! { }; } +lazy_static! { + pub static ref DEMO_BGP_CONFIG_CREATE_URL: String = + format!("/v1/system/networking/bgp?name_or_id=as47"); + pub static ref DEMO_BGP_CONFIG: params::BgpConfigCreate = + params::BgpConfigCreate { + identity: IdentityMetadataCreateParams { + name: "as47".parse().unwrap(), + description: "BGP config for AS47".into(), + }, + asn: 47, + vrf: None, + }; + pub static ref DEMO_BGP_ANNOUNCE_SET_URL: String = + format!("/v1/system/networking/bgp-announce?name_or_id=a-bag-of-addrs"); + pub static ref DEMO_BGP_ANNOUNCE: params::BgpAnnounceSetCreate = + params::BgpAnnounceSetCreate { + identity: IdentityMetadataCreateParams { + name: "a-bag-of-addrs".parse().unwrap(), + description: "a bag of addrs".into(), + }, + announcement: vec![params::BgpAnnouncementCreate { + address_lot_block: NameOrId::Name( + "some-block".parse().unwrap(), + ), + network: "10.0.0.0/16".parse().unwrap(), + }], + }; + pub static ref DEMO_BGP_STATUS_URL: String = + format!("/v1/system/networking/bgp-status"); + pub static ref DEMO_BGP_ROUTES_IPV4_URL: String = + format!("/v1/system/networking/bgp-routes-ipv4?asn=47"); +} + lazy_static! { // Project Images pub static ref DEMO_IMAGE_NAME: Name = "demo-image".parse().unwrap(); @@ -1876,5 +1909,48 @@ lazy_static! 
{ AllowedMethod::GetNonexistent ], }, + VerifyEndpoint { + url: &DEMO_BGP_CONFIG_CREATE_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Post( + serde_json::to_value(&*DEMO_BGP_CONFIG).unwrap(), + ), + AllowedMethod::Get, + AllowedMethod::Delete + ], + }, + + VerifyEndpoint { + url: &DEMO_BGP_ANNOUNCE_SET_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Post( + serde_json::to_value(&*DEMO_BGP_ANNOUNCE).unwrap(), + ), + AllowedMethod::GetNonexistent, + AllowedMethod::Delete + ], + }, + + VerifyEndpoint { + url: &DEMO_BGP_STATUS_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::GetNonexistent, + ], + }, + + VerifyEndpoint { + url: &DEMO_BGP_ROUTES_IPV4_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::GetNonexistent, + ], + } ]; } diff --git a/nexus/tests/integration_tests/initialization.rs b/nexus/tests/integration_tests/initialization.rs index 2d4c76dc99..43a4ac8f2e 100644 --- a/nexus/tests/integration_tests/initialization.rs +++ b/nexus/tests/integration_tests/initialization.rs @@ -29,6 +29,8 @@ async fn test_nexus_boots_before_cockroach() { builder.start_dendrite(SwitchLocation::Switch0).await; builder.start_dendrite(SwitchLocation::Switch1).await; + builder.start_mgd(SwitchLocation::Switch0).await; + builder.start_mgd(SwitchLocation::Switch1).await; builder.start_internal_dns().await; builder.start_external_dns().await; @@ -144,6 +146,11 @@ async fn test_nexus_boots_before_dendrite() { builder.start_dendrite(SwitchLocation::Switch1).await; info!(log, "Started Dendrite"); + info!(log, "Starting mgd"); + builder.start_mgd(SwitchLocation::Switch0).await; + builder.start_mgd(SwitchLocation::Switch1).await; + info!(log, "Started mgd"); + info!(log, "Populating internal DNS records"); builder.populate_internal_dns().await; info!(log, "Populated internal DNS records"); @@ -166,6 +173,8 @@ async fn nexus_schema_test_setup( builder.start_external_dns().await; builder.start_dendrite(SwitchLocation::Switch0).await; builder.start_dendrite(SwitchLocation::Switch1).await; + builder.start_mgd(SwitchLocation::Switch0).await; + builder.start_mgd(SwitchLocation::Switch1).await; builder.populate_internal_dns().await; } diff --git a/nexus/tests/integration_tests/schema.rs b/nexus/tests/integration_tests/schema.rs index 6d2595b561..5bf5daa1ba 100644 --- a/nexus/tests/integration_tests/schema.rs +++ b/nexus/tests/integration_tests/schema.rs @@ -58,6 +58,8 @@ async fn test_setup<'a>( builder.start_external_dns().await; builder.start_dendrite(SwitchLocation::Switch0).await; builder.start_dendrite(SwitchLocation::Switch1).await; + builder.start_mgd(SwitchLocation::Switch0).await; + builder.start_mgd(SwitchLocation::Switch1).await; builder.populate_internal_dns().await; builder } diff --git a/nexus/tests/integration_tests/switch_port.rs b/nexus/tests/integration_tests/switch_port.rs index 3d3d6c9f5f..f65ab10b43 100644 --- a/nexus/tests/integration_tests/switch_port.rs +++ b/nexus/tests/integration_tests/switch_port.rs @@ -10,8 +10,10 @@ use nexus_test_utils::http_testing::{AuthnMode, NexusRequest, RequestBuilder}; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params::{ Address, AddressConfig, AddressLotBlockCreate, AddressLotCreate, - LinkConfig, LldpServiceConfig, 
Route, RouteConfig, SwitchInterfaceConfig, - SwitchInterfaceKind, SwitchPortApplySettings, SwitchPortSettingsCreate, + BgpAnnounceSetCreate, BgpAnnouncementCreate, BgpConfigCreate, + BgpPeerConfig, LinkConfig, LldpServiceConfig, Route, RouteConfig, + SwitchInterfaceConfig, SwitchInterfaceKind, SwitchPortApplySettings, + SwitchPortSettingsCreate, }; use nexus_types::external_api::views::Rack; use omicron_common::api::external::{ @@ -33,10 +35,16 @@ async fn test_port_settings_basic_crud(ctx: &ControlPlaneTestContext) { description: "an address parking lot".into(), }, kind: AddressLotKind::Infra, - blocks: vec![AddressLotBlockCreate { - first_address: "203.0.113.10".parse().unwrap(), - last_address: "203.0.113.20".parse().unwrap(), - }], + blocks: vec![ + AddressLotBlockCreate { + first_address: "203.0.113.10".parse().unwrap(), + last_address: "203.0.113.20".parse().unwrap(), + }, + AddressLotBlockCreate { + first_address: "1.2.3.0".parse().unwrap(), + last_address: "1.2.3.255".parse().unwrap(), + }, + ], }; NexusRequest::objects_post( @@ -49,6 +57,48 @@ async fn test_port_settings_basic_crud(ctx: &ControlPlaneTestContext) { .await .unwrap(); + // Create BGP config + let bgp_config = BgpConfigCreate { + identity: IdentityMetadataCreateParams { + name: "as47".parse().unwrap(), + description: "autonomous system 47".into(), + }, + asn: 47, + vrf: None, + }; + + NexusRequest::objects_post( + client, + "/v1/system/networking/bgp", + &bgp_config, + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap(); + + // Create BGP announce set + let announce_set = BgpAnnounceSetCreate { + identity: IdentityMetadataCreateParams { + name: "instances".parse().unwrap(), + description: "autonomous system 47 announcements".into(), + }, + announcement: vec![BgpAnnouncementCreate { + address_lot_block: NameOrId::Name("parkinglot".parse().unwrap()), + network: "1.2.3.0/24".parse().unwrap(), + }], + }; + + NexusRequest::objects_post( + client, + "/v1/system/networking/bgp-announce", + &announce_set, + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap(); + // Create port settings let mut settings = SwitchPortSettingsCreate::new(IdentityMetadataCreateParams { @@ -191,6 +241,28 @@ async fn test_port_settings_basic_crud(ctx: &ControlPlaneTestContext) { .parsed_body() .unwrap(); + // Update port settings. Should not see conflict. 
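The announce set created above draws its 1.2.3.0/24 announcement from the second address lot block (1.2.3.0 through 1.2.3.255). The containment relationship between an announced prefix and its block can be sketched with plain std types, as below; the helper is hypothetical and the actual validation inside Nexus may differ.

use std::net::Ipv4Addr;

// Hypothetical helper: does an announced IPv4 prefix fall entirely within an
// address lot block's [first_address, last_address] range?
fn prefix_within_block(prefix: Ipv4Addr, len: u8, first: Ipv4Addr, last: Ipv4Addr) -> bool {
    assert!(len <= 32);
    let base = u32::from(prefix);
    // Mask off host bits, then compute the last address covered by the prefix.
    let mask = if len == 0 { 0 } else { u32::MAX << (32 - len) };
    let net_first = base & mask;
    let net_last = net_first | !mask;
    u32::from(first) <= net_first && net_last <= u32::from(last)
}

fn main() {
    let first: Ipv4Addr = "1.2.3.0".parse().unwrap();
    let last: Ipv4Addr = "1.2.3.255".parse().unwrap();
    // 1.2.3.0/24 fits inside the 1.2.3.0 - 1.2.3.255 block; 1.2.0.0/16 does not.
    assert!(prefix_within_block("1.2.3.0".parse().unwrap(), 24, first, last));
    assert!(!prefix_within_block("1.2.0.0".parse().unwrap(), 16, first, last));
    println!("announcement ranges check out");
}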
+ settings.bgp_peers.insert( + "phy0".into(), + BgpPeerConfig { + bgp_config: NameOrId::Name("as47".parse().unwrap()), //TODO + bgp_announce_set: NameOrId::Name("instances".parse().unwrap()), //TODO + interface_name: "phy0".to_string(), + addr: "1.2.3.4".parse().unwrap(), + }, + ); + let _created: SwitchPortSettingsView = NexusRequest::objects_post( + client, + "/v1/system/networking/switch-port-settings", + &settings, + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap() + .parsed_body() + .unwrap(); + // There should be one switch port to begin with, see // Server::start_and_populate in nexus/src/lib.rs diff --git a/nexus/tests/output/nexus_tags.txt b/nexus/tests/output/nexus_tags.txt index 1d7f5556c2..e55eaa4df6 100644 --- a/nexus/tests/output/nexus_tags.txt +++ b/nexus/tests/output/nexus_tags.txt @@ -145,6 +145,14 @@ networking_address_lot_block_list GET /v1/system/networking/address- networking_address_lot_create POST /v1/system/networking/address-lot networking_address_lot_delete DELETE /v1/system/networking/address-lot/{address_lot} networking_address_lot_list GET /v1/system/networking/address-lot +networking_bgp_announce_set_create POST /v1/system/networking/bgp-announce +networking_bgp_announce_set_delete DELETE /v1/system/networking/bgp-announce +networking_bgp_announce_set_list GET /v1/system/networking/bgp-announce +networking_bgp_config_create POST /v1/system/networking/bgp +networking_bgp_config_delete DELETE /v1/system/networking/bgp +networking_bgp_config_list GET /v1/system/networking/bgp +networking_bgp_imported_routes_ipv4 GET /v1/system/networking/bgp-routes-ipv4 +networking_bgp_status GET /v1/system/networking/bgp-status networking_loopback_address_create POST /v1/system/networking/loopback-address networking_loopback_address_delete DELETE /v1/system/networking/loopback-address/{rack_id}/{switch_location}/{address}/{subnet_mask} networking_loopback_address_list GET /v1/system/networking/loopback-address diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index b4e0e705d8..633bb8d5c6 100644 --- a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -1406,6 +1406,20 @@ pub struct Route { pub vid: Option, } +/// Select a BGP config by a name or id. +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq)] +pub struct BgpConfigSelector { + /// A name or id to use when selecting BGP config. + pub name_or_id: NameOrId, +} + +/// List BGP configs with an optional name or id. +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq)] +pub struct BgpConfigListSelector { + /// A name or id to use when selecting BGP config. + pub name_or_id: Option, +} + /// A BGP peer configuration for an interface. Includes the set of announcements /// that will be advertised to the peer identified by `addr`. The `bgp_config` /// parameter is a reference to global BGP parameters. The `interface_name` @@ -1431,17 +1445,38 @@ pub struct BgpPeerConfig { /// Parameters for creating a named set of BGP announcements. #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] -pub struct CreateBgpAnnounceSet { +pub struct BgpAnnounceSetCreate { #[serde(flatten)] pub identity: IdentityMetadataCreateParams, /// The announcements in this set. - pub announcement: Vec, + pub announcement: Vec, +} + +/// Select a BGP announce set by a name or id. 
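The renamed create params above (BgpConfigCreate, BgpAnnounceSetCreate) flatten their IdentityMetadataCreateParams with #[serde(flatten)], so name and description appear at the top level of the request body alongside fields like asn and vrf. A minimal sketch of that serialization behavior with stand-in types, assuming serde with the derive feature and serde_json:

use serde::Serialize;

// Stand-ins for IdentityMetadataCreateParams and BgpConfigCreate.
#[derive(Serialize)]
struct IdentityCreate {
    name: String,
    description: String,
}

#[derive(Serialize)]
struct BgpConfigCreateSketch {
    #[serde(flatten)]
    identity: IdentityCreate,
    asn: u32,
    vrf: Option<String>,
}

fn main() {
    let params = BgpConfigCreateSketch {
        identity: IdentityCreate {
            name: "as47".into(),
            description: "BGP config for AS47".into(),
        },
        asn: 47,
        vrf: None,
    };
    // Prints something like:
    // {"name":"as47","description":"BGP config for AS47","asn":47,"vrf":null}
    println!("{}", serde_json::to_string(&params).unwrap());
}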
+#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq)] +pub struct BgpAnnounceSetSelector { + /// A name or id to use when selecting BGP port settings + pub name_or_id: NameOrId, +} + +/// List BGP announce set with an optional name or id. +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq)] +pub struct BgpAnnounceListSelector { + /// A name or id to use when selecting BGP config. + pub name_or_id: Option, +} + +/// Selector used for querying imported BGP routes. +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq)] +pub struct BgpRouteSelector { + /// The ASN to filter on. Required. + pub asn: u32, } /// A BGP announcement tied to a particular address lot block. #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] -pub struct BgpAnnouncement { +pub struct BgpAnnouncementCreate { /// Address lot this announcement is drawn from. pub address_lot_block: NameOrId, @@ -1452,7 +1487,7 @@ pub struct BgpAnnouncement { /// Parameters for creating a BGP configuration. This includes and autonomous /// system number (ASN) and a virtual routing and forwarding (VRF) identifier. #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] -pub struct CreateBgpConfig { +pub struct BgpConfigCreate { #[serde(flatten)] pub identity: IdentityMetadataCreateParams, @@ -1464,6 +1499,13 @@ pub struct CreateBgpConfig { pub vrf: Option, } +/// Select a BGP status information by BGP config id. +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq)] +pub struct BgpStatusSelector { + /// A name or id of the BGP configuration to get status for + pub name_or_id: NameOrId, +} + /// A set of addresses associated with a port configuration. #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct AddressConfig { diff --git a/nexus/types/src/internal_api/params.rs b/nexus/types/src/internal_api/params.rs index e2a5e3d094..c0991ebb17 100644 --- a/nexus/types/src/internal_api/params.rs +++ b/nexus/types/src/internal_api/params.rs @@ -182,6 +182,7 @@ pub enum ServiceKind { Tfport, BoundaryNtp { snat: SourceNatConfig, nic: ServiceNic }, InternalNtp, + Mgd, } impl fmt::Display for ServiceKind { @@ -200,6 +201,7 @@ impl fmt::Display for ServiceKind { Tfport => "tfport", CruciblePantry => "crucible_pantry", BoundaryNtp { .. 
} | InternalNtp => "ntp", + Mgd => "mgd", }; write!(f, "{}", s) } diff --git a/openapi/bootstrap-agent.json b/openapi/bootstrap-agent.json index 682512cc24..91b8ae9130 100644 --- a/openapi/bootstrap-agent.json +++ b/openapi/bootstrap-agent.json @@ -241,6 +241,53 @@ } ] }, + "BgpConfig": { + "type": "object", + "properties": { + "asn": { + "description": "The autonomous system number for the BGP configuration.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "originate": { + "description": "The set of prefixes for the BGP router to originate.", + "type": "array", + "items": { + "$ref": "#/components/schemas/Ipv4Network" + } + } + }, + "required": [ + "asn", + "originate" + ] + }, + "BgpPeerConfig": { + "type": "object", + "properties": { + "addr": { + "description": "Address of the peer.", + "type": "string", + "format": "ipv4" + }, + "asn": { + "description": "Switch port the peer is reachable on.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "port": { + "description": "Switch port the peer is reachable on.", + "type": "string" + } + }, + "required": [ + "addr", + "asn", + "port" + ] + }, "BootstrapAddressDiscovery": { "oneOf": [ { @@ -333,6 +380,26 @@ "request_id" ] }, + "IpNetwork": { + "oneOf": [ + { + "title": "v4", + "allOf": [ + { + "$ref": "#/components/schemas/Ipv4Network" + } + ] + }, + { + "title": "v6", + "allOf": [ + { + "$ref": "#/components/schemas/Ipv6Network" + } + ] + } + ] + }, "IpRange": { "oneOf": [ { @@ -375,6 +442,10 @@ "last" ] }, + "Ipv6Network": { + "type": "string", + "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\")[/](12[0-8]|1[0-1][0-9]|[0-9]?[0-9])$" + }, "Ipv6Range": { "description": "A non-decreasing IPv6 address range, inclusive of both ends.\n\nThe first address must be less than or equal to the last address.", "type": "object", @@ -406,6 +477,69 @@ "description": "Password hashes must be in PHC (Password Hashing Competition) string format. Passwords must be hashed with Argon2id. 
Password hashes may be rejected if the parameters appear not to be secure enough.", "type": "string" }, + "PortConfigV1": { + "type": "object", + "properties": { + "addresses": { + "description": "This port's addresses.", + "type": "array", + "items": { + "$ref": "#/components/schemas/IpNetwork" + } + }, + "bgp_peers": { + "description": "BGP peers on this port", + "type": "array", + "items": { + "$ref": "#/components/schemas/BgpPeerConfig" + } + }, + "port": { + "description": "Nmae of the port this config applies to.", + "type": "string" + }, + "routes": { + "description": "The set of routes associated with this port.", + "type": "array", + "items": { + "$ref": "#/components/schemas/RouteConfig" + } + }, + "switch": { + "description": "Switch the port belongs to.", + "allOf": [ + { + "$ref": "#/components/schemas/SwitchLocation" + } + ] + }, + "uplink_port_fec": { + "description": "Port forward error correction type.", + "allOf": [ + { + "$ref": "#/components/schemas/PortFec" + } + ] + }, + "uplink_port_speed": { + "description": "Port speed.", + "allOf": [ + { + "$ref": "#/components/schemas/PortSpeed" + } + ] + } + }, + "required": [ + "addresses", + "bgp_peers", + "port", + "routes", + "switch", + "uplink_port_fec", + "uplink_port_speed" + ] + }, "PortFec": { "description": "Switchport FEC options", "type": "string", @@ -533,6 +667,13 @@ "description": "Initial network configuration", "type": "object", "properties": { + "bgp": { + "description": "BGP configurations for connecting the rack to external networks", + "type": "array", + "items": { + "$ref": "#/components/schemas/BgpConfig" + } + }, "infra_ip_first": { "description": "First ip address to be used for configuring network infrastructure", "type": "string", @@ -543,18 +684,19 @@ "type": "string", "format": "ipv4" }, - "uplinks": { + "ports": { "description": "Uplinks for connecting the rack to external networks", "type": "array", "items": { - "$ref": "#/components/schemas/UplinkConfig" + "$ref": "#/components/schemas/PortConfigV1" } } }, "required": [ + "bgp", "infra_ip_first", "infra_ip_last", - "uplinks" + "ports" ] }, "RackOperationStatus": { @@ -747,6 +889,28 @@ "user_password_hash" ] }, + "RouteConfig": { + "type": "object", + "properties": { + "destination": { + "description": "The destination of the route.", + "allOf": [ + { + "$ref": "#/components/schemas/IpNetwork" + } + ] + }, + "nexthop": { + "description": "The nexthop/gateway address.", + "type": "string", + "format": "ip" + } + }, + "required": [ + "destination", + "nexthop" + ] + }, "SemverVersion": { "type": "string", "pattern": "^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$" @@ -770,67 +934,6 @@ } ] }, - "UplinkConfig": { - "type": "object", - "properties": { - "gateway_ip": { - "description": "Gateway address", - "type": "string", - "format": "ipv4" - }, - "switch": { - "description": "Switch to use for uplink", - "allOf": [ - { - "$ref": "#/components/schemas/SwitchLocation" - } - ] - }, - "uplink_cidr": { - "description": "IP Address and prefix (e.g., `192.168.0.1/16`) to apply to switchport (must be in infra_ip pool)", - "allOf": [ - { - "$ref": "#/components/schemas/Ipv4Network" - } - ] - }, - "uplink_port": { - "description": "Switchport to use for external connectivity", - "type": "string" - }, - "uplink_port_fec": { - "description": "Forward Error Correction setting for the uplink port", - "allOf": [ - { - 
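The new RouteConfig schema above is just a destination prefix plus a nexthop address, and PortConfigV1 carries lists of such routes alongside addresses and BGP peers. A hedged sketch of reading one route with stand-in Rust types; it assumes the ipnetwork crate with its serde support plus serde/serde_json, and the real Omicron types are not reproduced here.

use ipnetwork::IpNetwork;
use serde::Deserialize;
use std::net::IpAddr;

// Stand-in for the RouteConfig schema above.
#[derive(Debug, Deserialize)]
struct RouteConfigSketch {
    destination: IpNetwork,
    nexthop: IpAddr,
}

fn main() {
    // The shape required by the new schema: a default route via a gateway.
    let json = r#"{ "destination": "0.0.0.0/0", "nexthop": "192.0.2.1" }"#;
    let route: RouteConfigSketch = serde_json::from_str(json).unwrap();
    assert!(route.destination.is_ipv4());
    println!("{:?} via {}", route.destination, route.nexthop);
}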
"$ref": "#/components/schemas/PortFec" - } - ] - }, - "uplink_port_speed": { - "description": "Speed for the Switchport", - "allOf": [ - { - "$ref": "#/components/schemas/PortSpeed" - } - ] - }, - "uplink_vid": { - "nullable": true, - "description": "VLAN id to use for uplink", - "type": "integer", - "format": "uint16", - "minimum": 0 - } - }, - "required": [ - "gateway_ip", - "switch", - "uplink_cidr", - "uplink_port", - "uplink_port_fec", - "uplink_port_speed" - ] - }, "UserId": { "description": "Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. Names cannot be a UUID though they may contain a UUID.", "type": "string" diff --git a/openapi/nexus-internal.json b/openapi/nexus-internal.json index 67db222155..1c1d29fd8b 100644 --- a/openapi/nexus-internal.json +++ b/openapi/nexus-internal.json @@ -767,6 +767,53 @@ "serial_number" ] }, + "BgpConfig": { + "type": "object", + "properties": { + "asn": { + "description": "The autonomous system number for the BGP configuration.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "originate": { + "description": "The set of prefixes for the BGP router to originate.", + "type": "array", + "items": { + "$ref": "#/components/schemas/Ipv4Network" + } + } + }, + "required": [ + "asn", + "originate" + ] + }, + "BgpPeerConfig": { + "type": "object", + "properties": { + "addr": { + "description": "Address of the peer.", + "type": "string", + "format": "ipv4" + }, + "asn": { + "description": "Switch port the peer is reachable on.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "port": { + "description": "Switch port the peer is reachable on.", + "type": "string" + } + }, + "required": [ + "addr", + "asn", + "port" + ] + }, "BinRangedouble": { "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. 
Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", "oneOf": [ @@ -3653,6 +3700,26 @@ } ] }, + "IpNetwork": { + "oneOf": [ + { + "title": "v4", + "allOf": [ + { + "$ref": "#/components/schemas/Ipv4Network" + } + ] + }, + { + "title": "v6", + "allOf": [ + { + "$ref": "#/components/schemas/Ipv6Network" + } + ] + } + ] + }, "IpRange": { "oneOf": [ { @@ -3695,6 +3762,10 @@ "last" ] }, + "Ipv6Network": { + "type": "string", + "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\")[/](12[0-8]|1[0-1][0-9]|[0-9]?[0-9])$" + }, "Ipv6Range": { "description": "A non-decreasing IPv6 address range, inclusive of both ends.\n\nThe first address must be less than or equal to the last address.", "type": "object", @@ -4038,6 +4109,69 @@ "PhysicalDiskPutResponse": { "type": "object" }, + "PortConfigV1": { + "type": "object", + "properties": { + "addresses": { + "description": "This port's addresses.", + "type": "array", + "items": { + "$ref": "#/components/schemas/IpNetwork" + } + }, + "bgp_peers": { + "description": "BGP peers on this port", + "type": "array", + "items": { + "$ref": "#/components/schemas/BgpPeerConfig" + } + }, + "port": { + "description": "Nmae of the port this config applies to.", + "type": "string" + }, + "routes": { + "description": "The set of routes associated with this port.", + "type": "array", + "items": { + "$ref": "#/components/schemas/RouteConfig" + } + }, + "switch": { + "description": "Switch the port belongs to.", + "allOf": [ + { + "$ref": "#/components/schemas/SwitchLocation" + } + ] + }, + "uplink_port_fec": { + "description": "Port forward error correction type.", + "allOf": [ + { + "$ref": "#/components/schemas/PortFec" + } + ] + }, + "uplink_port_speed": { + "description": "Port speed.", + "allOf": [ + { + "$ref": "#/components/schemas/PortSpeed" + } + ] + } + }, + "required": [ + "addresses", + "bgp_peers", + "port", + "routes", + "switch", + "uplink_port_fec", + "uplink_port_speed" + ] + }, "PortFec": { "description": "Switchport FEC options", "type": "string", @@ -4303,6 +4437,13 @@ "description": "Initial network configuration", "type": "object", "properties": { + "bgp": { + "description": "BGP configurations for connecting the rack to external networks", + "type": "array", + "items": { + "$ref": "#/components/schemas/BgpConfig" + } + }, "infra_ip_first": { "description": "First ip address to be used for configuring network infrastructure", "type": "string", @@ -4313,18 +4454,19 @@ "type": "string", "format": "ipv4" }, - "uplinks": { + "ports": { "description": "Uplinks for connecting the rack to external networks", "type": "array", "items": { - "$ref": "#/components/schemas/UplinkConfig" + "$ref": "#/components/schemas/PortConfigV1" } } }, "required": [ + "bgp", "infra_ip_first", "infra_ip_last", - "uplinks" + "ports" ] }, "RecoverySiloConfig": { @@ -4346,6 +4488,28 @@ "user_password_hash" ] }, + "RouteConfig": { + "type": "object", + 
"properties": { + "destination": { + "description": "The destination of the route.", + "allOf": [ + { + "$ref": "#/components/schemas/IpNetwork" + } + ] + }, + "nexthop": { + "description": "The nexthop/gateway address.", + "type": "string", + "format": "ip" + } + }, + "required": [ + "destination", + "nexthop" + ] + }, "Saga": { "description": "Sagas\n\nThese are currently only intended for observability by developers. We will eventually want to flesh this out into something more observable for end users.", "type": "object", @@ -4822,6 +4986,20 @@ "required": [ "type" ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "mgd" + ] + } + }, + "required": [ + "type" + ] } ] }, @@ -5090,67 +5268,6 @@ "SwitchPutResponse": { "type": "object" }, - "UplinkConfig": { - "type": "object", - "properties": { - "gateway_ip": { - "description": "Gateway address", - "type": "string", - "format": "ipv4" - }, - "switch": { - "description": "Switch to use for uplink", - "allOf": [ - { - "$ref": "#/components/schemas/SwitchLocation" - } - ] - }, - "uplink_cidr": { - "description": "IP Address and prefix (e.g., `192.168.0.1/16`) to apply to switchport (must be in infra_ip pool)", - "allOf": [ - { - "$ref": "#/components/schemas/Ipv4Network" - } - ] - }, - "uplink_port": { - "description": "Switchport to use for external connectivity", - "type": "string" - }, - "uplink_port_fec": { - "description": "Forward Error Correction setting for the uplink port", - "allOf": [ - { - "$ref": "#/components/schemas/PortFec" - } - ] - }, - "uplink_port_speed": { - "description": "Speed for the Switchport", - "allOf": [ - { - "$ref": "#/components/schemas/PortSpeed" - } - ] - }, - "uplink_vid": { - "nullable": true, - "description": "VLAN id to use for uplink", - "type": "integer", - "format": "uint16", - "minimum": 0 - } - }, - "required": [ - "gateway_ip", - "switch", - "uplink_cidr", - "uplink_port", - "uplink_port_fec", - "uplink_port_speed" - ] - }, "UserId": { "title": "A name unique within the parent collection", "description": "Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. 
Names cannot be a UUID though they may contain a UUID.", diff --git a/openapi/nexus.json b/openapi/nexus.json index 9dda94f283..009a168164 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -5165,13 +5165,13 @@ } } }, - "/v1/system/networking/loopback-address": { + "/v1/system/networking/bgp": { "get": { "tags": [ "system/networking" ], - "summary": "Get loopback addresses, optionally filtering by id", - "operationId": "networking_loopback_address_list", + "summary": "Get BGP configurations.", + "operationId": "networking_bgp_config_list", "parameters": [ { "in": "query", @@ -5184,6 +5184,14 @@ "minimum": 1 } }, + { + "in": "query", + "name": "name_or_id", + "description": "A name or id to use when selecting BGP config.", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, { "in": "query", "name": "page_token", @@ -5197,7 +5205,7 @@ "in": "query", "name": "sort_by", "schema": { - "$ref": "#/components/schemas/IdSortMode" + "$ref": "#/components/schemas/NameOrIdSortMode" } } ], @@ -5207,7 +5215,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/LoopbackAddressResultsPage" + "$ref": "#/components/schemas/BgpConfigResultsPage" } } } @@ -5227,13 +5235,13 @@ "tags": [ "system/networking" ], - "summary": "Create a loopback address", - "operationId": "networking_loopback_address_create", + "summary": "Create a new BGP configuration.", + "operationId": "networking_bgp_config_create", "requestBody": { "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/LoopbackAddressCreate" + "$ref": "#/components/schemas/BgpConfigCreate" } } }, @@ -5245,7 +5253,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/LoopbackAddress" + "$ref": "#/components/schemas/BgpConfig" } } } @@ -5257,60 +5265,27 @@ "$ref": "#/components/responses/Error" } } - } - }, - "/v1/system/networking/loopback-address/{rack_id}/{switch_location}/{address}/{subnet_mask}": { + }, "delete": { "tags": [ "system/networking" ], - "summary": "Delete a loopback address", - "operationId": "networking_loopback_address_delete", + "summary": "Delete a BGP configuration.", + "operationId": "networking_bgp_config_delete", "parameters": [ { - "in": "path", - "name": "address", - "description": "The IP address and subnet mask to use when selecting the loopback address.", - "required": true, - "schema": { - "type": "string", - "format": "ip" - } - }, - { - "in": "path", - "name": "rack_id", - "description": "The rack to use when selecting the loopback address.", - "required": true, - "schema": { - "type": "string", - "format": "uuid" - } - }, - { - "in": "path", - "name": "subnet_mask", - "description": "The IP address and subnet mask to use when selecting the loopback address.", - "required": true, - "schema": { - "type": "integer", - "format": "uint8", - "minimum": 0 - } - }, - { - "in": "path", - "name": "switch_location", - "description": "The switch location to use when selecting the loopback address.", + "in": "query", + "name": "name_or_id", + "description": "A name or id to use when selecting BGP config.", "required": true, "schema": { - "$ref": "#/components/schemas/Name" + "$ref": "#/components/schemas/NameOrId" } } ], "responses": { "204": { - "description": "successful deletion" + "description": "resource updated" }, "4XX": { "$ref": "#/components/responses/Error" @@ -5321,48 +5296,22 @@ } } }, - "/v1/system/networking/switch-port-settings": { + "/v1/system/networking/bgp-announce": { "get": { "tags": [ "system/networking" ], - 
"summary": "List switch port settings", - "operationId": "networking_switch_port_settings_list", + "summary": "Get originated routes for a given BGP configuration.", + "operationId": "networking_bgp_announce_set_list", "parameters": [ { "in": "query", - "name": "limit", - "description": "Maximum number of items returned by a single call", - "schema": { - "nullable": true, - "type": "integer", - "format": "uint32", - "minimum": 1 - } - }, - { - "in": "query", - "name": "page_token", - "description": "Token returned by previous call to retrieve the subsequent page", - "schema": { - "nullable": true, - "type": "string" - } - }, - { - "in": "query", - "name": "port_settings", - "description": "An optional name or id to use when selecting port settings.", + "name": "name_or_id", + "description": "A name or id to use when selecting BGP port settings", + "required": true, "schema": { "$ref": "#/components/schemas/NameOrId" } - }, - { - "in": "query", - "name": "sort_by", - "schema": { - "$ref": "#/components/schemas/NameOrIdSortMode" - } } ], "responses": { @@ -5371,7 +5320,11 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/SwitchPortSettingsResultsPage" + "title": "Array_of_BgpAnnouncement", + "type": "array", + "items": { + "$ref": "#/components/schemas/BgpAnnouncement" + } } } } @@ -5382,22 +5335,19 @@ "5XX": { "$ref": "#/components/responses/Error" } - }, - "x-dropshot-pagination": { - "required": [] } }, "post": { "tags": [ "system/networking" ], - "summary": "Create switch port settings", - "operationId": "networking_switch_port_settings_create", + "summary": "Create a new BGP announce set.", + "operationId": "networking_bgp_announce_set_create", "requestBody": { "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/SwitchPortSettingsCreate" + "$ref": "#/components/schemas/BgpAnnounceSetCreate" } } }, @@ -5409,7 +5359,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/SwitchPortSettingsView" + "$ref": "#/components/schemas/BgpAnnounceSet" } } } @@ -5426,13 +5376,14 @@ "tags": [ "system/networking" ], - "summary": "Delete switch port settings", - "operationId": "networking_switch_port_settings_delete", + "summary": "Delete a BGP announce set.", + "operationId": "networking_bgp_announce_set_delete", "parameters": [ { "in": "query", - "name": "port_settings", - "description": "An optional name or id to use when selecting port settings.", + "name": "name_or_id", + "description": "A name or id to use when selecting BGP port settings", + "required": true, "schema": { "$ref": "#/components/schemas/NameOrId" } @@ -5440,7 +5391,7 @@ ], "responses": { "204": { - "description": "successful deletion" + "description": "resource updated" }, "4XX": { "$ref": "#/components/responses/Error" @@ -5451,21 +5402,23 @@ } } }, - "/v1/system/networking/switch-port-settings/{port}": { + "/v1/system/networking/bgp-routes-ipv4": { "get": { "tags": [ "system/networking" ], - "summary": "Get information about a switch port", - "operationId": "networking_switch_port_settings_view", + "summary": "Get imported IPv4 BGP routes.", + "operationId": "networking_bgp_imported_routes_ipv4", "parameters": [ { - "in": "path", - "name": "port", - "description": "A name or id to use when selecting switch port settings info objects.", + "in": "query", + "name": "asn", + "description": "The ASN to filter on. 
Required.", "required": true, "schema": { - "$ref": "#/components/schemas/NameOrId" + "type": "integer", + "format": "uint32", + "minimum": 0 } } ], @@ -5475,7 +5428,11 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/SwitchPortSettingsView" + "title": "Array_of_BgpImportedRouteIpv4", + "type": "array", + "items": { + "$ref": "#/components/schemas/BgpImportedRouteIpv4" + } } } } @@ -5489,55 +5446,24 @@ } } }, - "/v1/system/policy": { + "/v1/system/networking/bgp-status": { "get": { "tags": [ - "policy" - ], - "summary": "Fetch the top-level IAM policy", - "operationId": "system_policy_view", - "responses": { - "200": { - "description": "successful operation", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/FleetRolePolicy" - } - } - } - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } - }, - "put": { - "tags": [ - "policy" + "system/networking" ], - "summary": "Update the top-level IAM policy", - "operationId": "system_policy_update", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/FleetRolePolicy" - } - } - }, - "required": true - }, + "summary": "Get BGP peer status", + "operationId": "networking_bgp_status", "responses": { "200": { "description": "successful operation", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/FleetRolePolicy" + "title": "Array_of_BgpPeerStatus", + "type": "array", + "items": { + "$ref": "#/components/schemas/BgpPeerStatus" + } } } } @@ -5551,13 +5477,13 @@ } } }, - "/v1/system/roles": { + "/v1/system/networking/loopback-address": { "get": { "tags": [ - "roles" + "system/networking" ], - "summary": "List built-in roles", - "operationId": "role_list", + "summary": "Get loopback addresses, optionally filtering by id", + "operationId": "networking_loopback_address_list", "parameters": [ { "in": "query", @@ -5578,6 +5504,13 @@ "nullable": true, "type": "string" } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/IdSortMode" + } } ], "responses": { @@ -5586,7 +5519,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/RoleResultsPage" + "$ref": "#/components/schemas/LoopbackAddressResultsPage" } } } @@ -5601,33 +5534,30 @@ "x-dropshot-pagination": { "required": [] } - } - }, - "/v1/system/roles/{role_name}": { - "get": { + }, + "post": { "tags": [ - "roles" + "system/networking" ], - "summary": "Fetch a built-in role", - "operationId": "role_view", - "parameters": [ - { - "in": "path", - "name": "role_name", - "description": "The built-in role's unique name.", - "required": true, - "schema": { - "type": "string" + "summary": "Create a loopback address", + "operationId": "networking_loopback_address_create", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LoopbackAddressCreate" + } } - } - ], + }, + "required": true + }, "responses": { - "200": { - "description": "successful operation", + "201": { + "description": "successful creation", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/Role" + "$ref": "#/components/schemas/LoopbackAddress" } } } @@ -5641,38 +5571,107 @@ } } }, - "/v1/system/silos": { - "get": { + "/v1/system/networking/loopback-address/{rack_id}/{switch_location}/{address}/{subnet_mask}": { + "delete": { "tags": [ - "system/silos" + "system/networking" ], - "summary": "List 
silos", - "description": "Lists silos that are discoverable based on the current permissions.", - "operationId": "silo_list", + "summary": "Delete a loopback address", + "operationId": "networking_loopback_address_delete", "parameters": [ { - "in": "query", - "name": "limit", - "description": "Maximum number of items returned by a single call", + "in": "path", + "name": "address", + "description": "The IP address and subnet mask to use when selecting the loopback address.", + "required": true, "schema": { - "nullable": true, - "type": "integer", - "format": "uint32", - "minimum": 1 + "type": "string", + "format": "ip" } }, { - "in": "query", - "name": "page_token", - "description": "Token returned by previous call to retrieve the subsequent page", + "in": "path", + "name": "rack_id", + "description": "The rack to use when selecting the loopback address.", + "required": true, "schema": { - "nullable": true, - "type": "string" + "type": "string", + "format": "uuid" } }, { - "in": "query", - "name": "sort_by", + "in": "path", + "name": "subnet_mask", + "description": "The IP address and subnet mask to use when selecting the loopback address.", + "required": true, + "schema": { + "type": "integer", + "format": "uint8", + "minimum": 0 + } + }, + { + "in": "path", + "name": "switch_location", + "description": "The switch location to use when selecting the loopback address.", + "required": true, + "schema": { + "$ref": "#/components/schemas/Name" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/networking/switch-port-settings": { + "get": { + "tags": [ + "system/networking" + ], + "summary": "List switch port settings", + "operationId": "networking_switch_port_settings_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "port_settings", + "description": "An optional name or id to use when selecting port settings.", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "sort_by", "schema": { "$ref": "#/components/schemas/NameOrIdSortMode" } @@ -5684,7 +5683,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/SiloResultsPage" + "$ref": "#/components/schemas/SwitchPortSettingsResultsPage" } } } @@ -5702,15 +5701,15 @@ }, "post": { "tags": [ - "system/silos" + "system/networking" ], - "summary": "Create a silo", - "operationId": "silo_create", + "summary": "Create switch port settings", + "operationId": "networking_switch_port_settings_create", "requestBody": { "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/SiloCreate" + "$ref": "#/components/schemas/SwitchPortSettingsCreate" } } }, @@ -5722,7 +5721,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/Silo" + "$ref": "#/components/schemas/SwitchPortSettingsView" } } } @@ -5734,37 +5733,26 @@ "$ref": "#/components/responses/Error" } } - } - }, - "/v1/system/silos/{silo}": { - "get": { + }, + "delete": { "tags": [ - "system/silos" + 
"system/networking" ], - "summary": "Fetch a silo", - "description": "Fetch a silo by name.", - "operationId": "silo_view", + "summary": "Delete switch port settings", + "operationId": "networking_switch_port_settings_delete", "parameters": [ { - "in": "path", - "name": "silo", - "description": "Name or ID of the silo", - "required": true, + "in": "query", + "name": "port_settings", + "description": "An optional name or id to use when selecting port settings.", "schema": { "$ref": "#/components/schemas/NameOrId" } } ], "responses": { - "200": { - "description": "successful operation", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Silo" - } - } - } + "204": { + "description": "successful deletion" }, "4XX": { "$ref": "#/components/responses/Error" @@ -5773,19 +5761,20 @@ "$ref": "#/components/responses/Error" } } - }, - "delete": { + } + }, + "/v1/system/networking/switch-port-settings/{port}": { + "get": { "tags": [ - "system/silos" + "system/networking" ], - "summary": "Delete a silo", - "description": "Delete a silo by name.", - "operationId": "silo_delete", + "summary": "Get information about a switch port", + "operationId": "networking_switch_port_settings_view", "parameters": [ { "in": "path", - "name": "silo", - "description": "Name or ID of the silo", + "name": "port", + "description": "A name or id to use when selecting switch port settings info objects.", "required": true, "schema": { "$ref": "#/components/schemas/NameOrId" @@ -5793,8 +5782,15 @@ } ], "responses": { - "204": { - "description": "successful deletion" + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SwitchPortSettingsView" + } + } + } }, "4XX": { "$ref": "#/components/responses/Error" @@ -5805,31 +5801,20 @@ } } }, - "/v1/system/silos/{silo}/policy": { + "/v1/system/policy": { "get": { "tags": [ - "system/silos" - ], - "summary": "Fetch a silo's IAM policy", - "operationId": "silo_policy_view", - "parameters": [ - { - "in": "path", - "name": "silo", - "description": "Name or ID of the silo", - "required": true, - "schema": { - "$ref": "#/components/schemas/NameOrId" - } - } + "policy" ], + "summary": "Fetch the top-level IAM policy", + "operationId": "system_policy_view", "responses": { "200": { "description": "successful operation", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/SiloRolePolicy" + "$ref": "#/components/schemas/FleetRolePolicy" } } } @@ -5844,26 +5829,15 @@ }, "put": { "tags": [ - "system/silos" - ], - "summary": "Update a silo's IAM policy", - "operationId": "silo_policy_update", - "parameters": [ - { - "in": "path", - "name": "silo", - "description": "Name or ID of the silo", - "required": true, - "schema": { - "$ref": "#/components/schemas/NameOrId" - } - } + "policy" ], + "summary": "Update the top-level IAM policy", + "operationId": "system_policy_update", "requestBody": { "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/SiloRolePolicy" + "$ref": "#/components/schemas/FleetRolePolicy" } } }, @@ -5875,7 +5849,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/SiloRolePolicy" + "$ref": "#/components/schemas/FleetRolePolicy" } } } @@ -5889,13 +5863,13 @@ } } }, - "/v1/system/users": { + "/v1/system/roles": { "get": { "tags": [ - "system/silos" + "roles" ], - "summary": "List built-in (system) users in a silo", - "operationId": "silo_user_list", + "summary": "List built-in roles", + 
"operationId": "role_list", "parameters": [ { "in": "query", @@ -5916,21 +5890,6 @@ "nullable": true, "type": "string" } - }, - { - "in": "query", - "name": "silo", - "description": "Name or ID of the silo", - "schema": { - "$ref": "#/components/schemas/NameOrId" - } - }, - { - "in": "query", - "name": "sort_by", - "schema": { - "$ref": "#/components/schemas/IdSortMode" - } } ], "responses": { @@ -5939,7 +5898,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/UserResultsPage" + "$ref": "#/components/schemas/RoleResultsPage" } } } @@ -5952,37 +5911,25 @@ } }, "x-dropshot-pagination": { - "required": [ - "silo" - ] + "required": [] } } }, - "/v1/system/users/{user_id}": { + "/v1/system/roles/{role_name}": { "get": { "tags": [ - "system/silos" + "roles" ], - "summary": "Fetch a built-in (system) user", - "operationId": "silo_user_view", + "summary": "Fetch a built-in role", + "operationId": "role_view", "parameters": [ { "in": "path", - "name": "user_id", - "description": "The user's internal id", - "required": true, - "schema": { - "type": "string", - "format": "uuid" - } - }, - { - "in": "query", - "name": "silo", - "description": "Name or ID of the silo", + "name": "role_name", + "description": "The built-in role's unique name.", "required": true, "schema": { - "$ref": "#/components/schemas/NameOrId" + "type": "string" } } ], @@ -5992,7 +5939,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/User" + "$ref": "#/components/schemas/Role" } } } @@ -6006,13 +5953,14 @@ } } }, - "/v1/system/users-builtin": { + "/v1/system/silos": { "get": { "tags": [ "system/silos" ], - "summary": "List built-in users", - "operationId": "user_builtin_list", + "summary": "List silos", + "description": "Lists silos that are discoverable based on the current permissions.", + "operationId": "silo_list", "parameters": [ { "in": "query", @@ -6038,7 +5986,7 @@ "in": "query", "name": "sort_by", "schema": { - "$ref": "#/components/schemas/NameSortMode" + "$ref": "#/components/schemas/NameOrIdSortMode" } } ], @@ -6048,7 +5996,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/UserBuiltinResultsPage" + "$ref": "#/components/schemas/SiloResultsPage" } } } @@ -6063,32 +6011,30 @@ "x-dropshot-pagination": { "required": [] } - } - }, - "/v1/system/users-builtin/{user}": { - "get": { - "tags": [ + }, + "post": { + "tags": [ "system/silos" ], - "summary": "Fetch a built-in user", - "operationId": "user_builtin_view", - "parameters": [ - { - "in": "path", - "name": "user", - "required": true, - "schema": { - "$ref": "#/components/schemas/NameOrId" + "summary": "Create a silo", + "operationId": "silo_create", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SiloCreate" + } } - } - ], + }, + "required": true + }, "responses": { - "200": { - "description": "successful operation", + "201": { + "description": "successful creation", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/UserBuiltin" + "$ref": "#/components/schemas/Silo" } } } @@ -6102,48 +6048,22 @@ } } }, - "/v1/users": { + "/v1/system/silos/{silo}": { "get": { "tags": [ - "silos" + "system/silos" ], - "summary": "List users", - "operationId": "user_list", + "summary": "Fetch a silo", + "description": "Fetch a silo by name.", + "operationId": "silo_view", "parameters": [ { - "in": "query", - "name": "group", - "schema": { - "nullable": true, - "type": "string", - "format": "uuid" - } - }, - 
{ - "in": "query", - "name": "limit", - "description": "Maximum number of items returned by a single call", - "schema": { - "nullable": true, - "type": "integer", - "format": "uint32", - "minimum": 1 - } - }, - { - "in": "query", - "name": "page_token", - "description": "Token returned by previous call to retrieve the subsequent page", - "schema": { - "nullable": true, - "type": "string" - } - }, - { - "in": "query", - "name": "sort_by", + "in": "path", + "name": "silo", + "description": "Name or ID of the silo", + "required": true, "schema": { - "$ref": "#/components/schemas/IdSortMode" + "$ref": "#/components/schemas/NameOrId" } } ], @@ -6153,7 +6073,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/UserResultsPage" + "$ref": "#/components/schemas/Silo" } } } @@ -6164,32 +6084,51 @@ "5XX": { "$ref": "#/components/responses/Error" } - }, - "x-dropshot-pagination": { - "required": [] } - } - }, - "/v1/vpc-firewall-rules": { - "get": { + }, + "delete": { "tags": [ - "vpcs" + "system/silos" ], - "summary": "List firewall rules", - "operationId": "vpc_firewall_rules_view", + "summary": "Delete a silo", + "description": "Delete a silo by name.", + "operationId": "silo_delete", "parameters": [ { - "in": "query", - "name": "project", - "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "in": "path", + "name": "silo", + "description": "Name or ID of the silo", + "required": true, "schema": { "$ref": "#/components/schemas/NameOrId" } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/silos/{silo}/policy": { + "get": { + "tags": [ + "system/silos" + ], + "summary": "Fetch a silo's IAM policy", + "operationId": "silo_policy_view", + "parameters": [ { - "in": "query", - "name": "vpc", - "description": "Name or ID of the VPC", + "in": "path", + "name": "silo", + "description": "Name or ID of the silo", "required": true, "schema": { "$ref": "#/components/schemas/NameOrId" @@ -6202,7 +6141,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/VpcFirewallRules" + "$ref": "#/components/schemas/SiloRolePolicy" } } } @@ -6217,23 +6156,15 @@ }, "put": { "tags": [ - "vpcs" + "system/silos" ], - "summary": "Replace firewall rules", - "operationId": "vpc_firewall_rules_update", + "summary": "Update a silo's IAM policy", + "operationId": "silo_policy_update", "parameters": [ { - "in": "query", - "name": "project", - "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", - "schema": { - "$ref": "#/components/schemas/NameOrId" - } - }, - { - "in": "query", - "name": "vpc", - "description": "Name or ID of the VPC", + "in": "path", + "name": "silo", + "description": "Name or ID of the silo", "required": true, "schema": { "$ref": "#/components/schemas/NameOrId" @@ -6244,7 +6175,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/VpcFirewallRuleUpdateParams" + "$ref": "#/components/schemas/SiloRolePolicy" } } }, @@ -6256,7 +6187,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/VpcFirewallRules" + "$ref": "#/components/schemas/SiloRolePolicy" } } } @@ -6270,14 +6201,13 @@ } } }, - "/v1/vpc-router-routes": { + "/v1/system/users": { "get": { "tags": [ - "vpcs" + "system/silos" ], - "summary": "List routes", - "description": "List the routes 
associated with a router in a particular VPC.", - "operationId": "vpc_router_route_list", + "summary": "List built-in (system) users in a silo", + "operationId": "silo_user_list", "parameters": [ { "in": "query", @@ -6301,16 +6231,8 @@ }, { "in": "query", - "name": "project", - "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", - "schema": { - "$ref": "#/components/schemas/NameOrId" - } - }, - { - "in": "query", - "name": "router", - "description": "Name or ID of the router", + "name": "silo", + "description": "Name or ID of the silo", "schema": { "$ref": "#/components/schemas/NameOrId" } @@ -6319,15 +6241,7 @@ "in": "query", "name": "sort_by", "schema": { - "$ref": "#/components/schemas/NameOrIdSortMode" - } - }, - { - "in": "query", - "name": "vpc", - "description": "Name or ID of the VPC, only required if `subnet` is provided as a `Name`", - "schema": { - "$ref": "#/components/schemas/NameOrId" + "$ref": "#/components/schemas/IdSortMode" } } ], @@ -6337,7 +6251,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/RouterRouteResultsPage" + "$ref": "#/components/schemas/UserResultsPage" } } } @@ -6351,60 +6265,46 @@ }, "x-dropshot-pagination": { "required": [ - "router" + "silo" ] } - }, - "post": { + } + }, + "/v1/system/users/{user_id}": { + "get": { "tags": [ - "vpcs" + "system/silos" ], - "summary": "Create a router", - "operationId": "vpc_router_route_create", + "summary": "Fetch a built-in (system) user", + "operationId": "silo_user_view", "parameters": [ { - "in": "query", - "name": "project", - "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", - "schema": { - "$ref": "#/components/schemas/NameOrId" - } - }, - { - "in": "query", - "name": "router", - "description": "Name or ID of the router", + "in": "path", + "name": "user_id", + "description": "The user's internal id", "required": true, "schema": { - "$ref": "#/components/schemas/NameOrId" + "type": "string", + "format": "uuid" } }, { "in": "query", - "name": "vpc", - "description": "Name or ID of the VPC, only required if `subnet` is provided as a `Name`", + "name": "silo", + "description": "Name or ID of the silo", + "required": true, "schema": { "$ref": "#/components/schemas/NameOrId" } } ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/RouterRouteCreate" - } - } - }, - "required": true - }, "responses": { - "201": { - "description": "successful creation", + "200": { + "description": "successful operation", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/RouterRoute" + "$ref": "#/components/schemas/User" } } } @@ -6418,44 +6318,77 @@ } } }, - "/v1/vpc-router-routes/{route}": { + "/v1/system/users-builtin": { "get": { "tags": [ - "vpcs" + "system/silos" ], - "summary": "Fetch a route", - "operationId": "vpc_router_route_view", + "summary": "List built-in users", + "operationId": "user_builtin_list", "parameters": [ { - "in": "path", - "name": "route", - "description": "Name or ID of the route", - "required": true, + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", "schema": { - "$ref": "#/components/schemas/NameOrId" + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 } }, { "in": "query", - "name": "project", - "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "name": "page_token", + "description": "Token 
returned by previous call to retrieve the subsequent page", "schema": { - "$ref": "#/components/schemas/NameOrId" + "nullable": true, + "type": "string" } }, { "in": "query", - "name": "router", - "description": "Name or ID of the router", - "required": true, + "name": "sort_by", "schema": { - "$ref": "#/components/schemas/NameOrId" + "$ref": "#/components/schemas/NameSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UserBuiltinResultsPage" + } + } } }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + }, + "/v1/system/users-builtin/{user}": { + "get": { + "tags": [ + "system/silos" + ], + "summary": "Fetch a built-in user", + "operationId": "user_builtin_view", + "parameters": [ { - "in": "query", - "name": "vpc", - "description": "Name or ID of the VPC, only required if `subnet` is provided as a `Name`", + "in": "path", + "name": "user", + "required": true, "schema": { "$ref": "#/components/schemas/NameOrId" } @@ -6467,7 +6400,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/RouterRoute" + "$ref": "#/components/schemas/UserBuiltin" } } } @@ -6479,65 +6412,60 @@ "$ref": "#/components/responses/Error" } } - }, - "put": { + } + }, + "/v1/users": { + "get": { "tags": [ - "vpcs" + "silos" ], - "summary": "Update a route", - "operationId": "vpc_router_route_update", + "summary": "List users", + "operationId": "user_list", "parameters": [ { - "in": "path", - "name": "route", - "description": "Name or ID of the route", - "required": true, + "in": "query", + "name": "group", "schema": { - "$ref": "#/components/schemas/NameOrId" + "nullable": true, + "type": "string", + "format": "uuid" } }, { "in": "query", - "name": "project", - "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "name": "limit", + "description": "Maximum number of items returned by a single call", "schema": { - "$ref": "#/components/schemas/NameOrId" + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 } }, { "in": "query", - "name": "router", - "description": "Name or ID of the router", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", "schema": { - "$ref": "#/components/schemas/NameOrId" + "nullable": true, + "type": "string" } }, { "in": "query", - "name": "vpc", - "description": "Name or ID of the VPC, only required if `subnet` is provided as a `Name`", + "name": "sort_by", "schema": { - "$ref": "#/components/schemas/NameOrId" + "$ref": "#/components/schemas/IdSortMode" } } ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/RouterRouteUpdate" - } - } - }, - "required": true - }, "responses": { "200": { "description": "successful operation", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/RouterRoute" + "$ref": "#/components/schemas/UserResultsPage" } } } @@ -6548,36 +6476,68 @@ "5XX": { "$ref": "#/components/responses/Error" } + }, + "x-dropshot-pagination": { + "required": [] } - }, - "delete": { + } + }, + "/v1/vpc-firewall-rules": { + "get": { "tags": [ "vpcs" ], - "summary": "Delete a route", - "operationId": "vpc_router_route_delete", + "summary": "List firewall rules", + "operationId": "vpc_firewall_rules_view", "parameters": [ { 
- "in": "path", - "name": "route", - "description": "Name or ID of the route", - "required": true, + "in": "query", + "name": "project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", "schema": { "$ref": "#/components/schemas/NameOrId" } }, { "in": "query", - "name": "project", - "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "name": "vpc", + "description": "Name or ID of the VPC", + "required": true, "schema": { "$ref": "#/components/schemas/NameOrId" } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VpcFirewallRules" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "put": { + "tags": [ + "vpcs" + ], + "summary": "Replace firewall rules", + "operationId": "vpc_firewall_rules_update", + "parameters": [ { "in": "query", - "name": "router", - "description": "Name or ID of the router", + "name": "project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", "schema": { "$ref": "#/components/schemas/NameOrId" } @@ -6585,15 +6545,33 @@ { "in": "query", "name": "vpc", - "description": "Name or ID of the VPC, only required if `subnet` is provided as a `Name`", + "description": "Name or ID of the VPC", + "required": true, "schema": { "$ref": "#/components/schemas/NameOrId" } } ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VpcFirewallRuleUpdateParams" + } + } + }, + "required": true + }, "responses": { - "204": { - "description": "successful deletion" + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VpcFirewallRules" + } + } + } }, "4XX": { "$ref": "#/components/responses/Error" @@ -6604,13 +6582,14 @@ } } }, - "/v1/vpc-routers": { + "/v1/vpc-router-routes": { "get": { "tags": [ "vpcs" ], - "summary": "List routers", - "operationId": "vpc_router_list", + "summary": "List routes", + "description": "List the routes associated with a router in a particular VPC.", + "operationId": "vpc_router_route_list", "parameters": [ { "in": "query", @@ -6640,6 +6619,14 @@ "$ref": "#/components/schemas/NameOrId" } }, + { + "in": "query", + "name": "router", + "description": "Name or ID of the router", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, { "in": "query", "name": "sort_by", @@ -6650,7 +6637,7 @@ { "in": "query", "name": "vpc", - "description": "Name or ID of the VPC", + "description": "Name or ID of the VPC, only required if `subnet` is provided as a `Name`", "schema": { "$ref": "#/components/schemas/NameOrId" } @@ -6662,7 +6649,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/VpcRouterResultsPage" + "$ref": "#/components/schemas/RouterRouteResultsPage" } } } @@ -6676,7 +6663,7 @@ }, "x-dropshot-pagination": { "required": [ - "vpc" + "router" ] } }, @@ -6684,8 +6671,8 @@ "tags": [ "vpcs" ], - "summary": "Create a VPC router", - "operationId": "vpc_router_create", + "summary": "Create a router", + "operationId": "vpc_router_route_create", "parameters": [ { "in": "query", @@ -6697,19 +6684,27 @@ }, { "in": "query", - "name": "vpc", - "description": "Name or ID of the VPC", + "name": "router", + "description": "Name or ID of the router", "required": true, "schema": { "$ref": 
"#/components/schemas/NameOrId" } + }, + { + "in": "query", + "name": "vpc", + "description": "Name or ID of the VPC, only required if `subnet` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } } ], "requestBody": { "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/VpcRouterCreate" + "$ref": "#/components/schemas/RouterRouteCreate" } } }, @@ -6721,7 +6716,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/VpcRouter" + "$ref": "#/components/schemas/RouterRoute" } } } @@ -6735,18 +6730,18 @@ } } }, - "/v1/vpc-routers/{router}": { + "/v1/vpc-router-routes/{route}": { "get": { "tags": [ "vpcs" ], - "summary": "Fetch a router", - "operationId": "vpc_router_view", + "summary": "Fetch a route", + "operationId": "vpc_router_route_view", "parameters": [ { "in": "path", - "name": "router", - "description": "Name or ID of the router", + "name": "route", + "description": "Name or ID of the route", "required": true, "schema": { "$ref": "#/components/schemas/NameOrId" @@ -6760,10 +6755,19 @@ "$ref": "#/components/schemas/NameOrId" } }, + { + "in": "query", + "name": "router", + "description": "Name or ID of the router", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, { "in": "query", "name": "vpc", - "description": "Name or ID of the VPC", + "description": "Name or ID of the VPC, only required if `subnet` is provided as a `Name`", "schema": { "$ref": "#/components/schemas/NameOrId" } @@ -6775,7 +6779,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/VpcRouter" + "$ref": "#/components/schemas/RouterRoute" } } } @@ -6792,13 +6796,13 @@ "tags": [ "vpcs" ], - "summary": "Update a router", - "operationId": "vpc_router_update", + "summary": "Update a route", + "operationId": "vpc_router_route_update", "parameters": [ { "in": "path", - "name": "router", - "description": "Name or ID of the router", + "name": "route", + "description": "Name or ID of the route", "required": true, "schema": { "$ref": "#/components/schemas/NameOrId" @@ -6812,10 +6816,18 @@ "$ref": "#/components/schemas/NameOrId" } }, + { + "in": "query", + "name": "router", + "description": "Name or ID of the router", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, { "in": "query", "name": "vpc", - "description": "Name or ID of the VPC", + "description": "Name or ID of the VPC, only required if `subnet` is provided as a `Name`", "schema": { "$ref": "#/components/schemas/NameOrId" } @@ -6825,7 +6837,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/VpcRouterUpdate" + "$ref": "#/components/schemas/RouterRouteUpdate" } } }, @@ -6837,7 +6849,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/VpcRouter" + "$ref": "#/components/schemas/RouterRoute" } } } @@ -6854,13 +6866,13 @@ "tags": [ "vpcs" ], - "summary": "Delete a router", - "operationId": "vpc_router_delete", + "summary": "Delete a route", + "operationId": "vpc_router_route_delete", "parameters": [ { "in": "path", - "name": "router", - "description": "Name or ID of the router", + "name": "route", + "description": "Name or ID of the route", "required": true, "schema": { "$ref": "#/components/schemas/NameOrId" @@ -6874,10 +6886,18 @@ "$ref": "#/components/schemas/NameOrId" } }, + { + "in": "query", + "name": "router", + "description": "Name or ID of the router", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, { "in": "query", "name": 
"vpc", - "description": "Name or ID of the VPC", + "description": "Name or ID of the VPC, only required if `subnet` is provided as a `Name`", "schema": { "$ref": "#/components/schemas/NameOrId" } @@ -6896,13 +6916,13 @@ } } }, - "/v1/vpc-subnets": { + "/v1/vpc-routers": { "get": { "tags": [ "vpcs" ], - "summary": "List subnets", - "operationId": "vpc_subnet_list", + "summary": "List routers", + "operationId": "vpc_router_list", "parameters": [ { "in": "query", @@ -6954,7 +6974,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/VpcSubnetResultsPage" + "$ref": "#/components/schemas/VpcRouterResultsPage" } } } @@ -6976,8 +6996,8 @@ "tags": [ "vpcs" ], - "summary": "Create a subnet", - "operationId": "vpc_subnet_create", + "summary": "Create a VPC router", + "operationId": "vpc_router_create", "parameters": [ { "in": "query", @@ -7001,7 +7021,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/VpcSubnetCreate" + "$ref": "#/components/schemas/VpcRouterCreate" } } }, @@ -7013,7 +7033,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/VpcSubnet" + "$ref": "#/components/schemas/VpcRouter" } } } @@ -7027,18 +7047,18 @@ } } }, - "/v1/vpc-subnets/{subnet}": { + "/v1/vpc-routers/{router}": { "get": { "tags": [ "vpcs" ], - "summary": "Fetch a subnet", - "operationId": "vpc_subnet_view", + "summary": "Fetch a router", + "operationId": "vpc_router_view", "parameters": [ { "in": "path", - "name": "subnet", - "description": "Name or ID of the subnet", + "name": "router", + "description": "Name or ID of the router", "required": true, "schema": { "$ref": "#/components/schemas/NameOrId" @@ -7067,7 +7087,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/VpcSubnet" + "$ref": "#/components/schemas/VpcRouter" } } } @@ -7084,13 +7104,13 @@ "tags": [ "vpcs" ], - "summary": "Update a subnet", - "operationId": "vpc_subnet_update", + "summary": "Update a router", + "operationId": "vpc_router_update", "parameters": [ { "in": "path", - "name": "subnet", - "description": "Name or ID of the subnet", + "name": "router", + "description": "Name or ID of the router", "required": true, "schema": { "$ref": "#/components/schemas/NameOrId" @@ -7117,7 +7137,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/VpcSubnetUpdate" + "$ref": "#/components/schemas/VpcRouterUpdate" } } }, @@ -7129,7 +7149,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/VpcSubnet" + "$ref": "#/components/schemas/VpcRouter" } } } @@ -7146,13 +7166,13 @@ "tags": [ "vpcs" ], - "summary": "Delete a subnet", - "operationId": "vpc_subnet_delete", + "summary": "Delete a router", + "operationId": "vpc_router_delete", "parameters": [ { "in": "path", - "name": "subnet", - "description": "Name or ID of the subnet", + "name": "router", + "description": "Name or ID of the router", "required": true, "schema": { "$ref": "#/components/schemas/NameOrId" @@ -7188,23 +7208,14 @@ } } }, - "/v1/vpc-subnets/{subnet}/network-interfaces": { + "/v1/vpc-subnets": { "get": { "tags": [ "vpcs" ], - "summary": "List network interfaces", - "operationId": "vpc_subnet_list_network_interfaces", + "summary": "List subnets", + "operationId": "vpc_subnet_list", "parameters": [ - { - "in": "path", - "name": "subnet", - "description": "Name or ID of the subnet", - "required": true, - "schema": { - "$ref": "#/components/schemas/NameOrId" - } - }, { "in": "query", "name": "limit", @@ -7255,7 +7266,7 
@@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/InstanceNetworkInterfaceResultsPage" + "$ref": "#/components/schemas/VpcSubnetResultsPage" } } } @@ -7268,89 +7279,30 @@ } }, "x-dropshot-pagination": { - "required": [] + "required": [ + "vpc" + ] } - } - }, - "/v1/vpcs": { - "get": { + }, + "post": { "tags": [ "vpcs" ], - "summary": "List VPCs", - "operationId": "vpc_list", + "summary": "Create a subnet", + "operationId": "vpc_subnet_create", "parameters": [ - { - "in": "query", - "name": "limit", - "description": "Maximum number of items returned by a single call", - "schema": { - "nullable": true, - "type": "integer", - "format": "uint32", - "minimum": 1 - } - }, - { - "in": "query", - "name": "page_token", - "description": "Token returned by previous call to retrieve the subsequent page", - "schema": { - "nullable": true, - "type": "string" - } - }, { "in": "query", "name": "project", - "description": "Name or ID of the project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", "schema": { "$ref": "#/components/schemas/NameOrId" } }, { "in": "query", - "name": "sort_by", - "schema": { - "$ref": "#/components/schemas/NameOrIdSortMode" - } - } - ], - "responses": { - "200": { - "description": "successful operation", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/VpcResultsPage" - } - } - } - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - }, - "x-dropshot-pagination": { - "required": [ - "project" - ] - } - }, - "post": { - "tags": [ - "vpcs" - ], - "summary": "Create a VPC", - "operationId": "vpc_create", - "parameters": [ - { - "in": "query", - "name": "project", - "description": "Name or ID of the project", + "name": "vpc", + "description": "Name or ID of the VPC", "required": true, "schema": { "$ref": "#/components/schemas/NameOrId" @@ -7361,7 +7313,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/VpcCreate" + "$ref": "#/components/schemas/VpcSubnetCreate" } } }, @@ -7373,7 +7325,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/Vpc" + "$ref": "#/components/schemas/VpcSubnet" } } } @@ -7387,18 +7339,18 @@ } } }, - "/v1/vpcs/{vpc}": { + "/v1/vpc-subnets/{subnet}": { "get": { "tags": [ "vpcs" ], - "summary": "Fetch a VPC", - "operationId": "vpc_view", + "summary": "Fetch a subnet", + "operationId": "vpc_subnet_view", "parameters": [ { "in": "path", - "name": "vpc", - "description": "Name or ID of the VPC", + "name": "subnet", + "description": "Name or ID of the subnet", "required": true, "schema": { "$ref": "#/components/schemas/NameOrId" @@ -7407,7 +7359,15 @@ { "in": "query", "name": "project", - "description": "Name or ID of the project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "vpc", + "description": "Name or ID of the VPC", "schema": { "$ref": "#/components/schemas/NameOrId" } @@ -7419,7 +7379,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/Vpc" + "$ref": "#/components/schemas/VpcSubnet" } } } @@ -7436,13 +7396,13 @@ "tags": [ "vpcs" ], - "summary": "Update a VPC", - "operationId": "vpc_update", + "summary": "Update a subnet", + "operationId": "vpc_subnet_update", "parameters": [ { "in": "path", - "name": "vpc", - "description": "Name or ID of the 
VPC", + "name": "subnet", + "description": "Name or ID of the subnet", "required": true, "schema": { "$ref": "#/components/schemas/NameOrId" @@ -7451,7 +7411,15 @@ { "in": "query", "name": "project", - "description": "Name or ID of the project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "vpc", + "description": "Name or ID of the VPC", "schema": { "$ref": "#/components/schemas/NameOrId" } @@ -7461,7 +7429,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/VpcUpdate" + "$ref": "#/components/schemas/VpcSubnetUpdate" } } }, @@ -7473,7 +7441,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/Vpc" + "$ref": "#/components/schemas/VpcSubnet" } } } @@ -7490,13 +7458,13 @@ "tags": [ "vpcs" ], - "summary": "Delete a VPC", - "operationId": "vpc_delete", + "summary": "Delete a subnet", + "operationId": "vpc_subnet_delete", "parameters": [ { "in": "path", - "name": "vpc", - "description": "Name or ID of the VPC", + "name": "subnet", + "description": "Name or ID of the subnet", "required": true, "schema": { "$ref": "#/components/schemas/NameOrId" @@ -7505,7 +7473,15 @@ { "in": "query", "name": "project", - "description": "Name or ID of the project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "vpc", + "description": "Name or ID of the VPC", "schema": { "$ref": "#/components/schemas/NameOrId" } @@ -7523,11 +7499,347 @@ } } } - } - }, - "components": { - "responses": { - "Error": { + }, + "/v1/vpc-subnets/{subnet}/network-interfaces": { + "get": { + "tags": [ + "vpcs" + ], + "summary": "List network interfaces", + "operationId": "vpc_subnet_list_network_interfaces", + "parameters": [ + { + "in": "path", + "name": "subnet", + "description": "Name or ID of the subnet", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } + }, + { + "in": "query", + "name": "vpc", + "description": "Name or ID of the VPC", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InstanceNetworkInterfaceResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + }, + "/v1/vpcs": { + "get": { + "tags": [ + "vpcs" + ], + "summary": "List VPCs", + "operationId": "vpc_list", + "parameters": [ + { + "in": "query", + "name": "limit", + 
"description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VpcResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [ + "project" + ] + } + }, + "post": { + "tags": [ + "vpcs" + ], + "summary": "Create a VPC", + "operationId": "vpc_create", + "parameters": [ + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VpcCreate" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Vpc" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/vpcs/{vpc}": { + "get": { + "tags": [ + "vpcs" + ], + "summary": "Fetch a VPC", + "operationId": "vpc_view", + "parameters": [ + { + "in": "path", + "name": "vpc", + "description": "Name or ID of the VPC", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Vpc" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "put": { + "tags": [ + "vpcs" + ], + "summary": "Update a VPC", + "operationId": "vpc_update", + "parameters": [ + { + "in": "path", + "name": "vpc", + "description": "Name or ID of the VPC", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VpcUpdate" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Vpc" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "tags": [ + "vpcs" + ], + "summary": "Delete a VPC", + "operationId": "vpc_delete", + "parameters": [ + { + "in": "path", + "name": "vpc", + 
"description": "Name or ID of the VPC", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + } + }, + "components": { + "responses": { + "Error": { "description": "Error", "content": { "application/json": { @@ -7535,56 +7847,416 @@ "$ref": "#/components/schemas/Error" } } - } - } - }, - "schemas": { - "Address": { - "description": "An address tied to an address lot.", + } + } + }, + "schemas": { + "Address": { + "description": "An address tied to an address lot.", + "type": "object", + "properties": { + "address": { + "description": "The address and prefix length of this address.", + "allOf": [ + { + "$ref": "#/components/schemas/IpNet" + } + ] + }, + "address_lot": { + "description": "The address lot this address is drawn from.", + "allOf": [ + { + "$ref": "#/components/schemas/NameOrId" + } + ] + } + }, + "required": [ + "address", + "address_lot" + ] + }, + "AddressConfig": { + "description": "A set of addresses associated with a port configuration.", + "type": "object", + "properties": { + "addresses": { + "description": "The set of addresses assigned to the port configuration.", + "type": "array", + "items": { + "$ref": "#/components/schemas/Address" + } + } + }, + "required": [ + "addresses" + ] + }, + "AddressLot": { + "description": "Represents an address lot object, containing the id of the lot that can be used in other API calls.", + "type": "object", + "properties": { + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "kind": { + "description": "Desired use of `AddressLot`", + "allOf": [ + { + "$ref": "#/components/schemas/AddressLotKind" + } + ] + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "description", + "id", + "kind", + "name", + "time_created", + "time_modified" + ] + }, + "AddressLotBlock": { + "description": "An address lot block is a part of an address lot and contains a range of addresses. The range is inclusive.", + "type": "object", + "properties": { + "first_address": { + "description": "The first address of the block (inclusive).", + "type": "string", + "format": "ip" + }, + "id": { + "description": "The id of the address lot block.", + "type": "string", + "format": "uuid" + }, + "last_address": { + "description": "The last address of the block (inclusive).", + "type": "string", + "format": "ip" + } + }, + "required": [ + "first_address", + "id", + "last_address" + ] + }, + "AddressLotBlockCreate": { + "description": "Parameters for creating an address lot block. 
First and last addresses are inclusive.", + "type": "object", + "properties": { + "first_address": { + "description": "The first address in the lot (inclusive).", + "type": "string", + "format": "ip" + }, + "last_address": { + "description": "The last address in the lot (inclusive).", + "type": "string", + "format": "ip" + } + }, + "required": [ + "first_address", + "last_address" + ] + }, + "AddressLotBlockResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/AddressLotBlock" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "AddressLotCreate": { + "description": "Parameters for creating an address lot.", "type": "object", "properties": { - "address": { - "description": "The address and prefix length of this address.", + "blocks": { + "description": "The blocks to add along with the new address lot.", + "type": "array", + "items": { + "$ref": "#/components/schemas/AddressLotBlockCreate" + } + }, + "description": { + "type": "string" + }, + "kind": { + "description": "The kind of address lot to create.", "allOf": [ { - "$ref": "#/components/schemas/IpNet" + "$ref": "#/components/schemas/AddressLotKind" } ] }, - "address_lot": { - "description": "The address lot this address is drawn from.", + "name": { + "$ref": "#/components/schemas/Name" + } + }, + "required": [ + "blocks", + "description", + "kind", + "name" + ] + }, + "AddressLotCreateResponse": { + "description": "An address lot and associated blocks resulting from creating an address lot.", + "type": "object", + "properties": { + "blocks": { + "description": "The address lot blocks that were created.", + "type": "array", + "items": { + "$ref": "#/components/schemas/AddressLotBlock" + } + }, + "lot": { + "description": "The address lot that was created.", "allOf": [ { - "$ref": "#/components/schemas/NameOrId" + "$ref": "#/components/schemas/AddressLot" } ] } }, "required": [ - "address", - "address_lot" + "blocks", + "lot" ] }, - "AddressConfig": { - "description": "A set of addresses associated with a port configuration.", + "AddressLotKind": { + "description": "The kind associated with an address lot.", + "oneOf": [ + { + "description": "Infrastructure address lots are used for network infrastructure like addresses assigned to rack switches.", + "type": "string", + "enum": [ + "infra" + ] + }, + { + "description": "Pool address lots are used by IP pools.", + "type": "string", + "enum": [ + "pool" + ] + } + ] + }, + "AddressLotResultsPage": { + "description": "A single page of results", "type": "object", "properties": { - "addresses": { - "description": "The set of addresses assigned to the port configuration.", + "items": { + "description": "list of items on this page of results", "type": "array", "items": { - "$ref": "#/components/schemas/Address" + "$ref": "#/components/schemas/AddressLot" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "Baseboard": { + "description": "Properties that uniquely identify an Oxide hardware component", + "type": "object", + "properties": { + "part": { + "type": "string" + }, + "revision": { + "type": "integer", + "format": "int64" + }, + "serial": { + "type": 
"string" + } + }, + "required": [ + "part", + "revision", + "serial" + ] + }, + "BgpAnnounceSet": { + "description": "Represents a BGP announce set by id. The id can be used with other API calls to view and manage the announce set.", + "type": "object", + "properties": { + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "description", + "id", + "name", + "time_created", + "time_modified" + ] + }, + "BgpAnnounceSetCreate": { + "description": "Parameters for creating a named set of BGP announcements.", + "type": "object", + "properties": { + "announcement": { + "description": "The announcements in this set.", + "type": "array", + "items": { + "$ref": "#/components/schemas/BgpAnnouncementCreate" } + }, + "description": { + "type": "string" + }, + "name": { + "$ref": "#/components/schemas/Name" + } + }, + "required": [ + "announcement", + "description", + "name" + ] + }, + "BgpAnnouncement": { + "description": "A BGP announcement tied to an address lot block.", + "type": "object", + "properties": { + "address_lot_block_id": { + "description": "The address block the IP network being announced is drawn from.", + "type": "string", + "format": "uuid" + }, + "announce_set_id": { + "description": "The id of the set this announcement is a part of.", + "type": "string", + "format": "uuid" + }, + "network": { + "description": "The IP network being announced.", + "allOf": [ + { + "$ref": "#/components/schemas/IpNet" + } + ] + } + }, + "required": [ + "address_lot_block_id", + "announce_set_id", + "network" + ] + }, + "BgpAnnouncementCreate": { + "description": "A BGP announcement tied to a particular address lot block.", + "type": "object", + "properties": { + "address_lot_block": { + "description": "Address lot this announcement is drawn from.", + "allOf": [ + { + "$ref": "#/components/schemas/NameOrId" + } + ] + }, + "network": { + "description": "The network being announced.", + "allOf": [ + { + "$ref": "#/components/schemas/IpNet" + } + ] } }, "required": [ - "addresses" + "address_lot_block", + "network" ] }, - "AddressLot": { - "description": "Represents an address lot object, containing the id of the lot that can be used in other API calls.", + "BgpConfig": { + "description": "A base BGP configuration.", "type": "object", "properties": { + "asn": { + "description": "The autonomous system number of this BGP configuration.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, "description": { "description": "human-readable free-form text about a resource", "type": "string" @@ -7594,14 +8266,6 @@ "type": "string", "format": "uuid" }, - "kind": { - "description": "Desired use of `AddressLot`", - "allOf": [ - { - "$ref": "#/components/schemas/AddressLotKind" - } - ] - }, "name": { "description": "unique, mutable, user-controlled identifier for each resource", "allOf": [ @@ -7619,64 +8283,55 @@ "description": "timestamp when this resource was last 
modified", "type": "string", "format": "date-time" + }, + "vrf": { + "nullable": true, + "description": "Optional virtual routing and forwarding identifier for this BGP configuration.", + "type": "string" } }, "required": [ + "asn", "description", "id", - "kind", "name", "time_created", "time_modified" ] }, - "AddressLotBlock": { - "description": "An address lot block is a part of an address lot and contains a range of addresses. The range is inclusive.", + "BgpConfigCreate": { + "description": "Parameters for creating a BGP configuration. This includes and autonomous system number (ASN) and a virtual routing and forwarding (VRF) identifier.", "type": "object", "properties": { - "first_address": { - "description": "The first address of the block (inclusive).", - "type": "string", - "format": "ip" + "asn": { + "description": "The autonomous system number of this BGP configuration.", + "type": "integer", + "format": "uint32", + "minimum": 0 }, - "id": { - "description": "The id of the address lot block.", - "type": "string", - "format": "uuid" + "description": { + "type": "string" }, - "last_address": { - "description": "The last address of the block (inclusive).", - "type": "string", - "format": "ip" - } - }, - "required": [ - "first_address", - "id", - "last_address" - ] - }, - "AddressLotBlockCreate": { - "description": "Parameters for creating an address lot block. Fist and last addresses are inclusive.", - "type": "object", - "properties": { - "first_address": { - "description": "The first address in the lot (inclusive).", - "type": "string", - "format": "ip" + "name": { + "$ref": "#/components/schemas/Name" }, - "last_address": { - "description": "The last address in the lot (inclusive).", - "type": "string", - "format": "ip" + "vrf": { + "nullable": true, + "description": "Optional virtual routing and forwarding identifier for this BGP configuration.", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] } }, "required": [ - "first_address", - "last_address" + "asn", + "description", + "name" ] }, - "AddressLotBlockResultsPage": { + "BgpConfigResultsPage": { "description": "A single page of results", "type": "object", "properties": { @@ -7684,7 +8339,7 @@ "description": "list of items on this page of results", "type": "array", "items": { - "$ref": "#/components/schemas/AddressLotBlock" + "$ref": "#/components/schemas/BgpConfig" } }, "next_page": { @@ -7697,160 +8352,187 @@ "items" ] }, - "AddressLotCreate": { - "description": "Parameters for creating an address lot.", + "BgpImportedRouteIpv4": { + "description": "A route imported from a BGP peer.", "type": "object", "properties": { - "blocks": { - "description": "The blocks to add along with the new address lot.", - "type": "array", - "items": { - "$ref": "#/components/schemas/AddressLotBlockCreate" - } + "id": { + "description": "BGP identifier of the originating router.", + "type": "integer", + "format": "uint32", + "minimum": 0 }, - "description": { - "type": "string" + "nexthop": { + "description": "The nexthop the prefix is reachable through.", + "type": "string", + "format": "ipv4" }, - "kind": { - "description": "The kind of address lot to create.", + "prefix": { + "description": "The destination network prefix.", "allOf": [ { - "$ref": "#/components/schemas/AddressLotKind" + "$ref": "#/components/schemas/Ipv4Net" } ] }, - "name": { - "$ref": "#/components/schemas/Name" + "switch": { + "description": "Switch the route is imported into.", + "allOf": [ + { + "$ref": "#/components/schemas/SwitchLocation" + } + ] } }, 
"required": [ - "blocks", - "description", - "kind", - "name" + "id", + "nexthop", + "prefix", + "switch" ] }, - "AddressLotCreateResponse": { - "description": "An address lot and associated blocks resulting from creating an address lot.", + "BgpPeerConfig": { + "description": "A BGP peer configuration for an interface. Includes the set of announcements that will be advertised to the peer identified by `addr`. The `bgp_config` parameter is a reference to global BGP parameters. The `interface_name` indicates what interface the peer should be contacted on.", "type": "object", "properties": { - "blocks": { - "description": "The address lot blocks that were created.", - "type": "array", - "items": { - "$ref": "#/components/schemas/AddressLotBlock" - } + "addr": { + "description": "The address of the host to peer with.", + "type": "string", + "format": "ip" }, - "lot": { - "description": "The address lot that was created.", + "bgp_announce_set": { + "description": "The set of announcements advertised by the peer.", "allOf": [ { - "$ref": "#/components/schemas/AddressLot" + "$ref": "#/components/schemas/NameOrId" } ] + }, + "bgp_config": { + "description": "The global BGP configuration used for establishing a session with this peer.", + "allOf": [ + { + "$ref": "#/components/schemas/NameOrId" + } + ] + }, + "interface_name": { + "description": "The name of interface to peer on. This is relative to the port configuration this BGP peer configuration is a part of. For example this value could be phy0 to refer to a primary physical interface. Or it could be vlan47 to refer to a VLAN interface.", + "type": "string" } }, "required": [ - "blocks", - "lot" + "addr", + "bgp_announce_set", + "bgp_config", + "interface_name" ] }, - "AddressLotKind": { - "description": "The kind associated with an address lot.", + "BgpPeerState": { + "description": "The current state of a BGP peer.", "oneOf": [ { - "description": "Infrastructure address lots are used for network infrastructure like addresses assigned to rack switches.", + "description": "Initial state. Refuse all incomming BGP connections. 
No resources allocated to peer.", "type": "string", "enum": [ - "infra" + "idle" ] }, { - "description": "Pool address lots are used by IP pools.", + "description": "Waiting for the TCP connection to be completed.", "type": "string", "enum": [ - "pool" + "connect" ] - } - ] - }, - "AddressLotResultsPage": { - "description": "A single page of results", - "type": "object", - "properties": { - "items": { - "description": "list of items on this page of results", - "type": "array", - "items": { - "$ref": "#/components/schemas/AddressLot" - } }, - "next_page": { - "nullable": true, - "description": "token used to fetch the next page of results (if any)", - "type": "string" - } - }, - "required": [ - "items" - ] - }, - "Baseboard": { - "description": "Properties that uniquely identify an Oxide hardware component", - "type": "object", - "properties": { - "part": { - "type": "string" + { + "description": "Trying to acquire peer by listening for and accepting a TCP connection.", + "type": "string", + "enum": [ + "active" + ] }, - "revision": { - "type": "integer", - "format": "int64" + { + "description": "Waiting for open message from peer.", + "type": "string", + "enum": [ + "open_sent" + ] }, - "serial": { - "type": "string" + { + "description": "Waiting for keepalive or notification from peer.", + "type": "string", + "enum": [ + "open_confirm" + ] + }, + { + "description": "Synchronizing with peer.", + "type": "string", + "enum": [ + "session_setup" + ] + }, + { + "description": "Session established. Able to exchange update, notification and keepalive messages with peers.", + "type": "string", + "enum": [ + "established" + ] } - }, - "required": [ - "part", - "revision", - "serial" ] }, - "BgpPeerConfig": { - "description": "A BGP peer configuration for an interface. Includes the set of announcements that will be advertised to the peer identified by `addr`. The `bgp_config` parameter is a reference to global BGP parameters. The `interface_name` indicates what interface the peer should be contacted on.", + "BgpPeerStatus": { + "description": "The current status of a BGP peer.", "type": "object", "properties": { "addr": { - "description": "The address of the host to peer with.", + "description": "IP address of the peer.", "type": "string", "format": "ip" }, - "bgp_announce_set": { - "description": "The set of announcements advertised by the peer.", + "local_asn": { + "description": "Local autonomous system number.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "remote_asn": { + "description": "Remote autonomous system number.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "state": { + "description": "State of the peer.", "allOf": [ { - "$ref": "#/components/schemas/NameOrId" + "$ref": "#/components/schemas/BgpPeerState" } ] }, - "bgp_config": { - "description": "The global BGP configuration used for establishing a session with this peer.", + "state_duration_millis": { + "description": "Time of last state change.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "switch": { + "description": "Switch with the peer session.", "allOf": [ { - "$ref": "#/components/schemas/NameOrId" + "$ref": "#/components/schemas/SwitchLocation" } ] - }, - "interface_name": { - "description": "The name of interface to peer on. This is relative to the port configuration this BGP peer configuration is a part of. For example this value could be phy0 to refer to a primary physical interface. 
Or it could be vlan47 to refer to a VLAN interface.", - "type": "string" } }, "required": [ "addr", - "bgp_announce_set", - "bgp_config", - "interface_name" + "local_asn", + "remote_asn", + "state", + "state_duration_millis", + "switch" ] }, "BinRangedouble": { @@ -13539,6 +14221,25 @@ } ] }, + "SwitchLocation": { + "description": "Identifies switch physical location", + "oneOf": [ + { + "description": "Switch in upper slot", + "type": "string", + "enum": [ + "switch0" + ] + }, + { + "description": "Switch in lower slot", + "type": "string", + "enum": [ + "switch1" + ] + } + ] + }, "SwitchPort": { "description": "A switch port represents a physical external port on a rack switch.", "type": "object", diff --git a/openapi/sled-agent.json b/openapi/sled-agent.json index 56437ab283..0dbf1f6fb4 100644 --- a/openapi/sled-agent.json +++ b/openapi/sled-agent.json @@ -289,6 +289,53 @@ } } }, + "/network-bootstore-config": { + "get": { + "operationId": "read_network_bootstore_config", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/EarlyNetworkConfig" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "put": { + "operationId": "write_network_bootstore_config", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/EarlyNetworkConfig" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, "/services": { "put": { "operationId": "services_put", @@ -338,6 +385,32 @@ } } }, + "/switch-ports": { + "post": { + "operationId": "uplink_ensure", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SwitchPorts" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, "/timesync": { "get": { "operationId": "timesync_get", @@ -863,6 +936,53 @@ } }, "schemas": { + "BgpConfig": { + "type": "object", + "properties": { + "asn": { + "description": "The autonomous system number for the BGP configuration.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "originate": { + "description": "The set of prefixes for the BGP router to originate.", + "type": "array", + "items": { + "$ref": "#/components/schemas/Ipv4Network" + } + } + }, + "required": [ + "asn", + "originate" + ] + }, + "BgpPeerConfig": { + "type": "object", + "properties": { + "addr": { + "description": "Address of the peer.", + "type": "string", + "format": "ipv4" + }, + "asn": { + "description": "Switch port the peer is reachable on.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "port": { + "description": "Switch port the peer is reachable on.", + "type": "string" + } + }, + "required": [ + "addr", + "asn", + "port" + ] + }, "BundleUtilization": { "description": "The portion of a debug dataset used for zone bundles.", "type": "object", @@ -1571,6 +1691,42 @@ "secs" ] }, + "EarlyNetworkConfig": { + "description": "Network configuration required to bring up the control plane\n\nThe fields in this structure are those from [`super::params::RackInitializeRequest`] necessary for use beyond 
RSS. This is just for the initial rack configuration and cold boot purposes. Updates will come from Nexus in the future.", + "type": "object", + "properties": { + "generation": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "ntp_servers": { + "description": "The external NTP server addresses.", + "type": "array", + "items": { + "type": "string" + } + }, + "rack_network_config": { + "nullable": true, + "description": "A copy of the initial rack network configuration when we are in generation `1`.", + "allOf": [ + { + "$ref": "#/components/schemas/RackNetworkConfig" + } + ] + }, + "rack_subnet": { + "type": "string", + "format": "ipv6" + } + }, + "required": [ + "generation", + "ntp_servers", + "rack_subnet" + ] + }, "Error": { "description": "Error information from a response.", "type": "object", @@ -1637,6 +1793,26 @@ } ] }, + "HostPortConfig": { + "type": "object", + "properties": { + "addrs": { + "description": "IP Address and prefix (e.g., `192.168.0.1/16`) to apply to switchport (must be in infra_ip pool)", + "type": "array", + "items": { + "$ref": "#/components/schemas/IpNetwork" + } + }, + "port": { + "description": "Switchport to use for external connectivity", + "type": "string" + } + }, + "required": [ + "addrs", + "port" + ] + }, "InstanceCpuCount": { "description": "The number of CPUs in an Instance", "type": "integer", @@ -2099,6 +2275,26 @@ } ] }, + "IpNetwork": { + "oneOf": [ + { + "title": "v4", + "allOf": [ + { + "$ref": "#/components/schemas/Ipv4Network" + } + ] + }, + { + "title": "v6", + "allOf": [ + { + "$ref": "#/components/schemas/Ipv6Network" + } + ] + } + ] + }, "Ipv4Net": { "example": "192.168.1.0/24", "title": "An IPv4 subnet", @@ -2106,6 +2302,10 @@ "type": "string", "pattern": "^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|1[0-9]|2[0-9]|3[0-2])$" }, + "Ipv4Network": { + "type": "string", + "pattern": "^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\/(3[0-2]|[0-2]?[0-9])$" + }, "Ipv6Net": { "example": "fd12:3456::/64", "title": "An IPv6 subnet", @@ -2113,6 +2313,10 @@ "type": "string", "pattern": "^([fF][dD])[0-9a-fA-F]{2}:(([0-9a-fA-F]{1,4}:){6}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,6}:)([0-9a-fA-F]{1,4})?\\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8])$" }, + "Ipv6Network": { + "type": "string", + "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\")[/](12[0-8]|1[0-1][0-9]|[0-9]?[0-9])$" + }, "KnownArtifactKind": { "description": "Kinds of update artifacts, as used by Nexus to determine what updates are available and by sled-agent to determine how to apply an update when asked.", "type": "string", @@ -2247,6 +2451,93 @@ } ] }, + "PortConfigV1": { + "type": "object", + "properties": { + "addresses": { + "description": "This port's addresses.", + "type": "array", + "items": { + "$ref": "#/components/schemas/IpNetwork" + } + }, + 
"bgp_peers": { + "description": "BGP peers on this port", + "type": "array", + "items": { + "$ref": "#/components/schemas/BgpPeerConfig" + } + }, + "port": { + "description": "Nmae of the port this config applies to.", + "type": "string" + }, + "routes": { + "description": "The set of routes associated with this port.", + "type": "array", + "items": { + "$ref": "#/components/schemas/RouteConfig" + } + }, + "switch": { + "description": "Switch the port belongs to.", + "allOf": [ + { + "$ref": "#/components/schemas/SwitchLocation" + } + ] + }, + "uplink_port_fec": { + "description": "Port forward error correction type.", + "allOf": [ + { + "$ref": "#/components/schemas/PortFec" + } + ] + }, + "uplink_port_speed": { + "description": "Port speed.", + "allOf": [ + { + "$ref": "#/components/schemas/PortSpeed" + } + ] + } + }, + "required": [ + "addresses", + "bgp_peers", + "port", + "routes", + "switch", + "uplink_port_fec", + "uplink_port_speed" + ] + }, + "PortFec": { + "description": "Switchport FEC options", + "type": "string", + "enum": [ + "firecode", + "none", + "rs" + ] + }, + "PortSpeed": { + "description": "Switchport Speed options", + "type": "string", + "enum": [ + "speed0_g", + "speed1_g", + "speed10_g", + "speed25_g", + "speed40_g", + "speed50_g", + "speed100_g", + "speed200_g", + "speed400_g" + ] + }, "PriorityDimension": { "description": "A dimension along with bundles can be sorted, to determine priority.", "oneOf": [ @@ -2275,6 +2566,64 @@ "minItems": 2, "maxItems": 2 }, + "RackNetworkConfig": { + "description": "Initial network configuration", + "type": "object", + "properties": { + "bgp": { + "description": "BGP configurations for connecting the rack to external networks", + "type": "array", + "items": { + "$ref": "#/components/schemas/BgpConfig" + } + }, + "infra_ip_first": { + "description": "First ip address to be used for configuring network infrastructure", + "type": "string", + "format": "ipv4" + }, + "infra_ip_last": { + "description": "Last ip address to be used for configuring network infrastructure", + "type": "string", + "format": "ipv4" + }, + "ports": { + "description": "Uplinks for connecting the rack to external networks", + "type": "array", + "items": { + "$ref": "#/components/schemas/PortConfigV1" + } + } + }, + "required": [ + "bgp", + "infra_ip_first", + "infra_ip_last", + "ports" + ] + }, + "RouteConfig": { + "type": "object", + "properties": { + "destination": { + "description": "The destination of the route.", + "allOf": [ + { + "$ref": "#/components/schemas/IpNetwork" + } + ] + }, + "nexthop": { + "description": "The nexthop/gateway address.", + "type": "string", + "format": "ip" + } + }, + "required": [ + "destination", + "nexthop" + ] + }, "SemverVersion": { "type": "string", "pattern": "^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$" @@ -2789,6 +3138,40 @@ "format": "uint8", "minimum": 0 }, + "SwitchLocation": { + "description": "Identifies switch physical location", + "oneOf": [ + { + "description": "Switch in upper slot", + "type": "string", + "enum": [ + "switch0" + ] + }, + { + "description": "Switch in lower slot", + "type": "string", + "enum": [ + "switch1" + ] + } + ] + }, + "SwitchPorts": { + "description": "A set of switch uplinks.", + "type": "object", + "properties": { + "uplinks": { + "type": "array", + "items": { + "$ref": "#/components/schemas/HostPortConfig" + } + } + }, + "required": [ + 
"uplinks" + ] + }, "TimeSync": { "type": "object", "properties": { diff --git a/openapi/wicketd.json b/openapi/wicketd.json index d67fc79f7a..1bd73d3fd4 100644 --- a/openapi/wicketd.json +++ b/openapi/wicketd.json @@ -820,6 +820,53 @@ } ] }, + "BgpConfig": { + "type": "object", + "properties": { + "asn": { + "description": "The autonomous system number for the BGP configuration.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "originate": { + "description": "The set of prefixes for the BGP router to originate.", + "type": "array", + "items": { + "$ref": "#/components/schemas/Ipv4Network" + } + } + }, + "required": [ + "asn", + "originate" + ] + }, + "BgpPeerConfig": { + "type": "object", + "properties": { + "addr": { + "description": "Address of the peer.", + "type": "string", + "format": "ipv4" + }, + "asn": { + "description": "Switch port the peer is reachable on.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "port": { + "description": "Switch port the peer is reachable on.", + "type": "string" + } + }, + "required": [ + "addr", + "asn", + "port" + ] + }, "BootstrapSledDescription": { "type": "object", "properties": { @@ -1321,6 +1368,26 @@ "installable" ] }, + "IpNetwork": { + "oneOf": [ + { + "title": "v4", + "allOf": [ + { + "$ref": "#/components/schemas/Ipv4Network" + } + ] + }, + { + "title": "v6", + "allOf": [ + { + "$ref": "#/components/schemas/Ipv6Network" + } + ] + } + ] + }, "IpRange": { "oneOf": [ { @@ -1363,6 +1430,10 @@ "last" ] }, + "Ipv6Network": { + "type": "string", + "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\")[/](12[0-8]|1[0-1][0-9]|[0-9]?[0-9])$" + }, "Ipv6Range": { "description": "A non-decreasing IPv6 address range, inclusive of both ends.\n\nThe first address must be less than or equal to the last address.", "type": "object", @@ -1386,6 +1457,69 @@ "description": "Password hashes must be in PHC (Password Hashing Competition) string format. Passwords must be hashed with Argon2id. 
Password hashes may be rejected if the parameters appear not to be secure enough.", "type": "string" }, + "PortConfigV1": { + "type": "object", + "properties": { + "addresses": { + "description": "This port's addresses.", + "type": "array", + "items": { + "$ref": "#/components/schemas/IpNetwork" + } + }, + "bgp_peers": { + "description": "BGP peers on this port", + "type": "array", + "items": { + "$ref": "#/components/schemas/BgpPeerConfig" + } + }, + "port": { + "description": "Nmae of the port this config applies to.", + "type": "string" + }, + "routes": { + "description": "The set of routes associated with this port.", + "type": "array", + "items": { + "$ref": "#/components/schemas/RouteConfig" + } + }, + "switch": { + "description": "Switch the port belongs to.", + "allOf": [ + { + "$ref": "#/components/schemas/SwitchLocation" + } + ] + }, + "uplink_port_fec": { + "description": "Port forward error correction type.", + "allOf": [ + { + "$ref": "#/components/schemas/PortFec" + } + ] + }, + "uplink_port_speed": { + "description": "Port speed.", + "allOf": [ + { + "$ref": "#/components/schemas/PortSpeed" + } + ] + } + }, + "required": [ + "addresses", + "bgp_peers", + "port", + "routes", + "switch", + "uplink_port_fec", + "uplink_port_speed" + ] + }, "PortFec": { "description": "Switchport FEC options", "type": "string", @@ -1976,6 +2110,13 @@ "description": "Initial network configuration", "type": "object", "properties": { + "bgp": { + "description": "BGP configurations for connecting the rack to external networks", + "type": "array", + "items": { + "$ref": "#/components/schemas/BgpConfig" + } + }, "infra_ip_first": { "description": "First ip address to be used for configuring network infrastructure", "type": "string", @@ -1986,18 +2127,19 @@ "type": "string", "format": "ipv4" }, - "uplinks": { + "ports": { "description": "Uplinks for connecting the rack to external networks", "type": "array", "items": { - "$ref": "#/components/schemas/UplinkConfig" + "$ref": "#/components/schemas/PortConfigV1" } } }, "required": [ + "bgp", "infra_ip_first", "infra_ip_last", - "uplinks" + "ports" ] }, "RackOperationStatus": { @@ -2314,6 +2456,28 @@ } ] }, + "RouteConfig": { + "type": "object", + "properties": { + "destination": { + "description": "The destination of the route.", + "allOf": [ + { + "$ref": "#/components/schemas/IpNetwork" + } + ] + }, + "nexthop": { + "description": "The nexthop/gateway address.", + "type": "string", + "format": "ip" + } + }, + "required": [ + "destination", + "nexthop" + ] + }, "SemverVersion": { "type": "string", "pattern": "^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$" @@ -4439,67 +4603,6 @@ } ] }, - "UplinkConfig": { - "type": "object", - "properties": { - "gateway_ip": { - "description": "Gateway address", - "type": "string", - "format": "ipv4" - }, - "switch": { - "description": "Switch to use for uplink", - "allOf": [ - { - "$ref": "#/components/schemas/SwitchLocation" - } - ] - }, - "uplink_cidr": { - "description": "IP Address and prefix (e.g., `192.168.0.1/16`) to apply to switchport (must be in infra_ip pool)", - "allOf": [ - { - "$ref": "#/components/schemas/Ipv4Network" - } - ] - }, - "uplink_port": { - "description": "Switchport to use for external connectivity", - "type": "string" - }, - "uplink_port_fec": { - "description": "Forward Error Correction setting for the uplink port", - "allOf": [ - { - "$ref": 
"#/components/schemas/PortFec" - } - ] - }, - "uplink_port_speed": { - "description": "Speed for the Switchport", - "allOf": [ - { - "$ref": "#/components/schemas/PortSpeed" - } - ] - }, - "uplink_vid": { - "nullable": true, - "description": "VLAN id to use for uplink", - "type": "integer", - "format": "uint16", - "minimum": 0 - } - }, - "required": [ - "gateway_ip", - "switch", - "uplink_cidr", - "uplink_port", - "uplink_port_fec", - "uplink_port_speed" - ] - }, "IgnitionCommand": { "description": "Ignition command.", "type": "string", diff --git a/package-manifest.toml b/package-manifest.toml index 7cf235c24a..5fc22d3405 100644 --- a/package-manifest.toml +++ b/package-manifest.toml @@ -412,7 +412,7 @@ source.commit = "901b710b6e5bd05a94a323693c2b971e7e7b240e" source.sha256 = "0f681cdbe7312f66fd3c99fe033b379e49c59fa4ad04d307f68b12514307e976" output.type = "zone" -[package.maghemite] +[package.mg-ddm-gz] service_name = "mg-ddm" # Note: unlike every other package, `maghemite` is not restricted to either the # "standard" or "trampoline" image; it is included in both. @@ -422,10 +422,10 @@ source.repo = "maghemite" # `tools/maghemite_openapi_version`. Failing to do so will cause a failure when # building `ddm-admin-client` (which will instruct you to update # `tools/maghemite_openapi_version`). -source.commit = "12703675393459e74139f8140e0b3c4c4f129d5d" +source.commit = "7b88dbcc7810f4fe9c82a7459862a7340d7e09ce" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/maghemite/image//maghemite.sha256.txt -source.sha256 = "e57fe791ee898d59890c5779fbd4dce598250fb6ed53832024212bcdeec0cc5b" +source.sha256 = "ca7dad9723aae0507546065e77748186d06ebcf29d6217d1d430ccfff0678c2c" output.type = "tarball" [package.mg-ddm] @@ -438,10 +438,25 @@ source.repo = "maghemite" # `tools/maghemite_openapi_version`. Failing to do so will cause a failure when # building `ddm-admin-client` (which will instruct you to update # `tools/maghemite_openapi_version`). -source.commit = "12703675393459e74139f8140e0b3c4c4f129d5d" +source.commit = "7b88dbcc7810f4fe9c82a7459862a7340d7e09ce" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/maghemite/image//mg-ddm.sha256.txt -source.sha256 = "3aa0d32b1d2b6be7091b9c665657296e924a86a00ca38756e9f45a1e629fd92b" +source.sha256 = "47b7b9b70fa2564ebd9fca4e591e1dfe7a48ef8dd413b678de08bbfc33b27d50" +output.type = "zone" +output.intermediate_only = true + +[package.mgd] +service_name = "mgd" +source.type = "prebuilt" +source.repo = "maghemite" +# Updating the commit hash here currently requires also updating +# `tools/maghemite_openapi_version`. Failing to do so will cause a failure when +# building `ddm-admin-client` (which will instruct you to update +# `tools/maghemite_openapi_version`). +source.commit = "7b88dbcc7810f4fe9c82a7459862a7340d7e09ce" +# The SHA256 digest is automatically posted to: +# https://buildomat.eng.oxide.computer/public/file/oxidecomputer/maghemite/image//mg-ddm.sha256.txt +source.sha256 = "82e965486e7acdbc4661258f1bc8d7751155f406f0973d55e61f6506d18984c2" output.type = "zone" output.intermediate_only = true @@ -458,8 +473,8 @@ only_for_targets.image = "standard" # 2. 
Copy dendrite.tar.gz from dendrite/out to omicron/out source.type = "prebuilt" source.repo = "dendrite" -source.commit = "7712104585266a2898da38c1345210ad26f9e71d" -source.sha256 = "486b0b016c0df06947810b90f3a3dd40423f0ee6f255ed079dc8e5618c9a7281" +source.commit = "c0cbc39b55fac54b95468304c497e00f3d3cf686" +source.sha256 = "3706e0e8230b7f76407ec0acea9020b9efc7d6c78b74c304102fd8e62cac6760" output.type = "zone" output.intermediate_only = true @@ -483,8 +498,8 @@ only_for_targets.image = "standard" # 2. Copy the output zone image from dendrite/out to omicron/out source.type = "prebuilt" source.repo = "dendrite" -source.commit = "7712104585266a2898da38c1345210ad26f9e71d" -source.sha256 = "76ff76d3526323c3fcbe2351cf9fbda4840e0dc11cd0eb6b71a3e0bd36c5e5e8" +source.commit = "c0cbc39b55fac54b95468304c497e00f3d3cf686" +source.sha256 = "f0847927f7d7197d9a5c4267a0bd0af609d18fd8d6d9b80755c370872c5297fa" output.type = "zone" output.intermediate_only = true @@ -501,8 +516,8 @@ only_for_targets.image = "standard" # 2. Copy dendrite.tar.gz from dendrite/out to omicron/out/dendrite-softnpu.tar.gz source.type = "prebuilt" source.repo = "dendrite" -source.commit = "7712104585266a2898da38c1345210ad26f9e71d" -source.sha256 = "b8e5c176070f9bc9ea0028de1999c77d66ea3438913664163975964effe4481b" +source.commit = "c0cbc39b55fac54b95468304c497e00f3d3cf686" +source.sha256 = "33b5897db1fe7b57d282531724ecd7bf74f5156f9aa23f10c6f0d9b54c38a987" output.type = "zone" output.intermediate_only = true @@ -534,6 +549,7 @@ source.packages = [ "wicketd.tar.gz", "wicket.tar.gz", "mg-ddm.tar.gz", + "mgd.tar.gz", "switch_zone_setup.tar.gz", "xcvradm.tar.gz" ] @@ -555,6 +571,7 @@ source.packages = [ "wicketd.tar.gz", "wicket.tar.gz", "mg-ddm.tar.gz", + "mgd.tar.gz", "switch_zone_setup.tar.gz", "sp-sim-stub.tar.gz" ] @@ -576,6 +593,7 @@ source.packages = [ "wicketd.tar.gz", "wicket.tar.gz", "mg-ddm.tar.gz", + "mgd.tar.gz", "switch_zone_setup.tar.gz", "sp-sim-softnpu.tar.gz" ] diff --git a/schema/crdb/8.0.0/up.sql b/schema/crdb/8.0.0/up.sql new file mode 100644 index 0000000000..c617a0b634 --- /dev/null +++ b/schema/crdb/8.0.0/up.sql @@ -0,0 +1 @@ +ALTER TYPE omicron.public.service_kind ADD VALUE IF NOT EXISTS 'mgd'; diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index 9f5f78326c..d074a0af53 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -198,7 +198,8 @@ CREATE TYPE IF NOT EXISTS omicron.public.service_kind AS ENUM ( 'nexus', 'ntp', 'oximeter', - 'tfport' + 'tfport', + 'mgd' ); CREATE TABLE IF NOT EXISTS omicron.public.service ( @@ -2539,7 +2540,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - ( TRUE, NOW(), NOW(), '7.0.0', NULL) + ( TRUE, NOW(), NOW(), '8.0.0', NULL) ON CONFLICT DO NOTHING; diff --git a/schema/rss-sled-plan.json b/schema/rss-sled-plan.json index 4a8b02d23d..d91f2080c1 100644 --- a/schema/rss-sled-plan.json +++ b/schema/rss-sled-plan.json @@ -91,6 +91,53 @@ } ] }, + "BgpConfig": { + "type": "object", + "required": [ + "asn", + "originate" + ], + "properties": { + "asn": { + "description": "The autonomous system number for the BGP configuration.", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "originate": { + "description": "The set of prefixes for the BGP router to originate.", + "type": "array", + "items": { + "$ref": "#/definitions/Ipv4Network" + } + } + } + }, + "BgpPeerConfig": { + "type": "object", + "required": [ + "addr", + "asn", + "port" + ], + "properties": { + "addr": { + "description": "Address of the peer.", + "type": 
"string", + "format": "ipv4" + }, + "asn": { + "description": "Switch port the peer is reachable on.", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "port": { + "description": "Switch port the peer is reachable on.", + "type": "string" + } + } + }, "BootstrapAddressDiscovery": { "oneOf": [ { @@ -149,6 +196,27 @@ } } }, + "IpNetwork": { + "oneOf": [ + { + "title": "v4", + "allOf": [ + { + "$ref": "#/definitions/Ipv4Network" + } + ] + }, + { + "title": "v6", + "allOf": [ + { + "$ref": "#/definitions/Ipv6Network" + } + ] + } + ], + "x-rust-type": "ipnetwork::IpNetwork" + }, "IpRange": { "oneOf": [ { @@ -201,6 +269,11 @@ "type": "string", "pattern": "^([fF][dD])[0-9a-fA-F]{2}:(([0-9a-fA-F]{1,4}:){6}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,6}:)([0-9a-fA-F]{1,4})?\\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8])$" }, + "Ipv6Network": { + "type": "string", + "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\")[/](12[0-8]|1[0-1][0-9]|[0-9]?[0-9])$", + "x-rust-type": "ipnetwork::Ipv6Network" + }, "Ipv6Range": { "description": "A non-decreasing IPv6 address range, inclusive of both ends.\n\nThe first address must be less than or equal to the last address.", "type": "object", @@ -244,6 +317,69 @@ "description": "Password hashes must be in PHC (Password Hashing Competition) string format. Passwords must be hashed with Argon2id. 
Password hashes may be rejected if the parameters appear not to be secure enough.", "type": "string" }, + "PortConfigV1": { + "type": "object", + "required": [ + "addresses", + "bgp_peers", + "port", + "routes", + "switch", + "uplink_port_fec", + "uplink_port_speed" + ], + "properties": { + "addresses": { + "description": "This port's addresses.", + "type": "array", + "items": { + "$ref": "#/definitions/IpNetwork" + } + }, + "bgp_peers": { + "description": "BGP peers on this port", + "type": "array", + "items": { + "$ref": "#/definitions/BgpPeerConfig" + } + }, + "port": { + "description": "Nmae of the port this config applies to.", + "type": "string" + }, + "routes": { + "description": "The set of routes associated with this port.", + "type": "array", + "items": { + "$ref": "#/definitions/RouteConfig" + } + }, + "switch": { + "description": "Switch the port belongs to.", + "allOf": [ + { + "$ref": "#/definitions/SwitchLocation" + } + ] + }, + "uplink_port_fec": { + "description": "Port forward error correction type.", + "allOf": [ + { + "$ref": "#/definitions/PortFec" + } + ] + }, + "uplink_port_speed": { + "description": "Port speed.", + "allOf": [ + { + "$ref": "#/definitions/PortSpeed" + } + ] + } + } + }, "PortFec": { "description": "Switchport FEC options", "type": "string", @@ -371,11 +507,19 @@ "description": "Initial network configuration", "type": "object", "required": [ + "bgp", "infra_ip_first", "infra_ip_last", - "uplinks" + "ports" ], "properties": { + "bgp": { + "description": "BGP configurations for connecting the rack to external networks", + "type": "array", + "items": { + "$ref": "#/definitions/BgpConfig" + } + }, "infra_ip_first": { "description": "First ip address to be used for configuring network infrastructure", "type": "string", @@ -386,11 +530,11 @@ "type": "string", "format": "ipv4" }, - "uplinks": { + "ports": { "description": "Uplinks for connecting the rack to external networks", "type": "array", "items": { - "$ref": "#/definitions/UplinkConfig" + "$ref": "#/definitions/PortConfigV1" } } } @@ -414,6 +558,28 @@ } } }, + "RouteConfig": { + "type": "object", + "required": [ + "destination", + "nexthop" + ], + "properties": { + "destination": { + "description": "The destination of the route.", + "allOf": [ + { + "$ref": "#/definitions/IpNetwork" + } + ] + }, + "nexthop": { + "description": "The nexthop/gateway address.", + "type": "string", + "format": "ip" + } + } + }, "StartSledAgentRequest": { "description": "Configuration information for launching a Sled Agent.", "type": "object", @@ -484,69 +650,6 @@ } ] }, - "UplinkConfig": { - "type": "object", - "required": [ - "gateway_ip", - "switch", - "uplink_cidr", - "uplink_port", - "uplink_port_fec", - "uplink_port_speed" - ], - "properties": { - "gateway_ip": { - "description": "Gateway address", - "type": "string", - "format": "ipv4" - }, - "switch": { - "description": "Switch to use for uplink", - "allOf": [ - { - "$ref": "#/definitions/SwitchLocation" - } - ] - }, - "uplink_cidr": { - "description": "IP Address and prefix (e.g., `192.168.0.1/16`) to apply to switchport (must be in infra_ip pool)", - "allOf": [ - { - "$ref": "#/definitions/Ipv4Network" - } - ] - }, - "uplink_port": { - "description": "Switchport to use for external connectivity", - "type": "string" - }, - "uplink_port_fec": { - "description": "Forward Error Correction setting for the uplink port", - "allOf": [ - { - "$ref": "#/definitions/PortFec" - } - ] - }, - "uplink_port_speed": { - "description": "Speed for the Switchport", - "allOf": [ - { 
- "$ref": "#/definitions/PortSpeed" - } - ] - }, - "uplink_vid": { - "description": "VLAN id to use for uplink", - "type": [ - "integer", - "null" - ], - "format": "uint16", - "minimum": 0.0 - } - } - }, "UserId": { "description": "Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. Names cannot be a UUID though they may contain a UUID.", "type": "string" diff --git a/sled-agent/src/bootstrap/early_networking.rs b/sled-agent/src/bootstrap/early_networking.rs index 61d4c84af3..7bb2506dc3 100644 --- a/sled-agent/src/bootstrap/early_networking.rs +++ b/sled-agent/src/bootstrap/early_networking.rs @@ -7,25 +7,26 @@ use anyhow::{anyhow, Context}; use bootstore::schemes::v0 as bootstore; use ddm_admin_client::{Client as DdmAdminClient, DdmError}; -use dpd_client::types::Ipv6Entry; +use dpd_client::types::{Ipv6Entry, RouteSettingsV6}; use dpd_client::types::{ LinkCreate, LinkId, LinkSettings, PortId, PortSettings, RouteSettingsV4, }; use dpd_client::Client as DpdClient; -use dpd_client::Ipv4Cidr; use futures::future; use gateway_client::Client as MgsClient; use internal_dns::resolver::{ResolveError, Resolver as DnsResolver}; use internal_dns::ServiceName; +use ipnetwork::IpNetwork; use omicron_common::address::{Ipv6Subnet, AZ_PREFIX, MGS_PORT}; use omicron_common::address::{DDMD_PORT, DENDRITE_PORT}; use omicron_common::api::internal::shared::{ - PortFec, PortSpeed, RackNetworkConfig, SwitchLocation, UplinkConfig, + PortConfigV1, PortFec, PortSpeed, RackNetworkConfig, SwitchLocation, }; use omicron_common::backoff::{ retry_notify, retry_policy_local, BackoffError, ExponentialBackoff, ExponentialBackoffBuilder, }; +use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use slog::Logger; use std::collections::{HashMap, HashSet}; @@ -107,11 +108,11 @@ impl<'a> EarlyNetworkSetup<'a> { resolver: &DnsResolver, config: &RackNetworkConfig, ) -> HashSet { - // Which switches have uplinks? + // Which switches have configured ports? let uplinked_switches = config - .uplinks + .ports .iter() - .map(|uplink_config| uplink_config.switch) + .map(|port_config| port_config.switch) .collect::>(); // If we have no uplinks, we have nothing to look up. @@ -342,7 +343,7 @@ impl<'a> EarlyNetworkSetup<'a> { &mut self, rack_network_config: &RackNetworkConfig, switch_zone_underlay_ip: Ipv6Addr, - ) -> Result, EarlyNetworkSetupError> { + ) -> Result, EarlyNetworkSetupError> { // First, we have to know which switch we are: ask MGS. info!( self.log, @@ -385,10 +386,10 @@ impl<'a> EarlyNetworkSetup<'a> { }; // We now know which switch we are: filter the uplinks to just ours. 
- let our_uplinks = rack_network_config - .uplinks + let our_ports = rack_network_config + .ports .iter() - .filter(|uplink| uplink.switch == switch_location) + .filter(|port| port.switch == switch_location) .cloned() .collect::>(); @@ -396,7 +397,7 @@ impl<'a> EarlyNetworkSetup<'a> { self.log, "Initializing {} Uplinks on {switch_location:?} at \ {switch_zone_underlay_ip}", - our_uplinks.len(), + our_ports.len(), ); let dpd = DpdClient::new( &format!("http://[{}]:{}", switch_zone_underlay_ip, DENDRITE_PORT), @@ -408,9 +409,9 @@ impl<'a> EarlyNetworkSetup<'a> { // configure uplink for each requested uplink in configuration that // matches our switch_location - for uplink_config in &our_uplinks { + for port_config in &our_ports { let (ipv6_entry, dpd_port_settings, port_id) = - self.build_uplink_config(uplink_config)?; + self.build_port_config(port_config)?; self.wait_for_dendrite(&dpd).await; @@ -446,14 +447,14 @@ impl<'a> EarlyNetworkSetup<'a> { ddmd_client.advertise_prefix(Ipv6Subnet::new(ipv6_entry.addr)); } - Ok(our_uplinks) + Ok(our_ports) } - fn build_uplink_config( + fn build_port_config( &self, - uplink_config: &UplinkConfig, + port_config: &PortConfigV1, ) -> Result<(Ipv6Entry, PortSettings, PortId), EarlyNetworkSetupError> { - info!(self.log, "Building Uplink Configuration"); + info!(self.log, "Building Port Configuration"); let ipv6_entry = Ipv6Entry { addr: BOUNDARY_SERVICES_ADDR.parse().map_err(|e| { EarlyNetworkSetupError::BadConfig(format!( @@ -469,41 +470,57 @@ impl<'a> EarlyNetworkSetup<'a> { v6_routes: HashMap::new(), }; let link_id = LinkId(0); + + let mut addrs = Vec::new(); + for a in &port_config.addresses { + addrs.push(a.ip()); + } + // TODO We're discarding the `uplink_cidr.prefix()` here and only using // the IP address; at some point we probably need to give the full CIDR // to dendrite? 
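[Editor's note] A minimal illustration of what the TODO above refers to, assuming the `ipnetwork` crate behaves as it is used elsewhere in this change: `IpNetwork::ip()` returns only the address, so the prefix length carried in `port_config.addresses` never reaches dendrite's link settings. The example value is the one used in config-rss.toml later in this diff.

    use ipnetwork::IpNetwork;

    fn main() {
        // An address as it appears in config-rss.toml, e.g. "192.168.1.30/32".
        let a: IpNetwork = "192.168.1.30/32".parse().unwrap();
        // The prefix length is still known at this point...
        assert_eq!(a.prefix(), 32);
        // ...but only the bare IP is pushed into `addrs` above and handed to
        // dendrite's LinkSettings.
        assert_eq!(a.ip().to_string(), "192.168.1.30");
    }
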
- let addr = IpAddr::V4(uplink_config.uplink_cidr.ip()); let link_settings = LinkSettings { // TODO Allow user to configure link properties // https://github.com/oxidecomputer/omicron/issues/3061 params: LinkCreate { autoneg: false, kr: false, - fec: convert_fec(&uplink_config.uplink_port_fec), - speed: convert_speed(&uplink_config.uplink_port_speed), + fec: convert_fec(&port_config.uplink_port_fec), + speed: convert_speed(&port_config.uplink_port_speed), }, - addrs: vec![addr], + //addrs: vec![addr], + addrs, }; dpd_port_settings.links.insert(link_id.to_string(), link_settings); - let port_id: PortId = - uplink_config.uplink_port.parse().map_err(|e| { - EarlyNetworkSetupError::BadConfig(format!( - concat!( - "could not use value provided to", - "rack_network_config.uplink_port as PortID: {}" - ), - e - )) - })?; - dpd_port_settings.v4_routes.insert( - Ipv4Cidr { prefix: "0.0.0.0".parse().unwrap(), prefix_len: 0 } - .to_string(), - RouteSettingsV4 { - link_id: link_id.0, - vid: uplink_config.uplink_vid, - nexthop: uplink_config.gateway_ip, - }, - ); + let port_id: PortId = port_config.port.parse().map_err(|e| { + EarlyNetworkSetupError::BadConfig(format!( + concat!( + "could not use value provided to", + "rack_network_config.uplink_port as PortID: {}" + ), + e + )) + })?; + + for r in &port_config.routes { + if let (IpNetwork::V4(dst), IpAddr::V4(nexthop)) = + (r.destination, r.nexthop) + { + dpd_port_settings.v4_routes.insert( + dst.to_string(), + RouteSettingsV4 { link_id: link_id.0, nexthop, vid: None }, + ); + } + if let (IpNetwork::V6(dst), IpAddr::V6(nexthop)) = + (r.destination, r.nexthop) + { + dpd_port_settings.v6_routes.insert( + dst.to_string(), + RouteSettingsV6 { link_id: link_id.0, nexthop, vid: None }, + ); + } + } + Ok((ipv6_entry, dpd_port_settings, port_id)) } @@ -552,9 +569,9 @@ fn retry_policy_switch_mapping() -> ExponentialBackoff { /// [`super::params::RackInitializeRequest`] necessary for use beyond RSS. This /// is just for the initial rack configuration and cold boot purposes. Updates /// will come from Nexus in the future. -#[derive(Clone, Debug, Deserialize, Serialize)] +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct EarlyNetworkConfig { - // The version of data. Always `1` when created from RSS. + // The version of data. pub generation: u64, pub rack_subnet: Ipv6Addr, diff --git a/sled-agent/src/bootstrap/maghemite.rs b/sled-agent/src/bootstrap/maghemite.rs index 1adc677b23..2cf0eaf190 100644 --- a/sled-agent/src/bootstrap/maghemite.rs +++ b/sled-agent/src/bootstrap/maghemite.rs @@ -8,7 +8,7 @@ use illumos_utils::addrobj::AddrObject; use slog::Logger; use thiserror::Error; -const SERVICE_FMRI: &str = "svc:/system/illumos/mg-ddm"; +const SERVICE_FMRI: &str = "svc:/oxide/mg-ddm"; const MANIFEST_PATH: &str = "/opt/oxide/mg-ddm/pkg/ddm/manifest.xml"; #[derive(Debug, Error)] diff --git a/sled-agent/src/bootstrap/secret_retriever.rs b/sled-agent/src/bootstrap/secret_retriever.rs index 5cae06310c..51d81fd364 100644 --- a/sled-agent/src/bootstrap/secret_retriever.rs +++ b/sled-agent/src/bootstrap/secret_retriever.rs @@ -14,9 +14,9 @@ use std::sync::OnceLock; static MAYBE_LRTQ_RETRIEVER: OnceLock = OnceLock::new(); -/// A [`key-manager::SecretRetriever`] that either uses a -/// [`LocalSecretRetriever`] or [`LrtqSecretRetriever`] under the hood depending -/// upon how many sleds are in the cluster at rack init time. 
+/// A [`key_manager::SecretRetriever`] that either uses a +/// [`SecretRetriever`] or [`LrtqSecretRetriever`] under the +/// hood depending upon how many sleds are in the cluster at rack init time. pub struct LrtqOrHardcodedSecretRetriever {} impl LrtqOrHardcodedSecretRetriever { diff --git a/sled-agent/src/bootstrap/server.rs b/sled-agent/src/bootstrap/server.rs index 0cbbf0678b..9ed3ad582d 100644 --- a/sled-agent/src/bootstrap/server.rs +++ b/sled-agent/src/bootstrap/server.rs @@ -528,7 +528,7 @@ fn start_dropshot_server( /// /// TODO-correctness Subsequent steps may assume all M.2s that will ever be /// present are present once we return from this function; see -/// https://github.com/oxidecomputer/omicron/issues/3815. +/// . async fn wait_for_boot_m2(storage_resources: &StorageResources, log: &Logger) { // Wait for at least the M.2 we booted from to show up. loop { diff --git a/sled-agent/src/http_entrypoints.rs b/sled-agent/src/http_entrypoints.rs index 2ab8273e39..4fbd8a3ba0 100644 --- a/sled-agent/src/http_entrypoints.rs +++ b/sled-agent/src/http_entrypoints.rs @@ -5,6 +5,7 @@ //! HTTP entrypoint functions for the sled agent's exposed API use super::sled_agent::SledAgent; +use crate::bootstrap::early_networking::EarlyNetworkConfig; use crate::params::{ CleanupContextUpdate, DiskEnsureBody, InstanceEnsureBody, InstancePutMigrationIdsBody, InstancePutStateBody, @@ -14,6 +15,7 @@ use crate::params::{ }; use crate::sled_agent::Error as SledAgentError; use crate::zone_bundle; +use bootstore::schemes::v0::NetworkConfig; use camino::Utf8PathBuf; use dropshot::{ endpoint, ApiDescription, FreeformBody, HttpError, HttpResponseCreated, @@ -24,9 +26,10 @@ use illumos_utils::opte::params::{ DeleteVirtualNetworkInterfaceHost, SetVirtualNetworkInterfaceHost, }; use omicron_common::api::external::Error; -use omicron_common::api::internal::nexus::DiskRuntimeState; -use omicron_common::api::internal::nexus::SledInstanceState; -use omicron_common::api::internal::nexus::UpdateArtifactId; +use omicron_common::api::internal::nexus::{ + DiskRuntimeState, SledInstanceState, UpdateArtifactId, +}; +use omicron_common::api::internal::shared::SwitchPorts; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use std::collections::BTreeMap; @@ -62,6 +65,9 @@ pub fn api() -> SledApiDescription { api.register(update_artifact)?; api.register(vpc_firewall_rules_put)?; api.register(zpools_get)?; + api.register(uplink_ensure)?; + api.register(read_network_bootstore_config)?; + api.register(write_network_bootstore_config)?; Ok(()) } @@ -630,3 +636,73 @@ async fn timesync_get( let sa = rqctx.context(); Ok(HttpResponseOk(sa.timesync_get().await.map_err(|e| Error::from(e))?)) } + +#[endpoint { + method = POST, + path = "/switch-ports", +}] +async fn uplink_ensure( + rqctx: RequestContext, + body: TypedBody, +) -> Result { + let sa = rqctx.context(); + sa.ensure_scrimlet_host_ports(body.into_inner().uplinks).await?; + Ok(HttpResponseUpdatedNoContent()) +} + +#[endpoint { + method = GET, + path = "/network-bootstore-config", +}] +async fn read_network_bootstore_config( + rqctx: RequestContext, +) -> Result, HttpError> { + let sa = rqctx.context(); + let bs = sa.bootstore(); + + let config = bs.get_network_config().await.map_err(|e| { + HttpError::for_internal_error(format!("failed to get bootstore: {e}")) + })?; + + let config = match config { + Some(config) => EarlyNetworkConfig::try_from(config).map_err(|e| { + HttpError::for_internal_error(format!( + "deserialize early network config: {e}" + )) + })?, + None 
=> { + return Err(HttpError::for_unavail( + None, + "early network config does not exist yet".into(), + )); + } + }; + + Ok(HttpResponseOk(config)) +} + +#[endpoint { + method = PUT, + path = "/network-bootstore-config", +}] +async fn write_network_bootstore_config( + rqctx: RequestContext, + body: TypedBody, +) -> Result { + let sa = rqctx.context(); + let bs = sa.bootstore(); + let config = body.into_inner(); + + //TODO is the bootstore checking the generation number, do we need to do + // that here? + + bs.update_network_config(NetworkConfig::from(config)).await.map_err( + |e| { + HttpError::for_internal_error(format!( + "failed to write updated config to boot store: {e}" + )) + }, + )?; + + Ok(HttpResponseUpdatedNoContent()) +} diff --git a/sled-agent/src/params.rs b/sled-agent/src/params.rs index 84ec1ef0dc..09a674758a 100644 --- a/sled-agent/src/params.rs +++ b/sled-agent/src/params.rs @@ -349,10 +349,12 @@ pub enum ServiceType { #[serde(skip)] Uplink, #[serde(skip)] - Maghemite { + MgDdm { mode: String, }, #[serde(skip)] + Mgd, + #[serde(skip)] SpSim, CruciblePantry { address: SocketAddrV6, @@ -402,7 +404,8 @@ impl std::fmt::Display for ServiceType { ServiceType::CruciblePantry { .. } => write!(f, "crucible/pantry"), ServiceType::BoundaryNtp { .. } | ServiceType::InternalNtp { .. } => write!(f, "ntp"), - ServiceType::Maghemite { .. } => write!(f, "mg-ddm"), + ServiceType::MgDdm { .. } => write!(f, "mg-ddm"), + ServiceType::Mgd => write!(f, "mgd"), ServiceType::SpSim => write!(f, "sp-sim"), ServiceType::Clickhouse { .. } => write!(f, "clickhouse"), ServiceType::ClickhouseKeeper { .. } => { @@ -419,13 +422,7 @@ impl crate::smf_helper::Service for ServiceType { self.to_string() } fn smf_name(&self) -> String { - match self { - // NOTE: This style of service-naming is deprecated - ServiceType::Maghemite { .. } => { - format!("svc:/system/illumos/{}", self.service_name()) - } - _ => format!("svc:/oxide/{}", self.service_name()), - } + format!("svc:/oxide/{}", self.service_name()) } fn should_import(&self) -> bool { true @@ -525,7 +522,8 @@ impl TryFrom for sled_agent_client::types::ServiceType { | St::Dendrite { .. } | St::Tfport { .. } | St::Uplink - | St::Maghemite { .. } => Err(AutonomousServiceOnlyError), + | St::Mgd + | St::MgDdm { .. } => Err(AutonomousServiceOnlyError), } } } @@ -824,7 +822,8 @@ impl ServiceZoneRequest { | ServiceType::SpSim | ServiceType::Wicketd { .. } | ServiceType::Dendrite { .. } - | ServiceType::Maghemite { .. } + | ServiceType::MgDdm { .. } + | ServiceType::Mgd | ServiceType::Tfport { .. 
} | ServiceType::Uplink => { return Err(AutonomousServiceOnlyError); diff --git a/sled-agent/src/rack_setup/plan/service.rs b/sled-agent/src/rack_setup/plan/service.rs index 2183aa7b63..4f864cfbcb 100644 --- a/sled-agent/src/rack_setup/plan/service.rs +++ b/sled-agent/src/rack_setup/plan/service.rs @@ -19,10 +19,11 @@ use internal_dns::{ServiceName, DNS_ZONE}; use omicron_common::address::{ get_sled_address, get_switch_zone_address, Ipv6Subnet, ReservedRackSubnet, DENDRITE_PORT, DNS_HTTP_PORT, DNS_PORT, DNS_REDUNDANCY, MAX_DNS_REDUNDANCY, - MGS_PORT, NTP_PORT, NUM_SOURCE_NAT_PORTS, RSS_RESERVED_ADDRESSES, + MGD_PORT, MGS_PORT, NTP_PORT, NUM_SOURCE_NAT_PORTS, RSS_RESERVED_ADDRESSES, SLED_PREFIX, }; use omicron_common::api::external::{MacAddr, Vni}; +use omicron_common::api::internal::shared::SwitchLocation; use omicron_common::api::internal::shared::{ NetworkInterface, NetworkInterfaceKind, SourceNatConfig, }; @@ -45,8 +46,17 @@ use uuid::Uuid; // The number of boundary NTP servers to create from RSS. const BOUNDARY_NTP_COUNT: usize = 2; +//XXX CHANGE BACK TO 3 +//XXX CHANGE BACK TO 3 +//XXX CHANGE BACK TO 3 +//XXX CHANGE BACK TO 3 +//XXX CHANGE BACK TO 3 +//XXX CHANGE BACK TO 3 +//XXX CHANGE BACK TO 3 +//XXX CHANGE BACK TO 3 +//XXX CHANGE BACK TO 3 // The number of Nexus instances to create from RSS. -const NEXUS_COUNT: usize = 3; +const NEXUS_COUNT: usize = 1; // The number of CRDB instances to create from RSS. const CRDB_COUNT: usize = 5; @@ -276,7 +286,7 @@ impl Plan { "No scrimlets observed".to_string(), )); } - for sled in scrimlets { + for (i, sled) in scrimlets.iter().enumerate() { let address = get_switch_zone_address(sled.subnet); let zone = dns_builder.host_dendrite(sled.sled_id, address).unwrap(); @@ -294,6 +304,18 @@ impl Plan { MGS_PORT, ) .unwrap(); + dns_builder + .service_backend_zone(ServiceName::Mgd, &zone, MGD_PORT) + .unwrap(); + + // TODO only works for single rack + let sled_address = get_sled_address(sled.subnet); + let switch_location = if i == 0 { + SwitchLocation::Switch0 + } else { + SwitchLocation::Switch1 + }; + dns_builder.host_scrimlet(switch_location, sled_address).unwrap(); } // We'll stripe most services across all available Sleds, round-robin diff --git a/sled-agent/src/rack_setup/service.rs b/sled-agent/src/rack_setup/service.rs index 805c889295..212a554c47 100644 --- a/sled-agent/src/rack_setup/service.rs +++ b/sled-agent/src/rack_setup/service.rs @@ -578,14 +578,21 @@ impl ServiceInner { let value = NexusTypes::RackNetworkConfig { infra_ip_first: config.infra_ip_first, infra_ip_last: config.infra_ip_last, - uplinks: config - .uplinks + ports: config + .ports .iter() - .map(|config| NexusTypes::UplinkConfig { - gateway_ip: config.gateway_ip, + .map(|config| NexusTypes::PortConfigV1 { + port: config.port.clone(), + routes: config + .routes + .iter() + .map(|r| NexusTypes::RouteConfig { + destination: r.destination, + nexthop: r.nexthop, + }) + .collect(), + addresses: config.addresses.clone(), switch: config.switch.into(), - uplink_cidr: config.uplink_cidr, - uplink_port: config.uplink_port.clone(), uplink_port_speed: config .uplink_port_speed .clone() @@ -594,7 +601,23 @@ impl ServiceInner { .uplink_port_fec .clone() .into(), - uplink_vid: config.uplink_vid, + bgp_peers: config + .bgp_peers + .iter() + .map(|b| NexusTypes::BgpPeerConfig { + addr: b.addr, + asn: b.asn, + port: b.port.clone(), + }) + .collect(), + }) + .collect(), + bgp: config + .bgp + .iter() + .map(|config| NexusTypes::BgpConfig { + asn: config.asn, + originate: 
config.originate.clone(), }) .collect(), }; @@ -877,6 +900,8 @@ impl ServiceInner { rack_network_config: config.rack_network_config.clone(), }; info!(self.log, "Writing Rack Network Configuration to bootstore"); + //NOTE(ry) this is where the early network config gets saved. + //NOTE(ry) generation number must be bumped. Nexus owns generation number. bootstore.update_network_config(early_network_config.into()).await?; // Forward the sled initialization requests to our sled-agent. diff --git a/sled-agent/src/services.rs b/sled-agent/src/services.rs index 60f0965612..8197668448 100644 --- a/sled-agent/src/services.rs +++ b/sled-agent/src/services.rs @@ -76,7 +76,9 @@ use omicron_common::address::RACK_PREFIX; use omicron_common::address::SLED_PREFIX; use omicron_common::address::WICKETD_PORT; use omicron_common::api::external::Generation; -use omicron_common::api::internal::shared::RackNetworkConfig; +use omicron_common::api::internal::shared::{ + HostPortConfig, RackNetworkConfig, +}; use omicron_common::backoff::{ retry_notify, retry_policy_internal_service_aggressive, retry_policy_local, BackoffError, @@ -95,8 +97,8 @@ use sled_hardware::underlay::BOOTSTRAP_PREFIX; use sled_hardware::Baseboard; use sled_hardware::SledMode; use slog::Logger; +use std::collections::BTreeMap; use std::collections::HashSet; -use std::collections::{BTreeMap, HashMap}; use std::iter; use std::iter::FromIterator; use std::net::{IpAddr, Ipv6Addr, SocketAddr}; @@ -757,7 +759,7 @@ impl ServiceManager { } } } - ServiceType::Maghemite { .. } => { + ServiceType::MgDdm { .. } => { // If on a non-gimlet, sled-agent can be configured to map // links into the switch zone. Validate those links here. for link in &self.inner.switch_zone_maghemite_links { @@ -1928,8 +1930,13 @@ impl ServiceManager { // Nothing to do here - this service is special and // configured in `ensure_switch_zone_uplinks_configured` } - ServiceType::Maghemite { mode } => { - info!(self.inner.log, "Setting up Maghemite service"); + ServiceType::Mgd => { + info!(self.inner.log, "Setting up mgd service"); + smfh.setprop("config/admin_host", "::")?; + smfh.refresh()?; + } + ServiceType::MgDdm { mode } => { + info!(self.inner.log, "Setting up mg-ddm service"); smfh.setprop("config/mode", &mode)?; smfh.setprop("config/admin_host", "::")?; @@ -1990,8 +1997,8 @@ impl ServiceManager { )?; if is_gimlet { - // Maghemite for a scrimlet needs to be configured to - // talk to dendrite + // Ddm for a scrimlet needs to be configured to talk to + // dendrite smfh.setprop("config/dpd_host", "[::1]")?; smfh.setprop("config/dpd_port", DENDRITE_PORT)?; } @@ -2480,7 +2487,8 @@ impl ServiceManager { ServiceType::Tfport { pkt_source: "tfpkt0".to_string() }, ServiceType::Uplink, ServiceType::Wicketd { baseboard }, - ServiceType::Maghemite { mode: "transit".to_string() }, + ServiceType::Mgd, + ServiceType::MgDdm { mode: "transit".to_string() }, ] } @@ -2503,7 +2511,8 @@ impl ServiceManager { ServiceType::ManagementGatewayService, ServiceType::Uplink, ServiceType::Wicketd { baseboard }, - ServiceType::Maghemite { mode: "transit".to_string() }, + ServiceType::Mgd, + ServiceType::MgDdm { mode: "transit".to_string() }, ServiceType::Tfport { pkt_source: "tfpkt0".to_string() }, ServiceType::SpSim, ] @@ -2558,10 +2567,20 @@ impl ServiceManager { let log = &self.inner.log; // Configure uplinks via DPD in our switch zone. 
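[Editor's note] The hunk below reshapes this step: instead of iterating uplink configs directly, the switch-zone code now converts each `PortConfigV1` returned by `EarlyNetworkSetup::init_switch_config` into a host-side `HostPortConfig` and passes those to `ensure_scrimlet_host_ports`, which publishes them as SMF properties. A rough sketch of that narrowing, using stand-in struct definitions (the real `PortConfigV1`/`HostPortConfig` types and their `From` impl live in omicron-common and are not shown in this diff):

    // Stand-in types; the real structs carry more fields (routes, BGP peers,
    // FEC, speed) that the switch zone's SMF "uplinks" property group does
    // not need.
    struct PortConfigV1Sketch {
        port: String,
        addresses: Vec<String>, // e.g. "192.168.1.30/32"
    }

    struct HostPortConfigSketch {
        port: String,
        addrs: Vec<String>,
    }

    impl From<PortConfigV1Sketch> for HostPortConfigSketch {
        fn from(p: PortConfigV1Sketch) -> Self {
            // Keep only what uplink SMF configuration needs: the port name
            // and the addresses to apply to it.
            HostPortConfigSketch { port: p.port, addrs: p.addresses }
        }
    }

    fn main() {
        let ports = vec![PortConfigV1Sketch {
            port: "qsfp0".to_string(),
            addresses: vec!["192.168.1.30/32".to_string()],
        }];
        let host_ports: Vec<HostPortConfigSketch> =
            ports.into_iter().map(From::from).collect();
        for hp in &host_ports {
            for addr in &hp.addrs {
                // Mirrors the property naming used in the hunk below:
                // uplinks/<port>_0 = <address>
                println!("uplinks/{}_0 = {}", hp.port, addr);
            }
        }
    }
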
- let our_uplinks = EarlyNetworkSetup::new(log) + let our_ports = EarlyNetworkSetup::new(log) .init_switch_config(rack_network_config, switch_zone_ip) - .await?; + .await? + .into_iter() + .map(From::from) + .collect(); + self.ensure_scrimlet_host_ports(our_ports).await + } + + pub async fn ensure_scrimlet_host_ports( + &self, + our_ports: Vec, + ) -> Result<(), Error> { // We expect the switch zone to be running, as we're called immediately // after `ensure_zone()` above and we just successfully configured // uplinks via DPD running in our switch zone. If somehow we're in any @@ -2592,22 +2611,14 @@ impl ServiceManager { smfh.delpropgroup("uplinks")?; smfh.addpropgroup("uplinks", "application")?; - // When naming the uplink ports, we need to append `_0`, `_1`, etc., for - // each use of any given port. We use a hashmap of counters of port name - // -> number of uplinks to correctly supply that suffix. - let mut port_count = HashMap::new(); - for uplink_config in &our_uplinks { - let this_port_count: &mut usize = - port_count.entry(&uplink_config.uplink_port).or_insert(0); - smfh.addpropvalue_type( - &format!( - "uplinks/{}_{}", - uplink_config.uplink_port, *this_port_count - ), - &uplink_config.uplink_cidr.to_string(), - "astring", - )?; - *this_port_count += 1; + for port_config in &our_ports { + for addr in &port_config.addrs { + smfh.addpropvalue_type( + &format!("uplinks/{}_0", port_config.port,), + &addr.to_string(), + "astring", + )?; + } } smfh.refresh()?; @@ -2821,7 +2832,7 @@ impl ServiceManager { // Only configured in // `ensure_switch_zone_uplinks_configured` } - ServiceType::Maghemite { mode } => { + ServiceType::MgDdm { mode } => { smfh.delpropvalue("config/mode", "*")?; smfh.addpropvalue("config/mode", &mode)?; smfh.refresh()?; diff --git a/sled-agent/src/sim/http_entrypoints.rs b/sled-agent/src/sim/http_entrypoints.rs index 08f6c7d10b..fdbaa84cc9 100644 --- a/sled-agent/src/sim/http_entrypoints.rs +++ b/sled-agent/src/sim/http_entrypoints.rs @@ -4,6 +4,7 @@ //! 
HTTP entrypoint functions for the sled agent's exposed API +use crate::bootstrap::early_networking::EarlyNetworkConfig; use crate::params::{ DiskEnsureBody, InstanceEnsureBody, InstancePutMigrationIdsBody, InstancePutStateBody, InstancePutStateResponse, InstanceUnregisterResponse, @@ -22,8 +23,11 @@ use illumos_utils::opte::params::SetVirtualNetworkInterfaceHost; use omicron_common::api::internal::nexus::DiskRuntimeState; use omicron_common::api::internal::nexus::SledInstanceState; use omicron_common::api::internal::nexus::UpdateArtifactId; +use omicron_common::api::internal::shared::RackNetworkConfig; +use omicron_common::api::internal::shared::SwitchPorts; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; +use std::net::{Ipv4Addr, Ipv6Addr}; use std::sync::Arc; use uuid::Uuid; @@ -46,6 +50,9 @@ pub fn api() -> SledApiDescription { api.register(vpc_firewall_rules_put)?; api.register(set_v2p)?; api.register(del_v2p)?; + api.register(uplink_ensure)?; + api.register(read_network_bootstore_config)?; + api.register(write_network_bootstore_config)?; Ok(()) } @@ -327,3 +334,46 @@ async fn del_v2p( Ok(HttpResponseUpdatedNoContent()) } + +#[endpoint { + method = POST, + path = "/switch-ports", +}] +async fn uplink_ensure( + _rqctx: RequestContext>, + _body: TypedBody, +) -> Result { + Ok(HttpResponseUpdatedNoContent()) +} + +#[endpoint { + method = GET, + path = "/network-bootstore-config", +}] +async fn read_network_bootstore_config( + _rqctx: RequestContext>, +) -> Result, HttpError> { + let config = EarlyNetworkConfig { + generation: 0, + rack_subnet: Ipv6Addr::UNSPECIFIED, + ntp_servers: Vec::new(), + rack_network_config: Some(RackNetworkConfig { + infra_ip_first: Ipv4Addr::UNSPECIFIED, + infra_ip_last: Ipv4Addr::UNSPECIFIED, + ports: Vec::new(), + bgp: Vec::new(), + }), + }; + Ok(HttpResponseOk(config)) +} + +#[endpoint { + method = PUT, + path = "/network-bootstore-config", +}] +async fn write_network_bootstore_config( + _rqctx: RequestContext>, + _body: TypedBody, +) -> Result { + Ok(HttpResponseUpdatedNoContent()) +} diff --git a/sled-agent/src/sled_agent.rs b/sled-agent/src/sled_agent.rs index b6f910220e..d8d43d1895 100644 --- a/sled-agent/src/sled_agent.rs +++ b/sled-agent/src/sled_agent.rs @@ -38,7 +38,9 @@ use omicron_common::api::external::Vni; use omicron_common::api::internal::nexus::{ SledInstanceState, VmmRuntimeState, }; -use omicron_common::api::internal::shared::RackNetworkConfig; +use omicron_common::api::internal::shared::{ + HostPortConfig, RackNetworkConfig, +}; use omicron_common::api::{ internal::nexus::DiskRuntimeState, internal::nexus::InstanceRuntimeState, internal::nexus::UpdateArtifactId, @@ -237,6 +239,9 @@ struct SledAgentInner { // Object managing zone bundles. zone_bundler: zone_bundle::ZoneBundler, + + // A handle to the bootstore. + bootstore: bootstore::NodeHandle, } impl SledAgentInner { @@ -458,6 +463,7 @@ impl SledAgent { nexus_request_queue: NexusRequestQueue::new(), rack_network_config, zone_bundler, + bootstore: bootstore.clone(), }), log: log.clone(), }; @@ -769,7 +775,7 @@ impl SledAgent { /// Idempotently ensures that a given instance is registered with this sled, /// i.e., that it can be addressed by future calls to - /// [`instance_ensure_state`]. + /// [`Self::instance_ensure_state`]. 
pub async fn instance_ensure_registered( &self, instance_id: Uuid, @@ -918,4 +924,19 @@ impl SledAgent { pub async fn timesync_get(&self) -> Result { self.inner.services.timesync_get().await.map_err(Error::from) } + + pub async fn ensure_scrimlet_host_ports( + &self, + uplinks: Vec, + ) -> Result<(), Error> { + self.inner + .services + .ensure_scrimlet_host_ports(uplinks) + .await + .map_err(Error::from) + } + + pub fn bootstore(&self) -> bootstore::NodeHandle { + self.inner.bootstore.clone() + } } diff --git a/smf/sled-agent/gimlet-standalone/config-rss.toml b/smf/sled-agent/gimlet-standalone/config-rss.toml index c6fbab49de..636e846b7a 100644 --- a/smf/sled-agent/gimlet-standalone/config-rss.toml +++ b/smf/sled-agent/gimlet-standalone/config-rss.toml @@ -94,21 +94,27 @@ last = "192.168.1.29" infra_ip_first = "192.168.1.30" infra_ip_last = "192.168.1.30" +# Configurations for BGP routers to run on the scrimlets. +bgp = [] + # You can configure multiple uplinks by repeating the following stanza -[[rack_network_config.uplinks]] -# The gateway IP for the rack's external network -gateway_ip = "192.168.1.199" +[[rack_network_config.ports]] +# Routes associated with this port. +routes = [{nexthop = "192.168.1.199", destination = "0.0.0.0/0"}] +# Addresses associated with this port. +addresses = ["192.168.1.30/32"] # Name of the uplink port. This should always be "qsfp0" when using softnpu. -uplink_port = "qsfp0" +port = "qsfp0" +# The speed of this port. uplink_port_speed = "40G" +# The forward error correction mode for this port. uplink_port_fec="none" -# For softnpu, an address within the "infra" block above that will be used for -# the softnpu uplink port. You can just pick the first address in that pool. -uplink_cidr = "192.168.1.30/32" # Switch to use for the uplink. For single-rack deployments this can be # "switch0" (upper slot) or "switch1" (lower slot). For single-node softnpu # and dendrite stub environments, use "switch0" switch = "switch0" +# Neighbors we expect to peer with over BGP on this port. +bgp_peers = [] # Configuration for the initial Silo, user, and password. # diff --git a/smf/sled-agent/non-gimlet/config-rss.toml b/smf/sled-agent/non-gimlet/config-rss.toml index 8a009dd687..bbc42b3375 100644 --- a/smf/sled-agent/non-gimlet/config-rss.toml +++ b/smf/sled-agent/non-gimlet/config-rss.toml @@ -94,21 +94,27 @@ last = "192.168.1.29" infra_ip_first = "192.168.1.30" infra_ip_last = "192.168.1.30" +# Configurations for BGP routers to run on the scrimlets. +bgp = [] + # You can configure multiple uplinks by repeating the following stanza -[[rack_network_config.uplinks]] -# The gateway IP for the rack's external network -gateway_ip = "192.168.1.199" +[[rack_network_config.ports]] +# Routes associated with this port. +routes = [{nexthop = "192.168.1.199", destination = "0.0.0.0/0"}] +# Addresses associated with this port. +addresses = ["192.168.1.30/32"] # Name of the uplink port. This should always be "qsfp0" when using softnpu. -uplink_port = "qsfp0" +port = "qsfp0" +# The speed of this port. uplink_port_speed = "40G" +# The forward error correction mode for this port. uplink_port_fec="none" -# For softnpu, an address within the "infra" block above that will be used for -# the softnpu uplink port. You can just pick the first address in that pool. -uplink_cidr = "192.168.1.30/32" # Switch to use for the uplink. For single-rack deployments this can be # "switch0" (upper slot) or "switch1" (lower slot). 
For single-node softnpu # and dendrite stub environments, use "switch0" switch = "switch0" +# Neighbors we expect to peer with over BGP on this port. +bgp_peers = [] # Configuration for the initial Silo, user, and password. # diff --git a/test-utils/src/dev/dendrite.rs b/test-utils/src/dev/dendrite.rs index 520bf12401..8938595aa2 100644 --- a/test-utils/src/dev/dendrite.rs +++ b/test-utils/src/dev/dendrite.rs @@ -19,7 +19,7 @@ use tokio::{ /// Specifies the amount of time we will wait for `dpd` to launch, /// which is currently confirmed by watching `dpd`'s log output /// for a message specifying the address and port `dpd` is listening on. -pub const DENDRITE_TIMEOUT: Duration = Duration::new(5, 0); +pub const DENDRITE_TIMEOUT: Duration = Duration::new(30, 0); /// Represents a running instance of the Dendrite dataplane daemon (dpd). pub struct DendriteInstance { diff --git a/test-utils/src/dev/maghemite.rs b/test-utils/src/dev/maghemite.rs new file mode 100644 index 0000000000..fa1f353896 --- /dev/null +++ b/test-utils/src/dev/maghemite.rs @@ -0,0 +1,155 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Tools for managing Maghemite during development + +use std::path::{Path, PathBuf}; +use std::process::Stdio; +use std::time::Duration; + +use anyhow::Context; +use tempfile::TempDir; +use tokio::{ + fs::File, + io::{AsyncBufReadExt, BufReader}, + time::{sleep, Instant}, +}; + +/// Specifies the amount of time we will wait for `mgd` to launch, +/// which is currently confirmed by watching `mgd`'s log output +/// for a message specifying the address and port `mgd` is listening on. +pub const MGD_TIMEOUT: Duration = Duration::new(5, 0); + +pub struct MgdInstance { + /// Port number the mgd instance is listening on. This can be provided + /// manually, or dynamically determined if a value of 0 is provided. + pub port: u16, + /// Arguments provided to the `mgd` cli command. + pub args: Vec, + /// Child process spawned by running `mgd` + pub child: Option, + /// Temporary directory where logging output and other files generated by + /// `mgd` are stored. 
+ pub data_dir: Option, +} + +impl MgdInstance { + pub async fn start(mut port: u16) -> Result { + let temp_dir = TempDir::new()?; + + let args = vec![ + "run".to_string(), + "--admin-addr".into(), + "::1".into(), + "--admin-port".into(), + port.to_string(), + "--no-bgp-dispatcher".into(), + "--data-dir".into(), + temp_dir.path().display().to_string(), + ]; + + let child = tokio::process::Command::new("mgd") + .args(&args) + .stdin(Stdio::null()) + .stdout(Stdio::from(redirect_file(temp_dir.path(), "mgd_stdout")?)) + .stderr(Stdio::from(redirect_file(temp_dir.path(), "mgd_stderr")?)) + .spawn() + .with_context(|| { + format!("failed to spawn `mgd` (with args: {:?})", &args) + })?; + + let child = Some(child); + + let temp_dir = temp_dir.into_path(); + if port == 0 { + port = discover_port( + temp_dir.join("mgd_stdout").display().to_string(), + ) + .await + .with_context(|| { + format!( + "failed to discover mgd port from files in {}", + temp_dir.display() + ) + })?; + } + + Ok(Self { port, args, child, data_dir: Some(temp_dir) }) + } + + pub async fn cleanup(&mut self) -> Result<(), anyhow::Error> { + if let Some(mut child) = self.child.take() { + child.start_kill().context("Sending SIGKILL to child")?; + child.wait().await.context("waiting for child")?; + } + if let Some(dir) = self.data_dir.take() { + std::fs::remove_dir_all(&dir).with_context(|| { + format!("cleaning up temporary directory {}", dir.display()) + })?; + } + Ok(()) + } +} + +impl Drop for MgdInstance { + fn drop(&mut self) { + if self.child.is_some() || self.data_dir.is_some() { + eprintln!( + "WARN: dropped MgdInstance without cleaning it up first \ + (there may still be a child process running and a \ + temporary directory leaked)" + ); + if let Some(child) = self.child.as_mut() { + let _ = child.start_kill(); + } + if let Some(path) = self.data_dir.take() { + eprintln!( + "WARN: mgd temporary directory leaked: {}", + path.display() + ); + } + } + } +} + +fn redirect_file( + temp_dir_path: &Path, + label: &str, +) -> Result { + let out_path = temp_dir_path.join(label); + std::fs::OpenOptions::new() + .write(true) + .create_new(true) + .open(&out_path) + .with_context(|| format!("open \"{}\"", out_path.display())) +} + +async fn discover_port(logfile: String) -> Result { + let timeout = Instant::now() + MGD_TIMEOUT; + tokio::time::timeout_at(timeout, find_mgd_port_in_log(logfile)) + .await + .context("time out while discovering mgd port number")? +} + +async fn find_mgd_port_in_log(logfile: String) -> Result { + let re = regex::Regex::new(r#""local_addr":"\[::1\]:?([0-9]+)""#).unwrap(); + let reader = BufReader::new(File::open(logfile).await?); + let mut lines = reader.lines(); + loop { + match lines.next_line().await? 
{ + Some(line) => { + if let Some(cap) = re.captures(&line) { + // unwrap on get(1) should be ok, since captures() returns + // `None` if there are no matches found + let port = cap.get(1).unwrap(); + let result = port.as_str().parse::()?; + return Ok(result); + } + } + None => { + sleep(Duration::from_millis(10)).await; + } + } + } +} diff --git a/test-utils/src/dev/mod.rs b/test-utils/src/dev/mod.rs index dbd66fe1f8..e29da9c51e 100644 --- a/test-utils/src/dev/mod.rs +++ b/test-utils/src/dev/mod.rs @@ -8,6 +8,7 @@ pub mod clickhouse; pub mod db; pub mod dendrite; +pub mod maghemite; pub mod poll; #[cfg(feature = "seed-gen")] pub mod seed; diff --git a/tools/build-global-zone-packages.sh b/tools/build-global-zone-packages.sh index 54af9d6327..fc1ab42ade 100755 --- a/tools/build-global-zone-packages.sh +++ b/tools/build-global-zone-packages.sh @@ -12,7 +12,7 @@ out_dir="$(readlink -f "${2:-"$tarball_src_dir"}")" # Make sure needed packages exist deps=( "$tarball_src_dir/omicron-sled-agent.tar" - "$tarball_src_dir/maghemite.tar" + "$tarball_src_dir/mg-ddm-gz.tar" "$tarball_src_dir/propolis-server.tar.gz" "$tarball_src_dir/overlay.tar.gz" ) @@ -46,7 +46,7 @@ cd - pkg_dir="$tmp_gz/root/opt/oxide/mg-ddm" mkdir -p "$pkg_dir" cd "$pkg_dir" -tar -xvfz "$tarball_src_dir/maghemite.tar" +tar -xvfz "$tarball_src_dir/mg-ddm-gz.tar" cd - # propolis should be bundled with this OS: Put the propolis-server zone image diff --git a/tools/build-trampoline-global-zone-packages.sh b/tools/build-trampoline-global-zone-packages.sh index 87013fb563..d8df0f8921 100755 --- a/tools/build-trampoline-global-zone-packages.sh +++ b/tools/build-trampoline-global-zone-packages.sh @@ -12,7 +12,7 @@ out_dir="$(readlink -f "${2:-$tarball_src_dir}")" # Make sure needed packages exist deps=( "$tarball_src_dir"/installinator.tar - "$tarball_src_dir"/maghemite.tar + "$tarball_src_dir"/mg-ddm-gz.tar ) for dep in "${deps[@]}"; do if [[ ! -e $dep ]]; then @@ -44,7 +44,7 @@ cd - pkg_dir="$tmp_trampoline/root/opt/oxide/mg-ddm" mkdir -p "$pkg_dir" cd "$pkg_dir" -tar -xvfz "$tarball_src_dir/maghemite.tar" +tar -xvfz "$tarball_src_dir/mg-ddm-gz.tar" cd - # Create the final output and we're done diff --git a/tools/ci_download_maghemite_mgd b/tools/ci_download_maghemite_mgd new file mode 100755 index 0000000000..eff680d7fd --- /dev/null +++ b/tools/ci_download_maghemite_mgd @@ -0,0 +1,168 @@ +#!/bin/bash + +# +# ci_download_maghemite_mgd: fetches the maghemite mgd binary tarball, unpacks +# it, and creates a copy called mgd, all in the current directory +# + +set -o pipefail +set -o xtrace +set -o errexit + +SOURCE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" +ARG0="$(basename "${BASH_SOURCE[0]}")" + +source "$SOURCE_DIR/maghemite_mgd_checksums" +source "$SOURCE_DIR/maghemite_mg_openapi_version" + +TARGET_DIR="out" +# Location where intermediate artifacts are downloaded / unpacked. +DOWNLOAD_DIR="$TARGET_DIR/downloads" +# Location where the final mgd directory should end up. +DEST_DIR="./$TARGET_DIR/mgd" +BIN_DIR="$DEST_DIR/root/opt/oxide/mgd/bin" + +ARTIFACT_URL="https://buildomat.eng.oxide.computer/public/file" + +REPO='oxidecomputer/maghemite' +PACKAGE_BASE_URL="$ARTIFACT_URL/$REPO/image/$COMMIT" + +function main +{ + # + # Process command-line arguments. We generally don't expect any, but + # we allow callers to specify a value to override OSTYPE, just for + # testing. 
+ # + if [[ $# != 0 ]]; then + CIDL_OS="$1" + shift + else + CIDL_OS="$OSTYPE" + fi + + if [[ $# != 0 ]]; then + echo "unexpected arguments" >&2 + exit 2 + fi + + # Configure this program + configure_os "$CIDL_OS" + + CIDL_SHA256FUNC="do_sha256sum" + TARBALL_FILENAME="mgd.tar.gz" + PACKAGE_URL="$PACKAGE_BASE_URL/$TARBALL_FILENAME" + TARBALL_FILE="$DOWNLOAD_DIR/$TARBALL_FILENAME" + + # Download the file. + echo "URL: $PACKAGE_URL" + echo "Local file: $TARBALL_FILE" + + mkdir -p "$DOWNLOAD_DIR" + mkdir -p "$DEST_DIR" + + fetch_and_verify + + do_untar "$TARBALL_FILE" + + do_assemble + + $SET_BINARIES +} + +function fail +{ + echo "$ARG0: $@" >&2 + exit 1 +} + +function configure_os +{ + echo "current directory: $PWD" + echo "configuring based on OS: \"$1\"" + case "$1" in + linux-gnu*) + SET_BINARIES="linux_binaries" + ;; + solaris*) + SET_BINARIES="" + ;; + *) + echo "WARNING: binaries for $1 are not published by maghemite" + echo "Dynamic routing apis will be unavailable" + SET_BINARIES="unsupported_os" + ;; + esac +} + +function do_download_curl +{ + curl --silent --show-error --fail --location --output "$2" "$1" +} + +function do_sha256sum +{ + sha256sum < "$1" | awk '{print $1}' +} + +function do_untar +{ + tar xzf "$1" -C "$DOWNLOAD_DIR" +} + +function do_assemble +{ + rm -r "$DEST_DIR" || true + mkdir "$DEST_DIR" + cp -r "$DOWNLOAD_DIR/root" "$DEST_DIR/root" +} + +function fetch_and_verify +{ + local DO_DOWNLOAD="true" + if [[ -f "$TARBALL_FILE" ]]; then + # If the file exists with a valid checksum, we can skip downloading. + calculated_sha256="$($CIDL_SHA256FUNC "$TARBALL_FILE")" || \ + fail "failed to calculate sha256sum" + if [[ "$calculated_sha256" == "$CIDL_SHA256" ]]; then + DO_DOWNLOAD="false" + fi + fi + + if [ "$DO_DOWNLOAD" == "true" ]; then + echo "Downloading..." + do_download_curl "$PACKAGE_URL" "$TARBALL_FILE" || \ + fail "failed to download file" + + # Verify the sha256sum. + calculated_sha256="$($CIDL_SHA256FUNC "$TARBALL_FILE")" || \ + fail "failed to calculate sha256sum" + if [[ "$calculated_sha256" != "$CIDL_SHA256" ]]; then + fail "sha256sum mismatch \ + (expected $CIDL_SHA256, found $calculated_sha256)" + fi + fi + +} + +function linux_binaries +{ + PACKAGE_BASE_URL="$ARTIFACT_URL/$REPO/linux/$COMMIT" + CIDL_SHA256="$MGD_LINUX_SHA256" + CIDL_SHA256FUNC="do_sha256sum" + TARBALL_FILENAME="mgd" + PACKAGE_URL="$PACKAGE_BASE_URL/$TARBALL_FILENAME" + TARBALL_FILE="$DOWNLOAD_DIR/$TARBALL_FILENAME" + fetch_and_verify + chmod +x "$DOWNLOAD_DIR/mgd" + cp "$DOWNLOAD_DIR/mgd" "$BIN_DIR" +} + +function unsupported_os +{ + mkdir -p "$BIN_DIR" + echo "echo 'unsupported os' && exit 1" >> "$BIN_DIR/dpd" + chmod +x "$BIN_DIR/dpd" +} + +main "$@" diff --git a/tools/ci_download_maghemite_openapi b/tools/ci_download_maghemite_openapi index 37ff4f5547..db53f68d2c 100755 --- a/tools/ci_download_maghemite_openapi +++ b/tools/ci_download_maghemite_openapi @@ -15,10 +15,7 @@ TARGET_DIR="out" # Location where intermediate artifacts are downloaded / unpacked. 
DOWNLOAD_DIR="$TARGET_DIR/downloads" -source "$SOURCE_DIR/maghemite_openapi_version" -URL="https://buildomat.eng.oxide.computer/public/file/oxidecomputer/maghemite/openapi/$COMMIT/ddm-admin.json" -LOCAL_FILE="$DOWNLOAD_DIR/ddm-admin-$COMMIT.json" function main { @@ -83,4 +80,14 @@ function do_sha256sum $SHA < "$1" | awk '{print $1}' } +source "$SOURCE_DIR/maghemite_ddm_openapi_version" +URL="https://buildomat.eng.oxide.computer/public/file/oxidecomputer/maghemite/openapi/$COMMIT/ddm-admin.json" +LOCAL_FILE="$DOWNLOAD_DIR/ddm-admin-$COMMIT.json" + +main "$@" + +source "$SOURCE_DIR/maghemite_mg_openapi_version" +URL="https://buildomat.eng.oxide.computer/public/file/oxidecomputer/maghemite/openapi/$COMMIT/mg-admin.json" +LOCAL_FILE="$DOWNLOAD_DIR/mg-admin-$COMMIT.json" + main "$@" diff --git a/tools/ci_download_softnpu_machinery b/tools/ci_download_softnpu_machinery index 7975a310f0..cb5ea40210 100755 --- a/tools/ci_download_softnpu_machinery +++ b/tools/ci_download_softnpu_machinery @@ -15,7 +15,7 @@ OUT_DIR="out/npuzone" # Pinned commit for softnpu ASIC simulator SOFTNPU_REPO="softnpu" -SOFTNPU_COMMIT="eb27e6a00f1082c9faac7cf997e57d0609f7a309" +SOFTNPU_COMMIT="c1c42398c82b0220c8b5fa3bfba9c7a3bcaa0943" # This is the softnpu ASIC simulator echo "fetching npuzone" diff --git a/tools/create_virtual_hardware.sh b/tools/create_virtual_hardware.sh index 95c2aa63df..086a40fbe2 100755 --- a/tools/create_virtual_hardware.sh +++ b/tools/create_virtual_hardware.sh @@ -44,6 +44,9 @@ function ensure_simulated_links { if [[ -z "$(get_vnic_name_if_exists "sc0_1")" ]]; then dladm create-vnic -t "sc0_1" -l "$PHYSICAL_LINK" -m a8:e1:de:01:70:1d + if [[ -v PROMISC_FILT_OFF ]]; then + dladm set-linkprop -p promisc-filtered=off sc0_1 + fi fi success "Vnic sc0_1 exists" } diff --git a/tools/delete-reservoir.sh b/tools/delete-reservoir.sh new file mode 100755 index 0000000000..77e814f0c7 --- /dev/null +++ b/tools/delete-reservoir.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +size=`pfexec /usr/lib/rsrvrctl -q | grep Free | awk '{print $3}'` +let x=$size/1024 + +pfexec /usr/lib/rsrvrctl -r $x diff --git a/tools/dendrite_openapi_version b/tools/dendrite_openapi_version index b1f210a647..9a2ea85ac0 100644 --- a/tools/dendrite_openapi_version +++ b/tools/dendrite_openapi_version @@ -1,2 +1,2 @@ -COMMIT="7712104585266a2898da38c1345210ad26f9e71d" +COMMIT="c0cbc39b55fac54b95468304c497e00f3d3cf686" SHA2="cb3f0cfbe6216d2441d34e0470252e0fb142332e47b33b65c24ef7368a694b6d" diff --git a/tools/dendrite_stub_checksums b/tools/dendrite_stub_checksums index 9538bc0d00..fe52c59381 100644 --- a/tools/dendrite_stub_checksums +++ b/tools/dendrite_stub_checksums @@ -1,3 +1,3 @@ -CIDL_SHA256_ILLUMOS="486b0b016c0df06947810b90f3a3dd40423f0ee6f255ed079dc8e5618c9a7281" -CIDL_SHA256_LINUX_DPD="af97aaf7e1046a5c651d316c384171df6387b4c54c8ae4a3ef498e532eaa5a4c" -CIDL_SHA256_LINUX_SWADM="909e400dcc9880720222c6dc3919404d83687f773f668160f66f38b51a81c188" +CIDL_SHA256_ILLUMOS="3706e0e8230b7f76407ec0acea9020b9efc7d6c78b74c304102fd8e62cac6760" +CIDL_SHA256_LINUX_DPD="b275a1c688eae1024b9ce1cbb766a66e37072e84b4a6cbc18746c903739ccf51" +CIDL_SHA256_LINUX_SWADM="7e604cc4b67c1a711a63ece2a8d0e2e7c8ef2b9ac6bb433b3c2e02f5f66018ba" diff --git a/tools/install_builder_prerequisites.sh b/tools/install_builder_prerequisites.sh index 62603ecac7..d3ecd8eaa8 100755 --- a/tools/install_builder_prerequisites.sh +++ b/tools/install_builder_prerequisites.sh @@ -197,6 +197,10 @@ retry ./tools/ci_download_dendrite_openapi # asic and running dendrite instance retry 
./tools/ci_download_dendrite_stub +# Download mgd. This is required to run tests that involve dynamic external +# routing +retry ./tools/ci_download_maghemite_mgd + # Download transceiver-control. This is used as the source for the # xcvradm binary which is bundled with the switch zone. retry ./tools/ci_download_transceiver_control diff --git a/tools/install_runner_prerequisites.sh b/tools/install_runner_prerequisites.sh index 7ece993bc9..42347f518d 100755 --- a/tools/install_runner_prerequisites.sh +++ b/tools/install_runner_prerequisites.sh @@ -105,6 +105,7 @@ function install_packages { 'pkg-config' 'brand/omicron1/tools' 'library/libxmlsec1' + 'chrony' ) # Install/update the set of packages. @@ -119,13 +120,15 @@ function install_packages { exit "$rc" fi + pfexec svcadm enable chrony + pkg list -v "${packages[@]}" elif [[ "${HOST_OS}" == "Linux" ]]; then packages=( 'ca-certificates' 'libpq5' 'libsqlite3-0' - 'libssl1.1' + 'libssl3' 'libxmlsec1-openssl' ) sudo apt-get update diff --git a/tools/maghemite_openapi_version b/tools/maghemite_ddm_openapi_version similarity index 59% rename from tools/maghemite_openapi_version rename to tools/maghemite_ddm_openapi_version index 8f84b30cb1..8f1537fa62 100644 --- a/tools/maghemite_openapi_version +++ b/tools/maghemite_ddm_openapi_version @@ -1,2 +1,2 @@ -COMMIT="12703675393459e74139f8140e0b3c4c4f129d5d" +COMMIT="7b88dbcc7810f4fe9c82a7459862a7340d7e09ce" SHA2="9737906555a60911636532f00f1dc2866dc7cd6553beb106e9e57beabad41cdf" diff --git a/tools/maghemite_mg_openapi_version b/tools/maghemite_mg_openapi_version new file mode 100644 index 0000000000..390560e241 --- /dev/null +++ b/tools/maghemite_mg_openapi_version @@ -0,0 +1,2 @@ +COMMIT="7b88dbcc7810f4fe9c82a7459862a7340d7e09ce" +SHA2="72a6344a16f98d384c50258f354d32aa52e4c3c413c6c3ecebc128d56b06abc6" diff --git a/tools/maghemite_mgd_checksums b/tools/maghemite_mgd_checksums new file mode 100644 index 0000000000..876c8befc5 --- /dev/null +++ b/tools/maghemite_mgd_checksums @@ -0,0 +1,2 @@ +CIDL_SHA256="82e965486e7acdbc4661258f1bc8d7751155f406f0973d55e61f6506d18984c2" +MGD_LINUX_SHA256="7f42776e053f415cba4b4b52171381af9da6f3aeec3cc8bcc745a2c9684ca0e5" diff --git a/tools/update_maghemite.sh b/tools/update_maghemite.sh index a4a9b1291e..eebece1aa5 100755 --- a/tools/update_maghemite.sh +++ b/tools/update_maghemite.sh @@ -15,8 +15,9 @@ function usage { } PACKAGES=( - "maghemite" + "mg-ddm-gz" "mg-ddm" + "mgd" ) REPO="oxidecomputer/maghemite" @@ -26,13 +27,14 @@ REPO="oxidecomputer/maghemite" function update_openapi { TARGET_COMMIT="$1" DRY_RUN="$2" - SHA=$(get_sha "$REPO" "$TARGET_COMMIT" "ddm-admin.json" "openapi") + DAEMON="$3" + SHA=$(get_sha "$REPO" "$TARGET_COMMIT" "${DAEMON}-admin.json" "openapi") OUTPUT=$(printf "COMMIT=\"%s\"\nSHA2=\"%s\"\n" "$TARGET_COMMIT" "$SHA") if [ -n "$DRY_RUN" ]; then OPENAPI_PATH="/dev/null" else - OPENAPI_PATH="$SOURCE_DIR/maghemite_openapi_version" + OPENAPI_PATH="$SOURCE_DIR/maghemite_${DAEMON}_openapi_version" fi echo "Updating Maghemite OpenAPI from: $TARGET_COMMIT" set -x @@ -40,6 +42,27 @@ function update_openapi { set +x } +function update_mgd { + TARGET_COMMIT="$1" + DRY_RUN="$2" + DAEMON="$3" + SHA=$(get_sha "$REPO" "$TARGET_COMMIT" "mgd" "image") + OUTPUT=$(printf "CIDL_SHA256=\"%s\"\n" "$SHA") + + SHA_LINUX=$(get_sha "$REPO" "$TARGET_COMMIT" "mgd" "linux") + OUTPUT_LINUX=$(printf "MGD_LINUX_SHA256=\"%s\"\n" "$SHA_LINUX") + + if [ -n "$DRY_RUN" ]; then + MGD_PATH="/dev/null" + else + MGD_PATH="$SOURCE_DIR/maghemite_mgd_checksums" + fi + echo "Updating
Maghemite mgd from: $TARGET_COMMIT" + set -x + echo "$OUTPUT\n$OUTPUT_LINUX" > $MGD_PATH + set +x +} + function main { TARGET_COMMIT="" DRY_RUN="" @@ -60,7 +83,9 @@ function main { TARGET_COMMIT=$(get_latest_commit_from_gh "$REPO" "$TARGET_COMMIT") install_toml2json do_update_packages "$TARGET_COMMIT" "$DRY_RUN" "$REPO" "${PACKAGES[@]}" - update_openapi "$TARGET_COMMIT" "$DRY_RUN" + update_openapi "$TARGET_COMMIT" "$DRY_RUN" ddm + update_openapi "$TARGET_COMMIT" "$DRY_RUN" mg + update_mgd "$TARGET_COMMIT" "$DRY_RUN" do_update_packages "$TARGET_COMMIT" "$DRY_RUN" "$REPO" "${PACKAGES[@]}" } diff --git a/update-engine/src/context.rs b/update-engine/src/context.rs index d232d931a2..cd85687cf9 100644 --- a/update-engine/src/context.rs +++ b/update-engine/src/context.rs @@ -223,7 +223,7 @@ impl StepContext { } } -/// Tracker for [`StepContext::add_nested_report`]. +/// Tracker for [`StepContext::send_nested_report`]. /// /// Nested event reports might contain events already seen in prior runs: /// `NestedEventBuffer` deduplicates those events such that only deltas are sent diff --git a/wicket/src/rack_setup/config_template.toml b/wicket/src/rack_setup/config_template.toml index 4b193a0c29..dd371c7628 100644 --- a/wicket/src/rack_setup/config_template.toml +++ b/wicket/src/rack_setup/config_template.toml @@ -43,15 +43,19 @@ bootstrap_sleds = [] infra_ip_first = "" infra_ip_last = "" -[[rack_network_config.uplinks]] +[[rack_network_config.ports]] +# Routes associated with this port. +# { nexthop = "1.2.3.4", destination = "0.0.0.0/0" } +routes = [] + +# Addresses associated with this port. +addresses = [] + # Either `switch0` or `switch1`, matching the hardware. switch = "" -# IP address this uplink should use as its gateway. -gateway_ip = "" - # qsfp0, qsfp1, ... -uplink_port = "" +port = "" # `speed40_g`, `speed100_g`, ... uplink_port_speed = "" @@ -59,8 +63,14 @@ uplink_port_speed = "" # `none`, `firecode`, or `rs` uplink_port_fec = "" -# IP address and prefix for this uplink; e.g., `192.168.100.100/16` -uplink_cidr = "" +# A list of bgp peers +# { addr = "1.7.0.1", asn = "47", port = "qsfp0" } +bgp_peers = [] + +# Optional BGP configuration. Remove this section if not needed. +[[rack_network_config.bgp]] +# The autonomous system number +asn = 0 -# VLAN ID for this uplink; omit if no VLAN ID is needed -uplink_vid = 1234 +# Prefixes to originate e.g., ["10.0.0.0/16"] +originate = [] diff --git a/wicket/src/rack_setup/config_toml.rs b/wicket/src/rack_setup/config_toml.rs index 5f0bb9e876..43af459324 100644 --- a/wicket/src/rack_setup/config_toml.rs +++ b/wicket/src/rack_setup/config_toml.rs @@ -202,20 +202,17 @@ fn populate_network_table( Value::String(Formatted::new(value)); } - // If `config.uplinks` is empty, we'll leave the template uplinks in place; - // otherwise, replace it with the user's uplinks.
- if !config.uplinks.is_empty() { - *table.get_mut("uplinks").unwrap().as_array_of_tables_mut().unwrap() = + if !config.ports.is_empty() { + *table.get_mut("ports").unwrap().as_array_of_tables_mut().unwrap() = config - .uplinks + .ports .iter() .map(|cfg| { let mut uplink = Table::new(); - let mut last_key = None; + let mut _last_key = None; for (property, value) in [ ("switch", cfg.switch.to_string()), - ("gateway_ip", cfg.gateway_ip.to_string()), - ("uplink_port", cfg.uplink_port.to_string()), + ("port", cfg.port.to_string()), ( "uplink_port_speed", enum_to_toml_string(&cfg.uplink_port_speed), @@ -224,41 +221,93 @@ fn populate_network_table( "uplink_port_fec", enum_to_toml_string(&cfg.uplink_port_fec), ), - ("uplink_cidr", cfg.uplink_cidr.to_string()), ] { uplink.insert( property, Item::Value(Value::String(Formatted::new(value))), ); - last_key = Some(property); + _last_key = Some(property); } - if let Some(uplink_vid) = cfg.uplink_vid { - uplink.insert( - "uplink_vid", - Item::Value(Value::Integer(Formatted::new( - i64::from(uplink_vid), - ))), + let mut routes = Array::new(); + for r in &cfg.routes { + let mut route = InlineTable::new(); + route.insert( + "nexthop", + Value::String(Formatted::new( + r.nexthop.to_string(), + )), + ); + route.insert( + "destination", + Value::String(Formatted::new( + r.destination.to_string(), + )), ); - } else { - // Unwraps: We know `last_key` is `Some(_)`, because we - // set it in every iteration of the loop above, and we - // know it's present in `uplink` because we set it to - // the `property` we just inserted. - let last = uplink.get_mut(last_key.unwrap()).unwrap(); - - // Every item we insert is an `Item::Value`, so we can - // unwrap this conversion. - last.as_value_mut() - .unwrap() - .decor_mut() - .set_suffix("\n# uplink_vid ="); + routes.push(Value::InlineTable(route)); } + uplink.insert("routes", Item::Value(Value::Array(routes))); + let mut addresses = Array::new(); + for a in &cfg.addresses { + addresses + .push(Value::String(Formatted::new(a.to_string()))) + } + uplink.insert( + "addresses", + Item::Value(Value::Array(addresses)), + ); + + let mut peers = Array::new(); + for p in &cfg.bgp_peers { + let mut peer = InlineTable::new(); + peer.insert( + "addr", + Value::String(Formatted::new(p.addr.to_string())), + ); + peer.insert( + "asn", + Value::Integer(Formatted::new(p.asn as i64)), + ); + peer.insert( + "port", + Value::String(Formatted::new(p.port.to_string())), + ); + peers.push(Value::InlineTable(peer)); + } + uplink + .insert("bgp_peers", Item::Value(Value::Array(peers))); uplink }) .collect(); } + if !config.bgp.is_empty() { + *table.get_mut("bgp").unwrap().as_array_of_tables_mut().unwrap() = + config + .bgp + .iter() + .map(|cfg| { + let mut bgp = Table::new(); + bgp.insert( + "asn", + Item::Value(Value::Integer(Formatted::new( + cfg.asn as i64, + ))), + ); + + let mut originate = Array::new(); + for o in &cfg.originate { + originate + .push(Value::String(Formatted::new(o.to_string()))); + } + bgp.insert( + "originate", + Item::Value(Value::Array(originate)), + ); + bgp + }) + .collect(); + } } #[cfg(test)] @@ -268,19 +317,25 @@ mod tests { use std::net::Ipv6Addr; use wicket_common::rack_setup::PutRssUserConfigInsensitive; use wicketd_client::types::Baseboard; + use wicketd_client::types::BgpConfig; + use wicketd_client::types::BgpPeerConfig; + use wicketd_client::types::PortConfigV1; use wicketd_client::types::PortFec; use wicketd_client::types::PortSpeed; + use wicketd_client::types::RouteConfig; use 
wicketd_client::types::SpIdentifier; use wicketd_client::types::SwitchLocation; - use wicketd_client::types::UplinkConfig; fn put_config_from_current_config( value: CurrentRssUserConfigInsensitive, ) -> PutRssUserConfigInsensitive { + use omicron_common::api::internal::shared::BgpConfig as InternalBgpConfig; + use omicron_common::api::internal::shared::BgpPeerConfig as InternalBgpPeerConfig; + use omicron_common::api::internal::shared::PortConfigV1 as InternalPortConfig; use omicron_common::api::internal::shared::PortFec as InternalPortFec; use omicron_common::api::internal::shared::PortSpeed as InternalPortSpeed; + use omicron_common::api::internal::shared::RouteConfig as InternalRouteConfig; use omicron_common::api::internal::shared::SwitchLocation as InternalSwitchLocation; - use omicron_common::api::internal::shared::UplinkConfig as InternalUplinkConfig; let rnc = value.rack_network_config.unwrap(); @@ -312,12 +367,29 @@ mod tests { rack_network_config: InternalRackNetworkConfig { infra_ip_first: rnc.infra_ip_first, infra_ip_last: rnc.infra_ip_last, - uplinks: rnc - .uplinks + ports: rnc + .ports .iter() - .map(|config| InternalUplinkConfig { - gateway_ip: config.gateway_ip, - uplink_port: config.uplink_port.clone(), + .map(|config| InternalPortConfig { + routes: config + .routes + .iter() + .map(|r| InternalRouteConfig { + destination: r.destination, + nexthop: r.nexthop, + }) + .collect(), + addresses: config.addresses.clone(), + bgp_peers: config + .bgp_peers + .iter() + .map(|p| InternalBgpPeerConfig { + asn: p.asn, + port: p.port.clone(), + addr: p.addr, + }) + .collect(), + port: config.port.clone(), uplink_port_speed: match config.uplink_port_speed { PortSpeed::Speed0G => InternalPortSpeed::Speed0G, PortSpeed::Speed1G => InternalPortSpeed::Speed1G, @@ -340,8 +412,6 @@ mod tests { PortFec::None => InternalPortFec::None, PortFec::Rs => InternalPortFec::Rs, }, - uplink_cidr: config.uplink_cidr, - uplink_vid: config.uplink_vid, switch: match config.switch { SwitchLocation::Switch0 => { InternalSwitchLocation::Switch0 @@ -352,6 +422,14 @@ mod tests { }, }) .collect(), + bgp: rnc + .bgp + .iter() + .map(|config| InternalBgpConfig { + asn: config.asn, + originate: config.originate.clone(), + }) + .collect(), }, } } @@ -395,15 +473,26 @@ mod tests { rack_network_config: Some(RackNetworkConfig { infra_ip_first: "172.30.0.1".parse().unwrap(), infra_ip_last: "172.30.0.10".parse().unwrap(), - uplinks: vec![UplinkConfig { - gateway_ip: "172.30.0.10".parse().unwrap(), - uplink_cidr: "172.30.0.1/24".parse().unwrap(), + ports: vec![PortConfigV1 { + addresses: vec!["172.30.0.1/24".parse().unwrap()], + routes: vec![RouteConfig { + destination: "0.0.0.0/0".parse().unwrap(), + nexthop: "172.30.0.10".parse().unwrap(), + }], + bgp_peers: vec![BgpPeerConfig { + asn: 47, + addr: "10.2.3.4".parse().unwrap(), + port: "port0".into(), + }], uplink_port_speed: PortSpeed::Speed400G, uplink_port_fec: PortFec::Firecode, - uplink_port: "port0".into(), - uplink_vid: None, + port: "port0".into(), switch: SwitchLocation::Switch0, }], + bgp: vec![BgpConfig { + asn: 47, + originate: vec!["10.0.0.0/16".parse().unwrap()], + }], }), }; let template = TomlTemplate::populate(&config).to_string(); diff --git a/wicket/src/ui/main.rs b/wicket/src/ui/main.rs index 42cc6bf587..58ea6c1771 100644 --- a/wicket/src/ui/main.rs +++ b/wicket/src/ui/main.rs @@ -23,7 +23,7 @@ use wicketd_client::types::GetLocationResponse; /// This structure allows us to maintain similar styling and navigation /// throughout wicket with a minimum of 
code. /// -/// Specific functionality is put inside [`Pane`]s, which can be customized +/// Specific functionality is put inside Panes, which can be customized /// as needed. pub struct MainScreen { #[allow(unused)] diff --git a/wicket/src/ui/panes/rack_setup.rs b/wicket/src/ui/panes/rack_setup.rs index 212ddff4da..1600bc40ac 100644 --- a/wicket/src/ui/panes/rack_setup.rs +++ b/wicket/src/ui/panes/rack_setup.rs @@ -695,12 +695,13 @@ fn rss_config_text<'a>( }; if let Some(cfg) = insensitive.rack_network_config.as_ref() { - for (i, uplink) in cfg.uplinks.iter().enumerate() { - let mut items = vec![ + for (i, uplink) in cfg.ports.iter().enumerate() { + let /*mut*/ items = vec![ vec![ Span::styled(" • Switch : ", label_style), Span::styled(uplink.switch.to_string(), ok_style), ], + /* TODO(ry) vec![ Span::styled(" • Gateway IP : ", label_style), Span::styled(uplink.gateway_ip.to_string(), ok_style), ], @@ -713,6 +714,7 @@ Span::styled(" • Uplink port : ", label_style), Span::styled(uplink.uplink_port.clone(), ok_style), ], + */ vec![ Span::styled(" • Uplink port speed: ", label_style), Span::styled( @@ -725,6 +727,7 @@ Span::styled(uplink.uplink_port_fec.to_string(), ok_style), ], ]; + /* TODO(ry) if let Some(uplink_vid) = uplink.uplink_vid { items.push(vec![ Span::styled(" • Uplink VLAN id : ", label_style), @@ -736,6 +739,7 @@ Span::styled("none", ok_style), ]); } + */ append_list( &mut spans, diff --git a/wicket/src/ui/wrap.rs b/wicket/src/ui/wrap.rs index 6cd5f7010a..9cd57d45d5 100644 --- a/wicket/src/ui/wrap.rs +++ b/wicket/src/ui/wrap.rs @@ -324,7 +324,7 @@ impl<'a> Fragment for StyledWord<'a> { /// Forcibly break spans wider than `line_width` into smaller spans. /// -/// This simply calls [`Span::break_apart`] on spans that are too wide. +/// This simply calls [`StyledWord::break_apart`] on spans that are too wide. fn break_words<'a, I>(spans: I, line_width: usize) -> Vec<StyledWord<'a>> where I: IntoIterator<Item = StyledWord<'a>>, diff --git a/wicketd/Cargo.toml b/wicketd/Cargo.toml index 1044e1ff51..56388afd34 100644 --- a/wicketd/Cargo.toml +++ b/wicketd/Cargo.toml @@ -24,6 +24,7 @@ hubtools.workspace = true http.workspace = true hyper.workspace = true illumos-utils.workspace = true +ipnetwork.workspace = true itertools.workspace = true reqwest.workspace = true schemars.workspace = true diff --git a/wicketd/src/installinator_progress.rs b/wicketd/src/installinator_progress.rs index ba3f743171..77baec2c94 100644 --- a/wicketd/src/installinator_progress.rs +++ b/wicketd/src/installinator_progress.rs @@ -165,7 +165,7 @@ enum RunningUpdate { /// Reports from the installinator have been received. /// /// This is an `UnboundedSender` to avoid cancel-safety issues (see - /// https://github.com/oxidecomputer/omicron/pull/3579). + /// <https://github.com/oxidecomputer/omicron/pull/3579>). ReportsReceived(watch::Sender), /// All messages have been received.
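For context, the `populate_network_table` changes above render the new `routes`, `addresses`, and `bgp_peers` fields with `toml_edit`. The following standalone sketch is not code from this change: the values and the `main` wrapper are made up, and it assumes the `Document`/`ArrayOfTables` API of the toml_edit 0.19 era used elsewhere in the file. It shows the same calls end to end and the TOML they produce, which lines up with the commented examples in `config_template.toml` above.

use toml_edit::{
    Array, ArrayOfTables, Document, Formatted, InlineTable, Item, Table, Value,
};

fn main() {
    // One route as an inline table: { nexthop = "...", destination = "..." }
    let mut route = InlineTable::new();
    route.insert("nexthop", Value::String(Formatted::new("192.168.1.1".to_string())));
    route.insert("destination", Value::String(Formatted::new("0.0.0.0/0".to_string())));

    let mut routes = Array::new();
    routes.push(Value::InlineTable(route));

    // One [[ports]] entry carrying the routes array and a port name.
    let mut port = Table::new();
    port.insert("routes", Item::Value(Value::Array(routes)));
    port.insert("port", Item::Value(Value::String(Formatted::new("qsfp0".to_string()))));

    let mut ports = ArrayOfTables::new();
    ports.push(port);

    let mut doc = Document::new();
    doc.insert("ports", Item::ArrayOfTables(ports));

    // Prints roughly:
    //   [[ports]]
    //   routes = [{ nexthop = "192.168.1.1", destination = "0.0.0.0/0" }]
    //   port = "qsfp0"
    print!("{doc}");
}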
diff --git a/wicketd/src/preflight_check/uplink.rs b/wicketd/src/preflight_check/uplink.rs index 58955d04d6..ebcba90645 100644 --- a/wicketd/src/preflight_check/uplink.rs +++ b/wicketd/src/preflight_check/uplink.rs @@ -17,12 +17,13 @@ use dpd_client::ClientState as DpdClientState; use either::Either; use illumos_utils::zone::SVCCFG; use illumos_utils::PFEXEC; +use ipnetwork::IpNetwork; use omicron_common::address::DENDRITE_PORT; +use omicron_common::api::internal::shared::PortConfigV1; use omicron_common::api::internal::shared::PortFec as OmicronPortFec; use omicron_common::api::internal::shared::PortSpeed as OmicronPortSpeed; use omicron_common::api::internal::shared::RackNetworkConfig; use omicron_common::api::internal::shared::SwitchLocation; -use omicron_common::api::internal::shared::UplinkConfig; use schemars::JsonSchema; use serde::Deserialize; use serde::Serialize; @@ -32,7 +33,6 @@ use slog::Logger; use std::collections::BTreeSet; use std::collections::HashMap; use std::net::IpAddr; -use std::net::Ipv4Addr; use std::str::FromStr; use std::sync::Arc; use std::sync::Mutex; @@ -66,8 +66,6 @@ const CHRONYD: &str = "/usr/sbin/chronyd"; const IPADM: &str = "/usr/sbin/ipadm"; const ROUTE: &str = "/usr/sbin/route"; -const DPD_DEFAULT_IPV4_CIDR: &str = "0.0.0.0/0"; - pub(super) async fn run_local_uplink_preflight_check( network_config: RackNetworkConfig, dns_servers: Vec<IpAddr>, @@ -90,7 +88,7 @@ pub(super) async fn run_local_uplink_preflight_check( let mut engine = UpdateEngine::new(log, sender); for uplink in network_config - .uplinks + .ports .iter() .filter(|uplink| uplink.switch == our_switch_location) { @@ -131,7 +129,7 @@ pub(super) async fn run_local_uplink_preflight_check( fn add_steps_for_single_local_uplink_preflight_check<'a>( engine: &mut UpdateEngine<'a>, dpd_client: &'a DpdClient, - uplink: &'a UplinkConfig, + uplink: &'a PortConfigV1, dns_servers: &'a [IpAddr], ntp_servers: &'a [String], dns_name_to_query: Option<&'a str>, @@ -153,7 +151,7 @@ fn add_steps_for_single_local_uplink_preflight_check<'a>( // Timeout we give to chronyd during the NTP check, in seconds. const CHRONYD_CHECK_TIMEOUT_SECS: &str = "30"; - let registrar = engine.for_component(uplink.uplink_port.clone()); + let registrar = engine.for_component(uplink.port.clone()); let prev_step = registrar .new_step( @@ -162,7 +160,7 @@ fn add_steps_for_single_local_uplink_preflight_check<'a>( |_cx| async { // Check that the port name is valid and that it has no links // configured already. - let port_id = PortId::from_str(&uplink.uplink_port) + let port_id = PortId::from_str(&uplink.port) .map_err(UplinkPreflightTerminalError::InvalidPortName)?; let links = dpd_client .link_list(&port_id) @@ -192,11 +190,11 @@ fn add_steps_for_single_local_uplink_preflight_check<'a>( { Ok(_response) => { let metadata = vec![format!( - "configured {}/{}: ip {}, gateway {}", + "configured {}/{}: ips {:#?}, routes {:#?}", *port_id, link_id.0, - uplink.uplink_cidr, - uplink.gateway_ip + uplink.addresses, + uplink.routes )]; StepSuccess::new((port_id, link_id)) .with_metadata(metadata) @@ -298,93 +296,99 @@ fn add_steps_for_single_local_uplink_preflight_check<'a>( // Tell the `uplink` service about the IP address we created on // the switch when configuring the uplink.
let uplink_property = - UplinkProperty(format!("uplinks/{}_0", uplink.uplink_port)); - let uplink_cidr = uplink.uplink_cidr.to_string(); - - if let Err(err) = execute_command(&[ - SVCCFG, - "-s", - UPLINK_SMF_NAME, - "addpropvalue", - &uplink_property.0, - "astring:", - &uplink_cidr, - ]) - .await - { - return StepWarning::new( - Err(L2Failure::UplinkAddProperty(level1)), - format!("could not add uplink property: {err}"), - ) - .into(); - }; - - if let Err(err) = execute_command(&[ - SVCCFG, - "-s", - UPLINK_DEFAULT_SMF_NAME, - "refresh", - ]) - .await - { - return StepWarning::new( - Err(L2Failure::UplinkRefresh(level1, uplink_property)), - format!("could not add uplink property: {err}"), - ) - .into(); - }; - - // Wait for the `uplink` service to create the IP address. - let start_waiting_addr = Instant::now(); - 'waiting_for_addr: loop { - let ipadm_out = match execute_command(&[ - IPADM, - "show-addr", - "-p", - "-o", - "addr", + UplinkProperty(format!("uplinks/{}_0", uplink.port)); + + for addr in &uplink.addresses { + let uplink_cidr = addr.to_string(); + if let Err(err) = execute_command(&[ + SVCCFG, + "-s", + UPLINK_SMF_NAME, + "addpropvalue", + &uplink_property.0, + "astring:", + &uplink_cidr, ]) .await { - Ok(stdout) => stdout, - Err(err) => { - return StepWarning::new( - Err(L2Failure::RunIpadm( - level1, - uplink_property, - )), - format!("failed running ipadm: {err}"), - ) - .into(); - } + return StepWarning::new( + Err(L2Failure::UplinkAddProperty(level1)), + format!("could not add uplink property: {err}"), + ) + .into(); }; - for line in ipadm_out.split('\n') { - if line == uplink_cidr { - break 'waiting_for_addr; - } - } - - // We did not find `uplink_cidr` in the output of ipadm; - // sleep a bit and try again, unless we've been waiting too - // long already. - if start_waiting_addr.elapsed() < UPLINK_SVC_WAIT_TIMEOUT { - tokio::time::sleep(UPLINK_SVC_RETRY_DELAY).await; - } else { + if let Err(err) = execute_command(&[ + SVCCFG, + "-s", + UPLINK_DEFAULT_SMF_NAME, + "refresh", + ]) + .await + { return StepWarning::new( - Err(L2Failure::WaitingForHostAddr( + Err(L2Failure::UplinkRefresh( level1, uplink_property, )), - format!( - "timed out waiting for `uplink` to \ - create {uplink_cidr}" - ), + format!("could not add uplink property: {err}"), ) .into(); + }; + + // Wait for the `uplink` service to create the IP address. + let start_waiting_addr = Instant::now(); + 'waiting_for_addr: loop { + let ipadm_out = match execute_command(&[ + IPADM, + "show-addr", + "-p", + "-o", + "addr", + ]) + .await + { + Ok(stdout) => stdout, + Err(err) => { + return StepWarning::new( + Err(L2Failure::RunIpadm( + level1, + uplink_property, + )), + format!("failed running ipadm: {err}"), + ) + .into(); + } + }; + + for line in ipadm_out.split('\n') { + if line == uplink_cidr { + break 'waiting_for_addr; + } + } + + // We did not find `uplink_cidr` in the output of ipadm; + // sleep a bit and try again, unless we've been waiting too + // long already. 
+ if start_waiting_addr.elapsed() + < UPLINK_SVC_WAIT_TIMEOUT + { + tokio::time::sleep(UPLINK_SVC_RETRY_DELAY).await; + } else { + return StepWarning::new( + Err(L2Failure::WaitingForHostAddr( + level1, + uplink_property, + )), + format!( + "timed out waiting for `uplink` to \ + create {uplink_cidr}" + ), + ) + .into(); + } } } - let metadata = vec![format!("configured {}", uplink_property.0)]; StepSuccess::new(Ok(L2Success { level1, uplink_property })) @@ -410,27 +414,29 @@ fn add_steps_for_single_local_uplink_preflight_check<'a>( } }; - // Add the gateway as the default route in illumos. - if let Err(err) = execute_command(&[ - ROUTE, - "add", - "-inet", - "default", - &uplink.gateway_ip.to_string(), - ]) - .await - { - return StepWarning::new( - Err(RoutingFailure::HostDefaultRoute(level2)), - format!("could not add default route: {err}"), - ) - .into(); - }; + for r in &uplink.routes { + // Add the gateway as the default route in illumos. + if let Err(err) = execute_command(&[ + ROUTE, + "add", + "-inet", + &r.destination.to_string(), + &r.nexthop.to_string(), + ]) + .await + { + return StepWarning::new( + Err(RoutingFailure::HostDefaultRoute(level2)), + format!("could not add default route: {err}"), + ) + .into(); + }; + } StepSuccess::new(Ok(RoutingSuccess { level2 })) .with_metadata(vec![format!( - "added default route to {}", - uplink.gateway_ip + "added routes {:#?}", + uplink.routes, )]) .into() }, @@ -595,21 +601,24 @@ fn add_steps_for_single_local_uplink_preflight_check<'a>( } }; - if remove_host_route { - execute_command(&[ - ROUTE, - "delete", - "-inet", - "default", - &uplink.gateway_ip.to_string(), - ]) - .await - .map_err(|err| { - UplinkPreflightTerminalError::RemoveHostRoute { - err, - gateway_ip: uplink.gateway_ip, - } - })?; + for r in &uplink.routes { + if remove_host_route { + execute_command(&[ + ROUTE, + "delete", + "-inet", + &r.destination.to_string(), + &r.nexthop.to_string(), + ]) + .await + .map_err(|err| { + UplinkPreflightTerminalError::RemoveHostRoute { + err, + destination: r.destination, + nexthop: r.nexthop, + } + })?; + } } StepSuccess::new(Ok(level2)).into() @@ -730,7 +739,7 @@ fn add_steps_for_single_local_uplink_preflight_check<'a>( } fn build_port_settings( - uplink: &UplinkConfig, + uplink: &PortConfigV1, link_id: &LinkId, ) -> PortSettings { // Map from omicron_common types to dpd_client types @@ -758,10 +767,12 @@ fn build_port_settings( v6_routes: HashMap::new(), }; + let addrs = uplink.addresses.iter().map(|a| a.ip()).collect(); + port_settings.links.insert( link_id.to_string(), LinkSettings { - addrs: vec![IpAddr::V4(uplink.uplink_cidr.ip())], + addrs, params: LinkCreate { // TODO we should take these parameters too // https://github.com/oxidecomputer/omicron/issues/3061 @@ -773,14 +784,16 @@ fn build_port_settings( }, ); - port_settings.v4_routes.insert( - DPD_DEFAULT_IPV4_CIDR.parse().unwrap(), - RouteSettingsV4 { - link_id: link_id.0, - nexthop: uplink.gateway_ip, - vid: uplink.uplink_vid, - }, - ); + for r in &uplink.routes { + if let (IpNetwork::V4(dst), IpAddr::V4(nexthop)) = + (r.destination, r.nexthop) + { + port_settings.v4_routes.insert( + dst.to_string(), + RouteSettingsV4 { link_id: link_id.0, nexthop, vid: None }, + ); + } + } port_settings } @@ -890,8 +903,10 @@ pub(crate) enum UplinkPreflightTerminalError { err: DpdError, port_id: PortId, }, - #[error("failed to remove host OS route to gateway {gateway_ip}: {err}")] - RemoveHostRoute { err: String, gateway_ip: Ipv4Addr }, + #[error( + "failed to remove host OS route 
{destination} -> {nexthop}: {err}" )] + RemoveHostRoute { err: String, destination: IpNetwork, nexthop: IpAddr }, #[error("failed to remove uplink SMF property {property:?}: {err}")] RemoveSmfProperty { property: String, err: String }, #[error("failed to refresh uplink service config: {0}")] diff --git a/wicketd/src/rss_config.rs b/wicketd/src/rss_config.rs index 1dc9f84985..f335754318 100644 --- a/wicketd/src/rss_config.rs +++ b/wicketd/src/rss_config.rs @@ -454,17 +454,20 @@ impl From<&'_ CurrentRssConfig> for CurrentRssUserConfig { fn validate_rack_network_config( config: &RackNetworkConfig, ) -> Result<bootstrap_agent_client::types::RackNetworkConfig> { + use bootstrap_agent_client::types::BgpConfig as BaBgpConfig; + use bootstrap_agent_client::types::BgpPeerConfig as BaBgpPeerConfig; + use bootstrap_agent_client::types::PortConfigV1 as BaPortConfigV1; use bootstrap_agent_client::types::PortFec as BaPortFec; use bootstrap_agent_client::types::PortSpeed as BaPortSpeed; + use bootstrap_agent_client::types::RouteConfig as BaRouteConfig; use bootstrap_agent_client::types::SwitchLocation as BaSwitchLocation; - use bootstrap_agent_client::types::UplinkConfig as BaUplinkConfig; use omicron_common::api::internal::shared::PortFec; use omicron_common::api::internal::shared::PortSpeed; use omicron_common::api::internal::shared::SwitchLocation; // Ensure that there is at least one uplink - if config.uplinks.is_empty() { - return Err(anyhow!("Must have at least one uplink configured")); + if config.ports.is_empty() { + return Err(anyhow!("Must have at least one port configured")); } // Make sure `infra_ip_first`..`infra_ip_last` is a well-defined range... @@ -475,16 +478,20 @@ fn validate_rack_network_config( }, )?; - // iterate through each UplinkConfig - for uplink_config in &config.uplinks { - // ... and check that it contains `uplink_ip`. - if uplink_config.uplink_cidr.ip() < infra_ip_range.first || uplink_config.uplink_cidr.ip() > infra_ip_range.last - { - bail!( + // TODO this implies a single contiguous range for port IPs which is over + // constraining + // iterate through each port config + for port_config in &config.ports { + for addr in &port_config.addresses { + // ... and check that it contains `uplink_ip`. + if addr.ip() < infra_ip_range.first + || addr.ip() > infra_ip_range.last + { + bail!( "`uplink_cidr`'s IP address must be in the range defined by \ `infra_ip_first` and `infra_ip_last`" ); + } } } // TODO Add more client side checks on `rack_network_config` contents?
@@ -492,17 +499,33 @@ fn validate_rack_network_config( Ok(bootstrap_agent_client::types::RackNetworkConfig { infra_ip_first: config.infra_ip_first, infra_ip_last: config.infra_ip_last, - uplinks: config - .uplinks + ports: config + .ports .iter() - .map(|config| BaUplinkConfig { - gateway_ip: config.gateway_ip, + .map(|config| BaPortConfigV1 { + port: config.port.clone(), + routes: config + .routes + .iter() + .map(|r| BaRouteConfig { + destination: r.destination, + nexthop: r.nexthop, + }) + .collect(), + addresses: config.addresses.clone(), + bgp_peers: config + .bgp_peers + .iter() + .map(|p| BaBgpPeerConfig { + addr: p.addr, + asn: p.asn, + port: p.port.clone(), + }) + .collect(), switch: match config.switch { SwitchLocation::Switch0 => BaSwitchLocation::Switch0, SwitchLocation::Switch1 => BaSwitchLocation::Switch1, }, - uplink_cidr: config.uplink_cidr, - uplink_port: config.uplink_port.clone(), uplink_port_speed: match config.uplink_port_speed { PortSpeed::Speed0G => BaPortSpeed::Speed0G, PortSpeed::Speed1G => BaPortSpeed::Speed1G, @@ -519,7 +542,14 @@ fn validate_rack_network_config( PortFec::None => BaPortFec::None, PortFec::Rs => BaPortFec::Rs, }, - uplink_vid: config.uplink_vid, + }) + .collect(), + bgp: config + .bgp + .iter() + .map(|config| BaBgpConfig { + asn: config.asn, + originate: config.originate.clone(), }) .collect(), })
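To round out the picture, here is a minimal, hypothetical sketch of how the new `dev::maghemite::MgdInstance` helper introduced earlier might be used from a test. It is not part of this change and makes a few assumptions: that the test-utils crate is importable as `omicron_test_utils`, that the struct's `port` field is public (like `data_dir`), and that an `mgd` binary (as fetched by `ci_download_maghemite_mgd`) is on `PATH`.

use omicron_test_utils::dev::maghemite::MgdInstance;

#[tokio::test]
async fn mgd_starts_and_stops() -> anyhow::Result<()> {
    // Passing port 0 asks MgdInstance::start to discover the port mgd actually
    // bound by scraping the mgd_stdout log in its temporary data directory.
    let mut mgd = MgdInstance::start(0).await?;
    assert_ne!(mgd.port, 0);

    // Kill the child process and remove the temporary directory; otherwise
    // Drop only prints a warning about the leaked process and directory.
    mgd.cleanup().await?;
    Ok(())
}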