diff --git a/api_identity/src/lib.rs b/api_identity/src/lib.rs index 50a3fff6d1..4d933ed3c0 100644 --- a/api_identity/src/lib.rs +++ b/api_identity/src/lib.rs @@ -3,10 +3,8 @@ // file, You can obtain one at https://mozilla.org/MPL/2.0/. // Copyright 2020 Oxide Computer Company -/*! - * This macro is a helper to generate an accessor for the identity of any - * `api::Object`. - */ +//! This macro is a helper to generate an accessor for the identity of any +//! `api::Object`. extern crate proc_macro; @@ -15,10 +13,8 @@ use quote::quote; use syn::Fields; use syn::ItemStruct; -/** - * Generates an "identity()" accessor for any `api::Object` having an `identity` - * field. - */ +/// Generates an "identity()" accessor for any `api::Object` having an `identity` +/// field. #[proc_macro_derive(ObjectIdentity)] pub fn object_identity( item: proc_macro::TokenStream, diff --git a/common/src/api/external/error.rs b/common/src/api/external/error.rs index 6e4fcf7743..f66de9cd30 100644 --- a/common/src/api/external/error.rs +++ b/common/src/api/external/error.rs @@ -2,11 +2,9 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! - * Error handling facilities for the Oxide control plane - * - * For HTTP-level error handling, see Dropshot. - */ +//! Error handling facilities for the Oxide control plane +//! +//! For HTTP-level error handling, see Dropshot. use crate::api::external::Name; use crate::api::external::ResourceType; @@ -15,64 +13,58 @@ use serde::Deserialize; use serde::Serialize; use uuid::Uuid; -/** - * An error that can be generated within a control plane component - * - * These may be generated while handling a client request or as part of - * background operation. When generated as part of an HTTP request, an - * `Error` will be converted into an HTTP error as one of the last steps in - * processing the request. 
This allows most of the system to remain agnostic to - * the transport with which the system communicates with clients. - * - * General best practices for error design apply here. Where possible, we want - * to reuse existing variants rather than inventing new ones to distinguish - * cases that no programmatic consumer needs to distinguish. - */ +/// An error that can be generated within a control plane component +/// +/// These may be generated while handling a client request or as part of +/// background operation. When generated as part of an HTTP request, an +/// `Error` will be converted into an HTTP error as one of the last steps in +/// processing the request. This allows most of the system to remain agnostic to +/// the transport with which the system communicates with clients. +/// +/// General best practices for error design apply here. Where possible, we want +/// to reuse existing variants rather than inventing new ones to distinguish +/// cases that no programmatic consumer needs to distinguish. #[derive(Debug, Deserialize, thiserror::Error, PartialEq, Serialize)] pub enum Error { - /** An object needed as part of this operation was not found. */ + /// An object needed as part of this operation was not found. #[error("Object (of type {lookup_type:?}) not found: {type_name}")] ObjectNotFound { type_name: ResourceType, lookup_type: LookupType }, - /** An object already exists with the specified name or identifier. */ + /// An object already exists with the specified name or identifier. #[error("Object (of type {type_name:?}) already exists: {object_name}")] ObjectAlreadyExists { type_name: ResourceType, object_name: String }, - /** - * The request was well-formed, but the operation cannot be completed given - * the current state of the system. - */ + /// The request was well-formed, but the operation cannot be completed given + /// the current state of the system. 
#[error("Invalid Request: {message}")] InvalidRequest { message: String }, - /** - * Authentication credentials were required but either missing or invalid. - * The HTTP status code is called "Unauthorized", but it's more accurate to - * call it "Unauthenticated". - */ + /// Authentication credentials were required but either missing or invalid. + /// The HTTP status code is called "Unauthorized", but it's more accurate to + /// call it "Unauthenticated". #[error("Missing or invalid credentials")] Unauthenticated { internal_message: String }, - /** The specified input field is not valid. */ + /// The specified input field is not valid. #[error("Invalid Value: {label}, {message}")] InvalidValue { label: String, message: String }, - /** The request is not authorized to perform the requested operation. */ + /// The request is not authorized to perform the requested operation. #[error("Forbidden")] Forbidden, - /** The system encountered an unhandled operational error. */ + /// The system encountered an unhandled operational error. #[error("Internal Error: {internal_message}")] InternalError { internal_message: String }, - /** The system (or part of it) is unavailable. */ + /// The system (or part of it) is unavailable. 
#[error("Service Unavailable: {internal_message}")] ServiceUnavailable { internal_message: String }, - /** Method Not Allowed */ + /// Method Not Allowed #[error("Method Not Allowed: {internal_message}")] MethodNotAllowed { internal_message: String }, } -/** Indicates how an object was looked up (for an `ObjectNotFound` error) */ +/// Indicates how an object was looked up (for an `ObjectNotFound` error) #[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] pub enum LookupType { - /** a specific name was requested */ + /// a specific name was requested ByName(String), - /** a specific id was requested */ + /// a specific id was requested ById(Uuid), } @@ -97,10 +89,8 @@ impl From<&Name> for LookupType { } impl Error { - /** - * Returns whether the error is likely transient and could reasonably be - * retried - */ + /// Returns whether the error is likely transient and could reasonably be + /// retried pub fn retryable(&self) -> bool { match self { Error::ServiceUnavailable { .. } => true, @@ -116,67 +106,55 @@ impl Error { } } - /** - * Generates an [`Error::ObjectNotFound`] error for a lookup by object - * name. - */ + /// Generates an [`Error::ObjectNotFound`] error for a lookup by object + /// name. pub fn not_found_by_name(type_name: ResourceType, name: &Name) -> Error { LookupType::from(name).into_not_found(type_name) } - /** - * Generates an [`Error::ObjectNotFound`] error for a lookup by object id. - */ + /// Generates an [`Error::ObjectNotFound`] error for a lookup by object id. pub fn not_found_by_id(type_name: ResourceType, id: &Uuid) -> Error { LookupType::ById(*id).into_not_found(type_name) } - /** - * Generates an [`Error::InternalError`] error with the specific message - * - * InternalError should be used for operational conditions that should not - * happen but that we cannot reasonably handle at runtime (e.g., - * deserializing a value from the database, or finding two records for - * something that is supposed to be unique). 
- */ + /// Generates an [`Error::InternalError`] error with the specific message + /// + /// InternalError should be used for operational conditions that should not + /// happen but that we cannot reasonably handle at runtime (e.g., + /// deserializing a value from the database, or finding two records for + /// something that is supposed to be unique). pub fn internal_error(internal_message: &str) -> Error { Error::InternalError { internal_message: internal_message.to_owned() } } - /** - * Generates an [`Error::InvalidRequest`] error with the specific message - * - * This should be used for failures due possibly to invalid client input - * or malformed requests. - */ + /// Generates an [`Error::InvalidRequest`] error with the specific message + /// + /// This should be used for failures due possibly to invalid client input + /// or malformed requests. pub fn invalid_request(message: &str) -> Error { Error::InvalidRequest { message: message.to_owned() } } - /** - * Generates an [`Error::ServiceUnavailable`] error with the specific - * message - * - * This should be used for transient failures where the caller might be - * expected to retry. Logic errors or other problems indicating that a - * retry would not work should probably be an InternalError (if it's a - * server problem) or InvalidRequest (if it's a client problem) instead. - */ + /// Generates an [`Error::ServiceUnavailable`] error with the specific + /// message + /// + /// This should be used for transient failures where the caller might be + /// expected to retry. Logic errors or other problems indicating that a + /// retry would not work should probably be an InternalError (if it's a + /// server problem) or InvalidRequest (if it's a client problem) instead. pub fn unavail(message: &str) -> Error { Error::ServiceUnavailable { internal_message: message.to_owned() } } } impl From for HttpError { - /** - * Converts an `Error` error into an `HttpError`. 
This defines how - * errors that are represented internally using `Error` are ultimately - * exposed to clients over HTTP. - */ + /// Converts an `Error` error into an `HttpError`. This defines how + /// errors that are represented internally using `Error` are ultimately + /// exposed to clients over HTTP. fn from(error: Error) -> HttpError { match error { Error::ObjectNotFound { type_name: t, lookup_type: lt } => { - /* TODO-cleanup is there a better way to express this? */ + // TODO-cleanup is there a better way to express this? let (lookup_field, lookup_value) = match lt { LookupType::ByName(name) => ("name", name), LookupType::ById(id) => ("id", id.to_string()), @@ -318,11 +296,9 @@ impl From> for Error { } } -/** - * Like [`assert!`], except that instead of panicking, this function returns an - * `Err(Error::InternalError)` with an appropriate message if the given - * condition is not true. - */ +/// Like [`assert!`], except that instead of panicking, this function returns an +/// `Err(Error::InternalError)` with an appropriate message if the given +/// condition is not true. #[macro_export] macro_rules! bail_unless { ($cond:expr $(,)?) => { @@ -343,7 +319,7 @@ mod test { #[test] fn test_bail_unless() { #![allow(clippy::eq_op)] - /* Success cases */ + // Success cases let no_bail = || { bail_unless!(1 + 1 == 2, "wrong answer: {}", 3); Ok(()) @@ -355,7 +331,7 @@ mod test { assert_eq!(Ok(()), no_bail()); assert_eq!(Ok(()), no_bail_label_args()); - /* Failure cases */ + // Failure cases let do_bail = || { bail_unless!(1 + 1 == 3); Ok(()) diff --git a/common/src/api/external/http_pagination.rs b/common/src/api/external/http_pagination.rs index 3cf5279451..8595f4ca2a 100644 --- a/common/src/api/external/http_pagination.rs +++ b/common/src/api/external/http_pagination.rs @@ -2,45 +2,43 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! 
- * Pagination support - * - * All list operations in the API are paginated, meaning that there's a limit on - * the number of objects returned in a single request and clients are expected - * to make additional requests to fetch the next page of results until the end - * of the list is reached or the client has found what it needs. For any list - * operation, objects are sorted by a particular field that is unique among - * objects in the list (usually a UTF-8 name or a UUID). With each response, - * the server will return a page of objects, plus a token that can be used to - * fetch the next page. - * - * See Dropshot's pagination documentation for more background on this. - * - * For our API, we expect that most resources will support pagination in the - * same way, which will include: - * - * * definitely: sorting in ascending order of the resource's "name" - * * maybe in the future: sorting in descending order of the resource's "name" - * * maybe in the future: sorting in ascending order of the resource's "id" - * * maybe in the future: sorting in descending order of the resource's "id" - * * maybe in the future: sorting in descending order of the resource's "mtime" - * and then "name" or "id" - * - * Dropshot's pagination support requires that we define the query parameters we - * support with the first request ("scan params"), the information we need on - * subsequent requests to resume a scan ("page selector"), and a way to generate - * the page selector from a given object in the collection. We can share these - * definitions across as many resources as we want. Below, we provide - * definitions for resources that implement the `ObjectIdentity` trait. With - * these definitions, any type that has identity metadata can be paginated by - * "name" in ascending order, "id" in ascending order, or either of those (plus - * name in descending order) without any new boilerplate for that type. 
- * - * There may be resources that can't be paginated using one of the above three - * ways, and we can define new ways to paginate them. As you will notice below, - * there's a fair bit of boilerplate for each way of paginating (rather than for - * each resource paginated that way). Where possible, we should share code. - */ +//! Pagination support +//! +//! All list operations in the API are paginated, meaning that there's a limit on +//! the number of objects returned in a single request and clients are expected +//! to make additional requests to fetch the next page of results until the end +//! of the list is reached or the client has found what it needs. For any list +//! operation, objects are sorted by a particular field that is unique among +//! objects in the list (usually a UTF-8 name or a UUID). With each response, +//! the server will return a page of objects, plus a token that can be used to +//! fetch the next page. +//! +//! See Dropshot's pagination documentation for more background on this. +//! +//! For our API, we expect that most resources will support pagination in the +//! same way, which will include: +//! +//! * definitely: sorting in ascending order of the resource's "name" +//! * maybe in the future: sorting in descending order of the resource's "name" +//! * maybe in the future: sorting in ascending order of the resource's "id" +//! * maybe in the future: sorting in descending order of the resource's "id" +//! * maybe in the future: sorting in descending order of the resource's "mtime" +//! and then "name" or "id" +//! +//! Dropshot's pagination support requires that we define the query parameters we +//! support with the first request ("scan params"), the information we need on +//! subsequent requests to resume a scan ("page selector"), and a way to generate +//! the page selector from a given object in the collection. We can share these +//! definitions across as many resources as we want. Below, we provide +//! 
definitions for resources that implement the `ObjectIdentity` trait. With +//! these definitions, any type that has identity metadata can be paginated by +//! "name" in ascending order, "id" in ascending order, or either of those (plus +//! name in descending order) without any new boilerplate for that type. +//! +//! There may be resources that can't be paginated using one of the above three +//! ways, and we can define new ways to paginate them. As you will notice below, +//! there's a fair bit of boilerplate for each way of paginating (rather than for +//! each resource paginated that way). Where possible, we should share code. use crate::api::external::DataPageParams; use crate::api::external::Name; @@ -60,76 +58,60 @@ use std::num::NonZeroU32; use std::sync::Arc; use uuid::Uuid; -/* - * General pagination infrastructure - */ +// General pagination infrastructure -/** - * Specifies which page of results we're on - * - * This type is generic over the different scan modes that we support. - */ +/// Specifies which page of results we're on +/// +/// This type is generic over the different scan modes that we support. #[derive(Debug, Deserialize, JsonSchema, Serialize)] pub struct PageSelector { - /** parameters describing the scan */ + /// parameters describing the scan #[serde(flatten)] scan: ScanParams, - /** value of the marker field last seen by the client */ + /// value of the marker field last seen by the client last_seen: MarkerType, } -/** - * Describes one of our supported scan modes - * - * To minimize boilerplate, we provide common functions needed by our consumers - * (e.g., `ScanParams::results_page`) as well as the Dropshot interface (e.g., - * `page_selector_for`). This trait encapsulates the functionality that differs - * among the different scan modes that we support. Much of the functionality - * here isn't so much a property of the Dropshot "scan parameters" as much as it - * is specific to a scan using those parameters. 
As a result, several of these - * are associated functions rather than methods. - */ +/// Describes one of our supported scan modes +/// +/// To minimize boilerplate, we provide common functions needed by our consumers +/// (e.g., `ScanParams::results_page`) as well as the Dropshot interface (e.g., +/// `page_selector_for`). This trait encapsulates the functionality that differs +/// among the different scan modes that we support. Much of the functionality +/// here isn't so much a property of the Dropshot "scan parameters" as much as it +/// is specific to a scan using those parameters. As a result, several of these +/// are associated functions rather than methods. pub trait ScanParams: Clone + Debug + DeserializeOwned + JsonSchema + PartialEq + Serialize { - /** - * Type of the "marker" field for this scan mode - * - * For example, when scanning by name, this would be `Name`. - */ + /// Type of the "marker" field for this scan mode + /// + /// For example, when scanning by name, this would be `Name`. type MarkerValue: Clone + Debug + DeserializeOwned + PartialEq + Serialize; - /** - * Return the direction of the scan - */ + /// Return the direction of the scan fn direction(&self) -> PaginationOrder; - /** - * Given an item, return the appropriate marker value - * - * For example, when scanning by name, this returns the "name" field of the - * item. - */ + /// Given an item, return the appropriate marker value + /// + /// For example, when scanning by name, this returns the "name" field of the + /// item. fn marker_for_item(&self, t: &T) -> Self::MarkerValue; - /** - * Given pagination parameters, return the current scan parameters - * - * This can fail if the pagination parameters are not self-consistent (e.g., - * if the scan parameters indicate we're going in ascending order by name, - * but the marker is an id rather than a name). 
- */ + /// Given pagination parameters, return the current scan parameters + /// + /// This can fail if the pagination parameters are not self-consistent (e.g., + /// if the scan parameters indicate we're going in ascending order by name, + /// but the marker is an id rather than a name). fn from_query( q: &PaginationParams>, ) -> Result<&Self, HttpError>; - /** - * Generate a page of results for a paginated endpoint that lists items of - * type `T` - * - * `list` contains the items that should appear on the page. It's not - * expected that consumers would override this implementation. - */ + /// Generate a page of results for a paginated endpoint that lists items of + /// type `T` + /// + /// `list` contains the items that should appear on the page. It's not + /// expected that consumers would override this implementation. fn results_page( query: &PaginationParams>, list: Vec, @@ -142,9 +124,7 @@ pub trait ScanParams: } } -/** - * See `dropshot::ResultsPage::new` - */ +/// See `dropshot::ResultsPage::new` fn page_selector_for(item: &T, scan_params: &S) -> PageSelector where T: ObjectIdentity, @@ -157,14 +137,12 @@ where } } -/** - * Given a request and pagination parameters, return a [`DataPageParams`] - * describing the current page of results to return - * - * This implementation is used for `ScanByName` and `ScanById`. See - * [`data_page_params_nameid_name`] and [`data_page_params_nameid_id`] for - * variants that can be used for `ScanByNameOrId`. - */ +/// Given a request and pagination parameters, return a [`DataPageParams`] +/// describing the current page of results to return +/// +/// This implementation is used for `ScanByName` and `ScanById`. See +/// [`data_page_params_nameid_name`] and [`data_page_params_nameid_id`] for +/// variants that can be used for `ScanByNameOrId`. 
pub fn data_page_params_for<'a, S, C>( rqctx: &'a Arc>, pag_params: &'a PaginationParams>, @@ -177,11 +155,9 @@ where data_page_params_with_limit(limit, &pag_params) } -/** - * Provided separately from data_page_params_for() so that the test suite can - * test the bulk of the logic without needing to cons up a Dropshot - * `RequestContext` just to get the limit. - */ +/// Provided separately from data_page_params_for() so that the test suite can +/// test the bulk of the logic without needing to cons up a Dropshot +/// `RequestContext` just to get the limit. fn data_page_params_with_limit( limit: NonZeroU32, pag_params: &PaginationParams>, @@ -199,29 +175,25 @@ where Ok(DataPageParams { marker, direction, limit }) } -/* - * Pagination by name in ascending order only (most resources today) - */ +// Pagination by name in ascending order only (most resources today) -/** Query parameters for pagination by name only */ +/// Query parameters for pagination by name only pub type PaginatedByName = PaginationParams; -/** Page selector for pagination by name only */ +/// Page selector for pagination by name only pub type PageSelectorByName = PageSelector; -/** Scan parameters for resources that support scanning by name only */ +/// Scan parameters for resources that support scanning by name only #[derive(Clone, Debug, Deserialize, JsonSchema, PartialEq, Serialize)] pub struct ScanByName { #[serde(default = "default_name_sort_mode")] sort_by: NameSortMode, } -/** - * Supported set of sort modes for scanning by name only - * - * Currently, we only support scanning in ascending order. - */ +/// Supported set of sort modes for scanning by name only +/// +/// Currently, we only support scanning in ascending order. 
#[derive(Copy, Clone, Debug, Deserialize, JsonSchema, PartialEq, Serialize)] #[serde(rename_all = "kebab-case")] pub enum NameSortMode { - /** sort in increasing order of "name" */ + /// sort in increasing order of "name" NameAscending, } @@ -247,30 +219,26 @@ impl ScanParams for ScanByName { } } -/* - * Pagination by id in ascending order only (for some anonymous resources today) - */ +// Pagination by id in ascending order only (for some anonymous resources today) -/** Query parameters for pagination by id only */ +/// Query parameters for pagination by id only pub type PaginatedById = PaginationParams; -/** Page selector for pagination by name only */ +/// Page selector for pagination by name only pub type PageSelectorById = PageSelector; -/** Scan parameters for resources that support scanning by id only */ +/// Scan parameters for resources that support scanning by id only #[derive(Clone, Debug, Deserialize, JsonSchema, PartialEq, Serialize)] pub struct ScanById { #[serde(default = "default_id_sort_mode")] sort_by: IdSortMode, } -/** - * Supported set of sort modes for scanning by id only. - * - * Currently, we only support scanning in ascending order. - */ +/// Supported set of sort modes for scanning by id only. +/// +/// Currently, we only support scanning in ascending order. #[derive(Copy, Clone, Debug, Deserialize, JsonSchema, PartialEq, Serialize)] #[serde(rename_all = "kebab-case")] pub enum IdSortMode { - /** sort in increasing order of "id" */ + /// sort in increasing order of "id" IdAscending, } @@ -294,31 +262,29 @@ impl ScanParams for ScanById { } } -/* - * Pagination by any of: name ascending, name descending, or id ascending. - * We include this now primarily to exercise the interface for doing so. - */ +// Pagination by any of: name ascending, name descending, or id ascending. +// We include this now primarily to exercise the interface for doing so. 
-/** Query parameters for pagination by name or id */ +/// Query parameters for pagination by name or id pub type PaginatedByNameOrId = PaginationParams; -/** Page selector for pagination by name or id */ +/// Page selector for pagination by name or id pub type PageSelectorByNameOrId = PageSelector; -/** Scan parameters for resources that support scanning by name or id */ +/// Scan parameters for resources that support scanning by name or id #[derive(Clone, Debug, Deserialize, JsonSchema, PartialEq, Serialize)] pub struct ScanByNameOrId { #[serde(default = "default_nameid_sort_mode")] sort_by: NameOrIdSortMode, } -/** Supported set of sort modes for scanning by name or id */ +/// Supported set of sort modes for scanning by name or id #[derive(Copy, Clone, Debug, Deserialize, JsonSchema, PartialEq, Serialize)] #[serde(rename_all = "kebab-case")] pub enum NameOrIdSortMode { - /** sort in increasing order of "name" */ + /// sort in increasing order of "name" NameAscending, - /** sort in decreasing order of "name" */ + /// sort in decreasing order of "name" NameDescending, - /** sort in increasing order of "id" */ + /// sort in increasing order of "id" IdAscending, } @@ -326,17 +292,15 @@ fn default_nameid_sort_mode() -> NameOrIdSortMode { NameOrIdSortMode::NameAscending } -/* - * TODO-correctness It's tempting to make this a serde(untagged) enum, which - * would clean up the format of the page selector parameter. However, it would - * have the side effect that if the name happened to be a valid uuid, then we'd - * parse it as a uuid here, even if the corresponding scan parameters indicated - * that we were doing a scan by name. Then we'd fail later on an invalid - * combination. We could infer the correct variant here from the "sort_by" - * field of the adjacent scan params, but we'd have to write our own - * `Deserialize` to do this. This might be worth revisiting before we commit to - * any particular version of the API. 
- */ +// TODO-correctness It's tempting to make this a serde(untagged) enum, which +// would clean up the format of the page selector parameter. However, it would +// have the side effect that if the name happened to be a valid uuid, then we'd +// parse it as a uuid here, even if the corresponding scan parameters indicated +// that we were doing a scan by name. Then we'd fail later on an invalid +// combination. We could infer the correct variant here from the "sort_by" +// field of the adjacent scan params, but we'd have to write our own +// `Deserialize` to do this. This might be worth revisiting before we commit to +// any particular version of the API. #[derive(Clone, Debug, Deserialize, JsonSchema, PartialEq, Serialize)] #[serde(rename_all = "camelCase")] pub enum NameOrIdMarker { @@ -409,19 +373,17 @@ impl ScanParams for ScanByNameOrId { } } -/** - * Serves the same purpose as [`data_page_params_for`] for the specific case of - * `ScanByNameOrId` when scanning by `name` - * - * Why do we need a separate function here? Because `data_page_params_for` only - * knows how to return the (statically-defined) marker value from the page - * selector. For `ScanByNameOrId`, this would return the enum - * `NameOrIdMarker`. But at some point our caller needs the specific type - * (e.g., `Name` for a scan by name or `Uuid` for a scan by Uuid). They get - * that from this function and its partner, [`data_page_params_nameid_id`]. - * These functions are where we look at the enum variant and extract the - * specific marker value out. - */ +/// Serves the same purpose as [`data_page_params_for`] for the specific case of +/// `ScanByNameOrId` when scanning by `name` +/// +/// Why do we need a separate function here? Because `data_page_params_for` only +/// knows how to return the (statically-defined) marker value from the page +/// selector. For `ScanByNameOrId`, this would return the enum +/// `NameOrIdMarker`. 
But at some point our caller needs the specific type +/// (e.g., `Name` for a scan by name or `Uuid` for a scan by Uuid). They get +/// that from this function and its partner, [`data_page_params_nameid_id`]. +/// These functions are where we look at the enum variant and extract the +/// specific marker value out. pub fn data_page_params_nameid_name<'a, C>( rqctx: &'a Arc>, pag_params: &'a PaginatedByNameOrId, @@ -442,19 +404,15 @@ fn data_page_params_nameid_name_limit( let marker = match data_page.marker { None => None, Some(NameOrIdMarker::Name(name)) => Some(name), - /* - * This should arguably be a panic or a 500 error, since the caller - * should not have invoked this version of the function if they didn't - * know they were looking at a name-based marker. - */ + // This should arguably be a panic or a 500 error, since the caller + // should not have invoked this version of the function if they didn't + // know they were looking at a name-based marker. Some(NameOrIdMarker::Id(_)) => return Err(bad_token_error()), }; Ok(DataPageParams { limit, direction, marker }) } -/** - * See [`data_page_params_nameid_name`]. - */ +/// See [`data_page_params_nameid_name`]. pub fn data_page_params_nameid_id<'a, C>( rqctx: &'a Arc>, pag_params: &'a PaginatedByNameOrId, @@ -475,11 +433,9 @@ fn data_page_params_nameid_id_limit( let marker = match data_page.marker { None => None, Some(NameOrIdMarker::Id(id)) => Some(id), - /* - * This should arguably be a panic or a 500 error, since the caller - * should not have invoked this version of the function if they didn't - * know they were looking at an id-based marker. - */ + // This should arguably be a panic or a 500 error, since the caller + // should not have invoked this version of the function if they didn't + // know they were looking at an id-based marker. 
Some(NameOrIdMarker::Name(_)) => return Err(bad_token_error()), }; Ok(DataPageParams { limit, direction, marker }) @@ -523,17 +479,15 @@ mod test { use std::num::NonZeroU32; use uuid::Uuid; - /* - * It's important to verify the schema for the page selectors because this - * is a part of our interface that does not appear in the OpenAPI spec - * because it's obscured by Dropshot's automatic encoding of the page - * selector. - * - * Below, we also check the schema for the scan parameters because it's easy - * to do and useful to have the examples there. We may want to remove this - * if/when we add a test case that checks the entire OpenAPI schema for our - * various APIs, since this will then be redundant. - */ + // It's important to verify the schema for the page selectors because this + // is a part of our interface that does not appear in the OpenAPI spec + // because it's obscured by Dropshot's automatic encoding of the page + // selector. + // + // Below, we also check the schema for the scan parameters because it's easy + // to do and useful to have the examples there. We may want to remove this + // if/when we add a test case that checks the entire OpenAPI schema for our + // various APIs, since this will then be redundant. #[test] fn test_pagination_schemas() { let schemas = vec![ @@ -566,10 +520,8 @@ mod test { assert_contents("tests/output/pagination-schema.txt", &found_output); } - /* - * As much for illustration as anything, we check examples of the scan - * parameters and page selectors here. - */ + // As much for illustration as anything, we check examples of the scan + // parameters and page selectors here. 
#[test] fn test_pagination_examples() { let scan_by_id = ScanById { sort_by: IdSortMode::IdAscending }; @@ -581,7 +533,7 @@ mod test { let id: Uuid = "61a78113-d3c6-4b35-a410-23e9eae64328".parse().unwrap(); let name: Name = "bort".parse().unwrap(); let examples = vec![ - /* scan parameters only */ + // scan parameters only ("scan by id ascending", to_string_pretty(&scan_by_id).unwrap()), ( "scan by name ascending", @@ -595,7 +547,7 @@ mod test { "scan by name or id, using name ascending", to_string_pretty(&scan_by_nameid_name).unwrap(), ), - /* page selectors */ + // page selectors ( "page selector: by id ascending", to_string_pretty(&PageSelectorById { @@ -664,9 +616,7 @@ mod test { .collect() } - /** - * Function for running a bunch of tests on a ScanParams type. - */ + /// Function for running a bunch of tests on a ScanParams type. fn test_scan_param_common( list: &Vec, scan: &S, @@ -683,11 +633,11 @@ mod test { { let li = list.len() - 1; - /* Test basic parts of ScanParams interface. */ + // Test basic parts of ScanParams interface. assert_eq!(&scan.marker_for_item(&list[0]), item0_marker); assert_eq!(&scan.marker_for_item(&list[li]), itemlast_marker); - /* Test page_selector_for(). */ + // Test page_selector_for(). let page_selector = page_selector_for(&list[0], scan); assert_eq!(&page_selector.scan, scan); assert_eq!(&page_selector.last_seen, item0_marker); @@ -696,23 +646,19 @@ mod test { assert_eq!(&page_selector.scan, scan); assert_eq!(&page_selector.last_seen, itemlast_marker); - /* Test from_query() with the default scan parameters. */ + // Test from_query() with the default scan parameters. let p: PaginationParams> = serde_urlencoded::from_str("").unwrap(); assert_eq!(S::from_query(&p).unwrap(), scan_default); - /* - * Test from_query() based on an explicit querystring corresponding to - * the first page in a scan with "scan" as the scan parameters. 
- */ + // Test from_query() based on an explicit querystring corresponding to + // the first page in a scan with "scan" as the scan parameters. let p0: PaginationParams> = serde_urlencoded::from_str(querystring).unwrap(); assert_eq!(S::from_query(&p0).unwrap(), scan); - /* - * Generate a results page from that, verify it, pull the token out, and - * use it to generate pagination parameters for a NextPage request. - */ + // Generate a results page from that, verify it, pull the token out, and + // use it to generate pagination parameters for a NextPage request. let page = S::results_page(&p0, list.clone()).unwrap(); assert_eq!(&page.items, list); assert!(page.next_page.is_some()); @@ -720,11 +666,9 @@ mod test { let p1: PaginationParams> = serde_urlencoded::from_str(&q).unwrap(); - /* - * Now pull the information out of that, including the "last_seen" - * marker. This should match `itemlast_marker`. That will tell us that - * the results page was properly generated. - */ + // Now pull the information out of that, including the "last_seen" + // marker. This should match `itemlast_marker`. That will tell us that + // the results page was properly generated. assert_eq!(S::from_query(&p1).unwrap(), scan); if let WhichPage::Next(PageSelector { ref last_seen, .. }) = p1.page { assert_eq!(last_seen, itemlast_marker); @@ -732,16 +676,14 @@ mod test { panic!("expected WhichPage::Next"); } - /* - * Return these two sets of pagination parameters to the caller for more - * testing. - */ + // Return these two sets of pagination parameters to the caller for more + // testing. (p0, p1) } #[test] fn test_scan_by_name() { - /* Start with the common battery of tests. */ + // Start with the common battery of tests. let scan = ScanByName { sort_by: NameSortMode::NameAscending }; let list = list_of_things(); @@ -755,7 +697,7 @@ mod test { ); assert_eq!(scan.direction(), PaginationOrder::Ascending); - /* Verify data pages based on the query params. 
*/ + // Verify data pages based on the query params. let limit = NonZeroU32::new(123).unwrap(); let data_page = data_page_params_with_limit(limit, &p0).unwrap(); assert_eq!(data_page.marker, None); @@ -767,7 +709,7 @@ mod test { assert_eq!(data_page.direction, PaginationOrder::Ascending); assert_eq!(data_page.limit, limit); - /* Test from_query(): error case. */ + // Test from_query(): error case. let error = serde_urlencoded::from_str::( "sort_by=name-descending", ) @@ -780,7 +722,7 @@ mod test { #[test] fn test_scan_by_id() { - /* Start with the common battery of tests. */ + // Start with the common battery of tests. let scan = ScanById { sort_by: IdSortMode::IdAscending }; let list = list_of_things(); @@ -794,7 +736,7 @@ mod test { ); assert_eq!(scan.direction(), PaginationOrder::Ascending); - /* Verify data pages based on the query params. */ + // Verify data pages based on the query params. let limit = NonZeroU32::new(123).unwrap(); let data_page = data_page_params_with_limit(limit, &p0).unwrap(); assert_eq!(data_page.marker, None); @@ -806,7 +748,7 @@ mod test { assert_eq!(data_page.direction, PaginationOrder::Ascending); assert_eq!(data_page.limit, limit); - /* Test from_query(): error case. */ + // Test from_query(): error case. let error = serde_urlencoded::from_str::( "sort_by=id-descending", ) @@ -819,7 +761,7 @@ mod test { #[test] fn test_scan_by_nameid_generic() { - /* Test from_query(): error case. */ + // Test from_query(): error case. let error = serde_urlencoded::from_str::( "sort_by=id-descending", ) @@ -830,19 +772,17 @@ mod test { `name-ascending`, `name-descending`, `id-ascending`" ); - /* - * TODO-coverage It'd be nice to exercise the from_query() error cases - * where the scan params doesn't match the last_seen value kind. - * However, we can't easily generate these, either directly or by - * causing Dropshot to parse a querystring. 
In the latter case, it - * would have to be a page token that Dropshot generated, but by - * design we can't get Dropshot to construct such a token. - */ + // TODO-coverage It'd be nice to exercise the from_query() error cases + // where the scan params doesn't match the last_seen value kind. + // However, we can't easily generate these, either directly or by + // causing Dropshot to parse a querystring. In the latter case, it + // would have to be a page token that Dropshot generated, but by + // design we can't get Dropshot to construct such a token. } #[test] fn test_scan_by_nameid_name() { - /* Start with the common battery of tests. */ + // Start with the common battery of tests. let scan = ScanByNameOrId { sort_by: NameOrIdSortMode::NameDescending }; assert_eq!(pagination_field_for_scan_params(&scan), PagField::Name); assert_eq!(scan.direction(), PaginationOrder::Descending); @@ -860,7 +800,7 @@ mod test { &ScanByNameOrId { sort_by: NameOrIdSortMode::NameAscending }, ); - /* Verify data pages based on the query params. */ + // Verify data pages based on the query params. let limit = NonZeroU32::new(123).unwrap(); let data_page = data_page_params_nameid_name_limit(limit, &p0).unwrap(); assert_eq!(data_page.marker, None); @@ -879,7 +819,7 @@ mod test { #[test] fn test_scan_by_nameid_id() { - /* Start with the common battery of tests. */ + // Start with the common battery of tests. let scan = ScanByNameOrId { sort_by: NameOrIdSortMode::IdAscending }; assert_eq!(pagination_field_for_scan_params(&scan), PagField::Id); assert_eq!(scan.direction(), PaginationOrder::Ascending); @@ -898,7 +838,7 @@ mod test { &ScanByNameOrId { sort_by: NameOrIdSortMode::NameAscending }, ); - /* Verify data pages based on the query params. */ + // Verify data pages based on the query params. 
let limit = NonZeroU32::new(123).unwrap(); let data_page = data_page_params_nameid_id_limit(limit, &p0).unwrap(); assert_eq!(data_page.marker, None); diff --git a/common/src/api/external/mod.rs b/common/src/api/external/mod.rs index a0aec6172f..c5b249fd1c 100644 --- a/common/src/api/external/mod.rs +++ b/common/src/api/external/mod.rs @@ -2,12 +2,10 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! - * Data structures and related facilities for representing resources in the API - * - * This includes all representations over the wire for both the external and - * internal APIs. The contents here are all HTTP-agnostic. - */ +//! Data structures and related facilities for representing resources in the API +//! +//! This includes all representations over the wire for both the external and +//! internal APIs. The contents here are all HTTP-agnostic. mod error; pub mod http_pagination; @@ -41,74 +39,58 @@ use std::num::{NonZeroU16, NonZeroU32}; use std::str::FromStr; use uuid::Uuid; -/* - * The type aliases below exist primarily to ensure consistency among return - * types for functions in the `nexus::Nexus` and `nexus::DataStore`. The - * type argument `T` generally implements `Object`. - */ +// The type aliases below exist primarily to ensure consistency among return +// types for functions in the `nexus::Nexus` and `nexus::DataStore`. The +// type argument `T` generally implements `Object`. 
-/** Result of a create operation for the specified type */ +/// Result of a create operation for the specified type pub type CreateResult = Result; -/** Result of a delete operation for the specified type */ +/// Result of a delete operation for the specified type pub type DeleteResult = Result<(), Error>; -/** Result of a list operation that returns an ObjectStream */ +/// Result of a list operation that returns an ObjectStream pub type ListResult = Result, Error>; -/** Result of a list operation that returns a vector */ +/// Result of a list operation that returns a vector pub type ListResultVec = Result, Error>; -/** Result of a lookup operation for the specified type */ +/// Result of a lookup operation for the specified type pub type LookupResult = Result; -/** Result of an update operation for the specified type */ +/// Result of an update operation for the specified type pub type UpdateResult = Result; -/** - * A stream of Results, each potentially representing an object in the API - */ +/// A stream of Results, each potentially representing an object in the API pub type ObjectStream = BoxStream<'static, Result>; -/* - * General-purpose types used for client request parameters and return values. - */ +// General-purpose types used for client request parameters and return values. -/** - * Describes an `Object` that has its own identity metadata. This is - * currently used only for pagination. - */ +/// Describes an `Object` that has its own identity metadata. This is +/// currently used only for pagination. pub trait ObjectIdentity { fn identity(&self) -> &IdentityMetadata; } -/** - * Parameters used to request a specific page of results when listing a - * collection of objects - * - * This is logically analogous to Dropshot's `PageSelector` (plus the limit from - * Dropshot's `PaginationParams). However, this type is HTTP-agnostic. 
More - * importantly, by the time this struct is generated, we know the type of the - * sort field and we can specialize `DataPageParams` to that type. This makes - * it considerably simpler to implement the backend for most of our paginated - * APIs. - * - * `NameType` is the type of the field used to sort the returned values and it's - * usually `Name`. - */ +/// Parameters used to request a specific page of results when listing a +/// collection of objects +/// +/// This is logically analogous to Dropshot's `PageSelector` (plus the limit from +/// Dropshot's `PaginationParams). However, this type is HTTP-agnostic. More +/// importantly, by the time this struct is generated, we know the type of the +/// sort field and we can specialize `DataPageParams` to that type. This makes +/// it considerably simpler to implement the backend for most of our paginated +/// APIs. +/// +/// `NameType` is the type of the field used to sort the returned values and it's +/// usually `Name`. #[derive(Debug)] pub struct DataPageParams<'a, NameType> { - /** - * If present, this is the value of the sort field for the last object seen - */ + /// If present, this is the value of the sort field for the last object seen pub marker: Option<&'a NameType>, - /** - * Whether the sort is in ascending order - */ + /// Whether the sort is in ascending order pub direction: PaginationOrder, - /** - * This identifies how many results should be returned on this page. - * Backend implementations must provide this many results unless we're at - * the end of the scan. Dropshot assumes that if we provide fewer results - * than this number, then we're done with the scan. - */ + /// This identifies how many results should be returned on this page. + /// Backend implementations must provide this many results unless we're at + /// the end of the scan. Dropshot assumes that if we provide fewer results + /// than this number, then we're done with the scan. 
pub limit: NonZeroU32, } @@ -128,13 +110,11 @@ impl<'a, NameType> DataPageParams<'a, NameType> { } } -/** - * A name used in the API - * - * Names are generally user-provided unique identifiers, highly constrained as - * described in RFD 4. An `Name` can only be constructed with a string - * that's valid as a name. - */ +/// A name used in the API +/// +/// Names are generally user-provided unique identifiers, highly constrained as +/// described in RFD 4. An `Name` can only be constructed with a string +/// that's valid as a name. #[derive( Clone, Debug, @@ -151,12 +131,10 @@ impl<'a, NameType> DataPageParams<'a, NameType> { #[serde(try_from = "String")] pub struct Name(String); -/** - * `Name::try_from(String)` is the primary method for constructing an Name - * from an input string. This validates the string according to our - * requirements for a name. - * TODO-cleanup why shouldn't callers use TryFrom<&str>? - */ +/// `Name::try_from(String)` is the primary method for constructing an Name +/// from an input string. This validates the string according to our +/// requirements for a name. +/// TODO-cleanup why shouldn't callers use TryFrom<&str>? impl TryFrom for Name { type Error = String; fn try_from(value: String) -> Result { @@ -212,10 +190,8 @@ impl<'a> From<&'a Name> for &'a str { } } -/** - * `Name` instances are comparable like Strings, primarily so that they can - * be used as keys in trees. - */ +/// `Name` instances are comparable like Strings, primarily so that they can +/// be used as keys in trees. impl PartialEq for Name where S: AsRef, @@ -225,13 +201,9 @@ where } } -/** - * Custom JsonSchema implementation to encode the constraints on Name - */ -/* - * TODO: 1. make this part of schemars w/ rename and maxlen annotations - * TODO: 2. integrate the regex with `try_from` - */ +/// Custom JsonSchema implementation to encode the constraints on Name +// TODO: 1. make this part of schemars w/ rename and maxlen annotations +// TODO: 2. 
integrate the regex with `try_from` impl JsonSchema for Name { fn schema_name() -> String { "Name".to_string() @@ -277,11 +249,9 @@ impl JsonSchema for Name { } impl Name { - /** - * Parse an `Name`. This is a convenience wrapper around - * `Name::try_from(String)` that marshals any error into an appropriate - * `Error`. - */ + /// Parse an `Name`. This is a convenience wrapper around + /// `Name::try_from(String)` that marshals any error into an appropriate + /// `Error`. pub fn from_param(value: String, label: &str) -> Result { value.parse().map_err(|e| Error::InvalidValue { label: String::from(label), @@ -289,17 +259,13 @@ impl Name { }) } - /** - * Return the `&str` representing the actual name. - */ + /// Return the `&str` representing the actual name. pub fn as_str(&self) -> &str { self.0.as_str() } } -/** - * Name for a built-in role - */ +/// Name for a built-in role #[derive( Clone, Debug, @@ -334,10 +300,8 @@ impl RoleName { } } -/** - * Custom JsonSchema implementation to encode the constraints on Name - */ -/* TODO see TODOs on Name above */ +/// Custom JsonSchema implementation to encode the constraints on Name +// TODO see TODOs on Name above impl JsonSchema for RoleName { fn schema_name() -> String { "RoleName".to_string() @@ -381,25 +345,21 @@ impl JsonSchema for RoleName { } } -/** - * A count of bytes, typically used either for memory or storage capacity - * - * The maximum supported byte count is [`i64::MAX`]. This makes it somewhat - * inconvenient to define constructors: a u32 constructor can be infallible, but - * an i64 constructor can fail (if the value is negative) and a u64 constructor - * can fail (if the value is larger than i64::MAX). We provide all of these for - * consumers' convenience. - */ -/* - * TODO-cleanup This could benefit from a more complete implementation. - * TODO-correctness RFD 4 requires that this be a multiple of 256 MiB. We'll - * need to write a validator for that. 
- */ -/* - * The maximum byte count of i64::MAX comes from the fact that this is stored in - * the database as an i64. Constraining it here ensures that we can't fail to - * serialize the value. - */ +/// A count of bytes, typically used either for memory or storage capacity +/// +/// The maximum supported byte count is [`i64::MAX`]. This makes it somewhat +/// inconvenient to define constructors: a u32 constructor can be infallible, but +/// an i64 constructor can fail (if the value is negative) and a u64 constructor +/// can fail (if the value is larger than i64::MAX). We provide all of these for +/// consumers' convenience. +// TODO-cleanup This could benefit from a more complete implementation. +// TODO-correctness RFD 4 requires that this be a multiple of 256 MiB. We'll +// need to write a validator for that. +// +// +// The maximum byte count of i64::MAX comes from the fact that this is stored in +// the database as an i64. Constraining it here ensures that we can't fail to +// serialize the value. #[derive(Copy, Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq)] pub struct ByteCount(u64); @@ -433,7 +393,7 @@ impl ByteCount { } } -/* TODO-cleanup This could use the experimental std::num::IntErrorKind. */ +// TODO-cleanup This could use the experimental std::num::IntErrorKind. #[derive(Debug, Eq, thiserror::Error, Ord, PartialEq, PartialOrd)] pub enum ByteCountRangeError { #[error("value is too small for a byte count")] TooSmall, @@ -471,19 +431,15 @@ impl From for ByteCount { impl From for i64 { fn from(b: ByteCount) -> Self { - /* We have already validated that this value is in range. */ + // We have already validated that this value is in range. i64::try_from(b.0).unwrap() } } -/** - * Generation numbers stored in the database, used for optimistic concurrency - * control - */ -/* - * Because generation numbers are stored in the database, we represent them as - * i64. 
- */ +/// Generation numbers stored in the database, used for optimistic concurrency +/// control +// Because generation numbers are stored in the database, we represent them as +// i64. #[derive( Copy, Clone, @@ -504,11 +460,9 @@ impl Generation { } pub fn next(&self) -> Generation { - /* - * It should technically be an operational error if this wraps or even - * exceeds the value allowed by an i64. But it seems unlikely enough to - * happen in practice that we can probably feel safe with this. - */ + // It should technically be an operational error if this wraps or even + // exceeds the value allowed by an i64. But it seems unlikely enough to + // happen in practice that we can probably feel safe with this. let next_gen = self.0 + 1; assert!(next_gen <= u64::try_from(i64::MAX).unwrap()); Generation(next_gen) @@ -523,11 +477,9 @@ impl Display for Generation { impl From<&Generation> for i64 { fn from(g: &Generation) -> Self { - /* We have already validated that the value is within range. */ - /* - * TODO-robustness We need to ensure that we don't deserialize a value - * out of range here. - */ + // We have already validated that the value is within range. + // TODO-robustness We need to ensure that we don't deserialize a value + // out of range here. 
i64::try_from(g.0).unwrap() } } @@ -543,13 +495,9 @@ impl TryFrom for Generation { } } -/* - * General types used to implement API resources - */ +// General types used to implement API resources -/** - * Identifies a type of API resource - */ +/// Identifies a type of API resource #[derive( Clone, Copy, @@ -599,59 +547,45 @@ where .await } -/* - * IDENTITY METADATA - */ +// IDENTITY METADATA -/** - * Identity-related metadata that's included in nearly all public API objects - */ +/// Identity-related metadata that's included in nearly all public API objects #[derive(Clone, Debug, Deserialize, PartialEq, Serialize, JsonSchema)] pub struct IdentityMetadata { - /** unique, immutable, system-controlled identifier for each resource */ + /// unique, immutable, system-controlled identifier for each resource pub id: Uuid, - /** unique, mutable, user-controlled identifier for each resource */ + /// unique, mutable, user-controlled identifier for each resource pub name: Name, - /** human-readable free-form text about a resource */ + /// human-readable free-form text about a resource pub description: String, - /** timestamp when this resource was created */ + /// timestamp when this resource was created pub time_created: DateTime, - /** timestamp when this resource was last modified */ + /// timestamp when this resource was last modified pub time_modified: DateTime, } -/** - * Create-time identity-related parameters - */ +/// Create-time identity-related parameters #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct IdentityMetadataCreateParams { pub name: Name, pub description: String, } -/** - * Updateable identity-related parameters - */ +/// Updateable identity-related parameters #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct IdentityMetadataUpdateParams { pub name: Option, pub description: Option, } -/* - * Specific API resources - */ +// Specific API resources -/* - * INSTANCES - */ +// INSTANCES -/** - * Running state of an 
Instance (primarily: booted or stopped) - * - * This typically reflects whether it's starting, running, stopping, or stopped, - * but also includes states related to the Instance's lifecycle - */ +/// Running state of an Instance (primarily: booted or stopped) +/// +/// This typically reflects whether it's starting, running, stopping, or stopped, +/// but also includes states related to the Instance's lifecycle #[derive( Copy, Clone, @@ -666,7 +600,7 @@ pub struct IdentityMetadataUpdateParams { )] #[serde(rename_all = "snake_case")] pub enum InstanceState { - Creating, /* TODO-polish: paper over Creating in the API with Starting? */ + Creating, // TODO-polish: paper over Creating in the API with Starting? Starting, Running, /// Implied that a transition to "Stopped" is imminent. @@ -691,12 +625,10 @@ impl Display for InstanceState { } } -/* - * TODO-cleanup why is this error type different from the one for Name? The - * reason is probably that Name can be provided by the user, so we want a - * good validation error. InstanceState cannot. Still, is there a way to - * unify these? - */ +// TODO-cleanup why is this error type different from the one for Name? The +// reason is probably that Name can be provided by the user, so we want a +// good validation error. InstanceState cannot. Still, is there a way to +// unify these? impl TryFrom<&str> for InstanceState { type Error = String; @@ -734,11 +666,9 @@ impl InstanceState { } } - /** - * Returns true if the given state represents a fully stopped Instance. - * This means that a transition from an !is_stopped() state must go - * through Stopping. - */ + /// Returns true if the given state represents a fully stopped Instance. + /// This means that a transition from an !is_stopped() state must go + /// through Stopping. 
pub fn is_stopped(&self) -> bool { match self { InstanceState::Starting => false, @@ -756,7 +686,7 @@ impl InstanceState { } } -/** The number of CPUs in an Instance */ +/// The number of CPUs in an Instance #[derive(Copy, Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct InstanceCpuCount(pub u16); @@ -774,9 +704,7 @@ impl From<&InstanceCpuCount> for i64 { } } -/** - * Client view of an [`InstanceRuntimeState`] - */ +/// Client view of an [`InstanceRuntimeState`] #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct InstanceRuntimeState { pub run_state: InstanceState, @@ -794,36 +722,30 @@ impl From } } -/** - * Client view of an [`Instance`] - */ +/// Client view of an [`Instance`] #[derive(ObjectIdentity, Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct Instance { - /* TODO is flattening here the intent in RFD 4? */ + // TODO is flattening here the intent in RFD 4? #[serde(flatten)] pub identity: IdentityMetadata, - /** id for the project containing this Instance */ + /// id for the project containing this Instance pub project_id: Uuid, - /** number of CPUs allocated for this Instance */ + /// number of CPUs allocated for this Instance pub ncpus: InstanceCpuCount, - /** memory allocated for this Instance */ + /// memory allocated for this Instance pub memory: ByteCount, - /** RFC1035-compliant hostname for the Instance. */ - pub hostname: String, /* TODO-cleanup different type? */ + /// RFC1035-compliant hostname for the Instance. + pub hostname: String, // TODO-cleanup different type? 
#[serde(flatten)] pub runtime: InstanceRuntimeState, } -/* - * DISKS - */ +// DISKS -/** - * Client view of an [`Disk`] - */ +/// Client view of an [`Disk`] #[derive(ObjectIdentity, Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct Disk { #[serde(flatten)] @@ -835,9 +757,7 @@ pub struct Disk { pub device_path: String, } -/** - * State of a Disk (primarily: attached or not) - */ +/// State of a Disk (primarily: attached or not) #[derive( Clone, Debug, @@ -851,19 +771,19 @@ pub struct Disk { )] #[serde(tag = "state", content = "instance", rename_all = "snake_case")] pub enum DiskState { - /** Disk is being initialized */ + /// Disk is being initialized Creating, - /** Disk is ready but detached from any Instance */ + /// Disk is ready but detached from any Instance Detached, - /** Disk is being attached to the given Instance */ - Attaching(Uuid), /* attached Instance id */ - /** Disk is attached to the given Instance */ - Attached(Uuid), /* attached Instance id */ - /** Disk is being detached from the given Instance */ - Detaching(Uuid), /* attached Instance id */ - /** Disk has been destroyed */ + /// Disk is being attached to the given Instance + Attaching(Uuid), // attached Instance id + /// Disk is attached to the given Instance + Attached(Uuid), // attached Instance id + /// Disk is being detached from the given Instance + Detaching(Uuid), // attached Instance id + /// Disk has been destroyed Destroyed, - /** Disk is unavailable */ + /// Disk is unavailable Faulted, } @@ -896,9 +816,7 @@ impl TryFrom<(&str, Option)> for DiskState { } impl DiskState { - /** - * Returns the string label for this disk state - */ + /// Returns the string label for this disk state pub fn label(&self) -> &'static str { match self { DiskState::Creating => "creating", @@ -911,18 +829,14 @@ impl DiskState { } } - /** - * Returns whether the Disk is currently attached to, being attached to, or - * being detached from any Instance. 
- */ + /// Returns whether the Disk is currently attached to, being attached to, or + /// being detached from any Instance. pub fn is_attached(&self) -> bool { self.attached_instance_id().is_some() } - /** - * If the Disk is attached to, being attached to, or being detached from an - * Instance, returns the id for that Instance. Otherwise returns `None`. - */ + /// If the Disk is attached to, being attached to, or being detached from an + /// Instance, returns the id for that Instance. Otherwise returns `None`. pub fn attached_instance_id(&self) -> Option<&Uuid> { match self { DiskState::Attaching(id) => Some(id), @@ -937,35 +851,31 @@ impl DiskState { } } -/* - * Sagas - * - * These are currently only intended for observability by developers. We will - * eventually want to flesh this out into something more observable for end - * users. - */ +// Sagas +// +// These are currently only intended for observability by developers. We will +// eventually want to flesh this out into something more observable for end +// users. #[derive(ObjectIdentity, Clone, Debug, Serialize, JsonSchema)] pub struct Saga { pub id: Uuid, pub state: SagaState, - /* - * TODO-cleanup This object contains a fake `IdentityMetadata`. Why? We - * want to paginate these objects. http_pagination.rs provides a bunch of - * useful facilities -- notably `PaginatedById`. `PaginatedById` - * requires being able to take an arbitrary object in the result set and get - * its id. To do that, it uses the `ObjectIdentity` trait, which expects - * to be able to return an `IdentityMetadata` reference from an object. - * Finally, the pagination facilities just pull the `id` out of that. - * - * In this case (as well as others, like sleds and racks), we have ids, and - * we want to be able to paginate by id, but we don't have full identity - * metadata. (Or we do, but it's similarly faked up.) What we should - * probably do is create a new trait, say `ObjectId`, that returns _just_ - * an id. 
We can provide a blanket impl for anything that impls - * IdentityMetadata. We can define one-off impls for structs like this - * one. Then the id-only pagination interfaces can require just - * `ObjectId`. - */ + // TODO-cleanup This object contains a fake `IdentityMetadata`. Why? We + // want to paginate these objects. http_pagination.rs provides a bunch of + // useful facilities -- notably `PaginatedById`. `PaginatedById` + // requires being able to take an arbitrary object in the result set and get + // its id. To do that, it uses the `ObjectIdentity` trait, which expects + // to be able to return an `IdentityMetadata` reference from an object. + // Finally, the pagination facilities just pull the `id` out of that. + // + // In this case (as well as others, like sleds and racks), we have ids, and + // we want to be able to paginate by id, but we don't have full identity + // metadata. (Or we do, but it's similarly faked up.) What we should + // probably do is create a new trait, say `ObjectId`, that returns _just_ + // an id. We can provide a blanket impl for anything that impls + // IdentityMetadata. We can define one-off impls for structs like this + // one. Then the id-only pagination interfaces can require just + // `ObjectId`. #[serde(skip)] pub identity: IdentityMetadata, } @@ -976,7 +886,7 @@ impl From for Saga { id: Uuid::from(s.id), state: SagaState::from(s.state), identity: IdentityMetadata { - /* TODO-cleanup See the note in Saga above. */ + // TODO-cleanup See the note in Saga above. 
id: Uuid::from(s.id), name: format!("saga-{}", s.id).parse().unwrap(), description: format!("saga {}", s.id), @@ -1413,9 +1323,7 @@ pub struct VpcFirewallRule { pub vpc_id: Uuid, } -/** - * Collection of a [`Vpc`]'s firewall rules - */ +/// Collection of a [`Vpc`]'s firewall rules #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct VpcFirewallRules { pub rules: Vec, @@ -1442,11 +1350,9 @@ pub struct VpcFirewallRuleUpdate { pub priority: VpcFirewallRulePriority, } -/** - * Updateable properties of a `Vpc`'s firewall - * Note that VpcFirewallRules are implicitly created along with a Vpc, - * so there is no explicit creation. - */ +/// Updateable properties of a `Vpc`'s firewall +/// Note that VpcFirewallRules are implicitly created along with a Vpc, +/// so there is no explicit creation. #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct VpcFirewallRuleUpdateParams { pub rules: Vec, @@ -1540,7 +1446,7 @@ pub enum VpcFirewallRuleTarget { /// The rule applies to a specific IP subnet IpNet(IpNet), // Tags not yet implemented - //Tag(Name), + // Tag(Name), } /// The `VpcFirewallRuleHostFilter` is used to filter traffic on the basis of @@ -1811,9 +1717,7 @@ mod test { #[test] fn test_name_parse() { - /* - * Error cases - */ + // Error cases let long_name = "a234567890123456789012345678901234567890123456789012345678901234"; assert_eq!(long_name.len(), 64); @@ -1845,9 +1749,7 @@ mod test { assert_eq!(input.parse::().unwrap_err(), expected_message); } - /* - * Success cases - */ + // Success cases let valid_names: Vec<&str> = vec!["abc", "abc-123", "a123", &long_name[0..63]]; @@ -1953,7 +1855,7 @@ mod test { #[test] fn test_bytecount() { - /* Smallest supported value: all constructors */ + // Smallest supported value: all constructors let zero = ByteCount::from(0u32); assert_eq!(0, zero.to_bytes()); assert_eq!(0, zero.to_whole_kibibytes()); @@ -1965,7 +1867,7 @@ mod test { let zero = ByteCount::try_from(0u64).unwrap(); assert_eq!(0, 
zero.to_bytes()); - /* Largest supported value: both constructors that support it. */ + // Largest supported value: both constructors that support it. let max = ByteCount::try_from(i64::MAX).unwrap(); assert_eq!(i64::MAX, max.to_bytes() as i64); assert_eq!(i64::MAX, i64::from(max)); @@ -1979,22 +1881,20 @@ mod test { max.to_whole_tebibytes() ); - /* Value too large (only one constructor can hit this) */ + // Value too large (only one constructor can hit this) let bogus = ByteCount::try_from(maxu64 + 1).unwrap_err(); assert_eq!(bogus.to_string(), "value is too large for a byte count"); - /* Value too small (only one constructor can hit this) */ + // Value too small (only one constructor can hit this) let bogus = ByteCount::try_from(-1i64).unwrap_err(); assert_eq!(bogus.to_string(), "value is too small for a byte count"); - /* For good measure, let's check i64::MIN */ + // For good measure, let's check i64::MIN let bogus = ByteCount::try_from(i64::MIN).unwrap_err(); assert_eq!(bogus.to_string(), "value is too small for a byte count"); - /* - * We've now exhaustively tested both sides of all boundary conditions - * for all three constructors (to the extent that that's possible). - * Check non-trivial cases for the various accessor functions. This - * means picking values in the middle of the range. - */ + // We've now exhaustively tested both sides of all boundary conditions + // for all three constructors (to the extent that that's possible). + // Check non-trivial cases for the various accessor functions. This + // means picking values in the middle of the range. 
let three_terabytes = 3_000_000_000_000u64; let tb3 = ByteCount::try_from(three_terabytes).unwrap(); assert_eq!(three_terabytes, tb3.to_bytes()); diff --git a/common/src/api/internal/nexus.rs b/common/src/api/internal/nexus.rs index 1052bca088..efba6245a8 100644 --- a/common/src/api/internal/nexus.rs +++ b/common/src/api/internal/nexus.rs @@ -71,9 +71,7 @@ pub struct ProducerEndpoint { } impl ProducerEndpoint { - /** - * Return the route that can be used to request metric data. - */ + /// Return the route that can be used to request metric data. pub fn collection_route(&self) -> String { format!("{}/{}", &self.base_route, &self.id) } diff --git a/common/src/backoff.rs b/common/src/backoff.rs index 57cb1ddb69..128bf932d0 100644 --- a/common/src/backoff.rs +++ b/common/src/backoff.rs @@ -2,9 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! - * Module providing utilities for retrying operations with exponential backoff. - */ +//! Module providing utilities for retrying operations with exponential backoff. use std::time::Duration; @@ -12,10 +10,8 @@ pub use ::backoff::future::{retry, retry_notify}; pub use ::backoff::Error as BackoffError; pub use ::backoff::{backoff::Backoff, ExponentialBackoff, Notify}; -/** - * Return a backoff policy appropriate for retrying internal services - * indefinitely. - */ +/// Return a backoff policy appropriate for retrying internal services +/// indefinitely. pub fn internal_service_policy() -> ::backoff::ExponentialBackoff { const INITIAL_INTERVAL: Duration = Duration::from_millis(250); const MAX_INTERVAL: Duration = Duration::from_secs(60 * 60); diff --git a/common/src/cmd.rs b/common/src/cmd.rs index 031739e14c..d92ebe4c98 100644 --- a/common/src/cmd.rs +++ b/common/src/cmd.rs @@ -2,25 +2,21 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! 
- * Facilities used by the command-line tools - */ +//! Facilities used by the command-line tools use std::env::current_exe; use std::process::exit; -/** represents a fatal error in a command-line program */ +/// represents a fatal error in a command-line program #[derive(Debug)] pub enum CmdError { - /** incorrect command-line arguments */ + /// incorrect command-line arguments Usage(String), - /** all other errors */ + /// all other errors Failure(String), } -/** - * Exits the current process on a fatal error. - */ +/// Exits the current process on a fatal error. pub fn fatal(cmd_error: CmdError) -> ! { let arg0_result = current_exe().ok(); let arg0 = arg0_result diff --git a/common/src/config.rs b/common/src/config.rs index 07f0466beb..2509ae4fca 100644 --- a/common/src/config.rs +++ b/common/src/config.rs @@ -2,32 +2,26 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! - * Common objects used for configuration - */ +//! Common objects used for configuration use std::fmt; use std::ops::Deref; use std::str::FromStr; -/** - * Describes a URL for connecting to a PostgreSQL server - */ -/* - * The config pattern that we're using requires that types in the config impl - * Serialize. If tokio_postgres::config::Config impl'd Serialize or even - * Display, we'd just use that directly instead of this type. But it doesn't. - * We could implement a serialize function ourselves, but URLs support many - * different properties, and this could be brittle and easy to get wrong. - * Instead, this type just wraps tokio_postgres::config::Config and keeps the - * original String around. (The downside is that a consumer _generating_ a - * nexus::db::Config needs to generate a URL that matches the - * tokio_postgres::config::Config that they construct here, but this is not - * currently an important use case.) 
- * - * To ensure that the URL and config are kept in sync, we currently only support - * constructing one of these via `FromStr` and the fields are not public. - */ +/// Describes a URL for connecting to a PostgreSQL server +// The config pattern that we're using requires that types in the config impl +// Serialize. If tokio_postgres::config::Config impl'd Serialize or even +// Display, we'd just use that directly instead of this type. But it doesn't. +// We could implement a serialize function ourselves, but URLs support many +// different properties, and this could be brittle and easy to get wrong. +// Instead, this type just wraps tokio_postgres::config::Config and keeps the +// original String around. (The downside is that a consumer _generating_ a +// nexus::db::Config needs to generate a URL that matches the +// tokio_postgres::config::Config that they construct here, but this is not +// currently an important use case.) +// +// To ensure that the URL and config are kept in sync, we currently only support +// constructing one of these via `FromStr` and the fields are not public. #[derive(Clone, Debug, PartialEq)] pub struct PostgresConfigWithUrl { url_raw: String, @@ -68,11 +62,9 @@ mod test { #[test] fn test_bad_url() { - /* - * There is surprisingly little that we can rely on the - * tokio_postgres::config::Config parser to include in the error - * message. - */ + // There is surprisingly little that we can rely on the + // tokio_postgres::config::Config parser to include in the error + // message. let error = "foo".parse::().unwrap_err(); assert!(error.to_string().contains("unexpected EOF")); "http://127.0.0.1:1234".parse::().unwrap_err(); diff --git a/common/src/lib.rs b/common/src/lib.rs index 0b11cd8690..7e8b09173e 100644 --- a/common/src/lib.rs +++ b/common/src/lib.rs @@ -2,26 +2,22 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! 
- * # Oxide Control Plane - * - * The overall architecture for the Oxide Control Plane is described in [RFD - * 61](https://61.rfd.oxide.computer/). This crate implements common facilities - * used in the control plane. Other top-level crates implement pieces of the - * control plane (e.g., `omicron_nexus`). - * - * The best documentation for the control plane is RFD 61 and the rustdoc in - * this crate. Since this crate doesn't provide externally-consumable - * interfaces, the rustdoc (generated with `--document-private-items`) is - * intended primarily for engineers working on this crate. - */ +//! # Oxide Control Plane +//! +//! The overall architecture for the Oxide Control Plane is described in [RFD +//! 61](https://61.rfd.oxide.computer/). This crate implements common facilities +//! used in the control plane. Other top-level crates implement pieces of the +//! control plane (e.g., `omicron_nexus`). +//! +//! The best documentation for the control plane is RFD 61 and the rustdoc in +//! this crate. Since this crate doesn't provide externally-consumable +//! interfaces, the rustdoc (generated with `--document-private-items`) is +//! intended primarily for engineers working on this crate. -/* - * We only use rustdoc for internal documentation, including private items, so - * it's expected that we'll have links to private items in the docs. - */ +// We only use rustdoc for internal documentation, including private items, so +// it's expected that we'll have links to private items in the docs. #![allow(rustdoc::private_intra_doc_links)] -/* TODO(#32): Remove this exception once resolved. */ +// TODO(#32): Remove this exception once resolved. #![allow(clippy::field_reassign_with_default)] pub mod api; diff --git a/gateway/src/bin/gateway.rs b/gateway/src/bin/gateway.rs index f17b40f4f8..cfecceea8c 100644 --- a/gateway/src/bin/gateway.rs +++ b/gateway/src/bin/gateway.rs @@ -2,9 +2,7 @@ // License, v. 2.0. 
If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -//! //! Executable program to run gateway, the management gateway service -//! use omicron_common::cmd::{fatal, CmdError}; use omicron_gateway::{run_openapi, run_server, Config}; diff --git a/gateway/src/config.rs b/gateway/src/config.rs index 3c7f34b97b..c53ea16292 100644 --- a/gateway/src/config.rs +++ b/gateway/src/config.rs @@ -2,10 +2,8 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -//! //! Interfaces for parsing configuration files and working with a gateway server //! configuration -//! use dropshot::{ConfigDropshot, ConfigLogging}; use serde::{Deserialize, Serialize}; diff --git a/gateway/src/lib.rs b/gateway/src/lib.rs index b47aa34824..d805d25a52 100644 --- a/gateway/src/lib.rs +++ b/gateway/src/lib.rs @@ -71,15 +71,13 @@ impl Server { } // TODO does MGS register itself with oximeter? - /* - /// Register the Nexus server as a metric producer with `oximeter. - pub async fn register_as_producer(&self) { - self.apictx - .nexus - .register_as_producer(self.http_server_internal.local_addr()) - .await; - } - */ + // Register the Nexus server as a metric producer with `oximeter. + // pub async fn register_as_producer(&self) { + // self.apictx + // .nexus + // .register_as_producer(self.http_server_internal.local_addr()) + // .await; + // } } /// Run an instance of the [Server]. 
@@ -101,6 +99,6 @@ pub async fn run_server(config: &Config) -> Result<(), String> { } let rack_id = Uuid::new_v4(); let server = Server::start(config, &rack_id, &log).await?; - //server.register_as_producer().await; + // server.register_as_producer().await; server.wait_for_finish().await } diff --git a/gateway/tests/test_commands.rs b/gateway/tests/test_commands.rs index e439167d7c..e1107d5e7c 100644 --- a/gateway/tests/test_commands.rs +++ b/gateway/tests/test_commands.rs @@ -30,16 +30,12 @@ fn test_gateway_openapi_sled() { let spec: OpenAPI = serde_json::from_str(&stdout_text) .expect("stdout was not valid OpenAPI"); - /* - * Check for lint errors. - */ + // Check for lint errors. let errors = openapi_lint::validate(&spec); assert!(errors.is_empty(), "{}", errors.join("\n\n")); - /* - * Confirm that the output hasn't changed. It's expected that we'll change - * this file as the API evolves, but pay attention to the diffs to ensure - * that the changes match your expectations. - */ + // Confirm that the output hasn't changed. It's expected that we'll change + // this file as the API evolves, but pay attention to the diffs to ensure + // that the changes match your expectations. assert_contents("../openapi/gateway.json", &stdout_text); } diff --git a/nexus-client/src/lib.rs b/nexus-client/src/lib.rs index 35ec58288c..b4e25c5adc 100644 --- a/nexus-client/src/lib.rs +++ b/nexus-client/src/lib.rs @@ -2,10 +2,8 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! - * Interface for making API requests to the Oxide control plane at large - * from within the control plane - */ +//! Interface for making API requests to the Oxide control plane at large +//! 
from within the control plane use omicron_common::generate_logging_api; diff --git a/nexus/src/authn/mod.rs b/nexus/src/authn/mod.rs index c6c9df3661..c5eb280a56 100644 --- a/nexus/src/authn/mod.rs +++ b/nexus/src/authn/mod.rs @@ -201,7 +201,6 @@ NewtypeDisplay! { () pub struct SchemeName(&'static str); } /// type. This will generally have a lot less information to avoid leaking /// information to attackers, but it's still useful to distinguish between /// 400 and 401/403, for example. -/// #[derive(Debug, thiserror::Error)] #[error("authentication failed (tried schemes: {schemes_tried:?})")] pub struct Error { diff --git a/nexus/src/authz/api_resources.rs b/nexus/src/authz/api_resources.rs index 6734fa115e..1d6aaf7eba 100644 --- a/nexus/src/authz/api_resources.rs +++ b/nexus/src/authz/api_resources.rs @@ -232,7 +232,7 @@ impl oso::PolarClass for FleetChild { oso::Class::builder() .add_method( "has_role", - /* Roles are not supported on FleetChilds today. */ + // Roles are not supported on FleetChilds today. |_: &FleetChild, _: AuthenticatedActor, _: String| false, ) .add_attribute_getter("fleet", |_: &FleetChild| FLEET) diff --git a/nexus/src/authz/context.rs b/nexus/src/authz/context.rs index 8a9d66af46..3de7b74bac 100644 --- a/nexus/src/authz/context.rs +++ b/nexus/src/authz/context.rs @@ -153,15 +153,13 @@ pub trait AuthorizedResource: oso::ToPolar + Send + Sync + 'static { #[cfg(test)] mod test { - /* - * These are essentially unit tests for the policy itself. - * TODO-coverage This is just a start. But we need roles to do a more - * comprehensive test. - * TODO If this gets any more complicated, we could consider automatically - * generating the test cases. We could precreate a bunch of resources and - * some users with different roles. Then we could run through a table that - * says exactly which users should be able to do what to each resource. - */ + // These are essentially unit tests for the policy itself. + // TODO-coverage This is just a start. 
But we need roles to do a more + // comprehensive test. + // TODO If this gets any more complicated, we could consider automatically + // generating the test cases. We could precreate a bunch of resources and + // some users with different roles. Then we could run through a table that + // says exactly which users should be able to do what to each resource. use crate::authn; use crate::authz::Action; use crate::authz::Authz; diff --git a/nexus/src/authz/mod.rs b/nexus/src/authz/mod.rs index d7a4d704bb..3f2774caa9 100644 --- a/nexus/src/authz/mod.rs +++ b/nexus/src/authz/mod.rs @@ -158,7 +158,6 @@ //! in [`roles`] about how this is set up.) If Oso finds a role granting this //! permission that's associated with this actor and resource, the action is //! allowed. Otherwise, it's not. -//! mod actor; diff --git a/nexus/src/bin/nexus.rs b/nexus/src/bin/nexus.rs index 063bf45d60..161d88f7b9 100644 --- a/nexus/src/bin/nexus.rs +++ b/nexus/src/bin/nexus.rs @@ -2,20 +2,16 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! - * Executable program to run Nexus, the heart of the control plane - */ +//! Executable program to run Nexus, the heart of the control plane -/* - * TODO - * - TCP and HTTP KeepAlive parameters - * - Server hostname - * - Disable signals? - * - Analogs for actix client_timeout (request timeout), client_shutdown (client - * shutdown timeout), server backlog, number of workers, max connections per - * worker, max connect-in-progress sockets, shutdown_timeout (server shutdown - * timeout) - */ +// TODO +// - TCP and HTTP KeepAlive parameters +// - Server hostname +// - Disable signals? 
+// - Analogs for actix client_timeout (request timeout), client_shutdown (client +// shutdown timeout), server backlog, number of workers, max connections per +// worker, max connect-in-progress sockets, shutdown_timeout (server shutdown +// timeout) use omicron_common::cmd::fatal; use omicron_common::cmd::CmdError; diff --git a/nexus/src/config.rs b/nexus/src/config.rs index b775b4122d..6450128824 100644 --- a/nexus/src/config.rs +++ b/nexus/src/config.rs @@ -2,10 +2,8 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! - * Interfaces for parsing configuration files and working with a nexus server - * configuration - */ +//! Interfaces for parsing configuration files and working with a nexus server +//! configuration use crate::db; use anyhow::anyhow; @@ -19,64 +17,58 @@ use std::fmt; use std::net::SocketAddr; use std::path::{Path, PathBuf}; -/* - * By design, we require that all config properties be specified (i.e., we don't - * use `serde(default)`). - */ +// By design, we require that all config properties be specified (i.e., we don't +// use `serde(default)`). 
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] pub struct AuthnConfig { - /** allowed authentication schemes for external HTTP server */ + /// allowed authentication schemes for external HTTP server pub schemes_external: Vec, } #[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] pub struct ConsoleConfig { pub static_dir: PathBuf, - /** how long the browser can cache static assets */ + /// how long the browser can cache static assets pub cache_control_max_age_minutes: u32, - /** how long a session can be idle before expiring */ + /// how long a session can be idle before expiring pub session_idle_timeout_minutes: u32, - /** how long a session can exist before expiring */ + /// how long a session can exist before expiring pub session_absolute_timeout_minutes: u32, } #[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] pub struct UpdatesConfig { - /** Trusted root.json role for the TUF updates repository. */ + /// Trusted root.json role for the TUF updates repository. pub trusted_root: PathBuf, - /** Default base URL for the TUF repository. */ + /// Default base URL for the TUF repository. pub default_base_url: String, } -/** - * Configuration for the timeseries database. - */ +/// Configuration for the timeseries database. 
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] pub struct TimeseriesDbConfig { pub address: SocketAddr, } -/** - * Configuration for a nexus server - */ +/// Configuration for a nexus server #[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] pub struct Config { - /** Dropshot configuration for external API server */ + /// Dropshot configuration for external API server pub dropshot_external: ConfigDropshot, - /** Dropshot configuration for internal API server */ + /// Dropshot configuration for internal API server pub dropshot_internal: ConfigDropshot, - /** Identifier for this instance of Nexus */ + /// Identifier for this instance of Nexus pub id: uuid::Uuid, - /** Console-related tunables */ + /// Console-related tunables pub console: ConsoleConfig, - /** Server-wide logging configuration. */ + /// Server-wide logging configuration. pub log: ConfigLogging, - /** Database parameters */ + /// Database parameters pub database: db::Config, - /** Authentication-related configuration */ + /// Authentication-related configuration pub authn: AuthnConfig, - /** Timeseries database configuration. */ + /// Timeseries database configuration. pub timeseries_db: TimeseriesDbConfig, /// Updates-related configuration. Updates APIs return 400 Bad Request when this is /// unconfigured. @@ -167,12 +159,10 @@ impl std::fmt::Display for SchemeName { } impl Config { - /** - * Load a `Config` from the given TOML file - * - * This config object can then be used to create a new `Nexus`. - * The format is described in the README. - */ + /// Load a `Config` from the given TOML file + /// + /// This config object can then be used to create a new `Nexus`. + /// The format is described in the README. pub fn from_file>(path: P) -> Result { let path = path.as_ref(); let file_contents = std::fs::read_to_string(path) @@ -200,9 +190,7 @@ mod test { use std::path::Path; use std::path::PathBuf; - /** - * Generates a temporary filesystem path unique for the given label. 
- */ + /// Generates a temporary filesystem path unique for the given label. fn temp_path(label: &str) -> PathBuf { let arg0str = std::env::args().next().expect("expected process arg0"); let arg0 = Path::new(&arg0str) @@ -216,13 +204,11 @@ mod test { pathbuf } - /** - * Load a Config with the given string `contents`. To exercise - * the full path, this function writes the contents to a file first, then - * loads the config from that file, then removes the file. `label` is used - * as a unique string for the filename and error messages. It should be - * unique for each test. - */ + /// Load a Config with the given string `contents`. To exercise + /// the full path, this function writes the contents to a file first, then + /// loads the config from that file, then removes the file. `label` is used + /// as a unique string for the filename and error messages. It should be + /// unique for each test. fn read_config(label: &str, contents: &str) -> Result { let pathbuf = temp_path(label); let path = pathbuf.as_path(); @@ -235,9 +221,7 @@ mod test { result } - /* - * Totally bogus config files (nonexistent, bad TOML syntax) - */ + // Totally bogus config files (nonexistent, bad TOML syntax) #[test] fn test_config_nonexistent() { @@ -265,10 +249,8 @@ mod test { } } - /* - * Empty config (special case of a missing required field, but worth calling - * out explicitly) - */ + // Empty config (special case of a missing required field, but worth calling + // out explicitly) #[test] fn test_config_empty() { @@ -284,12 +266,10 @@ mod test { } } - /* - * Success case. We don't need to retest semantics for either ConfigLogging - * or ConfigDropshot because those are both tested within Dropshot. If we - * add new configuration sections of our own, we will want to test those - * here (both syntax and semantics). - */ + // Success case. We don't need to retest semantics for either ConfigLogging + // or ConfigDropshot because those are both tested within Dropshot. 
If we + // add new configuration sections of our own, we will want to test those + // here (both syntax and semantics). #[test] fn test_valid() { let config = read_config( diff --git a/nexus/src/context.rs b/nexus/src/context.rs index 25fc6201ec..f09e415648 100644 --- a/nexus/src/context.rs +++ b/nexus/src/context.rs @@ -2,9 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! - * Shared state used by API request handlers - */ +//! Shared state used by API request handlers use super::authn; use super::authz; use super::config; @@ -34,46 +32,42 @@ use std::time::Instant; use std::time::SystemTime; use uuid::Uuid; -/** - * Shared state available to all API request handlers - */ +/// Shared state available to all API request handlers pub struct ServerContext { - /** reference to the underlying nexus */ + /// reference to the underlying nexus pub nexus: Arc, - /** debug log */ + /// debug log pub log: Logger, - /** authenticator for external HTTP requests */ + /// authenticator for external HTTP requests pub external_authn: authn::external::Authenticator>, - /** authentication context used for internal HTTP requests */ + /// authentication context used for internal HTTP requests pub internal_authn: Arc, - /** authorizer */ + /// authorizer pub authz: Arc, - /** internal API request latency tracker */ + /// internal API request latency tracker pub internal_latencies: LatencyTracker, - /** external API request latency tracker */ + /// external API request latency tracker pub external_latencies: LatencyTracker, - /** registry of metric producers */ + /// registry of metric producers pub producer_registry: ProducerRegistry, - /** tunable settings needed for the console at runtime */ + /// tunable settings needed for the console at runtime pub console_config: ConsoleConfig, } pub struct ConsoleConfig { - /** how long a session can be idle before expiring */ + /// how long a session 
can be idle before expiring pub session_idle_timeout: Duration, - /** how long a session can exist before expiring */ + /// how long a session can exist before expiring pub session_absolute_timeout: Duration, - /** how long browsers can cache static assets */ + /// how long browsers can cache static assets pub cache_control_max_age: Duration, - /** directory containing static file to serve */ + /// directory containing static file to serve pub static_dir: Option, } impl ServerContext { - /** - * Create a new context with the given rack id and log. This creates the - * underlying nexus as well. - */ + /// Create a new context with the given rack id and log. This creates the + /// underlying nexus as well. pub fn new( rack_id: Uuid, log: Logger, @@ -412,12 +406,10 @@ impl OpContext { where Resource: AuthorizedResource + Debug + Clone, { - /* - * TODO-cleanup In an ideal world, Oso would consume &Action and - * &Resource. Instead, it consumes owned types. As a result, they're - * not available to us (even for logging) after we make the authorize() - * call. We work around this by cloning. - */ + // TODO-cleanup In an ideal world, Oso would consume &Action and + // &Resource. Instead, it consumes owned types. As a result, they're + // not available to us (even for logging) after we make the authorize() + // call. We work around this by cloning. trace!(self.log, "authorize begin"; "actor" => ?self.authn.actor(), "action" => ?action, diff --git a/nexus/src/db/config.rs b/nexus/src/db/config.rs index 1d13fac40e..b4066ce3cb 100644 --- a/nexus/src/db/config.rs +++ b/nexus/src/db/config.rs @@ -2,9 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! - * Nexus database configuration - */ +//! 
Nexus database configuration use omicron_common::config::PostgresConfigWithUrl; use serde::Deserialize; @@ -12,13 +10,11 @@ use serde::Serialize; use serde_with::serde_as; use serde_with::DisplayFromStr; -/** - * Nexus database configuration - */ +/// Nexus database configuration #[serde_as] #[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] pub struct Config { - /** database url */ + /// database url #[serde_as(as = "DisplayFromStr")] pub url: PostgresConfigWithUrl, } diff --git a/nexus/src/db/datastore.rs b/nexus/src/db/datastore.rs index 3b1e8c4c52..e7d05d2b0a 100644 --- a/nexus/src/db/datastore.rs +++ b/nexus/src/db/datastore.rs @@ -2,25 +2,21 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! - * Primary control plane interface for database read and write operations - */ - -/* - * TODO-scalability review all queries for use of indexes (may need - * "time_deleted IS NOT NULL" conditions) Figure out how to automate this. - * - * TODO-design Better support for joins? - * The interfaces here often require that to do anything with an object, a - * caller must first look up the id and then do operations with the id. For - * example, the caller of project_list_disks() always looks up the project to - * get the project_id, then lists disks having that project_id. It's possible - * to implement this instead with a JOIN in the database so that we do it with - * one database round-trip. We could use CTEs similar to what we do with - * conditional updates to distinguish the case where the project didn't exist - * vs. there were no disks in it. This seems likely to be a fair bit more - * complicated to do safely and generally compared to what we have now. - */ +//! 
Primary control plane interface for database read and write operations + +// TODO-scalability review all queries for use of indexes (may need +// "time_deleted IS NOT NULL" conditions) Figure out how to automate this. +// +// TODO-design Better support for joins? +// The interfaces here often require that to do anything with an object, a +// caller must first look up the id and then do operations with the id. For +// example, the caller of project_list_disks() always looks up the project to +// get the project_id, then lists disks having that project_id. It's possible +// to implement this instead with a JOIN in the database so that we do it with +// one database round-trip. We could use CTEs similar to what we do with +// conditional updates to distinguish the case where the project didn't exist +// vs. there were no disks in it. This seems likely to be a fair bit more +// complicated to do safely and generally compared to what we have now. use super::collection_insert::{ AsyncInsertError, DatastoreCollection, SyncInsertError, @@ -929,11 +925,9 @@ impl DataStore { } /// Delete a project - /* - * TODO-correctness This needs to check whether there are any resources that - * depend on the Project (Disks, Instances). We can do this with a - * generation counter that gets bumped when these resources are created. - */ + // TODO-correctness This needs to check whether there are any resources that + // depend on the Project (Disks, Instances). We can do this with a + // generation counter that gets bumped when these resources are created. 
pub async fn project_delete( &self, opctx: &OpContext, @@ -1023,9 +1017,7 @@ impl DataStore { }) } - /* - * Instances - */ + // Instances /// Fetches an Instance from the database and returns both the database row /// and an [`authz::Instance`] for doing authz checks @@ -1148,12 +1140,10 @@ impl DataStore { /// In addition to the usual database errors (e.g., no connections /// available), this function can fail if there is already a different /// instance (having a different id) with the same name in the same project. - /* - * TODO-design Given that this is really oriented towards the saga - * interface, one wonders if it's even worth having an abstraction here, or - * if sagas shouldn't directly work with the database here (i.e., just do - * what this function does under the hood). - */ + // TODO-design Given that this is really oriented towards the saga + // interface, one wonders if it's even worth having an abstraction here, or + // if sagas shouldn't directly work with the database here (i.e., just do + // what this function does under the hood). pub async fn project_create_instance( &self, instance: Instance, @@ -1238,15 +1228,13 @@ impl DataStore { }) } - /* - * TODO-design It's tempting to return the updated state of the Instance - * here because it's convenient for consumers and by using a RETURNING - * clause, we could ensure that the "update" and "fetch" are atomic. - * But in the unusual case that we _don't_ update the row because our - * update is older than the one in the database, we would have to fetch - * the current state explicitly. For now, we'll just require consumers - * to explicitly fetch the state if they want that. - */ + // TODO-design It's tempting to return the updated state of the Instance + // here because it's convenient for consumers and by using a RETURNING + // clause, we could ensure that the "update" and "fetch" are atomic. 
+ // But in the unusual case that we _don't_ update the row because our + // update is older than the one in the database, we would have to fetch + // the current state explicitly. For now, we'll just require consumers + // to explicitly fetch the state if they want that. pub async fn instance_update_runtime( &self, instance_id: &Uuid, @@ -1291,15 +1279,13 @@ impl DataStore { ) -> DeleteResult { opctx.authorize(authz::Action::Delete, authz_instance).await?; - /* - * This is subject to change, but for now we're going to say that an - * instance must be "stopped" or "failed" in order to delete it. The - * delete operation sets "time_deleted" (just like with other objects) - * and also sets the state to "destroyed". By virtue of being - * "stopped", we assume there are no dependencies on this instance - * (e.g., disk attachments). If that changes, we'll want to check for - * such dependencies here. - */ + // This is subject to change, but for now we're going to say that an + // instance must be "stopped" or "failed" in order to delete it. The + // delete operation sets "time_deleted" (just like with other objects) + // and also sets the state to "destroyed". By virtue of being + // "stopped", we assume there are no dependencies on this instance + // (e.g., disk attachments). If that changes, we'll want to check for + // such dependencies here. use api::external::InstanceState as ApiInstanceState; use db::model::InstanceState as DbInstanceState; use db::schema::instance::dsl; @@ -1338,9 +1324,7 @@ impl DataStore { } } - /* - * Disks - */ + // Disks /// Fetches a Disk from the database and returns both the database row /// and an [`authz::Disk`] for doing authz checks @@ -1450,9 +1434,7 @@ impl DataStore { Ok((authz_disk, db_disk)) } - /** - * List disks associated with a given instance. - */ + /// List disks associated with a given instance. 
pub async fn instance_list_disks( &self, opctx: &OpContext, @@ -1693,9 +1675,7 @@ impl DataStore { } } - /* - * Network interfaces - */ + // Network interfaces pub async fn instance_create_network_interface( &self, interface: IncompleteNetworkInterface, diff --git a/nexus/src/db/fixed_data/mod.rs b/nexus/src/db/fixed_data/mod.rs index 0a81e1a800..871a2f2f42 100644 --- a/nexus/src/db/fixed_data/mod.rs +++ b/nexus/src/db/fixed_data/mod.rs @@ -4,7 +4,6 @@ //! Fixed (hardcoded) data that gets inserted into the database programmatically //! either when the rack is set up or when Nexus starts up. -// // Here's a proposed convention for choosing uuids that we hardcode into // Omicron. // diff --git a/nexus/src/db/mod.rs b/nexus/src/db/mod.rs index 3ccf2846d4..341c023c9c 100644 --- a/nexus/src/db/mod.rs +++ b/nexus/src/db/mod.rs @@ -2,9 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! - * Facilities for working with the Omicron database - */ +//! Facilities for working with the Omicron database // This is not intended to be public, but this is necessary to use it from // doctests diff --git a/nexus/src/db/model.rs b/nexus/src/db/model.rs index 5b5352c7f4..ddacbd3063 100644 --- a/nexus/src/db/model.rs +++ b/nexus/src/db/model.rs @@ -486,9 +486,7 @@ where pub struct MacAddr(pub external::MacAddr); impl MacAddr { - /** - * Generate a unique MAC address for an interface - */ + /// Generate a unique MAC address for an interface pub fn new() -> Result { use rand::Fill; // Use the Oxide OUI A8 40 25 diff --git a/nexus/src/db/pool.rs b/nexus/src/db/pool.rs index eea50d6304..b5ce26d36f 100644 --- a/nexus/src/db/pool.rs +++ b/nexus/src/db/pool.rs @@ -2,31 +2,27 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! 
- * Database connection pooling - */ -/* - * This whole thing is a placeholder for prototyping. - * - * TODO-robustness TODO-resilience We will want to carefully think about the - * connection pool that we use and its parameters. It's not clear from the - * survey so far whether an existing module is suitable for our purposes. See - * the Cueball Internals document for details on the sorts of behaviors we'd - * like here. Even if by luck we stick with bb8, we definitely want to think - * through the various parameters. - * - * Notes about bb8's behavior: - * * When the database is completely offline, and somebody wants a connection, - * it still waits for the connection timeout before giving up. That seems - * like not what we want. (To be clear, this is a failure mode where we know - * the database is offline, not one where it's partitioned and we can't tell.) - * * Although the `build_unchecked()` builder allows the pool to start up with - * no connections established (good), it also _seems_ to not establish any - * connections even when it could, resulting in a latency bubble for the first - * operation after startup. That's not what we're looking for. - * - * TODO-design Need TLS support (the types below hardcode NoTls). - */ +//! Database connection pooling +// This whole thing is a placeholder for prototyping. +// +// TODO-robustness TODO-resilience We will want to carefully think about the +// connection pool that we use and its parameters. It's not clear from the +// survey so far whether an existing module is suitable for our purposes. See +// the Cueball Internals document for details on the sorts of behaviors we'd +// like here. Even if by luck we stick with bb8, we definitely want to think +// through the various parameters. +// +// Notes about bb8's behavior: +// * When the database is completely offline, and somebody wants a connection, +// it still waits for the connection timeout before giving up. That seems +// like not what we want. 
(To be clear, this is a failure mode where we know +// the database is offline, not one where it's partitioned and we can't tell.) +// * Although the `build_unchecked()` builder allows the pool to start up with +// no connections established (good), it also _seems_ to not establish any +// connections even when it could, resulting in a latency bubble for the first +// operation after startup. That's not what we're looking for. +// +// TODO-design Need TLS support (the types below hardcode NoTls). use super::Config as DbConfig; use async_bb8_diesel::ConnectionManager; diff --git a/nexus/src/db/saga_recovery.rs b/nexus/src/db/saga_recovery.rs index 584a93f6cb..49ef590520 100644 --- a/nexus/src/db/saga_recovery.rs +++ b/nexus/src/db/saga_recovery.rs @@ -2,9 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! - * Handles recovery of sagas - */ +//! Handles recovery of sagas use crate::context::OpContext; use crate::db; @@ -85,23 +83,21 @@ where let join_handle = tokio::spawn(async move { info!(&opctx.log, "start saga recovery"); - /* - * We perform the initial list of sagas using a standard retry policy. - * We treat all errors as transient because there's nothing we can do - * about any of them except try forever. As a result, we never expect - * an error from the overall operation. - * TODO-monitoring we definitely want a way to raise a big red flag if - * saga recovery is not completing. - * TODO-robustness It would be better to retry the individual database - * operations within this operation than retrying the overall operation. - * As this is written today, if the listing requires a bunch of pages - * and the operation fails partway through, we'll re-fetch all the pages - * we successfully fetched before. 
If the database is overloaded and - * only N% of requests are completing, the probability of this operation - * succeeding decreases considerably as the number of separate queries - * (pages) goes up. We'd be much more likely to finish the overall - * operation if we didn't throw away the results we did get each time. - */ + // We perform the initial list of sagas using a standard retry policy. + // We treat all errors as transient because there's nothing we can do + // about any of them except try forever. As a result, we never expect + // an error from the overall operation. + // TODO-monitoring we definitely want a way to raise a big red flag if + // saga recovery is not completing. + // TODO-robustness It would be better to retry the individual database + // operations within this operation than retrying the overall operation. + // As this is written today, if the listing requires a bunch of pages + // and the operation fails partway through, we'll re-fetch all the pages + // we successfully fetched before. If the database is overloaded and + // only N% of requests are completing, the probability of this operation + // succeeding decreases considerably as the number of separate queries + // (pages) goes up. We'd be much more likely to finish the overall + // operation if we didn't throw away the results we did get each time. let found_sagas = retry_notify( internal_service_policy(), || async { @@ -124,19 +120,17 @@ where info!(&opctx.log, "listed sagas ({} total)", found_sagas.len()); let recovery_futures = found_sagas.into_iter().map(|saga| async { - /* - * TODO-robustness We should put this into a retry loop. We may - * also want to take any failed sagas and put them at the end of the - * queue. It shouldn't really matter, in that the transient - * failures here are likely to affect recovery of all sagas. 
- * However, it's conceivable we misclassify a permanent failure as a - * transient failure, or that a transient failure is more likely to - * affect some sagas than others (e.g, data on a different node, or - * it has a larger log that requires more queries). To avoid one - * bad saga ruining the rest, we should try to recover the rest - * before we go back to one that's failed. - */ - /* TODO-debug want visibility into "abandoned" sagas */ + // TODO-robustness We should put this into a retry loop. We may + // also want to take any failed sagas and put them at the end of the + // queue. It shouldn't really matter, in that the transient + // failures here are likely to affect recovery of all sagas. + // However, it's conceivable we misclassify a permanent failure as a + // transient failure, or that a transient failure is more likely to + // affect some sagas than others (e.g, data on a different node, or + // it has a larger log that requires more queries). To avoid one + // bad saga ruining the rest, we should try to recover the rest + // before we go back to one that's failed. + // TODO-debug want visibility into "abandoned" sagas let saga_id: steno::SagaId = saga.id.into(); recover_saga( &opctx, @@ -187,20 +181,16 @@ fn new_page_params( } } -/** - * Queries the database to return a list of uncompleted sagas assigned to SEC - * `sec_id` - */ -/* -* For now, we do the simplest thing: we fetch all the sagas that the -* caller's going to need before returning any of them. This is easier to -* implement than, say, using a channel or some other stream. In principle -* we're giving up some opportunity for parallelism. The caller could be -* going off and fetching the saga log for the first sagas that we find -* while we're still listing later sagas. Doing that properly would require -* concurrency limits to prevent overload or starvation of other database -* consumers. 
-*/ +/// Queries the database to return a list of uncompleted sagas assigned to SEC +/// `sec_id` +// For now, we do the simplest thing: we fetch all the sagas that the +// caller's going to need before returning any of them. This is easier to +// implement than, say, using a channel or some other stream. In principle +// we're giving up some opportunity for parallelism. The caller could be +// going off and fetching the saga log for the first sagas that we find +// while we're still listing later sagas. Doing that properly would require +// concurrency limits to prevent overload or starvation of other database +// consumers. async fn list_unfinished_sagas( opctx: &OpContext, datastore: &db::DataStore, @@ -279,10 +269,8 @@ where ) .await .map_err(|error| { - /* - * TODO-robustness We want to differentiate between retryable and - * not here - */ + // TODO-robustness We want to differentiate between retryable and + // not here Error::internal_error(&format!( "failed to resume saga: {:#}", error @@ -300,9 +288,7 @@ where }) } -/** - * Queries the database to load the full log for the specified saga - */ +/// Queries the database to load the full log for the specified saga async fn load_saga_log( datastore: &db::DataStore, saga: &db::saga_types::Saga, diff --git a/nexus/src/db/saga_types.rs b/nexus/src/db/saga_types.rs index 6cb70e6f07..655440bc5c 100644 --- a/nexus/src/db/saga_types.rs +++ b/nexus/src/db/saga_types.rs @@ -2,17 +2,15 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! - * Types used for sagas - * - * Just like elsewhere, we run into Rust's orphan rules here. There are types - * in Steno that we want to put into the database, but we can't impl - * `ToSql`/`FromSql` directly on them because they're in different crates. We - * could create wrapper types and impl `ToSql`/`FromSql` on those. 
Instead, we - * use the Steno types directly in our own types, and the handful of places that - * actually serialize them to and from SQL take care of the necessary - * conversions. - */ +//! Types used for sagas +//! +//! Just like elsewhere, we run into Rust's orphan rules here. There are types +//! in Steno that we want to put into the database, but we can't impl +//! `ToSql`/`FromSql` directly on them because they're in different crates. We +//! could create wrapper types and impl `ToSql`/`FromSql` on those. Instead, we +//! use the Steno types directly in our own types, and the handful of places that +//! actually serialize them to and from SQL take care of the necessary +//! conversions. use super::schema::{saga, saga_node_event}; use diesel::backend::{Backend, RawValue}; @@ -25,12 +23,10 @@ use std::convert::TryFrom; use std::sync::Arc; use uuid::Uuid; -/** - * Unique identifier for an SEC (saga execution coordinator) instance - * - * For us, these will generally be Nexus instances, and the SEC id will match - * the Nexus id. - */ +/// Unique identifier for an SEC (saga execution coordinator) instance +/// +/// For us, these will generally be Nexus instances, and the SEC id will match +/// the Nexus id. #[derive( AsExpression, FromSqlRow, Clone, Copy, Eq, Ord, PartialEq, PartialOrd, )] diff --git a/nexus/src/db/sec_store.rs b/nexus/src/db/sec_store.rs index b97c9a5bcb..04f9e1ae8a 100644 --- a/nexus/src/db/sec_store.rs +++ b/nexus/src/db/sec_store.rs @@ -2,9 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! - * Implementation of [`steno::SecStore`] backed by Omicron's database - */ +//! 
Implementation of [`steno::SecStore`] backed by Omicron's database use crate::db::{self, model::Generation}; use anyhow::Context; @@ -14,10 +12,8 @@ use std::fmt; use std::sync::Arc; use steno::SagaId; -/** - * Implementation of [`steno::SecStore`] backed by the Omicron CockroachDB - * database. - */ +/// Implementation of [`steno::SecStore`] backed by the Omicron CockroachDB +/// database. pub struct CockroachDbSecStore { sec_id: db::SecId, datastore: Arc, @@ -64,28 +60,22 @@ impl steno::SecStore for CockroachDbSecStore { ); let our_event = db::saga_types::SagaNodeEvent::new(event, self.sec_id); - /* - * TODO-robustness This should be wrapped with a retry loop rather than - * unwrapping the result. - */ + // TODO-robustness This should be wrapped with a retry loop rather than + // unwrapping the result. self.datastore.saga_create_event(&our_event).await.unwrap(); } async fn saga_update(&self, id: SagaId, update: steno::SagaCachedState) { - /* - * TODO-robustness We should track the current generation of the saga - * and use it. We'll know this either from when it was created or when - * it was recovered. - */ + // TODO-robustness We should track the current generation of the saga + // and use it. We'll know this either from when it was created or when + // it was recovered. info!(&self.log, "updating state"; "saga_id" => id.to_string(), "new_state" => update.to_string() ); - /* - * TODO-robustness This should be wrapped with a retry loop rather than - * unwrapping the result. - */ + // TODO-robustness This should be wrapped with a retry loop rather than + // unwrapping the result. self.datastore .saga_update_state(id, update, self.sec_id, Generation::new()) .await diff --git a/nexus/src/external_api/console_api.rs b/nexus/src/external_api/console_api.rs index 2c9b09fd8e..c27f2a0dcc 100644 --- a/nexus/src/external_api/console_api.rs +++ b/nexus/src/external_api/console_api.rs @@ -2,13 +2,11 @@ // License, v. 2.0. 
If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! - * Handler functions (entrypoints) for console-related routes. - * - * This was originally conceived as a separate dropshot server from the external API, - * but in order to avoid CORS issues for now, we are serving these routes directly - * from the external API. - */ +//! Handler functions (entrypoints) for console-related routes. +//! +//! This was originally conceived as a separate dropshot server from the external API, +//! but in order to avoid CORS issues for now, we are serving these routes directly +//! from the external API. use super::views; use crate::authn::external::{ cookies::Cookies, @@ -181,7 +179,6 @@ fn get_login_url(state: Option) -> String { } /// Redirect to IdP login URL -// // Currently hard-coded to redirect to our own fake login form. #[endpoint { method = GET, diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index e9fbe47e44..70894ff6ea 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -2,9 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! - * Handler functions (entrypoints) for external HTTP APIs - */ +//! 
Handler functions (entrypoints) for external HTTP APIs use crate::db; use crate::db::model::Name; @@ -70,9 +68,7 @@ use uuid::Uuid; type NexusApiDescription = ApiDescription>; -/** - * Returns a description of the external nexus API - */ +/// Returns a description of the external nexus API pub fn external_api() -> NexusApiDescription { fn register_endpoints(api: &mut NexusApiDescription) -> Result<(), String> { api.register(organizations_get)?; @@ -182,47 +178,43 @@ pub fn external_api() -> NexusApiDescription { api } -/* - * API ENDPOINT FUNCTION NAMING CONVENTIONS - * - * Generally, HTTP resources are grouped within some collection. For a - * relatively simple example: - * - * GET /organizations (list the organizations in the collection) - * POST /organizations (create a organization in the collection) - * GET /organizations/{org_name} (look up a organization in the collection) - * DELETE /organizations/{org_name} (delete a organization in the collection) - * PUT /organizations/{org_name} (update a organization in the collection) - * - * There's a naming convention for the functions that implement these API entry - * points. When operating on the collection itself, we use: - * - * {collection_path}_{verb} - * - * For examples: - * - * GET /organizations -> organizations_get() - * POST /organizations -> organizations_post() - * - * For operations on items within the collection, we use: - * - * {collection_path}_{verb}_{object} - * - * For examples: - * - * DELETE /organizations/{org_name} -> organizations_delete_organization() - * GET /organizations/{org_name} -> organizations_get_organization() - * PUT /organizations/{org_name} -> organizations_put_organization() - * - * Note that these function names end up in generated OpenAPI spec as the - * operationId for each endpoint, and therefore represent a contract with - * clients. Client generators use operationId to name API methods, so changing - * a function name is a breaking change from a client perspective. 
- */ - -/** - * List all organizations. - */ +// API ENDPOINT FUNCTION NAMING CONVENTIONS +// +// Generally, HTTP resources are grouped within some collection. For a +// relatively simple example: +// +// GET /organizations (list the organizations in the collection) +// POST /organizations (create a organization in the collection) +// GET /organizations/{org_name} (look up a organization in the collection) +// DELETE /organizations/{org_name} (delete a organization in the collection) +// PUT /organizations/{org_name} (update a organization in the collection) +// +// There's a naming convention for the functions that implement these API entry +// points. When operating on the collection itself, we use: +// +// {collection_path}_{verb} +// +// For examples: +// +// GET /organizations -> organizations_get() +// POST /organizations -> organizations_post() +// +// For operations on items within the collection, we use: +// +// {collection_path}_{verb}_{object} +// +// For examples: +// +// DELETE /organizations/{org_name} -> organizations_delete_organization() +// GET /organizations/{org_name} -> organizations_get_organization() +// PUT /organizations/{org_name} -> organizations_put_organization() +// +// Note that these function names end up in generated OpenAPI spec as the +// operationId for each endpoint, and therefore represent a contract with +// clients. Client generators use operationId to name API methods, so changing +// a function name is a breaking change from a client perspective. + +/// List all organizations. #[endpoint { method = GET, path = "/organizations", @@ -261,9 +253,7 @@ async fn organizations_get( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Create a new organization. - */ +/// Create a new organization. 
#[endpoint { method = POST, path = "/organizations", @@ -285,18 +275,14 @@ async fn organizations_post( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Path parameters for Organization requests - */ +/// Path parameters for Organization requests #[derive(Deserialize, JsonSchema)] struct OrganizationPathParam { /// The organization's unique name. organization_name: Name, } -/** - * Fetch a specific organization - */ +/// Fetch a specific organization #[endpoint { method = GET, path = "/organizations/{organization_name}", @@ -319,9 +305,7 @@ async fn organizations_get_organization( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Delete a specific organization. - */ +/// Delete a specific organization. #[endpoint { method = DELETE, path = "/organizations/{organization_name}", @@ -343,16 +327,12 @@ async fn organizations_delete_organization( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Update a specific organization. - */ -/* - * TODO-correctness: Is it valid for PUT to accept application/json that's a - * subset of what the resource actually represents? If not, is that a problem? - * (HTTP may require that this be idempotent.) If so, can we get around that - * having this be a slightly different content-type (e.g., - * "application/json-patch")? We should see what other APIs do. - */ +/// Update a specific organization. +// TODO-correctness: Is it valid for PUT to accept application/json that's a +// subset of what the resource actually represents? If not, is that a problem? +// (HTTP may require that this be idempotent.) If so, can we get around that +// having this be a slightly different content-type (e.g., +// "application/json-patch")? We should see what other APIs do. 
#[endpoint { method = PUT, path = "/organizations/{organization_name}", @@ -381,9 +361,7 @@ async fn organizations_put_organization( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * List all projects. - */ +/// List all projects. #[endpoint { method = GET, path = "/organizations/{organization_name}/projects", @@ -437,9 +415,7 @@ async fn organization_projects_get( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Create a new project. - */ +/// Create a new project. #[endpoint { method = POST, path = "/organizations/{organization_name}/projects", @@ -468,9 +444,7 @@ async fn organization_projects_post( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Path parameters for Project requests - */ +/// Path parameters for Project requests #[derive(Deserialize, JsonSchema)] struct ProjectPathParam { /// The organization's unique name. @@ -479,9 +453,7 @@ struct ProjectPathParam { project_name: Name, } -/** - * Fetch a specific project - */ +/// Fetch a specific project #[endpoint { method = GET, path = "/organizations/{organization_name}/projects/{project_name}", @@ -506,9 +478,7 @@ async fn organization_projects_get_project( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Delete a specific project. - */ +/// Delete a specific project. #[endpoint { method = DELETE, path = "/organizations/{organization_name}/projects/{project_name}", @@ -531,16 +501,12 @@ async fn organization_projects_delete_project( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Update a specific project. - */ -/* - * TODO-correctness: Is it valid for PUT to accept application/json that's a - * subset of what the resource actually represents? If not, is that a problem? - * (HTTP may require that this be idempotent.) 
If so, can we get around that - * having this be a slightly different content-type (e.g., - * "application/json-patch")? We should see what other APIs do. - */ +/// Update a specific project. +// TODO-correctness: Is it valid for PUT to accept application/json that's a +// subset of what the resource actually represents? If not, is that a problem? +// (HTTP may require that this be idempotent.) If so, can we get around that +// having this be a slightly different content-type (e.g., +// "application/json-patch")? We should see what other APIs do. #[endpoint { method = PUT, path = "/organizations/{organization_name}/projects/{project_name}", @@ -571,13 +537,9 @@ async fn organization_projects_put_project( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/* - * Disks - */ +// Disks -/** - * List disks in a project. - */ +/// List disks in a project. #[endpoint { method = GET, path = "/organizations/{organization_name}/projects/{project_name}/disks", @@ -613,12 +575,8 @@ async fn project_disks_get( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Create a disk in a project. - */ -/* - * TODO-correctness See note about instance create. This should be async. - */ +/// Create a disk in a project. +// TODO-correctness See note about instance create. This should be async. #[endpoint { method = POST, path = "/organizations/{organization_name}/projects/{project_name}/disks", @@ -650,9 +608,7 @@ async fn project_disks_post( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Path parameters for Disk requests - */ +/// Path parameters for Disk requests #[derive(Deserialize, JsonSchema)] struct DiskPathParam { organization_name: Name, @@ -660,9 +616,7 @@ struct DiskPathParam { disk_name: Name, } -/** - * Fetch a single disk in a project. - */ +/// Fetch a single disk in a project. 
#[endpoint { method = GET, path = "/organizations/{organization_name}/projects/{project_name}/disks/{disk_name}", @@ -688,9 +642,7 @@ async fn project_disks_get_disk( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Delete a disk from a project. - */ +/// Delete a disk from a project. #[endpoint { method = DELETE, path = "/organizations/{organization_name}/projects/{project_name}/disks/{disk_name}", @@ -721,13 +673,9 @@ async fn project_disks_delete_disk( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/* - * Instances - */ +// Instances -/** - * List instances in a project. - */ +/// List instances in a project. #[endpoint { method = GET, path = "/organizations/{organization_name}/projects/{project_name}/instances", @@ -763,18 +711,14 @@ async fn project_instances_get( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Create an instance in a project. - */ -/* - * TODO-correctness This is supposed to be async. Is that right? We can create - * the instance immediately -- it's just not booted yet. Maybe the boot - * operation is what's a separate operation_id. What about the response code - * (201 Created vs 202 Accepted)? Is that orthogonal? Things can return a - * useful response, including an operation id, with either response code. Maybe - * a "reboot" operation would return a 202 Accepted because there's no actual - * resource created? - */ +/// Create an instance in a project. +// TODO-correctness This is supposed to be async. Is that right? We can create +// the instance immediately -- it's just not booted yet. Maybe the boot +// operation is what's a separate operation_id. What about the response code +// (201 Created vs 202 Accepted)? Is that orthogonal? Things can return a +// useful response, including an operation id, with either response code. 
Maybe +// a "reboot" operation would return a 202 Accepted because there's no actual +// resource created? #[endpoint { method = POST, path = "/organizations/{organization_name}/projects/{project_name}/instances", @@ -806,9 +750,7 @@ async fn project_instances_post( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Path parameters for Instance requests - */ +/// Path parameters for Instance requests #[derive(Deserialize, JsonSchema)] struct InstancePathParam { organization_name: Name, @@ -816,9 +758,7 @@ struct InstancePathParam { instance_name: Name, } -/** - * Get an instance in a project. - */ +/// Get an instance in a project. #[endpoint { method = GET, path = "/organizations/{organization_name}/projects/{project_name}/instances/{instance_name}", @@ -849,9 +789,7 @@ async fn project_instances_get_instance( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Delete an instance from a project. - */ +/// Delete an instance from a project. #[endpoint { method = DELETE, path = "/organizations/{organization_name}/projects/{project_name}/instances/{instance_name}", @@ -882,9 +820,7 @@ async fn project_instances_delete_instance( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Migrate an instance to a different propolis-server, possibly on a different sled. - */ +/// Migrate an instance to a different propolis-server, possibly on a different sled. #[endpoint { method = POST, path = "/organizations/{organization_name}/projects/{project_name}/instances/{instance_name}/migrate", @@ -918,9 +854,7 @@ async fn project_instances_migrate_instance( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Reboot an instance. - */ +/// Reboot an instance. 
#[endpoint { method = POST, path = "/organizations/{organization_name}/projects/{project_name}/instances/{instance_name}/reboot", @@ -951,9 +885,7 @@ async fn project_instances_instance_reboot( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Boot an instance. - */ +/// Boot an instance. #[endpoint { method = POST, path = "/organizations/{organization_name}/projects/{project_name}/instances/{instance_name}/start", @@ -984,15 +916,13 @@ async fn project_instances_instance_start( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Halt an instance. - */ +/// Halt an instance. #[endpoint { method = POST, path = "/organizations/{organization_name}/projects/{project_name}/instances/{instance_name}/stop", tags = ["instances"], }] -/* Our naming convention kind of falls apart here. */ +// Our naming convention kind of falls apart here. async fn project_instances_instance_stop( rqctx: Arc>>, path_params: Path, @@ -1018,10 +948,8 @@ async fn project_instances_instance_stop( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * List disks attached to this instance. - */ -/* TODO-scalability needs to be paginated */ +/// List disks attached to this instance. +// TODO-scalability needs to be paginated #[endpoint { method = GET, path = "/organizations/{organization_name}/projects/{project_name}/instances/{instance_name}/disks", @@ -1268,9 +1196,7 @@ async fn instance_network_interfaces_get_interface( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/* - * Snapshots - */ +// Snapshots /// List snapshots in a project. #[endpoint { @@ -1410,13 +1336,9 @@ async fn project_snapshots_delete_snapshot( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/* - * VPCs - */ +// VPCs -/** - * List VPCs in a project. - */ +/// List VPCs in a project. 
#[endpoint { method = GET, path = "/organizations/{organization_name}/projects/{project_name}/vpcs", @@ -1453,9 +1375,7 @@ async fn project_vpcs_get( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Path parameters for VPC requests - */ +/// Path parameters for VPC requests #[derive(Deserialize, JsonSchema)] struct VpcPathParam { organization_name: Name, @@ -1463,9 +1383,7 @@ struct VpcPathParam { vpc_name: Name, } -/** - * Get a VPC in a project. - */ +/// Get a VPC in a project. #[endpoint { method = GET, path = "/organizations/{organization_name}/projects/{project_name}/vpcs/{vpc_name}", @@ -1491,9 +1409,7 @@ async fn project_vpcs_get_vpc( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Create a VPC in a project. - */ +/// Create a VPC in a project. #[endpoint { method = POST, path = "/organizations/{organization_name}/projects/{project_name}/vpcs", @@ -1525,9 +1441,7 @@ async fn project_vpcs_post( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Update a VPC. - */ +/// Update a VPC. #[endpoint { method = PUT, path = "/organizations/{organization_name}/projects/{project_name}/vpcs/{vpc_name}", @@ -1557,9 +1471,7 @@ async fn project_vpcs_put_vpc( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Delete a vpc from a project. - */ +/// Delete a vpc from a project. #[endpoint { method = DELETE, path = "/organizations/{organization_name}/projects/{project_name}/vpcs/{vpc_name}", @@ -1590,9 +1502,7 @@ async fn project_vpcs_delete_vpc( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * List subnets in a VPC. - */ +/// List subnets in a VPC. 
#[endpoint { method = GET, path = "/organizations/{organization_name}/projects/{project_name}/vpcs/{vpc_name}/subnets", @@ -1627,9 +1537,7 @@ async fn vpc_subnets_get( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Path parameters for VPC Subnet requests - */ +/// Path parameters for VPC Subnet requests #[derive(Deserialize, JsonSchema)] struct VpcSubnetPathParam { organization_name: Name, @@ -1638,9 +1546,7 @@ struct VpcSubnetPathParam { subnet_name: Name, } -/** - * Get subnet in a VPC. - */ +/// Get subnet in a VPC. #[endpoint { method = GET, path = "/organizations/{organization_name}/projects/{project_name}/vpcs/{vpc_name}/subnets/{subnet_name}", @@ -1669,9 +1575,7 @@ async fn vpc_subnets_get_subnet( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Create a subnet in a VPC. - */ +/// Create a subnet in a VPC. #[endpoint { method = POST, path = "/organizations/{organization_name}/projects/{project_name}/vpcs/{vpc_name}/subnets", @@ -1701,9 +1605,7 @@ async fn vpc_subnets_post( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Delete a subnet from a VPC. - */ +/// Delete a subnet from a VPC. #[endpoint { method = DELETE, path = "/organizations/{organization_name}/projects/{project_name}/vpcs/{vpc_name}/subnets/{subnet_name}", @@ -1732,9 +1634,7 @@ async fn vpc_subnets_delete_subnet( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Update a VPC Subnet. - */ +/// Update a VPC Subnet. #[endpoint { method = PUT, path = "/organizations/{organization_name}/projects/{project_name}/vpcs/{vpc_name}/subnets/{subnet_name}", @@ -1801,13 +1701,9 @@ async fn subnet_network_interfaces_get( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/* - * VPC Firewalls - */ +// VPC Firewalls -/** - * List firewall rules for a VPC. - */ +/// List firewall rules for a VPC. 
#[endpoint { method = GET, path = "/organizations/{organization_name}/projects/{project_name}/vpcs/{vpc_name}/firewall/rules", @@ -1838,9 +1734,7 @@ async fn vpc_firewall_rules_get( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Replace the firewall rules for a VPC - */ +/// Replace the firewall rules for a VPC #[endpoint { method = PUT, path = "/organizations/{organization_name}/projects/{project_name}/vpcs/{vpc_name}/firewall/rules", @@ -1872,13 +1766,9 @@ async fn vpc_firewall_rules_put( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/* - * VPC Routers - */ +// VPC Routers -/** - * List VPC Custom and System Routers - */ +/// List VPC Custom and System Routers #[endpoint { method = GET, path = "/organizations/{organization_name}/projects/{project_name}/vpcs/{vpc_name}/routers", @@ -1913,9 +1803,7 @@ async fn vpc_routers_get( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Path parameters for VPC Router requests - */ +/// Path parameters for VPC Router requests #[derive(Deserialize, JsonSchema)] struct VpcRouterPathParam { organization_name: Name, @@ -1924,9 +1812,7 @@ struct VpcRouterPathParam { router_name: Name, } -/** - * Get a VPC Router - */ +/// Get a VPC Router #[endpoint { method = GET, path = "/organizations/{organization_name}/projects/{project_name}/vpcs/{vpc_name}/routers/{router_name}", @@ -1955,9 +1841,7 @@ async fn vpc_routers_get_router( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Create a VPC Router - */ +/// Create a VPC Router #[endpoint { method = POST, path = "/organizations/{organization_name}/projects/{project_name}/vpcs/{vpc_name}/routers", @@ -1988,9 +1872,7 @@ async fn vpc_routers_post( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Delete a router from its VPC - */ +/// Delete a router from its VPC #[endpoint { method = DELETE, path = 
"/organizations/{organization_name}/projects/{project_name}/vpcs/{vpc_name}/routers/{router_name}", @@ -2019,9 +1901,7 @@ async fn vpc_routers_delete_router( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Update a VPC Router - */ +/// Update a VPC Router #[endpoint { method = PUT, path = "/organizations/{organization_name}/projects/{project_name}/vpcs/{vpc_name}/routers/{router_name}", @@ -2052,13 +1932,9 @@ async fn vpc_routers_put_router( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/* - * Vpc Router Routes - */ +// Vpc Router Routes -/** - * List a Router's routes - */ +/// List a Router's routes #[endpoint { method = GET, path = "/organizations/{organization_name}/projects/{project_name}/vpcs/{vpc_name}/routers/{router_name}/routes", @@ -2092,9 +1968,7 @@ async fn routers_routes_get( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Path parameters for Router Route requests - */ +/// Path parameters for Router Route requests #[derive(Deserialize, JsonSchema)] struct RouterRoutePathParam { organization_name: Name, @@ -2104,9 +1978,7 @@ struct RouterRoutePathParam { route_name: Name, } -/** - * Get a VPC Router route - */ +/// Get a VPC Router route #[endpoint { method = GET, path = "/organizations/{organization_name}/projects/{project_name}/vpcs/{vpc_name}/routers/{router_name}/routes/{route_name}", @@ -2134,9 +2006,7 @@ async fn routers_routes_get_route( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Create a VPC Router - */ +/// Create a VPC Router #[endpoint { method = POST, path = "/organizations/{organization_name}/projects/{project_name}/vpcs/{vpc_name}/routers/{router_name}/routes", @@ -2166,9 +2036,7 @@ async fn routers_routes_post( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Delete a route from its router - */ +/// Delete a route from its router #[endpoint { 
method = DELETE, path = "/organizations/{organization_name}/projects/{project_name}/vpcs/{vpc_name}/routers/{router_name}/routes/{route_name}", @@ -2196,9 +2064,7 @@ async fn routers_routes_delete_route( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Update a Router route - */ +/// Update a Router route #[endpoint { method = PUT, path = "/organizations/{organization_name}/projects/{project_name}/vpcs/{vpc_name}/routers/{router_name}/routes/{route_name}", @@ -2228,13 +2094,9 @@ async fn routers_routes_put_route( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/* - * Racks - */ +// Racks -/** - * List racks in the system. - */ +/// List racks in the system. #[endpoint { method = GET, path = "/hardware/racks", @@ -2256,18 +2118,14 @@ async fn hardware_racks_get( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Path parameters for Rack requests - */ +/// Path parameters for Rack requests #[derive(Deserialize, JsonSchema)] struct RackPathParam { - /** The rack's unique ID. */ + /// The rack's unique ID. rack_id: Uuid, } -/** - * Fetch information about a particular rack. - */ +/// Fetch information about a particular rack. #[endpoint { method = GET, path = "/hardware/racks/{rack_id}", @@ -2287,13 +2145,9 @@ async fn hardware_racks_get_rack( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/* - * Sleds - */ +// Sleds -/** - * List sleds in the system. - */ +/// List sleds in the system. #[endpoint { method = GET, path = "/hardware/sleds", @@ -2318,18 +2172,14 @@ async fn hardware_sleds_get( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Path parameters for Sled requests - */ +/// Path parameters for Sled requests #[derive(Deserialize, JsonSchema)] struct SledPathParam { - /** The sled's unique ID. */ + /// The sled's unique ID. 
sled_id: Uuid, } -/** - * Fetch information about a sled in the system. - */ +/// Fetch information about a sled in the system. #[endpoint { method = GET, path = "/hardware/sleds/{sled_id}", @@ -2349,13 +2199,9 @@ async fn hardware_sleds_get_sled( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/* - * Updates - */ +// Updates -/** - * Refresh update metadata - */ +/// Refresh update metadata #[endpoint { method = POST, path = "/updates/refresh", @@ -2373,13 +2219,9 @@ async fn updates_refresh( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/* - * Sagas - */ +// Sagas -/** - * List all sagas (for debugging) - */ +/// List all sagas (for debugging) #[endpoint { method = GET, path = "/sagas", @@ -2401,17 +2243,13 @@ async fn sagas_get( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Path parameters for Saga requests - */ +/// Path parameters for Saga requests #[derive(Deserialize, JsonSchema)] struct SagaPathParam { saga_id: Uuid, } -/** - * Fetch information about a single saga (for debugging) - */ +/// Fetch information about a single saga (for debugging) #[endpoint { method = GET, path = "/sagas/{saga_id}", @@ -2431,13 +2269,9 @@ async fn sagas_get_saga( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/* - * Built-in (system) users - */ +// Built-in (system) users -/** - * List the built-in system users - */ +/// List the built-in system users #[endpoint { method = GET, path = "/users", @@ -2465,18 +2299,14 @@ async fn users_get( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Path parameters for global (system) user requests - */ +/// Path parameters for global (system) user requests #[derive(Deserialize, JsonSchema)] struct UserPathParam { /// The built-in user's unique name. 
user_name: Name, } -/** - * Fetch a specific built-in system user - */ +/// Fetch a specific built-in system user #[endpoint { method = GET, path = "/users/{user_name}", @@ -2498,9 +2328,7 @@ async fn users_get_user( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * List all timeseries schema - */ +/// List all timeseries schema #[endpoint { method = GET, path = "/timeseries/schema", @@ -2521,22 +2349,16 @@ async fn timeseries_schema_get( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/* - * Built-in roles - */ +// Built-in roles -/* - * Roles have their own pagination scheme because they do not use the usual "id" - * or "name" types. For more, see the comment in dbinit.sql. - */ +// Roles have their own pagination scheme because they do not use the usual "id" +// or "name" types. For more, see the comment in dbinit.sql. #[derive(Deserialize, JsonSchema, Serialize)] struct RolePage { last_seen: String, } -/** - * List the built-in roles - */ +/// List the built-in roles #[endpoint { method = GET, path = "/roles", @@ -2583,18 +2405,14 @@ async fn roles_get( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Path parameters for global (system) role requests - */ +/// Path parameters for global (system) role requests #[derive(Deserialize, JsonSchema)] struct RolePathParam { /// The built-in role's unique name. role_name: String, } -/** - * Fetch a specific built-in role - */ +/// Fetch a specific built-in role #[endpoint { method = GET, path = "/roles/{role_name}", diff --git a/nexus/src/external_api/params.rs b/nexus/src/external_api/params.rs index 3029623a80..84e5336f77 100644 --- a/nexus/src/external_api/params.rs +++ b/nexus/src/external_api/params.rs @@ -2,9 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! 
- * Params define the request bodies of API endpoints for creating or updating resources. - */ +//! Params define the request bodies of API endpoints for creating or updating resources. use omicron_common::api::external::{ ByteCount, IdentityMetadataCreateParams, IdentityMetadataUpdateParams, @@ -15,58 +13,42 @@ use serde::{Deserialize, Serialize}; use std::net::IpAddr; use uuid::Uuid; -/* - * ORGANIZATIONS - */ +// ORGANIZATIONS -/** - * Create-time parameters for an [`Organization`](crate::external_api::views::Organization) - */ +/// Create-time parameters for an [`Organization`](crate::external_api::views::Organization) #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct OrganizationCreate { #[serde(flatten)] pub identity: IdentityMetadataCreateParams, } -/** - * Updateable properties of an [`Organization`](crate::external_api::views::Organization) - */ +/// Updateable properties of an [`Organization`](crate::external_api::views::Organization) #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct OrganizationUpdate { #[serde(flatten)] pub identity: IdentityMetadataUpdateParams, } -/* - * PROJECTS - */ +// PROJECTS -/** - * Create-time parameters for a [`Project`](crate::external_api::views::Project) - */ +/// Create-time parameters for a [`Project`](crate::external_api::views::Project) #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct ProjectCreate { #[serde(flatten)] pub identity: IdentityMetadataCreateParams, } -/** - * Updateable properties of a [`Project`](crate::external_api::views::Project) - */ +/// Updateable properties of a [`Project`](crate::external_api::views::Project) #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct ProjectUpdate { #[serde(flatten)] pub identity: IdentityMetadataUpdateParams, } -/* - * NETWORK INTERFACES - */ +// NETWORK INTERFACES -/** - * Create-time parameters for a - * [`NetworkInterface`](omicron_common::api::external::NetworkInterface) - */ +/// 
Create-time parameters for a +/// [`NetworkInterface`](omicron_common::api::external::NetworkInterface) #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct NetworkInterfaceCreate { #[serde(flatten)] @@ -79,9 +61,7 @@ pub struct NetworkInterfaceCreate { pub ip: Option, } -/* - * INSTANCES - */ +// INSTANCES /// Describes an attachment of a `NetworkInterface` to an `Instance`, at the /// time the instance is created. @@ -125,37 +105,29 @@ impl Default for InstanceNetworkInterfaceAttachment { } } -/** - * Create-time parameters for an [`Instance`](omicron_common::api::external::Instance) - */ +/// Create-time parameters for an [`Instance`](omicron_common::api::external::Instance) #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct InstanceCreate { #[serde(flatten)] pub identity: IdentityMetadataCreateParams, pub ncpus: InstanceCpuCount, pub memory: ByteCount, - pub hostname: String, /* TODO-cleanup different type? */ + pub hostname: String, // TODO-cleanup different type? /// The network interfaces to be created for this instance. 
#[serde(default)] pub network_interfaces: InstanceNetworkInterfaceAttachment, } -/** - * Migration parameters for an [`Instance`](omicron_common::api::external::Instance) - */ +/// Migration parameters for an [`Instance`](omicron_common::api::external::Instance) #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct InstanceMigrate { pub dst_sled_uuid: Uuid, } -/* - * VPCS - */ +// VPCS -/** - * Create-time parameters for a [`Vpc`](crate::external_api::views::Vpc) - */ +/// Create-time parameters for a [`Vpc`](crate::external_api::views::Vpc) #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct VpcCreate { #[serde(flatten)] @@ -171,9 +143,7 @@ pub struct VpcCreate { pub dns_name: Name, } -/** - * Updateable properties of a [`Vpc`](crate::external_api::views::Vpc) - */ +/// Updateable properties of a [`Vpc`](crate::external_api::views::Vpc) #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct VpcUpdate { #[serde(flatten)] @@ -181,9 +151,7 @@ pub struct VpcUpdate { pub dns_name: Option, } -/** - * Create-time parameters for a [`VpcSubnet`](crate::external_api::views::VpcSubnet) - */ +/// Create-time parameters for a [`VpcSubnet`](crate::external_api::views::VpcSubnet) #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct VpcSubnetCreate { #[serde(flatten)] @@ -204,9 +172,7 @@ pub struct VpcSubnetCreate { pub ipv6_block: Option, } -/** - * Updateable properties of a [`VpcSubnet`](crate::external_api::views::VpcSubnet) - */ +/// Updateable properties of a [`VpcSubnet`](crate::external_api::views::VpcSubnet) #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct VpcSubnetUpdate { #[serde(flatten)] @@ -218,9 +184,7 @@ pub struct VpcSubnetUpdate { pub ipv6_block: Option, } -/* - * VPC ROUTERS - */ +// VPC ROUTERS /// Create-time parameters for a [`VpcRouter`](omicron_common::api::external::VpcRouter) #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] @@ -236,21 +200,17 @@ pub 
struct VpcRouterUpdate { pub identity: IdentityMetadataUpdateParams, } -/* - * DISKS - */ +// DISKS -/** - * Create-time parameters for a [`Disk`](omicron_common::api::external::Disk) - */ +/// Create-time parameters for a [`Disk`](omicron_common::api::external::Disk) #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct DiskCreate { - /** common identifying metadata */ + /// common identifying metadata #[serde(flatten)] pub identity: IdentityMetadataCreateParams, - /** id for snapshot from which the Disk should be created, if any */ - pub snapshot_id: Option, /* TODO should be a name? */ - /** size of the Disk */ + /// id for snapshot from which the Disk should be created, if any + pub snapshot_id: Option, // TODO should be a name? + /// size of the Disk pub size: ByteCount, } @@ -274,10 +234,8 @@ impl DiskCreate { } } -/** - * Parameters for the [`Disk`](omicron_common::api::external::Disk) to be - * attached or detached to an instance - */ +/// Parameters for the [`Disk`](omicron_common::api::external::Disk) to be +/// attached or detached to an instance #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct DiskIdentifier { pub disk: Name, @@ -291,9 +249,7 @@ pub struct NetworkInterfaceIdentifier { pub interface_name: Name, } -/* - * SNAPSHOTS - */ +// SNAPSHOTS /// Create-time parameters for a [`Snapshot`](omicron_common::api::external::Snapshot) #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] @@ -306,17 +262,13 @@ pub struct SnapshotCreate { pub disk: Name, } -/* - * BUILT-IN USERS - * - * These cannot be created via the external API, but we use the same interfaces - * for creating them internally as we use for types that can be created in the - * external API. 
- */ - -/** - * Create-time parameters for a [`UserBuiltin`](crate::db::model::UserBuiltin) - */ +// BUILT-IN USERS +// +// These cannot be created via the external API, but we use the same interfaces +// for creating them internally as we use for types that can be created in the +// external API. + +/// Create-time parameters for a [`UserBuiltin`](crate::db::model::UserBuiltin) #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct UserBuiltinCreate { #[serde(flatten)] diff --git a/nexus/src/external_api/views.rs b/nexus/src/external_api/views.rs index 7d4b6b1b92..19a05af68d 100644 --- a/nexus/src/external_api/views.rs +++ b/nexus/src/external_api/views.rs @@ -2,9 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! - * Views are response bodies, most of which are public lenses onto DB models. - */ +//! Views are response bodies, most of which are public lenses onto DB models. use crate::authn; use crate::db::identity::{Asset, Resource}; @@ -19,13 +17,9 @@ use serde::{Deserialize, Serialize}; use std::net::SocketAddr; use uuid::Uuid; -/* - * ORGANIZATIONS - */ +// ORGANIZATIONS -/** - * Client view of an [`Organization`] - */ +/// Client view of an [`Organization`] #[derive(ObjectIdentity, Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct Organization { #[serde(flatten)] @@ -38,19 +32,13 @@ impl Into for model::Organization { } } -/* - * PROJECTS - */ +// PROJECTS -/** - * Client view of a [`Project`] - */ +/// Client view of a [`Project`] #[derive(ObjectIdentity, Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct Project { - /* - * TODO-correctness is flattening here (and in all the other types) the - * intent in RFD 4? - */ + // TODO-correctness is flattening here (and in all the other types) the + // intent in RFD 4? 
#[serde(flatten)] pub identity: IdentityMetadata, pub organization_id: Uuid, @@ -65,9 +53,7 @@ impl Into for model::Project { } } -/* - * SNAPSHOTS - */ +// SNAPSHOTS /// Client view of a Snapshot #[derive(ObjectIdentity, Clone, Debug, Deserialize, Serialize, JsonSchema)] @@ -80,19 +66,15 @@ pub struct Snapshot { pub size: ByteCount, } -/* - * VPCs - */ +// VPCs -/** - * Client view of a [`Vpc`] - */ +/// Client view of a [`Vpc`] #[derive(ObjectIdentity, Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct Vpc { #[serde(flatten)] pub identity: IdentityMetadata, - /** id for the project containing this VPC */ + /// id for the project containing this VPC pub project_id: Uuid, /// id for the system router where subnet default routes are registered @@ -102,7 +84,7 @@ pub struct Vpc { pub ipv6_prefix: Ipv6Net, // TODO-design should this be optional? - /** The name used for the VPC in DNS. */ + /// The name used for the VPC in DNS. pub dns_name: Name, } @@ -122,17 +104,17 @@ impl Into for model::Vpc { /// them, within a IPv4 subnetwork or optionall an IPv6 subnetwork. #[derive(ObjectIdentity, Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct VpcSubnet { - /** common identifying metadata */ + /// common identifying metadata #[serde(flatten)] pub identity: IdentityMetadata, - /** The VPC to which the subnet belongs. */ + /// The VPC to which the subnet belongs. pub vpc_id: Uuid, - /** The IPv4 subnet CIDR block. */ + /// The IPv4 subnet CIDR block. pub ipv4_block: Ipv4Net, - /** The IPv6 subnet CIDR block. */ + /// The IPv6 subnet CIDR block. 
pub ipv6_block: Ipv6Net, } @@ -147,13 +129,9 @@ impl Into for model::VpcSubnet { } } -/* - * RACKS - */ +// RACKS -/** - * Client view of an [`Rack`] - */ +/// Client view of an [`Rack`] #[derive(ObjectIdentity, Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct Rack { #[serde(flatten)] @@ -166,13 +144,9 @@ impl Into for model::Rack { } } -/* - * SLEDS - */ +// SLEDS -/** - * Client view of an [`Sled`] - */ +/// Client view of an [`Sled`] #[derive(ObjectIdentity, Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct Sled { #[serde(flatten)] @@ -186,19 +160,13 @@ impl Into for model::Sled { } } -/* - * BUILT-IN USERS - */ +// BUILT-IN USERS -/** - * Client view of a [`User`] - */ +/// Client view of a [`User`] #[derive(ObjectIdentity, Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct User { - /* - * TODO-correctness is flattening here (and in all the other types) the - * intent in RFD 4? - */ + // TODO-correctness is flattening here (and in all the other types) the + // intent in RFD 4? #[serde(flatten)] pub identity: IdentityMetadata, } @@ -209,9 +177,7 @@ impl Into for model::UserBuiltin { } } -/** - * Client view of currently authed user. - */ +/// Client view of currently authed user. // TODO: this may end up merged with User once more details about the user are // stored in the auth context. Right now there is only the ID. #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq)] @@ -225,13 +191,9 @@ impl Into for authn::Actor { } } -/* - * ROLES - */ +// ROLES -/** - * Client view of a [`Role`] - */ +/// Client view of a [`Role`] #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize, JsonSchema)] pub struct Role { pub name: RoleName, diff --git a/nexus/src/internal_api/http_entrypoints.rs b/nexus/src/internal_api/http_entrypoints.rs index 04478bdfeb..d3e3ec4f9c 100644 --- a/nexus/src/internal_api/http_entrypoints.rs +++ b/nexus/src/internal_api/http_entrypoints.rs @@ -2,9 +2,7 @@ // License, v. 2.0. 
If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/** - * Handler functions (entrypoints) for HTTP APIs internal to the control plane - */ +/// Handler functions (entrypoints) for HTTP APIs internal to the control plane use crate::context::OpContext; use crate::ServerContext; @@ -35,9 +33,7 @@ use uuid::Uuid; type NexusApiDescription = ApiDescription>; -/** - * Returns a description of the internal nexus API - */ +/// Returns a description of the internal nexus API pub fn internal_api() -> NexusApiDescription { fn register_endpoints(api: &mut NexusApiDescription) -> Result<(), String> { api.register(cpapi_sled_agents_post)?; @@ -59,9 +55,7 @@ pub fn internal_api() -> NexusApiDescription { api } -/** - * Path parameters for Sled Agent requests (internal API) - */ +/// Path parameters for Sled Agent requests (internal API) #[derive(Deserialize, JsonSchema)] struct SledAgentPathParam { sled_id: Uuid, @@ -93,18 +87,14 @@ async fn cpapi_sled_agents_post( apictx.internal_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Path parameters for Sled Agent requests (internal API) - */ +/// Path parameters for Sled Agent requests (internal API) #[derive(Deserialize, JsonSchema)] struct ZpoolPathParam { sled_id: Uuid, zpool_id: Uuid, } -/** - * Report that a pool for a specified sled has come online. - */ +/// Report that a pool for a specified sled has come online. #[endpoint { method = PUT, path = "/sled_agents/{sled_id}/zpools/{zpool_id}", @@ -128,9 +118,7 @@ struct DatasetPathParam { dataset_id: Uuid, } -/** - * Report that a dataset within a pool has come online. - */ +/// Report that a dataset within a pool has come online. 
#[endpoint { method = PUT, path = "/zpools/{zpool_id}/dataset/{dataset_id}", @@ -155,17 +143,13 @@ async fn dataset_put( Ok(HttpResponseOk(DatasetPutResponse { reservation: None, quota: None })) } -/** - * Path parameters for Instance requests (internal API) - */ +/// Path parameters for Instance requests (internal API) #[derive(Deserialize, JsonSchema)] struct InstancePathParam { instance_id: Uuid, } -/** - * Report updated state for an instance. - */ +/// Report updated state for an instance. #[endpoint { method = PUT, path = "/instances/{instance_id}", @@ -186,17 +170,13 @@ async fn cpapi_instances_put( apictx.internal_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Path parameters for Disk requests (internal API) - */ +/// Path parameters for Disk requests (internal API) #[derive(Deserialize, JsonSchema)] struct DiskPathParam { disk_id: Uuid, } -/** - * Report updated state for a disk. - */ +/// Report updated state for a disk. #[endpoint { method = PUT, path = "/disks/{disk_id}", @@ -218,9 +198,7 @@ async fn cpapi_disks_put( apictx.internal_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/** - * Accept a registration from a new metric producer - */ +/// Accept a registration from a new metric producer #[endpoint { method = POST, path = "/metrics/producers", @@ -242,9 +220,7 @@ async fn cpapi_producers_post( .await } -/** - * Accept a notification of a new oximeter collection server. - */ +/// Accept a notification of a new oximeter collection server. #[endpoint { method = POST, path = "/metrics/collectors", @@ -266,9 +242,7 @@ async fn cpapi_collectors_post( .await } -/** - * Endpoint for oximeter to collect nexus server metrics. - */ +/// Endpoint for oximeter to collect nexus server metrics. 
#[endpoint { method = GET, path = "/metrics/collect/{producer_id}", diff --git a/nexus/src/internal_api/params.rs b/nexus/src/internal_api/params.rs index 1e4f420d67..96f078b9c6 100644 --- a/nexus/src/internal_api/params.rs +++ b/nexus/src/internal_api/params.rs @@ -2,9 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! - * Params define the request bodies of API endpoints for creating or updating resources. - */ +//! Params define the request bodies of API endpoints for creating or updating resources. use omicron_common::api::external::ByteCount; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; diff --git a/nexus/src/lib.rs b/nexus/src/lib.rs index d8f5df6ceb..bf24258464 100644 --- a/nexus/src/lib.rs +++ b/nexus/src/lib.rs @@ -2,18 +2,14 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! - * Library interface to the Nexus, the heart of the control plane - */ - -/* - * We only use rustdoc for internal documentation, including private items, so - * it's expected that we'll have links to private items in the docs. - */ +//! Library interface to the Nexus, the heart of the control plane + +// We only use rustdoc for internal documentation, including private items, so +// it's expected that we'll have links to private items in the docs. #![allow(rustdoc::private_intra_doc_links)] -/* TODO(#40): Remove this exception once resolved. */ +// TODO(#40): Remove this exception once resolved. #![allow(clippy::unnecessary_wraps)] -/* Clippy's style lints are useful, but not worth running automatically. */ +// Clippy's style lints are useful, but not worth running automatically. 
#![allow(clippy::style)] pub mod authn; // Public only for testing @@ -48,10 +44,8 @@ extern crate newtype_derive; #[macro_use] extern crate diesel; -/** - * Run the OpenAPI generator for the external API, which emits the OpenAPI spec - * to stdout. - */ +/// Run the OpenAPI generator for the external API, which emits the OpenAPI spec +/// to stdout. pub fn run_openapi_external() -> Result<(), String> { external_api() .openapi("Oxide Region API", "0.0.1") @@ -72,23 +66,19 @@ pub fn run_openapi_internal() -> Result<(), String> { .map_err(|e| e.to_string()) } -/** - * Packages up a [`Nexus`], running both external and internal HTTP API servers - * wired up to Nexus - */ +/// Packages up a [`Nexus`], running both external and internal HTTP API servers +/// wired up to Nexus pub struct Server { - /** shared state used by API request handlers */ + /// shared state used by API request handlers pub apictx: Arc, - /** dropshot server for external API */ + /// dropshot server for external API pub http_server_external: dropshot::HttpServer>, - /** dropshot server for internal API */ + /// dropshot server for internal API pub http_server_internal: dropshot::HttpServer>, } impl Server { - /** - * Start a nexus server. - */ + /// Start a nexus server. pub async fn start( config: &Config, rack_id: Uuid, @@ -123,13 +113,11 @@ impl Server { Ok(Server { apictx, http_server_external, http_server_internal }) } - /** - * Wait for the given server to shut down - * - * Note that this doesn't initiate a graceful shutdown, so if you call this - * immediately after calling `start()`, the program will block indefinitely - * or until something else initiates a graceful shutdown. - */ + /// Wait for the given server to shut down + /// + /// Note that this doesn't initiate a graceful shutdown, so if you call this + /// immediately after calling `start()`, the program will block indefinitely + /// or until something else initiates a graceful shutdown. 
pub async fn wait_for_finish(self) -> Result<(), String> { let errors = vec![ self.http_server_external @@ -152,9 +140,7 @@ impl Server { } } - /** - * Register the Nexus server as a metric producer with `oximeter. - */ + /// Register the Nexus server as a metric producer with `oximeter. pub async fn register_as_producer(&self) { self.apictx .nexus @@ -163,9 +149,7 @@ impl Server { } } -/** - * Run an instance of the [Server]. - */ +/// Run an instance of the [Server]. pub async fn run_server(config: &Config) -> Result<(), String> { use slog::Drain; let (drain, registration) = slog_dtrace::with_drain( diff --git a/nexus/src/nexus.rs b/nexus/src/nexus.rs index 3fb8fe4dbf..43eea90f41 100644 --- a/nexus/src/nexus.rs +++ b/nexus/src/nexus.rs @@ -2,9 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! - * Nexus, the service that operates much of the control plane in an Oxide fleet - */ +//! Nexus, the service that operates much of the control plane in an Oxide fleet use crate::authn; use crate::authz; @@ -85,24 +83,18 @@ use uuid::Uuid; // TODO: When referring to API types, we should try to include // the prefix unless it is unambiguous. -/** - * Exposes additional [`Nexus`] interfaces for use by the test suite - */ +/// Exposes additional [`Nexus`] interfaces for use by the test suite #[async_trait] pub trait TestInterfaces { - /** - * Returns the SledAgentClient for an Instance from its id. We may also - * want to split this up into instance_lookup_by_id() and instance_sled(), - * but after all it's a test suite special to begin with. - */ + /// Returns the SledAgentClient for an Instance from its id. We may also + /// want to split this up into instance_lookup_by_id() and instance_sled(), + /// but after all it's a test suite special to begin with. 
async fn instance_sled_by_id( &self, id: &Uuid, ) -> Result, Error>; - /** - * Returns the SledAgentClient for a Disk from its id. - */ + /// Returns the SledAgentClient for a Disk from its id. async fn disk_sled_by_id( &self, id: &Uuid, @@ -116,57 +108,51 @@ pub trait TestInterfaces { pub static BASE_ARTIFACT_DIR: &str = "/var/tmp/oxide_artifacts"; -/** - * Manages an Oxide fleet -- the heart of the control plane - */ +/// Manages an Oxide fleet -- the heart of the control plane pub struct Nexus { - /** uuid for this nexus instance. */ + /// uuid for this nexus instance. id: Uuid, - /** uuid for this rack (TODO should also be in persistent storage) */ + /// uuid for this rack (TODO should also be in persistent storage) rack_id: Uuid, - /** general server log */ + /// general server log log: Logger, - /** cached rack identity metadata */ + /// cached rack identity metadata api_rack_identity: db::model::RackIdentity, - /** persistent storage for resources in the control plane */ + /// persistent storage for resources in the control plane db_datastore: Arc, - /** handle to global authz information */ + /// handle to global authz information authz: Arc, - /** saga execution coordinator */ + /// saga execution coordinator sec_client: Arc, - /** Task representing completion of recovered Sagas */ + /// Task representing completion of recovered Sagas recovery_task: std::sync::Mutex>, - /** Status of background task to populate database */ + /// Status of background task to populate database populate_status: tokio::sync::watch::Receiver, - /** Client to the timeseries database. */ + /// Client to the timeseries database. timeseries_client: oximeter_db::Client, - /** Contents of the trusted root role for the TUF repository. */ + /// Contents of the trusted root role for the TUF repository. updates_config: Option, } -/* - * TODO Is it possible to make some of these operations more generic? 
A - * particularly good example is probably list() (or even lookup()), where - * with the right type parameters, generic code can be written to work on all - * types. - * TODO update and delete need to accommodate both with-etag and don't-care - * TODO audit logging ought to be part of this structure and its functions - */ +// TODO Is it possible to make some of these operations more generic? A +// particularly good example is probably list() (or even lookup()), where +// with the right type parameters, generic code can be written to work on all +// types. +// TODO update and delete need to accommodate both with-etag and don't-care +// TODO audit logging ought to be part of this structure and its functions impl Nexus { - /** - * Create a new Nexus instance for the given rack id `rack_id` - */ - /* TODO-polish revisit rack metadata */ + /// Create a new Nexus instance for the given rack id `rack_id` + // TODO-polish revisit rack metadata pub fn new_with_id( rack_id: Uuid, log: Logger, @@ -192,12 +178,10 @@ impl Nexus { let timeseries_client = oximeter_db::Client::new(config.timeseries_db.address, &log); - /* - * TODO-cleanup We may want a first-class subsystem for managing startup - * background tasks. It could use a Future for each one, a status enum - * for each one, status communication via channels, and a single task to - * run them all. - */ + // TODO-cleanup We may want a first-class subsystem for managing startup + // background tasks. It could use a Future for each one, a status enum + // for each one, status communication via channels, and a single task to + // run them all. 
let populate_ctx = OpContext::for_background( log.new(o!("component" => "DataLoader")), Arc::clone(&authz), @@ -221,7 +205,7 @@ impl Nexus { updates_config: config.updates.clone(), }; - /* TODO-cleanup all the extra Arcs here seems wrong */ + // TODO-cleanup all the extra Arcs here seems wrong let nexus = Arc::new(nexus); let opctx = OpContext::for_background( log.new(o!("component" => "SagaRecoverer")), @@ -264,10 +248,8 @@ impl Nexus { } } - /* - * TODO-robustness we should have a limit on how many sled agents there can - * be (for graceful degradation at large scale). - */ + // TODO-robustness we should have a limit on how many sled agents there can + // be (for graceful degradation at large scale). pub async fn upsert_sled( &self, id: Uuid, @@ -306,9 +288,7 @@ impl Nexus { Ok(()) } - /** - * Insert a new record of an Oximeter collector server. - */ + /// Insert a new record of an Oximeter collector server. pub async fn upsert_oximeter_collector( &self, oximeter_info: &OximeterInfo, @@ -421,9 +401,7 @@ impl Nexus { client } - /** - * List all registered Oximeter collector instances. - */ + /// List all registered Oximeter collector instances. pub async fn oximeter_list( &self, page_params: &DataPageParams<'_, Uuid>, @@ -435,9 +413,7 @@ impl Nexus { &self.db_datastore } - /** - * Given a saga template and parameters, create a new saga and execute it. - */ + /// Given a saga template and parameters, create a new saga and execute it. async fn execute_saga( self: &Arc, saga_template: Arc>, @@ -449,10 +425,8 @@ impl Nexus { ExecContextType = Arc, SagaParamsType = Arc

, >, - /* - * TODO-cleanup The bound `P: Serialize` should not be necessary because - * SagaParamsType must already impl Serialize. - */ + // TODO-cleanup The bound `P: Serialize` should not be necessary because + // SagaParamsType must already impl Serialize. P: serde::Serialize, { let saga_id = SagaId(Uuid::new_v4()); @@ -475,11 +449,9 @@ impl Nexus { .await .context("creating saga") .map_err(|error| { - /* - * TODO-error This could be a service unavailable error, - * depending on the failure mode. We need more information from - * Steno. - */ + // TODO-error This could be a service unavailable error, + // depending on the failure mode. We need more information from + // Steno. Error::internal_error(&format!("{:#}", error)) })?; @@ -492,15 +464,13 @@ impl Nexus { let result = future.await; result.kind.map_err(|saga_error| { saga_error.error_source.convert::().unwrap_or_else(|e| { - /* TODO-error more context would be useful */ + // TODO-error more context would be useful Error::InternalError { internal_message: e.to_string() } }) }) } - /* - * Organizations - */ + // Organizations pub async fn organization_create( &self, @@ -554,9 +524,7 @@ impl Nexus { .await } - /* - * Projects - */ + // Projects pub async fn project_create( &self, @@ -678,9 +646,7 @@ impl Nexus { .await } - /* - * Disks - */ + // Disks pub async fn project_list_disks( &self, @@ -715,10 +681,8 @@ impl Nexus { // (if possibly redundant) to check this here. opctx.authorize(authz::Action::CreateChild, &authz_project).await?; - /* - * Until we implement snapshots, do not allow disks to be created with a - * snapshot id. - */ + // Until we implement snapshots, do not allow disks to be created with a + // snapshot id. if params.snapshot_id.is_some() { return Err(Error::InvalidValue { label: String::from("snapshot_id"), @@ -836,14 +800,10 @@ impl Nexus { unimplemented!(); } - /* - * Instances - */ + // Instances - /* - * TODO-design This interface should not exist. See - * SagaContext::alloc_server(). 
- */ + // TODO-design This interface should not exist. See + // SagaContext::alloc_server(). pub async fn sled_allocate(&self) -> Result { // TODO: replace this with a real allocation policy. // @@ -908,66 +868,62 @@ impl Nexus { saga_params, ) .await?; - /* TODO-error more context would be useful */ + // TODO-error more context would be useful let instance_id = saga_outputs.lookup_output::("instance_id").map_err(|e| { Error::InternalError { internal_message: e.to_string() } })?; - /* - * TODO-correctness TODO-robustness TODO-design It's not quite correct - * to take this instance id and look it up again. It's possible that - * it's been modified or even deleted since the saga executed. In that - * case, we might return a different state of the Instance than the one - * that the user created or even fail with a 404! Both of those are - * wrong behavior -- we should be returning the very instance that the - * user created. - * - * How can we fix this? Right now we have internal representations like - * Instance and analaogous end-user-facing representations like - * Instance. The former is not even serializable. The saga - * _could_ emit the View version, but that's not great for two (related) - * reasons: (1) other sagas might want to provision instances and get - * back the internal representation to do other things with the - * newly-created instance, and (2) even within a saga, it would be - * useful to pass a single Instance representation along the saga, - * but they probably would want the internal representation, not the - * view. - * - * The saga could emit an Instance directly. Today, Instance - * etc. aren't supposed to even be serializable -- we wanted to be able - * to have other datastore state there if needed. We could have a third - * InstanceInternalView...but that's starting to feel pedantic. We - * could just make Instance serializable, store that, and call it a - * day. 
Does it matter that we might have many copies of the same - * objects in memory? - * - * If we make these serializable, it would be nice if we could leverage - * the type system to ensure that we never accidentally send them out a - * dropshot endpoint. (On the other hand, maybe we _do_ want to do - * that, for internal interfaces! Can we do this on a - * per-dropshot-server-basis?) - * - * TODO Even worse, post-authz, we do two lookups here instead of one. - * Maybe sagas should be able to emit `authz::Instance`-type objects. - */ + // TODO-correctness TODO-robustness TODO-design It's not quite correct + // to take this instance id and look it up again. It's possible that + // it's been modified or even deleted since the saga executed. In that + // case, we might return a different state of the Instance than the one + // that the user created or even fail with a 404! Both of those are + // wrong behavior -- we should be returning the very instance that the + // user created. + // + // How can we fix this? Right now we have internal representations like + // Instance and analaogous end-user-facing representations like + // Instance. The former is not even serializable. The saga + // _could_ emit the View version, but that's not great for two (related) + // reasons: (1) other sagas might want to provision instances and get + // back the internal representation to do other things with the + // newly-created instance, and (2) even within a saga, it would be + // useful to pass a single Instance representation along the saga, + // but they probably would want the internal representation, not the + // view. + // + // The saga could emit an Instance directly. Today, Instance + // etc. aren't supposed to even be serializable -- we wanted to be able + // to have other datastore state there if needed. We could have a third + // InstanceInternalView...but that's starting to feel pedantic. We + // could just make Instance serializable, store that, and call it a + // day. 
Does it matter that we might have many copies of the same + // objects in memory? + // + // If we make these serializable, it would be nice if we could leverage + // the type system to ensure that we never accidentally send them out a + // dropshot endpoint. (On the other hand, maybe we _do_ want to do + // that, for internal interfaces! Can we do this on a + // per-dropshot-server-basis?) + // + // TODO Even worse, post-authz, we do two lookups here instead of one. + // Maybe sagas should be able to emit `authz::Instance`-type objects. let authz_instance = self.db_datastore.instance_lookup_by_id(instance_id).await?; self.db_datastore.instance_refetch(opctx, &authz_instance).await } - /* - * TODO-correctness It's not totally clear what the semantics and behavior - * should be here. It might be nice to say that you can only do this - * operation if the Instance is already stopped, in which case we can - * execute this immediately by just removing it from the database, with the - * same race we have with disk delete (i.e., if someone else is requesting - * an instance boot, we may wind up in an inconsistent state). On the other - * hand, we could always allow this operation, issue the request to the SA - * to destroy the instance (not just stop it), and proceed with deletion - * when that finishes. But in that case, although the HTTP DELETE request - * completed, the object will still appear for a little while, which kind of - * sucks. - */ + // TODO-correctness It's not totally clear what the semantics and behavior + // should be here. It might be nice to say that you can only do this + // operation if the Instance is already stopped, in which case we can + // execute this immediately by just removing it from the database, with the + // same race we have with disk delete (i.e., if someone else is requesting + // an instance boot, we may wind up in an inconsistent state). 
On the other + // hand, we could always allow this operation, issue the request to the SA + // to destroy the instance (not just stop it), and proceed with deletion + // when that finishes. But in that case, although the HTTP DELETE request + // completed, the object will still appear for a little while, which kind of + // sucks. pub async fn project_destroy_instance( &self, opctx: &OpContext, @@ -975,11 +931,9 @@ impl Nexus { project_name: &Name, instance_name: &Name, ) -> DeleteResult { - /* - * TODO-robustness We need to figure out what to do with Destroyed - * instances? Presumably we need to clean them up at some point, but - * not right away so that callers can see that they've been destroyed. - */ + // TODO-robustness We need to figure out what to do with Destroyed + // instances? Presumably we need to clean them up at some point, but + // not right away so that callers can see that they've been destroyed. let authz_instance = self .db_datastore .instance_lookup_by_path( @@ -1051,15 +1005,13 @@ impl Nexus { runtime: &nexus::InstanceRuntimeState, requested: &InstanceRuntimeStateRequested, ) -> Result<(), Error> { - /* - * Users are allowed to request a start or stop even if the instance is - * already in the desired state (or moving to it), and we will issue a - * request to the SA to make the state change in these cases in case the - * runtime state we saw here was stale. However, users are not allowed - * to change the state of an instance that's migrating, failed or - * destroyed. But if we're already migrating, requesting a migration is - * allowed to allow for idempotency. - */ + // Users are allowed to request a start or stop even if the instance is + // already in the desired state (or moving to it), and we will issue a + // request to the SA to make the state change in these cases in case the + // runtime state we saw here was stale. However, users are not allowed + // to change the state of an instance that's migrating, failed or + // destroyed. 
But if we're already migrating, requesting a migration is + // allowed to allow for idempotency. let allowed = match runtime.run_state { InstanceState::Creating => true, InstanceState::Starting => true, @@ -1115,9 +1067,7 @@ impl Nexus { ))) } - /** - * Returns the SledAgentClient for the host where this Instance is running. - */ + /// Returns the SledAgentClient for the host where this Instance is running. async fn instance_sled( &self, instance: &db::model::Instance, @@ -1126,9 +1076,7 @@ impl Nexus { self.sled_client(&sa_id).await } - /** - * Reboot the specified instance. - */ + /// Reboot the specified instance. pub async fn instance_reboot( &self, opctx: &OpContext, @@ -1136,19 +1084,17 @@ impl Nexus { project_name: &Name, instance_name: &Name, ) -> UpdateResult { - /* - * To implement reboot, we issue a call to the sled agent to set a - * runtime state of "reboot". We cannot simply stop the Instance and - * start it again here because if we crash in the meantime, we might - * leave it stopped. - * - * When an instance is rebooted, the "rebooting" flag remains set on - * the runtime state as it transitions to "Stopping" and "Stopped". - * This flag is cleared when the state goes to "Starting". This way, - * even if the whole rack powered off while this was going on, we would - * never lose track of the fact that this Instance was supposed to be - * running. - */ + // To implement reboot, we issue a call to the sled agent to set a + // runtime state of "reboot". We cannot simply stop the Instance and + // start it again here because if we crash in the meantime, we might + // leave it stopped. + // + // When an instance is rebooted, the "rebooting" flag remains set on + // the runtime state as it transitions to "Stopping" and "Stopped". + // This flag is cleared when the state goes to "Starting". 
This way, + // even if the whole rack powered off while this was going on, we would + // never lose track of the fact that this Instance was supposed to be + // running. let authz_project = self .db_datastore .project_lookup_by_path(organization_name, project_name) @@ -1171,9 +1117,7 @@ impl Nexus { self.db_datastore.instance_refetch(opctx, &authz_instance).await } - /** - * Make sure the given Instance is running. - */ + /// Make sure the given Instance is running. pub async fn instance_start( &self, opctx: &OpContext, @@ -1203,9 +1147,7 @@ impl Nexus { self.db_datastore.instance_refetch(opctx, &authz_instance).await } - /** - * Make sure the given Instance is stopped. - */ + /// Make sure the given Instance is stopped. pub async fn instance_stop( &self, opctx: &OpContext, @@ -1235,9 +1177,7 @@ impl Nexus { self.db_datastore.instance_refetch(opctx, &authz_instance).await } - /** - * Idempotently place the instance in a 'Migrating' state. - */ + /// Idempotently place the instance in a 'Migrating' state. pub async fn instance_start_migrate( &self, opctx: &OpContext, @@ -1266,10 +1206,8 @@ impl Nexus { self.db_datastore.instance_refetch(opctx, &authz_instance).await } - /** - * Modifies the runtime state of the Instance as requested. This generally - * means booting or halting the Instance. - */ + /// Modifies the runtime state of the Instance as requested. This generally + /// means booting or halting the Instance. async fn instance_set_runtime( &self, opctx: &OpContext, @@ -1286,12 +1224,10 @@ impl Nexus { let sa = self.instance_sled(&db_instance).await?; - /* - * Ask the sled agent to begin the state change. Then update the - * database to reflect the new intermediate state. If this update is - * not the newest one, that's fine. That might just mean the sled agent - * beat us to it. - */ + // Ask the sled agent to begin the state change. Then update the + // database to reflect the new intermediate state. If this update is + // not the newest one, that's fine. 
That might just mean the sled agent + // beat us to it. // TODO: Populate this with an appropriate NIC. // See also: sic_create_instance_record in sagas.rs for a similar @@ -1324,9 +1260,7 @@ impl Nexus { .map(|_| ()) } - /** - * Lists disks attached to the instance. - */ + /// Lists disks attached to the instance. pub async fn instance_list_disks( &self, opctx: &OpContext, @@ -1348,9 +1282,7 @@ impl Nexus { .await } - /** - * Attach a disk to an instance. - */ + /// Attach a disk to an instance. pub async fn instance_attach_disk( &self, opctx: &OpContext, @@ -1384,12 +1316,10 @@ impl Nexus { DiskState::Creating => "disk is detached", DiskState::Detached => "disk is detached", - /* - * It would be nice to provide a more specific message here, but - * the appropriate identifier to provide the user would be the - * other instance's name. Getting that would require another - * database hit, which doesn't seem worth it for this. - */ + // It would be nice to provide a more specific message here, but + // the appropriate identifier to provide the user would be the + // other instance's name. Getting that would require another + // database hit, which doesn't seem worth it for this. DiskState::Attaching(_) => { "disk is attached to another instance" } @@ -1409,23 +1339,19 @@ impl Nexus { } match &db_disk.state().into() { - /* - * If we're already attaching or attached to the requested instance, - * there's nothing else to do. - * TODO-security should it be an error if you're not authorized to - * do this and we did not actually have to do anything? - */ + // If we're already attaching or attached to the requested instance, + // there's nothing else to do. + // TODO-security should it be an error if you're not authorized to + // do this and we did not actually have to do anything? DiskState::Attached(id) if id == instance_id => return Ok(db_disk), - /* - * If the disk is currently attaching or attached to another - * instance, fail this request. 
Users must explicitly detach first - * if that's what they want. If it's detaching, they have to wait - * for it to become detached. - * TODO-debug: the error message here could be better. We'd have to - * look up the other instance by id (and gracefully handle it not - * existing). - */ + // If the disk is currently attaching or attached to another + // instance, fail this request. Users must explicitly detach first + // if that's what they want. If it's detaching, they have to wait + // for it to become detached. + // TODO-debug: the error message here could be better. We'd have to + // look up the other instance by id (and gracefully handle it not + // existing). DiskState::Attached(id) => { assert_ne!(id, instance_id); return disk_attachment_error(&db_disk); @@ -1463,9 +1389,7 @@ impl Nexus { self.db_datastore.disk_refetch(opctx, &authz_disk).await } - /** - * Detach a disk from an instance. - */ + /// Detach a disk from an instance. pub async fn instance_detach_disk( &self, opctx: &OpContext, @@ -1491,12 +1415,10 @@ impl Nexus { let instance_id = &authz_instance.id(); match &db_disk.state().into() { - /* - * This operation is a noop if the disk is not attached or already - * detaching from the same instance. - * TODO-security should it be an error if you're not authorized to - * do this and we did not actually have to do anything? - */ + // This operation is a noop if the disk is not attached or already + // detaching from the same instance. + // TODO-security should it be an error if you're not authorized to + // do this and we did not actually have to do anything? DiskState::Creating => return Ok(db_disk), DiskState::Detached => return Ok(db_disk), DiskState::Destroyed => return Ok(db_disk), @@ -1505,10 +1427,8 @@ impl Nexus { return Ok(db_disk) } - /* - * This operation is not allowed if the disk is attached to some - * other instance. - */ + // This operation is not allowed if the disk is attached to some + // other instance. 
DiskState::Attaching(id) if id != instance_id => { return Err(Error::InvalidRequest { message: String::from("disk is attached elsewhere"), }); } @@ -1525,7 +1445,7 @@ }); } - /* These are the cases where we have to do something. */ + // These are the cases where we have to do something. DiskState::Attaching(_) => (), DiskState::Attached(_) => (), } @@ -1541,10 +1461,8 @@ self.db_datastore.disk_refetch(opctx, &authz_disk).await } - /** - * Modifies the runtime state of the Disk as requested. This generally - * means attaching or detaching the disk. - */ + /// Modifies the runtime state of the Disk as requested. This generally + /// means attaching or detaching the disk. async fn disk_set_runtime( &self, opctx: &OpContext, @@ -1557,10 +1475,8 @@ opctx.authorize(authz::Action::Modify, authz_disk).await?; - /* - * Ask the Sled Agent to begin the state change. Then update the - * database to reflect the new intermediate state. - */ + // Ask the Sled Agent to begin the state change. Then update the + // database to reflect the new intermediate state. let new_runtime = sa .disk_put( &authz_disk.id(), @@ -2397,9 +2313,7 @@ impl Nexus { .await } - /** - * VPC Router routes - */ + // VPC Router routes pub async fn router_list_routes( &self, @@ -2541,9 +2455,7 @@ .await?) } - /* - * Racks. We simulate just one for now. - */ + // Racks. We simulate just one for now. fn as_rack(&self) -> db::model::Rack { db::model::Rack { @@ -2576,9 +2488,7 @@ } } - /* - * Sleds - */ + // Sleds pub async fn sleds_list( &self, @@ -2594,18 +2504,14 @@ self.db_datastore.sled_fetch(*sled_id).await } - /* - * Sagas - */ + // Sagas pub async fn sagas_list( &self, pagparams: &DataPageParams<'_, Uuid>, ) -> ListResult { - /* - * The endpoint we're serving only supports `ScanById`, which only - * supports an ascending scan. - */ + // The endpoint we're serving only supports `ScanById`, which only + // supports an ascending scan. 
bail_unless!( pagparams.direction == dropshot::PaginationOrder::Ascending ); @@ -2631,9 +2537,7 @@ impl Nexus { })? } - /* - * Built-in users - */ + // Built-in users pub async fn users_builtin_list( &self, @@ -2651,9 +2555,7 @@ impl Nexus { self.db_datastore.user_builtin_fetch(opctx, name).await } - /* - * Built-in roles - */ + // Built-in roles pub async fn roles_builtin_list( &self, @@ -2671,14 +2573,10 @@ impl Nexus { self.db_datastore.role_builtin_fetch(opctx, name).await } - /* - * Internal control plane interfaces. - */ + // Internal control plane interfaces. - /** - * Invoked by a sled agent to publish an updated runtime state for an - * Instance. - */ + /// Invoked by a sled agent to publish an updated runtime state for an + /// Instance. pub async fn notify_instance_updated( &self, id: &Uuid, @@ -2708,15 +2606,13 @@ impl Nexus { Ok(()) } - /* - * If the instance doesn't exist, swallow the error -- there's - * nothing to do here. - * TODO-robustness This could only be possible if we've removed an - * Instance from the datastore altogether. When would we do that? - * We don't want to do it as soon as something's destroyed, I think, - * and in that case, we'd need some async task for cleaning these - * up. - */ + // If the instance doesn't exist, swallow the error -- there's + // nothing to do here. + // TODO-robustness This could only be possible if we've removed an + // Instance from the datastore altogether. When would we do that? + // We don't want to do it as soon as something's destroyed, I think, + // and in that case, we'd need some async task for cleaning these + // up. Err(Error::ObjectNotFound { .. }) => { warn!(log, "non-existent instance updated by sled agent"; "instance_id" => %id, @@ -2724,12 +2620,10 @@ impl Nexus { Ok(()) } - /* - * If the datastore is unavailable, propagate that to the caller. - * TODO-robustness Really this should be any _transient_ error. How - * can we distinguish? 
Maybe datastore should emit something - * different from Error with an Into. - */ + // If the datastore is unavailable, propagate that to the caller. + // TODO-robustness Really this should be any _transient_ error. How + // can we distinguish? Maybe datastore should emit something + // different from Error with an Into. Err(error) => { warn!(log, "failed to update instance from sled agent"; "instance_id" => %id, @@ -2754,7 +2648,7 @@ impl Nexus { .disk_update_runtime(opctx, &authz_disk, &new_state.clone().into()) .await; - /* TODO-cleanup commonize with notify_instance_updated() */ + // TODO-cleanup commonize with notify_instance_updated() match result { Ok(true) => { info!(log, "disk updated by sled agent"; @@ -2769,15 +2663,13 @@ impl Nexus { Ok(()) } - /* - * If the disk doesn't exist, swallow the error -- there's - * nothing to do here. - * TODO-robustness This could only be possible if we've removed a - * disk from the datastore altogether. When would we do that? - * We don't want to do it as soon as something's destroyed, I think, - * and in that case, we'd need some async task for cleaning these - * up. - */ + // If the disk doesn't exist, swallow the error -- there's + // nothing to do here. + // TODO-robustness This could only be possible if we've removed a + // disk from the datastore altogether. When would we do that? + // We don't want to do it as soon as something's destroyed, I think, + // and in that case, we'd need some async task for cleaning these + // up. Err(Error::ObjectNotFound { .. }) => { warn!(log, "non-existent disk updated by sled agent"; "instance_id" => %id, @@ -2785,9 +2677,7 @@ impl Nexus { Ok(()) } - /* - * If the datastore is unavailable, propagate that to the caller. - */ + // If the datastore is unavailable, propagate that to the caller. 
Err(error) => { warn!(log, "failed to update disk from sled agent"; "disk_id" => %id, @@ -2798,13 +2688,9 @@ impl Nexus { } } - /* - * Timeseries - */ + // Timeseries - /** - * List existing timeseries schema. - */ + /// List existing timeseries schema. pub async fn timeseries_schema_list( &self, pag_params: &TimeseriesSchemaPaginationParams, @@ -2823,9 +2709,7 @@ impl Nexus { }) } - /** - * Assign a newly-registered metric producer to an oximeter collector server. - */ + /// Assign a newly-registered metric producer to an oximeter collector server. pub async fn assign_producer( &self, producer_info: nexus::ProducerEndpoint, @@ -2848,9 +2732,7 @@ impl Nexus { Ok(()) } - /** - * Return an oximeter collector to assign a newly-registered producer - */ + /// Return an oximeter collector to assign a newly-registered producer async fn next_collector(&self) -> Result<(OximeterClient, Uuid), Error> { // TODO-robustness Replace with a real load-balancing strategy. let page_params = DataPageParams { diff --git a/nexus/src/populate.rs b/nexus/src/populate.rs index 50c923f6ad..17aebac8da 100644 --- a/nexus/src/populate.rs +++ b/nexus/src/populate.rs @@ -83,11 +83,9 @@ async fn populate( .await; if let Err(error) = &db_result { - /* - * TODO-autonomy this should raise an alert, bump a counter, or raise - * some other red flag that something is wrong. (This should be - * unlikely in practice.) - */ + // TODO-autonomy this should raise an alert, bump a counter, or raise + // some other red flag that something is wrong. (This should be + // unlikely in practice.) error!(opctx.log, "gave up trying to populate built-in {}", p.name; "error_message" => ?error diff --git a/nexus/src/saga_interface.rs b/nexus/src/saga_interface.rs index 759e6d8d14..e878af42f7 100644 --- a/nexus/src/saga_interface.rs +++ b/nexus/src/saga_interface.rs @@ -2,9 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! 
- * Interfaces available to saga actions and undo actions - */ +//! Interfaces available to saga actions and undo actions use crate::external_api::params; use crate::Nexus; @@ -16,11 +14,9 @@ use std::fmt; use std::sync::Arc; use uuid::Uuid; -/* - * TODO-design Should this be the same thing as ServerContext? It's - * very analogous, but maybe there's utility in having separate views for the - * HTTP server and sagas. - */ +// TODO-design Should this be the same thing as ServerContext? It's +// very analogous, but maybe there's utility in having separate views for the +// HTTP server and sagas. pub struct SagaContext { nexus: Arc, log: Logger, @@ -46,17 +42,15 @@ impl SagaContext { &self.log } - /* - * TODO-design This interface should not exist. Instead, sleds should be - * represented in the database. Reservations will wind up writing to the - * database. Allocating a server will thus be a saga action, complete with - * an undo action. The only thing needed at this layer is a way to read and - * write to the database, which we already have. - * - * Note: the parameters appear here (unused) to make sure callers make sure - * to have them available. They're not used now, but they will be in a real - * implementation. - */ + // TODO-design This interface should not exist. Instead, sleds should be + // represented in the database. Reservations will wind up writing to the + // database. Allocating a server will thus be a saga action, complete with + // an undo action. The only thing needed at this layer is a way to read and + // write to the database, which we already have. + // + // Note: the parameters appear here (unused) to make sure callers make sure + // to have them available. They're not used now, but they will be in a real + // implementation. 
pub async fn alloc_server( &self, _params: ¶ms::InstanceCreate, diff --git a/nexus/src/sagas.rs b/nexus/src/sagas.rs index 9838846efa..338bffb334 100644 --- a/nexus/src/sagas.rs +++ b/nexus/src/sagas.rs @@ -2,16 +2,12 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! - * Saga actions, undo actions, and saga constructors used in Nexus. - */ +//! Saga actions, undo actions, and saga constructors used in Nexus. -/* - * NOTE: We want to be careful about what interfaces we expose to saga actions. - * In the future, we expect to mock these out for comprehensive testing of - * correctness, idempotence, etc. The more constrained this interface is, the - * easier it will be to test, version, and update in deployed systems. - */ +// NOTE: We want to be careful about what interfaces we expose to saga actions. +// In the future, we expect to mock these out for comprehensive testing of +// correctness, idempotence, etc. The more constrained this interface is, the +// easier it will be to test, version, and update in deployed systems. use crate::context::OpContext; use crate::db::identity::{Asset, Resource}; @@ -57,9 +53,7 @@ use steno::SagaTemplateGeneric; use steno::SagaType; use uuid::Uuid; -/* - * We'll need a richer mechanism for registering sagas, but this works for now. - */ +// We'll need a richer mechanism for registering sagas, but this works for now. 
pub const SAGA_INSTANCE_CREATE_NAME: &'static str = "instance-create"; pub const SAGA_INSTANCE_MIGRATE_NAME: &'static str = "instance-migrate"; pub const SAGA_DISK_CREATE_NAME: &'static str = "disk-create"; @@ -114,9 +108,7 @@ async fn saga_generate_uuid( Ok(Uuid::new_v4()) } -/* - * "Create Instance" saga template - */ +// "Create Instance" saga template #[derive(Debug, Deserialize, Serialize)] pub struct ParamsInstanceCreate { @@ -508,9 +500,7 @@ async fn sic_create_instance_record( async fn sic_instance_ensure( sagactx: ActionContext, ) -> Result<(), ActionError> { - /* - * TODO-correctness is this idempotent? - */ + // TODO-correctness is this idempotent? let osagactx = sagactx.user_data(); let runtime_params = InstanceRuntimeStateRequested { run_state: InstanceStateRequested::Running, @@ -525,11 +515,9 @@ async fn sic_instance_ensure( .await .map_err(ActionError::action_failed)?; - /* - * Ask the sled agent to begin the state change. Then update the database - * to reflect the new intermediate state. If this update is not the newest - * one, that's fine. That might just mean the sled agent beat us to it. - */ + // Ask the sled agent to begin the state change. Then update the database + // to reflect the new intermediate state. If this update is not the newest + // one, that's fine. That might just mean the sled agent beat us to it. 
let new_runtime_state = sa .instance_put( &instance_id, @@ -554,9 +542,7 @@ async fn sic_instance_ensure( .map_err(ActionError::action_failed) } -/* - * "Migrate Instance" saga template - */ +// "Migrate Instance" saga template #[derive(Debug, Deserialize, Serialize)] pub struct ParamsInstanceMigrate { pub serialized_authn: authn::saga::Serialized, diff --git a/nexus/test-utils/src/http_testing.rs b/nexus/test-utils/src/http_testing.rs index 210ea2db71..fca1073bf3 100644 --- a/nexus/test-utils/src/http_testing.rs +++ b/nexus/test-utils/src/http_testing.rs @@ -16,7 +16,6 @@ use std::fmt::Debug; /// Convenient way to make an outgoing HTTP request and verify various /// properties of the response for testing -// // When testing an HTTP server, we make varying requests to the server and // verify a bunch of properties about its behavior. A lot of things can go // wrong along the way: diff --git a/nexus/test-utils/src/lib.rs b/nexus/test-utils/src/lib.rs index 60301bdd5d..090f98ba41 100644 --- a/nexus/test-utils/src/lib.rs +++ b/nexus/test-utils/src/lib.rs @@ -57,23 +57,21 @@ impl ControlPlaneTestContext { } pub fn load_test_config() -> omicron_nexus::Config { - /* - * We load as much configuration as we can from the test suite configuration - * file. In practice, TestContext requires that: - * - * - the Nexus TCP listen port be 0, - * - the CockroachDB TCP listen port be 0, and - * - if the log will go to a file then the path must be the sentinel value - * "UNUSED". - * - each Nexus created for testing gets its own id so they don't see each - * others sagas and try to recover them - * - * (See LogContext::new() for details.) Given these restrictions, it may - * seem barely worth reading a config file at all. However, developers can - * change the logging level and local IP if they want, and as we add more - * configuration options, we expect many of those can be usefully configured - * (and reconfigured) for the test suite. 
- */ + // We load as much configuration as we can from the test suite configuration + // file. In practice, TestContext requires that: + // + // - the Nexus TCP listen port be 0, + // - the CockroachDB TCP listen port be 0, and + // - if the log will go to a file then the path must be the sentinel value + // "UNUSED". + // - each Nexus created for testing gets its own id so they don't see each + // others sagas and try to recover them + // + // (See LogContext::new() for details.) Given these restrictions, it may + // seem barely worth reading a config file at all. However, developers can + // change the logging level and local IP if they want, and as we add more + // configuration options, we expect many of those can be usefully configured + // (and reconfigured) for the test suite. let config_file_path = Path::new("tests/config.test.toml"); let mut config = omicron_nexus::Config::from_file(config_file_path) .expect("failed to load config.test.toml"); @@ -94,13 +92,13 @@ pub async fn test_setup_with_config( let rack_id = Uuid::parse_str(RACK_UUID).unwrap(); let log = &logctx.log; - /* Start up CockroachDB. */ + // Start up CockroachDB. let database = db::test_setup_database(log).await; - /* Start ClickHouse database server. */ + // Start ClickHouse database server. let clickhouse = dev::clickhouse::ClickHouseInstance::new(0).await.unwrap(); - /* Store actual address/port information for the databases after they start. */ + // Store actual address/port information for the databases after they start. config.database.url = database.pg_config().clone(); config.timeseries_db.address.set_port(clickhouse.port()); @@ -123,7 +121,7 @@ pub async fn test_setup_with_config( logctx.log.new(o!("component" => "internal client test context")), ); - /* Set up a single sled agent. */ + // Set up a single sled agent. 
let sa_id = Uuid::parse_str(SLED_AGENT_UUID).unwrap(); let sled_agent = start_sled_agent( logctx.log.new(o!( @@ -182,7 +180,7 @@ pub async fn start_sled_agent( request_body_max_bytes: 2048, ..Default::default() }, - /* TODO-cleanup this is unused */ + // TODO-cleanup this is unused log: ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Debug }, storage: sim::ConfigStorage { zpools: vec![], @@ -291,7 +289,7 @@ pub async fn start_producer_server( Ok(server) } -/** Returns whether the two identity metadata objects are identical. */ +/// Returns whether the two identity metadata objects are identical. pub fn identity_eq(ident1: &IdentityMetadata, ident2: &IdentityMetadata) { assert_eq!(ident1.id, ident2.id); assert_eq!(ident1.name, ident2.name); diff --git a/nexus/tests/integration_tests/basic.rs b/nexus/tests/integration_tests/basic.rs index 6047a66788..a0fb62d396 100644 --- a/nexus/tests/integration_tests/basic.rs +++ b/nexus/tests/integration_tests/basic.rs @@ -2,12 +2,10 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! - * Smoke tests against the API server - * - * This file defines a very basic set of tests against the API. - * TODO-coverage add test for racks, sleds - */ +//! Smoke tests against the API server +//! +//! This file defines a very basic set of tests against the API. +//! 
TODO-coverage add test for racks, sleds use dropshot::test_util::objects_list_page; use dropshot::test_util::ClientTestContext; @@ -41,7 +39,7 @@ async fn test_basic_failures(cptestctx: &ControlPlaneTestContext) { let org_name = "test-org"; create_organization(&client, &org_name).await; - /* Error case: GET /nonexistent (a path with no route at all) */ + // Error case: GET /nonexistent (a path with no route at all) let error = client .make_request( Method::GET, @@ -53,10 +51,8 @@ async fn test_basic_failures(cptestctx: &ControlPlaneTestContext) { .expect_err("expected error"); assert_eq!("Not Found", error.message); - /* - * Error case: GET /organizations/test-org/projects/nonexistent (a possible - * value that does not exist inside a collection that does exist) - */ + // Error case: GET /organizations/test-org/projects/nonexistent (a possible + // value that does not exist inside a collection that does exist) let error = client .make_request( Method::GET, @@ -68,11 +64,9 @@ async fn test_basic_failures(cptestctx: &ControlPlaneTestContext) { .expect_err("expected error"); assert_eq!("not found: project with name \"nonexistent\"", error.message); - /* - * Error case: GET /organizations/test-org/projects/-invalid-name - * TODO-correctness is 400 the right error code here or is 404 more - * appropriate? - */ + // Error case: GET /organizations/test-org/projects/-invalid-name + // TODO-correctness is 400 the right error code here or is 404 more + // appropriate? 
let error = client .make_request( Method::GET, @@ -88,7 +82,7 @@ async fn test_basic_failures(cptestctx: &ControlPlaneTestContext) { error.message ); - /* Error case: PUT /organizations/test-org/projects */ + // Error case: PUT /organizations/test-org/projects let error = client .make_request( Method::PUT, @@ -100,7 +94,7 @@ async fn test_basic_failures(cptestctx: &ControlPlaneTestContext) { .expect_err("expected error"); assert_eq!("Method Not Allowed", error.message); - /* Error case: DELETE /organizations/test-org/projects */ + // Error case: DELETE /organizations/test-org/projects let error = client .make_request( Method::DELETE, @@ -112,7 +106,7 @@ async fn test_basic_failures(cptestctx: &ControlPlaneTestContext) { .expect_err("expected error"); assert_eq!("Method Not Allowed", error.message); - /* Error case: list instances in a nonexistent project. */ + // Error case: list instances in a nonexistent project. let error = client .make_request_with_body( Method::GET, @@ -124,7 +118,7 @@ async fn test_basic_failures(cptestctx: &ControlPlaneTestContext) { .expect_err("expected error"); assert_eq!("not found: project with name \"nonexistent\"", error.message); - /* Error case: fetch an instance in a nonexistent project. */ + // Error case: fetch an instance in a nonexistent project. let error = client .make_request_with_body( Method::GET, @@ -136,7 +130,7 @@ async fn test_basic_failures(cptestctx: &ControlPlaneTestContext) { .expect_err("expected error"); assert_eq!("not found: project with name \"nonexistent\"", error.message); - /* Error case: fetch an instance with an invalid name. */ + // Error case: fetch an instance with an invalid name. let error = client .make_request_with_body( Method::GET, @@ -152,7 +146,7 @@ async fn test_basic_failures(cptestctx: &ControlPlaneTestContext) { error.message ); - /* Error case: delete an instance with an invalid name. */ + // Error case: delete an instance with an invalid name. 
let error = client .make_request_with_body( Method::DELETE, @@ -177,15 +171,11 @@ async fn test_projects_basic(cptestctx: &ControlPlaneTestContext) { create_organization(&client, &org_name).await; let projects_url = "/organizations/test-org/projects"; - /* - * Verify that there are no projects to begin with. - */ + // Verify that there are no projects to begin with. let projects = projects_list(&client, &projects_url).await; assert_eq!(0, projects.len()); - /* - * Create three projects used by the rest of this test. - */ + // Create three projects used by the rest of this test. let projects_to_create = vec!["simproject1", "simproject2", "simproject3"]; let new_project_ids = { let mut project_ids: Vec = Vec::new(); @@ -219,10 +209,8 @@ async fn test_projects_basic(cptestctx: &ControlPlaneTestContext) { project_ids }; - /* - * Error case: GET /organizations/test-org/projects/simproject1/nonexistent - * (a path that does not exist beneath a resource that does exist) - */ + // Error case: GET /organizations/test-org/projects/simproject1/nonexistent + // (a path that does not exist beneath a resource that does exist) let error = client .make_request_error( Method::GET, @@ -232,11 +220,9 @@ async fn test_projects_basic(cptestctx: &ControlPlaneTestContext) { .await; assert_eq!("Not Found", error.message); - /* - * Basic GET /organizations/test-org/projects now that we've created a few. - * TODO-coverage: pagination - * TODO-coverage: marker even without pagination - */ + // Basic GET /organizations/test-org/projects now that we've created a few. 
+ // TODO-coverage: pagination + // TODO-coverage: marker even without pagination let initial_projects = projects_list(&client, &projects_url).await; assert_eq!(initial_projects.len(), 3); assert_eq!(initial_projects[0].identity.id, new_project_ids[0]); @@ -249,10 +235,8 @@ async fn test_projects_basic(cptestctx: &ControlPlaneTestContext) { assert_eq!(initial_projects[2].identity.name, "simproject3"); assert!(initial_projects[2].identity.description.len() > 0); - /* - * Basic test of out-of-the-box GET - * /organizations/test-org/projects/simproject2 - */ + // Basic test of out-of-the-box GET + // /organizations/test-org/projects/simproject2 let project = project_get(&client, "/organizations/test-org/projects/simproject2") .await; @@ -262,10 +246,8 @@ async fn test_projects_basic(cptestctx: &ControlPlaneTestContext) { assert_eq!(project.identity.description, expected.identity.description); assert!(project.identity.description.len() > 0); - /* - * Delete "simproject2". We'll make sure that's reflected in the other - * requests. - */ + // Delete "simproject2". We'll make sure that's reflected in the other + // requests. NexusRequest::object_delete( client, "/organizations/test-org/projects/simproject2", @@ -275,10 +257,8 @@ async fn test_projects_basic(cptestctx: &ControlPlaneTestContext) { .await .expect("expected request to fail"); - /* - * Having deleted "simproject2", verify "GET", "PUT", and "DELETE" on - * "/organizations/test-org/projects/simproject2". - */ + // Having deleted "simproject2", verify "GET", "PUT", and "DELETE" on + // "/organizations/test-org/projects/simproject2". 
for method in [Method::GET, Method::DELETE] { NexusRequest::expect_failure( client, @@ -310,9 +290,7 @@ async fn test_projects_basic(cptestctx: &ControlPlaneTestContext) { .await .expect("failed to make request"); - /* - * Similarly, verify "GET /organizations/test-org/projects" - */ + // Similarly, verify "GET /organizations/test-org/projects" let expected_projects: Vec<&Project> = initial_projects .iter() .filter(|p| p.identity.name != "simproject2") @@ -339,10 +317,8 @@ async fn test_projects_basic(cptestctx: &ControlPlaneTestContext) { expected_projects[1].identity.description ); - /* - * Update "simproject3". We'll make sure that's reflected in the other - * requests. - */ + // Update "simproject3". We'll make sure that's reflected in the other + // requests. let project_update = params::ProjectUpdate { identity: IdentityMetadataUpdateParams { name: None, @@ -372,11 +348,9 @@ async fn test_projects_basic(cptestctx: &ControlPlaneTestContext) { assert_eq!(project.identity.description, expected.identity.description); assert_eq!(project.identity.description, "Li'l lightnin'"); - /* - * Update "simproject3" in a way that changes its name. This is a deeper - * operation under the hood. This case also exercises changes to multiple - * fields in one request. - */ + // Update "simproject3" in a way that changes its name. This is a deeper + // operation under the hood. This case also exercises changes to multiple + // fields in one request. let project_update = params::ProjectUpdate { identity: IdentityMetadataUpdateParams { name: Some("lil-lightnin".parse().unwrap()), @@ -409,9 +383,7 @@ async fn test_projects_basic(cptestctx: &ControlPlaneTestContext) { .await .expect("expected success"); - /* - * Try to create a project with a name that conflicts with an existing one. - */ + // Try to create a project with a name that conflicts with an existing one. 
let project_create = params::ProjectCreate { identity: IdentityMetadataCreateParams { name: "simproject1".parse().unwrap(), @@ -433,10 +405,8 @@ async fn test_projects_basic(cptestctx: &ControlPlaneTestContext) { // TODO-coverage try to rename it to a name that conflicts - /* - * Try to create a project with an unsupported name. - * TODO-polish why doesn't serde include the field name in this error? - */ + // Try to create a project with an unsupported name. + // TODO-polish why doesn't serde include the field name in this error? #[derive(Serialize)] struct BadProject { name: &'static str, @@ -461,9 +431,7 @@ async fn test_projects_basic(cptestctx: &ControlPlaneTestContext) { (allowed characters are lowercase ASCII, digits, and \"-\"" )); - /* - * Now, really do create another project. - */ + // Now, really do create another project. let project_create = params::ProjectCreate { identity: IdentityMetadataCreateParams { name: "honor-roller".parse().unwrap(), @@ -481,13 +449,11 @@ async fn test_projects_basic(cptestctx: &ControlPlaneTestContext) { assert_eq!(project.identity.name, "honor-roller"); assert_eq!(project.identity.description, "a soapbox racer"); - /* - * List projects again and verify all of our changes. We should have: - * - * - "honor-roller" with description "a soapbox racer" - * - "lil-lightnin" with description "little lightning" - * - "simproject1", same as when it was created. - */ + // List projects again and verify all of our changes. We should have: + // + // - "honor-roller" with description "a soapbox racer" + // - "lil-lightnin" with description "little lightning" + // - "simproject1", same as when it was created. 
let projects = projects_list(&client, &projects_url).await; assert_eq!(projects.len(), 3); assert_eq!(projects[0].identity.name, "honor-roller"); @@ -505,21 +471,19 @@ async fn test_projects_list(cptestctx: &ControlPlaneTestContext) { let org_name = "test-org"; create_organization(&client, &org_name).await; - /* Verify that there are no projects to begin with. */ + // Verify that there are no projects to begin with. let projects_url = "/organizations/test-org/projects"; assert_eq!(projects_list(&client, &projects_url).await.len(), 0); - /* Create a large number of projects that we can page through. */ + // Create a large number of projects that we can page through. let projects_total = 10; let projects_subset = 3; let mut projects_created = Vec::with_capacity(projects_total); for _ in 0..projects_total { - /* - * We'll use uuids for the names to make sure that works, and that we - * can paginate through by _name_ even though the names happen to be - * uuids. Names have to start with a letter, though, so we've got to - * make sure our uuid has one. - */ + // We'll use uuids for the names to make sure that works, and that we + // can paginate through by _name_ even though the names happen to be + // uuids. Names have to start with a letter, though, so we've got to + // make sure our uuid has one. let mut name = Uuid::new_v4().to_string(); name.replace_range(0..1, "a"); let project = create_project(&client, org_name, &name).await; @@ -540,10 +504,8 @@ async fn test_projects_list(cptestctx: &ControlPlaneTestContext) { clone.iter().map(|v| v.id).collect::>() }; - /* - * Page through all the projects in the default order, which should be in - * increasing order of name. - */ + // Page through all the projects in the default order, which should be in + // increasing order of name. 
let found_projects_by_name = NexusRequest::iter_collection_authn::( &client, @@ -563,10 +525,8 @@ async fn test_projects_list(cptestctx: &ControlPlaneTestContext) { .collect::>() ); - /* - * Page through all the projects in ascending order by name, which should be - * the same as above. - */ + // Page through all the projects in ascending order by name, which should be + // the same as above. let found_projects_by_name = NexusRequest::iter_collection_authn::( &client, @@ -586,10 +546,8 @@ async fn test_projects_list(cptestctx: &ControlPlaneTestContext) { .collect::>() ); - /* - * Page through all the projects in descending order by name, which should be - * the reverse of the above. - */ + // Page through all the projects in descending order by name, which should be + // the reverse of the above. let mut found_projects_by_name = NexusRequest::iter_collection_authn::( &client, @@ -610,9 +568,7 @@ async fn test_projects_list(cptestctx: &ControlPlaneTestContext) { .collect::>() ); - /* - * Page through the projects in ascending order by id. - */ + // Page through the projects in ascending order by id. let found_projects_by_id = NexusRequest::iter_collection_authn::( &client, projects_url, @@ -636,11 +592,11 @@ async fn test_projects_list(cptestctx: &ControlPlaneTestContext) { async fn test_sleds_list(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - /* Verify that there is one sled to begin with. */ + // Verify that there is one sled to begin with. let sleds_url = "/hardware/sleds"; assert_eq!(sleds_list(&client, &sleds_url).await.len(), 1); - /* Now start a few more sled agents. */ + // Now start a few more sled agents. let nsleds = 3; let mut sas = Vec::with_capacity(nsleds); for _ in 0..nsleds { @@ -651,7 +607,7 @@ async fn test_sleds_list(cptestctx: &ControlPlaneTestContext) { sas.push(start_sled_agent(log, addr, sa_id).await.unwrap()); } - /* List sleds again. */ + // List sleds again. 
let sleds_found = sleds_list(&client, &sleds_url).await; assert_eq!(sleds_found.len(), nsleds + 1); @@ -661,7 +617,7 @@ async fn test_sleds_list(cptestctx: &ControlPlaneTestContext) { sledids_found_sorted.sort(); assert_eq!(sledids_found, sledids_found_sorted); - /* Tear down the agents. */ + // Tear down the agents. for sa in sas { sa.http_server.close().await.unwrap(); } diff --git a/nexus/tests/integration_tests/commands.rs b/nexus/tests/integration_tests/commands.rs index 241c2715e8..7d3855d5a6 100644 --- a/nexus/tests/integration_tests/commands.rs +++ b/nexus/tests/integration_tests/commands.rs @@ -2,15 +2,11 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! - * Tests for the executable commands in this repo. Most functionality is tested - * elsewhere, so this really just sanity checks argument parsing, bad args, and - * the --openapi mode. - */ +//! Tests for the executable commands in this repo. Most functionality is tested +//! elsewhere, so this really just sanity checks argument parsing, bad args, and +//! the --openapi mode. -/* - * TODO-coverage: test success cases of nexus - */ +// TODO-coverage: test success cases of nexus use expectorate::assert_contents; use omicron_test_utils::dev::test_cmds::assert_exit_code; @@ -27,17 +23,15 @@ use std::fs; use std::path::PathBuf; use subprocess::Exec; -/** name of the "nexus" executable */ +/// name of the "nexus" executable const CMD_NEXUS: &str = env!("CARGO_BIN_EXE_nexus"); fn path_to_nexus() -> PathBuf { path_to_executable(CMD_NEXUS) } -/** - * Write the requested string to a temporary file and return the path to that - * file. - */ +/// Write the requested string to a temporary file and return the path to that +/// file. 
fn write_config(config: &str) -> PathBuf { let file_path = temp_file_path("test_commands_config"); eprintln!("writing temp config: {}", file_path.display()); @@ -45,9 +39,7 @@ fn write_config(config: &str) -> PathBuf { file_path } -/* - * Tests - */ +// Tests #[test] fn test_nexus_no_args() { @@ -93,15 +85,13 @@ fn test_nexus_invalid_config() { #[track_caller] fn run_command_with_arg(arg: &str) -> (String, String) { - /* - * This is a little goofy: we need a config file for the program. - * (Arguably, --openapi shouldn't require a config file, but it's - * conceivable that the API metadata or the exposed endpoints would depend - * on the configuration.) We ship a config file in "examples", and we may - * as well use it here -- it would be a bug if that one didn't work for this - * purpose. However, it's not clear how to reliably locate it at runtime. - * But we do know where it is at compile time, so we load it then. - */ + // This is a little goofy: we need a config file for the program. + // (Arguably, --openapi shouldn't require a config file, but it's + // conceivable that the API metadata or the exposed endpoints would depend + // on the configuration.) We ship a config file in "examples", and we may + // as well use it here -- it would be a bug if that one didn't work for this + // purpose. However, it's not clear how to reliably locate it at runtime. + // But we do know where it is at compile time, so we load it then. let config = include_str!("../../examples/config.toml"); let config_path = write_config(config); let exec = Exec::cmd(path_to_nexus()).arg(&config_path).arg(arg); @@ -117,39 +107,29 @@ fn test_nexus_openapi() { let (stdout_text, stderr_text) = run_command_with_arg("--openapi"); assert_contents("tests/output/cmd-nexus-openapi-stderr", &stderr_text); - /* - * Make sure the result parses as a valid OpenAPI spec and sanity-check a - * few fields. - */ + // Make sure the result parses as a valid OpenAPI spec and sanity-check a + // few fields. 
let spec: OpenAPI = serde_json::from_str(&stdout_text) .expect("stdout was not valid OpenAPI"); assert_eq!(spec.openapi, "3.0.3"); assert_eq!(spec.info.title, "Oxide Region API"); assert_eq!(spec.info.version, "0.0.1"); - /* - * Spot check a couple of items. - */ + // Spot check a couple of items. assert!(!spec.paths.paths.is_empty()); assert!(spec.paths.paths.get("/organizations").is_some()); - /* - * Check for lint errors. - */ + // Check for lint errors. let errors = openapi_lint::validate(&spec); assert!(errors.is_empty(), "{}", errors.join("\n\n")); - /* - * Construct a string that helps us identify the organization of tags and - * operations. - */ + // Construct a string that helps us identify the organization of tags and + // operations. let mut ops_by_tag = BTreeMap::>::new(); for (path, _, op) in spec.operations() { - /* - * Make sure each operation has exactly one tag. Note, we intentionally - * do this before validating the OpenAPI output as fixing an error here - * would necessitate refreshing the spec file again. - */ + // Make sure each operation has exactly one tag. Note, we intentionally + // do this before validating the OpenAPI output as fixing an error here + // would necessitate refreshing the spec file again. assert_eq!( op.tags.len(), 1, @@ -178,17 +158,13 @@ fn test_nexus_openapi() { tags.push('\n'); } - /* - * Confirm that the output hasn't changed. It's expected that we'll change - * this file as the API evolves, but pay attention to the diffs to ensure - * that the changes match your expectations. - */ + // Confirm that the output hasn't changed. It's expected that we'll change + // this file as the API evolves, but pay attention to the diffs to ensure + // that the changes match your expectations. assert_contents("../openapi/nexus.json", &stdout_text); - /* - * When this fails, verify that operations on which you're adding, - * renaming, or changing the tags are what you intend. 
- */ + // When this fails, verify that operations on which you're adding, + // renaming, or changing the tags are what you intend. assert_contents("tests/output/nexus_tags.txt", &tags); } @@ -198,16 +174,12 @@ fn test_nexus_openapi_internal() { let spec: OpenAPI = serde_json::from_str(&stdout_text) .expect("stdout was not valid OpenAPI"); - /* - * Check for lint errors. - */ + // Check for lint errors. let errors = openapi_lint::validate(&spec); assert!(errors.is_empty(), "{}", errors.join("\n\n")); - /* - * Confirm that the output hasn't changed. It's expected that we'll change - * this file as the API evolves, but pay attention to the diffs to ensure - * that the changes match your expectations. - */ + // Confirm that the output hasn't changed. It's expected that we'll change + // this file as the API evolves, but pay attention to the diffs to ensure + // that the changes match your expectations. assert_contents("../openapi/nexus-internal.json", &stdout_text); } diff --git a/nexus/tests/integration_tests/disks.rs b/nexus/tests/integration_tests/disks.rs index 50ef122c5e..e2ab7a7978 100644 --- a/nexus/tests/integration_tests/disks.rs +++ b/nexus/tests/integration_tests/disks.rs @@ -712,9 +712,7 @@ fn disks_eq(disk1: &Disk, disk2: &Disk) { assert_eq!(disk1.device_path, disk2.device_path); } -/** - * Simulate completion of an ongoing disk state transition. - */ +/// Simulate completion of an ongoing disk state transition. async fn disk_simulate(nexus: &Arc, id: &Uuid) { let sa = nexus.disk_sled_by_id(id).await.unwrap(); sa.disk_finish_transition(id.clone()).await; diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index 60925ee6e3..e166ad3fde 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -2,9 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! 
- * Tests basic instance support in the API - */ +//! Tests basic instance support in the API use http::method::Method; use http::StatusCode; @@ -46,7 +44,7 @@ async fn test_instances_access_before_create_returns_not_found( ) { let client = &cptestctx.external_client; - /* Create a project that we'll use for testing. */ + // Create a project that we'll use for testing. create_organization(&client, ORGANIZATION_NAME).await; let url_instances = format!( "/organizations/{}/projects/{}/instances", @@ -54,11 +52,11 @@ async fn test_instances_access_before_create_returns_not_found( ); let _ = create_project(&client, ORGANIZATION_NAME, PROJECT_NAME).await; - /* List instances. There aren't any yet. */ + // List instances. There aren't any yet. let instances = instances_list(&client, &url_instances).await; assert_eq!(instances.len(), 0); - /* Make sure we get a 404 if we fetch one. */ + // Make sure we get a 404 if we fetch one. let instance_url = format!("{}/just-rainsticks", url_instances); let error: HttpErrorResponseBody = NexusRequest::expect_failure( client, @@ -77,7 +75,7 @@ async fn test_instances_access_before_create_returns_not_found( "not found: instance with name \"just-rainsticks\"" ); - /* Ditto if we try to delete one. */ + // Ditto if we try to delete one. let error: HttpErrorResponseBody = NexusRequest::expect_failure( client, StatusCode::NOT_FOUND, @@ -104,7 +102,7 @@ async fn test_instances_create_reboot_halt( let apictx = &cptestctx.server.apictx; let nexus = &apictx.nexus; - /* Create a project that we'll use for testing. */ + // Create a project that we'll use for testing. create_organization(&client, ORGANIZATION_NAME).await; let url_instances = format!( "/organizations/{}/projects/{}/instances", @@ -112,7 +110,7 @@ async fn test_instances_create_reboot_halt( ); let _ = create_project(&client, ORGANIZATION_NAME, PROJECT_NAME).await; - /* Create an instance. */ + // Create an instance. 
let instance_url = format!("{}/just-rainsticks", url_instances); let instance = create_instance( client, @@ -124,13 +122,13 @@ async fn test_instances_create_reboot_halt( assert_eq!(instance.identity.name, "just-rainsticks"); assert_eq!(instance.identity.description, "instance \"just-rainsticks\""); let InstanceCpuCount(nfoundcpus) = instance.ncpus; - /* These particulars are hardcoded in create_instance(). */ + // These particulars are hardcoded in create_instance(). assert_eq!(nfoundcpus, 4); assert_eq!(instance.memory.to_whole_mebibytes(), 256); assert_eq!(instance.hostname, "the_host"); assert_eq!(instance.runtime.run_state, InstanceState::Starting); - /* Attempt to create a second instance with a conflicting name. */ + // Attempt to create a second instance with a conflicting name. let error: HttpErrorResponseBody = NexusRequest::new( RequestBuilder::new(client, Method::POST, &url_instances) .body(Some(¶ms::InstanceCreate { @@ -157,17 +155,17 @@ async fn test_instances_create_reboot_halt( .unwrap(); assert_eq!(error.message, "already exists: instance \"just-rainsticks\""); - /* List instances again and expect to find the one we just created. */ + // List instances again and expect to find the one we just created. let instances = instances_list(&client, &url_instances).await; assert_eq!(instances.len(), 1); instances_eq(&instances[0], &instance); - /* Fetch the instance and expect it to match. */ + // Fetch the instance and expect it to match. 
let instance = instance_get(&client, &instance_url).await; instances_eq(&instances[0], &instance); assert_eq!(instance.runtime.run_state, InstanceState::Starting); - /* Check that the instance got a network interface */ + // Check that the instance got a network interface let ips_url = format!( "/organizations/{}/projects/{}/vpcs/default/subnets/default/network-interfaces", ORGANIZATION_NAME, PROJECT_NAME @@ -180,9 +178,7 @@ async fn test_instances_create_reboot_halt( assert_eq!(network_interfaces[0].instance_id, instance.identity.id); assert_eq!(network_interfaces[0].identity.name, "default"); - /* - * Now, simulate completion of instance boot and check the state reported. - */ + // Now, simulate completion of instance boot and check the state reported. instance_simulate(nexus, &instance.identity.id).await; let instance_next = instance_get(&client, &instance_url).await; identity_eq(&instance.identity, &instance_next.identity); @@ -192,10 +188,8 @@ async fn test_instances_create_reboot_halt( > instance.runtime.time_run_state_updated ); - /* - * Request another boot. This should succeed without changing the state, - * not even the state timestamp. - */ + // Request another boot. This should succeed without changing the state, + // not even the state timestamp. let instance = instance_next; let instance_next = instance_post(&client, &instance_url, InstanceOp::Start).await; @@ -203,9 +197,7 @@ async fn test_instances_create_reboot_halt( let instance_next = instance_get(&client, &instance_url).await; instances_eq(&instance, &instance_next); - /* - * Reboot the instance. - */ + // Reboot the instance. let instance = instance_next; let instance_next = instance_post(&client, &instance_url, InstanceOp::Reboot).await; @@ -233,9 +225,7 @@ async fn test_instances_create_reboot_halt( > instance.runtime.time_run_state_updated ); - /* - * Request a halt and verify both the immediate state and the finished state. 
- */ + // Request a halt and verify both the immediate state and the finished state. let instance = instance_next; let instance_next = instance_post(&client, &instance_url, InstanceOp::Stop).await; @@ -254,10 +244,8 @@ async fn test_instances_create_reboot_halt( > instance.runtime.time_run_state_updated ); - /* - * Request another halt. This should succeed without changing the state, - * not even the state timestamp. - */ + // Request another halt. This should succeed without changing the state, + // not even the state timestamp. let instance = instance_next; let instance_next = instance_post(&client, &instance_url, InstanceOp::Stop).await; @@ -265,9 +253,7 @@ async fn test_instances_create_reboot_halt( let instance_next = instance_get(&client, &instance_url).await; instances_eq(&instance, &instance_next); - /* - * Attempt to reboot the halted instance. This should fail. - */ + // Attempt to reboot the halted instance. This should fail. let _error: HttpErrorResponseBody = NexusRequest::expect_failure( client, StatusCode::BAD_REQUEST, @@ -285,10 +271,8 @@ async fn test_instances_create_reboot_halt( // client, and expressing that as a rich error type. // assert_eq!(error.message, "cannot reboot instance in state \"stopped\""); - /* - * Start the instance. While it's starting, issue a reboot. This should - * succeed, having stopped in between. - */ + // Start the instance. While it's starting, issue a reboot. This should + // succeed, having stopped in between. let instance = instance_next; let instance_next = instance_post(&client, &instance_url, InstanceOp::Start).await; @@ -325,11 +309,9 @@ async fn test_instances_create_reboot_halt( > instance.runtime.time_run_state_updated ); - /* - * Stop the instance. While it's stopping, issue a reboot. This should - * fail because you cannot stop an instance that's en route to a stopped - * state. - */ + // Stop the instance. While it's stopping, issue a reboot. 
This should + // fail because you cannot stop an instance that's en route to a stopped + // state. let instance = instance_next; let instance_next = instance_post(&client, &instance_url, InstanceOp::Stop).await; @@ -351,7 +333,7 @@ async fn test_instances_create_reboot_halt( .unwrap() .parsed_body() .unwrap(); - //assert_eq!(error.message, "cannot reboot instance in state \"stopping\""); + // assert_eq!(error.message, "cannot reboot instance in state \"stopping\""); let instance = instance_next; instance_simulate(nexus, &instance.identity.id).await; let instance_next = instance_get(&client, &instance_url).await; @@ -361,25 +343,21 @@ async fn test_instances_create_reboot_halt( > instance.runtime.time_run_state_updated ); - /* TODO-coverage add a test to try to delete the project at this point. */ + // TODO-coverage add a test to try to delete the project at this point. - /* Delete the instance. */ + // Delete the instance. NexusRequest::object_delete(client, &instance_url) .authn_as(AuthnMode::PrivilegedUser) .execute() .await .unwrap(); - /* - * TODO-coverage re-add tests that check the server-side state after - * deleting. We need to figure out how these actually get cleaned up from - * the API namespace when this happens. - */ + // TODO-coverage re-add tests that check the server-side state after + // deleting. We need to figure out how these actually get cleaned up from + // the API namespace when this happens. - /* - * Once more, try to reboot it. This should not work on a destroyed - * instance. - */ + // Once more, try to reboot it. This should not work on a destroyed + // instance. NexusRequest::expect_failure( client, StatusCode::NOT_FOUND, @@ -391,9 +369,7 @@ async fn test_instances_create_reboot_halt( .await .unwrap(); - /* - * Similarly, we should not be able to start or stop the instance. - */ + // Similarly, we should not be able to start or stop the instance. 
NexusRequest::expect_failure( client, StatusCode::NOT_FOUND, @@ -485,12 +461,10 @@ async fn test_instances_delete_fails_when_running_succeeds_when_stopped( async fn test_instances_invalid_creation_returns_bad_request( cptestctx: &ControlPlaneTestContext, ) { - /* - * The rest of these examples attempt to create invalid instances. We don't - * do exhaustive tests of the model here -- those are part of unit tests -- - * but we exercise a few different types of errors to make sure those get - * passed through properly. - */ + // The rest of these examples attempt to create invalid instances. We don't + // do exhaustive tests of the model here -- those are part of unit tests -- + // but we exercise a few different types of errors to make sure those get + // passed through properly. let client = &cptestctx.external_client; let url_instances = format!( @@ -922,7 +896,7 @@ async fn test_instance_with_multiple_nics_unwinds_completely( ) { let client = &cptestctx.external_client; - /* Create a project that we'll use for testing. */ + // Create a project that we'll use for testing. create_organization(&client, ORGANIZATION_NAME).await; let url_instances = format!( "/organizations/{}/projects/{}/instances", @@ -1018,9 +992,7 @@ async fn instances_list( .all_items } -/** - * Convenience function for starting, stopping, or rebooting an instance. - */ +/// Convenience function for starting, stopping, or rebooting an instance. enum InstanceOp { Start, Stop, @@ -1070,12 +1042,10 @@ fn instances_eq(instance1: &Instance, instance2: &Instance) { ); } -/** - * Simulate completion of an ongoing instance state transition. To do this, we - * have to look up the instance, then get the sled agent associated with that - * instance, and then tell it to finish simulating whatever async transition is - * going on. - */ +/// Simulate completion of an ongoing instance state transition. 
To do this, we +/// have to look up the instance, then get the sled agent associated with that +/// instance, and then tell it to finish simulating whatever async transition is +/// going on. async fn instance_simulate(nexus: &Arc, id: &Uuid) { let sa = nexus.instance_sled_by_id(id).await.unwrap(); sa.instance_finish_transition(id.clone()).await; diff --git a/nexus/tests/integration_tests/organizations.rs b/nexus/tests/integration_tests/organizations.rs index a28407e800..915a8b6385 100644 --- a/nexus/tests/integration_tests/organizations.rs +++ b/nexus/tests/integration_tests/organizations.rs @@ -17,7 +17,7 @@ use nexus_test_utils_macros::nexus_test; async fn test_organizations(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - /* Create organizations that we'll use for testing. */ + // Create organizations that we'll use for testing. let o1_name = "test-org"; let o2_name = "oxidecomputer"; create_organization(&client, &o1_name).await; diff --git a/nexus/tests/integration_tests/projects.rs b/nexus/tests/integration_tests/projects.rs index 60ffe55962..7b230b1f73 100644 --- a/nexus/tests/integration_tests/projects.rs +++ b/nexus/tests/integration_tests/projects.rs @@ -17,7 +17,7 @@ async fn test_projects(cptestctx: &ControlPlaneTestContext) { let org_name = "test-org"; create_organization(&client, &org_name).await; - /* Create a project that we'll use for testing. */ + // Create a project that we'll use for testing. let p1_name = "springfield-squidport"; let p2_name = "cairo-airport"; let org_p1_id = @@ -33,7 +33,7 @@ async fn test_projects(cptestctx: &ControlPlaneTestContext) { let project: Project = project_get(&client, &p2_url).await; assert_eq!(project.identity.name, p2_name); - /* Verify the list of Projects. */ + // Verify the list of Projects. 
let projects_url = format!("/organizations/{}/projects", org_name); let projects = NexusRequest::iter_collection_authn::( &client, @@ -49,8 +49,8 @@ async fn test_projects(cptestctx: &ControlPlaneTestContext) { assert_eq!(projects[0].identity.name, p2_name); assert_eq!(projects[1].identity.name, p1_name); - /* Create a second organization and make sure we can have two projects with - * the same name across organizations */ + // Create a second organization and make sure we can have two projects with + // the same name across organizations let org2_name = "test-org2"; create_organization(&client, &org2_name).await; let org2_p1_id = diff --git a/nexus/tests/integration_tests/roles_builtin.rs b/nexus/tests/integration_tests/roles_builtin.rs index 9faf8b1bda..78cb1efdb2 100644 --- a/nexus/tests/integration_tests/roles_builtin.rs +++ b/nexus/tests/integration_tests/roles_builtin.rs @@ -53,7 +53,6 @@ async fn test_roles_builtin(cptestctx: &ControlPlaneTestContext) { // There's an empty page at the end of each dropshot scan. assert_eq!(roles.len() + 1, roles_paginated.npages); - // // Test GET /roles/$role_name // diff --git a/nexus/tests/integration_tests/subnet_allocation.rs b/nexus/tests/integration_tests/subnet_allocation.rs index ccc268fd46..47a0e5ef72 100644 --- a/nexus/tests/integration_tests/subnet_allocation.rs +++ b/nexus/tests/integration_tests/subnet_allocation.rs @@ -2,10 +2,8 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! - * Tests that subnet allocation will successfully allocate the entire space of a - * subnet and error appropriately when the space is exhausted. - */ +//! Tests that subnet allocation will successfully allocate the entire space of a +//! subnet and error appropriately when the space is exhausted. 
use http::method::Method; use http::StatusCode; diff --git a/nexus/tests/integration_tests/unauthorized.rs b/nexus/tests/integration_tests/unauthorized.rs index c205766b06..979f3019e0 100644 --- a/nexus/tests/integration_tests/unauthorized.rs +++ b/nexus/tests/integration_tests/unauthorized.rs @@ -79,7 +79,6 @@ async fn test_unauthorized(cptestctx: &ControlPlaneTestContext) { } } -// // SETUP PHASE // @@ -256,7 +255,6 @@ lazy_static! { }; } -// // VERIFY PHASE // diff --git a/nexus/tests/integration_tests/updates.rs b/nexus/tests/integration_tests/updates.rs index 048bd146a1..9147200c9c 100644 --- a/nexus/tests/integration_tests/updates.rs +++ b/nexus/tests/integration_tests/updates.rs @@ -108,9 +108,9 @@ async fn static_content( rqctx: Arc>, path: Path, ) -> Result, HttpError> { - /* NOTE: this is a particularly brief and bad implementation of this to keep the test shorter. - * see https://github.com/oxidecomputer/dropshot/blob/main/dropshot/examples/file_server.rs for - * something more robust! */ + // NOTE: this is a particularly brief and bad implementation of this to keep the test shorter. + // see https://github.com/oxidecomputer/dropshot/blob/main/dropshot/examples/file_server.rs for + // something more robust! let mut fs_path = rqctx.context().base.clone(); for component in path.into_inner().path { fs_path.push(component); diff --git a/nexus/tests/integration_tests/vpc_firewall.rs b/nexus/tests/integration_tests/vpc_firewall.rs index c70bffd074..f0fcb92de3 100644 --- a/nexus/tests/integration_tests/vpc_firewall.rs +++ b/nexus/tests/integration_tests/vpc_firewall.rs @@ -26,7 +26,7 @@ use uuid::Uuid; async fn test_vpc_firewall(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - /* Create a project that we'll use for testing. */ + // Create a project that we'll use for testing. 
let org_name = "test-org"; create_organization(&client, &org_name).await; let project_name = "springfield-squidport"; diff --git a/nexus/tests/integration_tests/vpc_routers.rs b/nexus/tests/integration_tests/vpc_routers.rs index de65971a03..86aeb98d25 100644 --- a/nexus/tests/integration_tests/vpc_routers.rs +++ b/nexus/tests/integration_tests/vpc_routers.rs @@ -25,7 +25,7 @@ use omicron_nexus::external_api::params; async fn test_vpc_routers(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - /* Create a project that we'll use for testing. */ + // Create a project that we'll use for testing. let organization_name = "test-org"; let project_name = "springfield-squidport"; let vpcs_url = format!( @@ -35,7 +35,7 @@ async fn test_vpc_routers(cptestctx: &ControlPlaneTestContext) { create_organization(&client, organization_name).await; let _ = create_project(&client, organization_name, project_name).await; - /* Create a VPC. */ + // Create a VPC. let vpc_name = "vpc1"; let vpc = create_vpc(&client, organization_name, project_name, vpc_name).await; @@ -67,7 +67,7 @@ async fn test_vpc_routers(cptestctx: &ControlPlaneTestContext) { .unwrap(); assert_eq!(error.message, "not found: vpc-router with name \"router1\""); - /* Create a VPC Router. */ + // Create a VPC Router. let router = create_router( &client, organization_name, diff --git a/nexus/tests/integration_tests/vpc_subnets.rs b/nexus/tests/integration_tests/vpc_subnets.rs index e66192919b..a5f8348ca1 100644 --- a/nexus/tests/integration_tests/vpc_subnets.rs +++ b/nexus/tests/integration_tests/vpc_subnets.rs @@ -26,7 +26,7 @@ use serde_json::json; async fn test_vpc_subnets(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - /* Create a project that we'll use for testing. */ + // Create a project that we'll use for testing. 
let org_name = "test-org";
     create_organization(&client, &org_name).await;
     let project_name = "springfield-squidport";
@@ -34,7 +34,7 @@
         format!("/organizations/{}/projects/{}/vpcs", org_name, project_name);
     let _ = create_project(&client, org_name, project_name).await;
 
-    /* Create a VPC. */
+    // Create a VPC.
     let vpc_name = "vpc1";
     let vpc = create_vpc(&client, org_name, project_name, vpc_name).await;
 
@@ -79,7 +79,7 @@
         .unwrap();
     assert_eq!(error.message, "not found: vpc-subnet with name \"subnet1\"");
 
-    /* Create a VPC Subnet. */
+    // Create a VPC Subnet.
     let ipv4_block = Ipv4Net("10.0.0.0/24".parse().unwrap());
     let other_ipv4_block = Ipv4Net("172.31.0.0/16".parse().unwrap());
-    // Create the first two available IPv6 address ranges. */
+    // Create the first two available IPv6 address ranges.
diff --git a/nexus/tests/integration_tests/vpcs.rs b/nexus/tests/integration_tests/vpcs.rs
index 7322973750..0013887457 100644
--- a/nexus/tests/integration_tests/vpcs.rs
+++ b/nexus/tests/integration_tests/vpcs.rs
@@ -25,7 +25,7 @@ use omicron_nexus::external_api::{params, views::Vpc};
 async fn test_vpcs(cptestctx: &ControlPlaneTestContext) {
     let client = &cptestctx.external_client;
 
-    /* Create a project that we'll use for testing. */
+    // Create a project that we'll use for testing.
     let org_name = "test-org";
     create_organization(&client, &org_name).await;
     let project_name = "springfield-squidport";
@@ -36,14 +36,14 @@ async fn test_vpcs(cptestctx: &ControlPlaneTestContext) {
     let project_name2 = "pokemon";
     let _ = create_project(&client, &org_name, &project_name2).await;
 
-    /* List vpcs. We see the default VPC, and nothing else. */
+    // List vpcs. We see the default VPC, and nothing else. 
let mut vpcs = vpcs_list(&client, &vpcs_url).await; assert_eq!(vpcs.len(), 1); assert_eq!(vpcs[0].identity.name, "default"); assert_eq!(vpcs[0].dns_name, "default"); let default_vpc = vpcs.remove(0); - /* Make sure we get a 404 if we fetch or delete one. */ + // Make sure we get a 404 if we fetch or delete one. let vpc_url = format!("{}/just-rainsticks", vpcs_url); for method in &[Method::GET, Method::DELETE] { let error: HttpErrorResponseBody = NexusRequest::expect_failure( @@ -64,10 +64,8 @@ async fn test_vpcs(cptestctx: &ControlPlaneTestContext) { ); } - /* - * Make sure creating a VPC fails if we specify an IPv6 prefix that is - * not a valid ULA range. - */ + // Make sure creating a VPC fails if we specify an IPv6 prefix that is + // not a valid ULA range. let bad_prefix = Ipv6Net("2000:1000::/48".parse().unwrap()); NexusRequest::new( RequestBuilder::new(client, Method::POST, &vpcs_url) @@ -86,7 +84,7 @@ async fn test_vpcs(cptestctx: &ControlPlaneTestContext) { .await .unwrap(); - /* Create a VPC. */ + // Create a VPC. let vpc_name = "just-rainsticks"; let vpc = create_vpc(&client, org_name, project_name, vpc_name).await; assert_eq!(vpc.identity.name, "just-rainsticks"); @@ -102,7 +100,7 @@ async fn test_vpcs(cptestctx: &ControlPlaneTestContext) { "Expected a ULA IPv6 address prefix" ); - /* Attempt to create a second VPC with a conflicting name. */ + // Attempt to create a second VPC with a conflicting name. let error = create_vpc_with_error( &client, org_name, @@ -113,22 +111,22 @@ async fn test_vpcs(cptestctx: &ControlPlaneTestContext) { .await; assert_eq!(error.message, "already exists: vpc \"just-rainsticks\""); - /* creating a VPC with the same name in another project works, though */ + // creating a VPC with the same name in another project works, though let vpc2: Vpc = create_vpc(&client, org_name, project_name2, vpc_name).await; assert_eq!(vpc2.identity.name, "just-rainsticks"); - /* List VPCs again and expect to find the one we just created. 
*/ + // List VPCs again and expect to find the one we just created. let vpcs = vpcs_list(&client, &vpcs_url).await; assert_eq!(vpcs.len(), 2); vpcs_eq(&vpcs[0], &default_vpc); vpcs_eq(&vpcs[1], &vpc); - /* Fetch the VPC and expect it to match. */ + // Fetch the VPC and expect it to match. let vpc = vpc_get(&client, &vpc_url).await; vpcs_eq(&vpcs[1], &vpc); - /* Update the VPC */ + // Update the VPC let update_params = params::VpcUpdate { identity: IdentityMetadataUpdateParams { name: Some("new-name".parse().unwrap()), @@ -159,20 +157,20 @@ async fn test_vpcs(cptestctx: &ControlPlaneTestContext) { // new url with new name let vpc_url = format!("{}/new-name", vpcs_url); - /* Fetch the VPC again. It should have the updated properties. */ + // Fetch the VPC again. It should have the updated properties. let vpc = vpc_get(&client, &vpc_url).await; assert_eq!(vpc.identity.name, "new-name"); assert_eq!(vpc.identity.description, "another description"); assert_eq!(vpc.dns_name, "def"); - /* Delete the VPC. */ + // Delete the VPC. 
NexusRequest::object_delete(client, &vpc_url) .authn_as(AuthnMode::PrivilegedUser) .execute() .await .unwrap(); - /* Now we expect a 404 on fetch */ + // Now we expect a 404 on fetch let error: HttpErrorResponseBody = NexusRequest::expect_failure( client, StatusCode::NOT_FOUND, @@ -187,7 +185,7 @@ async fn test_vpcs(cptestctx: &ControlPlaneTestContext) { .unwrap(); assert_eq!(error.message, "not found: vpc with name \"new-name\""); - /* And the list should be empty (aside from default VPC) again */ + // And the list should be empty (aside from default VPC) again let vpcs = vpcs_list(&client, &vpcs_url).await; assert_eq!(vpcs.len(), 1); vpcs_eq(&vpcs[0], &default_vpc); diff --git a/oximeter/collector/tests/test_commands.rs b/oximeter/collector/tests/test_commands.rs index 92130f2f2d..d23abcd63a 100644 --- a/oximeter/collector/tests/test_commands.rs +++ b/oximeter/collector/tests/test_commands.rs @@ -14,17 +14,15 @@ use omicron_test_utils::dev::test_cmds::{ use openapiv3::OpenAPI; use subprocess::Exec; -/** name of the "oximeter" executable */ +/// name of the "oximeter" executable const CMD_OXIMETER: &str = env!("CARGO_BIN_EXE_oximeter"); fn path_to_oximeter() -> PathBuf { path_to_executable(CMD_OXIMETER) } -/** - * Write the requested string to a temporary file and return the path to that - * file. - */ +/// Write the requested string to a temporary file and return the path to that +/// file. fn write_config(config: &str) -> PathBuf { let file_path = temp_file_path("test_commands_config"); eprintln!("writing temp config: {}", file_path.display()); @@ -43,15 +41,13 @@ fn test_oximeter_no_args() { #[test] fn test_oximeter_openapi() { - /* - * This is a little goofy: we need a config file for the program. - * (Arguably, --openapi shouldn't require a config file, but it's - * conceivable that the API metadata or the exposed endpoints would depend - * on the configuration.) 
We ship a config file in "examples", and we may - * as well use it here -- it would be a bug if that one didn't work for this - * purpose. However, it's not clear how to reliably locate it at runtime. - * But we do know where it is at compile time, so we load it then. - */ + // This is a little goofy: we need a config file for the program. + // (Arguably, --openapi shouldn't require a config file, but it's + // conceivable that the API metadata or the exposed endpoints would depend + // on the configuration.) We ship a config file in "examples", and we may + // as well use it here -- it would be a bug if that one didn't work for this + // purpose. However, it's not clear how to reliably locate it at runtime. + // But we do know where it is at compile time, so we load it then. let config = include_str!("../../collector/config.toml"); let config_path = write_config(config); let exec = Exec::cmd(path_to_oximeter()).arg(&config_path).arg("--openapi"); @@ -63,16 +59,12 @@ fn test_oximeter_openapi() { let spec: OpenAPI = serde_json::from_str(&stdout_text) .expect("stdout was not valid OpenAPI"); - /* - * Check for lint errors. - */ + // Check for lint errors. let errors = openapi_lint::validate(&spec); assert!(errors.is_empty(), "{}", errors.join("\n\n")); - /* - * Confirm that the output hasn't changed. It's expected that we'll change - * this file as the API evolves, but pay attention to the diffs to ensure - * that the changes match your expectations. - */ + // Confirm that the output hasn't changed. It's expected that we'll change + // this file as the API evolves, but pay attention to the diffs to ensure + // that the changes match your expectations. assert_contents("../../openapi/oximeter.json", &stdout_text); } diff --git a/package/src/bin/omicron-package.rs b/package/src/bin/omicron-package.rs index 6d8fbee415..e7bd7f5b7d 100644 --- a/package/src/bin/omicron-package.rs +++ b/package/src/bin/omicron-package.rs @@ -2,9 +2,7 @@ // License, v. 2.0. 
If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! - * Utility for bundling target binaries as tarfiles. - */ +//! Utility for bundling target binaries as tarfiles. use anyhow::{anyhow, bail, Context, Result}; use omicron_package::{parse, SubCommand}; diff --git a/rpaths/src/lib.rs b/rpaths/src/lib.rs index 23e7b6b130..903c6df184 100644 --- a/rpaths/src/lib.rs +++ b/rpaths/src/lib.rs @@ -2,113 +2,107 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! - * Build-time crate for configuring RPATHs for Omicron binaries - * - * ## The least you need to know - * - * This build-time crate is used by several top-level Omicron crates to set - * RPATH so that libpq can be found at runtime. This is necessary because these - * crates depend on "diesel", which depends on "pq-sys", which links in "libpq". - * But Cargo/Rust have no built-in way to set the RPATH so that libpq can - * actually be found at runtime. (See below.) So we've developed the pattern - * here instead. It works like this: - * - * 1. Any crate that depends on pq-sys, directly or not, needs to follow these - * instructions. Generally, we depend on pq-sys _indirectly_, by virtue of - * depending on Diesel. - * 2. Affected crates (e.g., omicron-nexus) have a build.rs that just calls - * `omicron_rpath::configure_default_omicron_rpaths()`. - * 3. These crates must also add a dependency on "pq-sys", usually version "*". - * (This dependency is unfortunate but necessary in order for us to get the - * metadata emitted by pq-sys that tells it where it found libpq. Since we - * don't directly use pq-sys in the crate, we don't care what version it is. - * We specify "*" so that when Cargo dedups our dependency with the one in - * Diesel, we pick up whatever would be picked up anyway, and we'll get its - * metadata.) - * 4. 
At the top level of Omicron (in the workspace Cargo.toml), we use a - * patched version of pq-sys that emits metadata that's used by - * `configure_default_omicron_rpaths()`. - * - * This crate is factored (over-engineered, really) so that we can extend this - * pattern to other native libraries in the future. - * - * ## More details - * - * On Unix-like systems, executable binaries and shared libraries can have - * shared library dependencies. When a binary is loaded (whether it's an - * executable or a shared library), the runtime linker (ld.so) locates these - * dependencies using a combination of environment variables, system - * configuration, and RPATH entries embedded in the binary itself. While this - * process is well-defined, using it correctly can be deceptively tricky and - * it's often a source of considerable frustration. - * - * As of this writing, Cargo has no first-class way to configure the RPATH for - * binaries that it builds. This is covered by [rust-lang/cargo#5077][1]. This - * comes up most often for people trying to expose a native library to Rust, as - * via [*-sys packages][2]. Typically a Rust program that uses one of these - * packages will wind up with no RPATH entries. This will work if, at runtime, - * the library happens to be in one of the runtime linker's default search paths - * (e.g., /usr/lib). This is commonly the case if the library came from the - * system package manager. But in general, the library might be in some other - * path, and you would need to specify LD_LIBRARY_PATH every time you run the - * program in order for the linker to find the library. Using LD_LIBRARY_PATH - * like this is discouraged because it affects more than just the program you're - * running -- it affects everything that inherits the variable. You're supposed - * to include RPATH entries in the binary instead. 
- * - * As of 1.56, Cargo supports the "cargo:rustc-link-arg" instruction for use by - * [Build Scripts][3] to pass arbitrary options to the linker. We use that here - * to tell the linker to include the correct RPATH entry for our one native - * dependency that's affected by this (libpq, exposed via the pq-sys package). - * - * A subtle but critical point here is that the RPATH is knowable only by the - * system that's building the top-level executable binary. This mechanism can't - * go into the *-sys package that wraps the native library because that package - * cannot know where the library will be found at runtime. Only whoever (or - * whatever) is building the software knows that. Further, Cargo provides no - * mechanism for a package to emit linker arguments used when building its - * dependents. For more discussion on this, see [rust-lang/cargo#9554][4]. - * - * So we need to emit the linker argument here. How do we know what value to - * use? We take the approach used by most build systems: we use the path where - * the library was found on the build machine. But how do we know where that - * was? *-sys packages have non-trivial mechanisms for locating the desired - * library. We don't want to duplicate those here. Instead, we make use of - * metadata emitted by those build scripts, which shows up as "DEP_*" - * environment variables for us. - * - * **Important note:** In order for us to have metadata for these dependencies, - * we must *directly* depend on them. This may mean adding an otherwise unused - * dependency from the top-level package to the *-sys package. - * - * (In the limit, it may be wrong for us to use the same path that was used to - * locate the library on the build machine. We might want to bundle all of - * these libraries and use something like `$ORIGIN/../lib`. We could generalize - * the mechanism here to pass whatever path we want, possibly specified by some - * other environment variable like OMICRON_BUILD_RPATH.) 
- * - * [1]: https://github.com/rust-lang/cargo/issues/5077 - * [2]: https://doc.rust-lang.org/cargo/reference/build-scripts.html#-sys-packages - * [3]: https://doc.rust-lang.org/cargo/reference/build-scripts.html - * [4]: https://github.com/rust-lang/cargo/issues/9554 - */ +//! Build-time crate for configuring RPATHs for Omicron binaries +//! +//! ## The least you need to know +//! +//! This build-time crate is used by several top-level Omicron crates to set +//! RPATH so that libpq can be found at runtime. This is necessary because these +//! crates depend on "diesel", which depends on "pq-sys", which links in "libpq". +//! But Cargo/Rust have no built-in way to set the RPATH so that libpq can +//! actually be found at runtime. (See below.) So we've developed the pattern +//! here instead. It works like this: +//! +//! 1. Any crate that depends on pq-sys, directly or not, needs to follow these +//! instructions. Generally, we depend on pq-sys _indirectly_, by virtue of +//! depending on Diesel. +//! 2. Affected crates (e.g., omicron-nexus) have a build.rs that just calls +//! `omicron_rpath::configure_default_omicron_rpaths()`. +//! 3. These crates must also add a dependency on "pq-sys", usually version "*". +//! (This dependency is unfortunate but necessary in order for us to get the +//! metadata emitted by pq-sys that tells it where it found libpq. Since we +//! don't directly use pq-sys in the crate, we don't care what version it is. +//! We specify "*" so that when Cargo dedups our dependency with the one in +//! Diesel, we pick up whatever would be picked up anyway, and we'll get its +//! metadata.) +//! 4. At the top level of Omicron (in the workspace Cargo.toml), we use a +//! patched version of pq-sys that emits metadata that's used by +//! `configure_default_omicron_rpaths()`. +//! +//! This crate is factored (over-engineered, really) so that we can extend this +//! pattern to other native libraries in the future. +//! +//! ## More details +//! +//! 
On Unix-like systems, executable binaries and shared libraries can have +//! shared library dependencies. When a binary is loaded (whether it's an +//! executable or a shared library), the runtime linker (ld.so) locates these +//! dependencies using a combination of environment variables, system +//! configuration, and RPATH entries embedded in the binary itself. While this +//! process is well-defined, using it correctly can be deceptively tricky and +//! it's often a source of considerable frustration. +//! +//! As of this writing, Cargo has no first-class way to configure the RPATH for +//! binaries that it builds. This is covered by [rust-lang/cargo#5077][1]. This +//! comes up most often for people trying to expose a native library to Rust, as +//! via [*-sys packages][2]. Typically a Rust program that uses one of these +//! packages will wind up with no RPATH entries. This will work if, at runtime, +//! the library happens to be in one of the runtime linker's default search paths +//! (e.g., /usr/lib). This is commonly the case if the library came from the +//! system package manager. But in general, the library might be in some other +//! path, and you would need to specify LD_LIBRARY_PATH every time you run the +//! program in order for the linker to find the library. Using LD_LIBRARY_PATH +//! like this is discouraged because it affects more than just the program you're +//! running -- it affects everything that inherits the variable. You're supposed +//! to include RPATH entries in the binary instead. +//! +//! As of 1.56, Cargo supports the "cargo:rustc-link-arg" instruction for use by +//! [Build Scripts][3] to pass arbitrary options to the linker. We use that here +//! to tell the linker to include the correct RPATH entry for our one native +//! dependency that's affected by this (libpq, exposed via the pq-sys package). +//! +//! A subtle but critical point here is that the RPATH is knowable only by the +//! 
system that's building the top-level executable binary. This mechanism can't +//! go into the *-sys package that wraps the native library because that package +//! cannot know where the library will be found at runtime. Only whoever (or +//! whatever) is building the software knows that. Further, Cargo provides no +//! mechanism for a package to emit linker arguments used when building its +//! dependents. For more discussion on this, see [rust-lang/cargo#9554][4]. +//! +//! So we need to emit the linker argument here. How do we know what value to +//! use? We take the approach used by most build systems: we use the path where +//! the library was found on the build machine. But how do we know where that +//! was? *-sys packages have non-trivial mechanisms for locating the desired +//! library. We don't want to duplicate those here. Instead, we make use of +//! metadata emitted by those build scripts, which shows up as "DEP_*" +//! environment variables for us. +//! +//! **Important note:** In order for us to have metadata for these dependencies, +//! we must *directly* depend on them. This may mean adding an otherwise unused +//! dependency from the top-level package to the *-sys package. +//! +//! (In the limit, it may be wrong for us to use the same path that was used to +//! locate the library on the build machine. We might want to bundle all of +//! these libraries and use something like `$ORIGIN/../lib`. We could generalize +//! the mechanism here to pass whatever path we want, possibly specified by some +//! other environment variable like OMICRON_BUILD_RPATH.) +//! +//! [1]: https://github.com/rust-lang/cargo/issues/5077 +//! [2]: https://doc.rust-lang.org/cargo/reference/build-scripts.html#-sys-packages +//! [3]: https://doc.rust-lang.org/cargo/reference/build-scripts.html +//! 
[4]: https://github.com/rust-lang/cargo/issues/9554 -/** - * Tells Cargo to pass linker arguments that specify the right RPATH for Omicron - * binaries - */ -/* - * This currently assumes that all Omicron binaries link to the same set of - * native libraries. As a result, we use a fixed list of libraries. In the - * future, if they depend on different combinations, we can accept different - * arguments here that specify exactly which ones are expected to be found. - */ +/// Tells Cargo to pass linker arguments that specify the right RPATH for Omicron +/// binaries +// This currently assumes that all Omicron binaries link to the same set of +// native libraries. As a result, we use a fixed list of libraries. In the +// future, if they depend on different combinations, we can accept different +// arguments here that specify exactly which ones are expected to be found. pub fn configure_default_omicron_rpaths() { internal::configure_default_omicron_rpaths(); } -/* None of this behavior is needed on MacOS. */ +// None of this behavior is needed on MacOS. #[cfg(not(any(target_os = "illumos", target_os = "linux")))] mod internal { pub fn configure_default_omicron_rpaths() {} @@ -131,45 +125,39 @@ mod internal { } } - /** - * Environment variables that contain RPATHs we want to use in our built - * binaries - * - * These environment variables are set by Cargo based on metadata emitted by - * our dependencies' build scripts. Since a particular dependency could use - * multiple libraries in different paths, each of these environment - * variables may itself look like a path, not just a directory. That is, - * these are colon-separated lists of directories. - * - * Currently, we only do this for libpq ("pq-sys" package), but this pattern - * could be generalized for other native libraries. 
- */ + /// Environment variables that contain RPATHs we want to use in our built + /// binaries + /// + /// These environment variables are set by Cargo based on metadata emitted by + /// our dependencies' build scripts. Since a particular dependency could use + /// multiple libraries in different paths, each of these environment + /// variables may itself look like a path, not just a directory. That is, + /// these are colon-separated lists of directories. + /// + /// Currently, we only do this for libpq ("pq-sys" package), but this pattern + /// could be generalized for other native libraries. pub static RPATH_ENV_VARS: &'static [&'static str] = &["DEP_PQ_LIBDIRS"]; - /** - * Tells Cargo to pass linker arguments that specify RPATHs from the - * environment variable `env_var_name` - * - * Panics if the environment variable is not set or contains non-UTF8 data. - * This might be surprising, since environment variables are optional in - * most build-time mechanisms. We opt for strictness here because in fact - * we _do_ expect these to always be set, and if they're not, it's most - * likely that somebody has forgotten to include a required dependency. We - * want to tell them that rather than silently produce unrunnable binaries. - */ + /// Tells Cargo to pass linker arguments that specify RPATHs from the + /// environment variable `env_var_name` + /// + /// Panics if the environment variable is not set or contains non-UTF8 data. + /// This might be surprising, since environment variables are optional in + /// most build-time mechanisms. We opt for strictness here because in fact + /// we _do_ expect these to always be set, and if they're not, it's most + /// likely that somebody has forgotten to include a required dependency. We + /// want to tell them that rather than silently produce unrunnable binaries. 
     pub fn configure_rpaths_from_env_var(
         rpaths: &mut Vec<String>,
         env_var_name: &OsStr,
     ) {
-        /*
-         * If you see this message, that means that the build script for some
-         * Omicron crate is trying to configure RPATHs for a native library, but
-         * the environment variable that's supposed to contain the RPATH
-         * information for that library is unset. That most likely means that
-         * the crate you're building is lacking a direct dependency on the
-         * '*-sys' crate, or else that the '*-sys' crate's build script failed
-         * to set this metadata.
-         */
+        // If you see this message, that means that the build script for some
+        // Omicron crate is trying to configure RPATHs for a native library, but
+        // the environment variable that's supposed to contain the RPATH
+        // information for that library is unset. That most likely means that
+        // the crate you're building is lacking a direct dependency on the
+        // '*-sys' crate, or else that the '*-sys' crate's build script failed
+        // to set this metadata.
         let env_var_value =
             std::env::var_os(env_var_name).unwrap_or_else(|| {
                 panic!(
@@ -187,10 +175,8 @@ mod internal {
             );
         }
 
-        /**
-         * Given a colon-separated list of paths in `env_var_value`, append to
-         * `rpaths` the same list of paths.
-         */
+        /// Given a colon-separated list of paths in `env_var_value`, append to
+        /// `rpaths` the same list of paths.
     fn configure_rpaths_from_path(
         rpaths: &mut Vec<String>,
         env_var_value: &OsStr,
@@ -204,10 +190,8 @@
         Ok(())
     }
 
-    /**
-     * Emits the Cargo instruction for a given RPATH. This is only separated
-     * out to make different parts of this module easier to test.
-     */
+    /// Emits the Cargo instruction for a given RPATH. This is only separated
+    /// out to make different parts of this module easier to test.
pub fn emit_rpath(path_str: &str) -> String { format!("cargo:rustc-link-arg=-Wl,-R{}", path_str) } diff --git a/sled-agent-client/src/lib.rs b/sled-agent-client/src/lib.rs index 6a5517b935..73a9bda0d6 100644 --- a/sled-agent-client/src/lib.rs +++ b/sled-agent-client/src/lib.rs @@ -2,9 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! - * Interface for making API requests to a Sled Agent - */ +//! Interface for making API requests to a Sled Agent use async_trait::async_trait; use omicron_common::generate_logging_api; @@ -252,10 +250,8 @@ impl From } } -/** - * Exposes additional [`Client`] interfaces for use by the test suite. These - * are bonus endpoints, not generated in the real client. - */ +/// Exposes additional [`Client`] interfaces for use by the test suite. These +/// are bonus endpoints, not generated in the real client. #[async_trait] pub trait TestInterfaces { async fn instance_finish_transition(&self, id: Uuid); diff --git a/sled-agent/src/bin/sled-agent-sim.rs b/sled-agent/src/bin/sled-agent-sim.rs index c4c1a4a9d9..cc7b3ddf7f 100644 --- a/sled-agent/src/bin/sled-agent-sim.rs +++ b/sled-agent/src/bin/sled-agent-sim.rs @@ -2,13 +2,9 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! - * Executable program to run a simulated sled agent - */ +//! Executable program to run a simulated sled agent -/* - * TODO see the TODO for nexus. - */ +// TODO see the TODO for nexus. use dropshot::ConfigDropshot; use dropshot::ConfigLogging; diff --git a/sled-agent/src/bootstrap/config.rs b/sled-agent/src/bootstrap/config.rs index 339ecb3613..15ab42f824 100644 --- a/sled-agent/src/bootstrap/config.rs +++ b/sled-agent/src/bootstrap/config.rs @@ -2,9 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. 
-/*! - * Interfaces for working with bootstrap agent configuration - */ +//! Interfaces for working with bootstrap agent configuration use crate::config::ConfigError; use crate::params::{DatasetEnsureBody, ServiceRequest}; diff --git a/sled-agent/src/bootstrap/trust_quorum/mod.rs b/sled-agent/src/bootstrap/trust_quorum/mod.rs index 552faf2598..fe8b9f7bcc 100644 --- a/sled-agent/src/bootstrap/trust_quorum/mod.rs +++ b/sled-agent/src/bootstrap/trust_quorum/mod.rs @@ -28,7 +28,6 @@ //! || --------- Request Share ------------> || //! || || //! || <----------- Share ------------------ || -//! mod client; mod error; diff --git a/sled-agent/src/bootstrap/trust_quorum/rack_secret.rs b/sled-agent/src/bootstrap/trust_quorum/rack_secret.rs index ce526613b2..2a801b2fe3 100644 --- a/sled-agent/src/bootstrap/trust_quorum/rack_secret.rs +++ b/sled-agent/src/bootstrap/trust_quorum/rack_secret.rs @@ -58,7 +58,6 @@ impl Eq for RackSecret {} /// We use verifiable secret sharing to detect invalid shares from being /// combined and generating an incorrect secret. Each share must be verified /// before the secret is reconstructed. -// // This is just a wrapper around a FeldmanVerifier from the vsss-rs crate. 
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct Verifier { diff --git a/sled-agent/src/illumos/zone.rs b/sled-agent/src/illumos/zone.rs index 2b04243aaa..9baaefa517 100644 --- a/sled-agent/src/illumos/zone.rs +++ b/sled-agent/src/illumos/zone.rs @@ -161,7 +161,8 @@ impl Zones { info!(log, "Configuring new Omicron zone: {}", zone_name); let mut cfg = zone::Config::create( zone_name, - /* overwrite= */ true, + // overwrite= + true, zone::CreationOptions::Blank, ); let path = format!("{}/{}", ZONE_ZFS_DATASET_MOUNTPOINT, zone_name); diff --git a/sled-agent/src/instance.rs b/sled-agent/src/instance.rs index 4bafe69f80..fe202861cf 100644 --- a/sled-agent/src/instance.rs +++ b/sled-agent/src/instance.rs @@ -460,7 +460,8 @@ impl Instance { &inner.vnic_allocator, "propolis-server", Some(&inner.propolis_id().to_string()), - /* dataset= */ &[], + // dataset= + &[], &[ zone::Device { name: "/dev/vmm/*".to_string() }, zone::Device { name: "/dev/vmmctl".to_string() }, diff --git a/sled-agent/src/params.rs b/sled-agent/src/params.rs index 0b7bb347ef..c691d90eac 100644 --- a/sled-agent/src/params.rs +++ b/sled-agent/src/params.rs @@ -12,7 +12,7 @@ use std::fmt::{Debug, Display, Formatter, Result as FormatResult}; use std::net::SocketAddr; use uuid::Uuid; -///Used to request a Disk state change +/// Used to request a Disk state change #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize, JsonSchema)] #[serde(rename_all = "lowercase", tag = "state", content = "instance")] pub enum DiskStateRequested { diff --git a/sled-agent/src/services.rs b/sled-agent/src/services.rs index 0e2dd2c358..f13f1e1863 100644 --- a/sled-agent/src/services.rs +++ b/sled-agent/src/services.rs @@ -109,10 +109,14 @@ impl ServiceManager { &self.log, &self.vnic_allocator, &service.name, - /* unique_name= */ None, - /* dataset= */ &[], - /* devices= */ &[], - /* vnics= */ vec![], + // unique_name= + None, + // dataset= + &[], + // devices= + &[], + // vnics= + vec![], ) .await?; 
diff --git a/sled-agent/src/sim/collection.rs b/sled-agent/src/sim/collection.rs index c156adb05d..f2f4fd8aca 100644 --- a/sled-agent/src/sim/collection.rs +++ b/sled-agent/src/sim/collection.rs @@ -2,9 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! - * Simulated sled agent object collection - */ +//! Simulated sled agent object collection use super::config::SimMode; @@ -22,12 +20,10 @@ use uuid::Uuid; use super::simulatable::Simulatable; -/** - * Simulates an object of type `S: Simulatable`. - * - * Much of the simulation logic is commonized here in `SimObject` rather than - * separately in the specific `Simulatable` types. - */ +/// Simulates an object of type `S: Simulatable`. +/// +/// Much of the simulation logic is commonized here in `SimObject` rather than +/// separately in the specific `Simulatable` types. #[derive(Debug)] struct SimObject { /// The simulated object. @@ -38,35 +34,31 @@ struct SimObject { channel_tx: Option>, } -/** - * Buffer size for channel used to communicate with each `SimObject`'s - * background task - * - * Messages sent on this channel trigger the task to simulate an asynchronous - * state transition by sleeping for some interval and then updating the object - * state. When the background task updates the object state after sleeping, it - * always looks at the current state to decide what to do. As a result, we - * never need to queue up more than one transition. In turn, that means we - * don't need (or want) a channel buffer larger than 1. If we were to queue up - * multiple messages in the buffer, the net effect would be exactly the same as - * if just one message were queued. (Because of what we said above, as part of - * processing that message, the receiver will wind up handling all state - * transitions requested up to the point where the first message is read. 
If - * another transition is requested after that point, another message will be - * enqueued and the receiver will process that transition then. There's no need - * to queue more than one message.) Even stronger: we don't want a larger - * buffer because that would only cause extra laps through the sleep cycle, - * which just wastes resources and increases the latency for processing the next - * real transition request. - */ +/// Buffer size for channel used to communicate with each `SimObject`'s +/// background task +/// +/// Messages sent on this channel trigger the task to simulate an asynchronous +/// state transition by sleeping for some interval and then updating the object +/// state. When the background task updates the object state after sleeping, it +/// always looks at the current state to decide what to do. As a result, we +/// never need to queue up more than one transition. In turn, that means we +/// don't need (or want) a channel buffer larger than 1. If we were to queue up +/// multiple messages in the buffer, the net effect would be exactly the same as +/// if just one message were queued. (Because of what we said above, as part of +/// processing that message, the receiver will wind up handling all state +/// transitions requested up to the point where the first message is read. If +/// another transition is requested after that point, another message will be +/// enqueued and the receiver will process that transition then. There's no need +/// to queue more than one message.) Even stronger: we don't want a larger +/// buffer because that would only cause extra laps through the sleep cycle, +/// which just wastes resources and increases the latency for processing the next +/// real transition request. const SIM_CHANNEL_BUFFER_SIZE: usize = 0; impl SimObject { - /** - * Create a new `SimObject` with async state transitions automatically - * simulated by a background task. 
The caller is expected to provide the - * background task that reads from the channel and advances the simulation. - */ + /// Create a new `SimObject` with async state transitions automatically + /// simulated by a background task. The caller is expected to provide the + /// background task that reads from the channel and advances the simulation. fn new_simulated_auto( initial_state: &S::CurrentState, log: Logger, @@ -83,12 +75,10 @@ impl SimObject { ) } - /** - * Create a new `SimObject` with state transitions simulated by explicit - * calls. The only difference from the perspective of this struct is that - * we won't have a channel to which we send notifications when asynchronous - * state transitions begin. - */ + /// Create a new `SimObject` with state transitions simulated by explicit + /// calls. The only difference from the perspective of this struct is that + /// we won't have a channel to which we send notifications when asynchronous + /// state transitions begin. fn new_simulated_explicit( initial_state: &S::CurrentState, log: Logger, @@ -101,11 +91,9 @@ impl SimObject { } } - /** - * Begin a transition to the requested object state `target`. On success, - * returns whatever requested state change was dropped (because it was - * replaced), if any. This is mainly used for testing. - */ + /// Begin a transition to the requested object state `target`. On success, + /// returns whatever requested state change was dropped (because it was + /// replaced), if any. This is mainly used for testing. fn transition( &mut self, target: S::RequestedState, @@ -126,27 +114,25 @@ impl SimObject { "action" => ?action, ); - /* - * If this is an asynchronous transition, notify the background task to - * simulate it. There are a few possible error cases: - * - * (1) We fail to send the message because the channel's buffer is full. - * All we need to guarantee in the first place is that the receiver - * will receive a message at least once after this function is - * invoked. 
If there's already a message in the buffer, we don't - * need to do anything else to achieve that. - * - * (2) We fail to send the message because the channel is disconnected. - * This would be a programmer error -- the contract between us and - * the receiver is that we shut down the channel first. As a - * result, we panic if we find this case. - * - * (3) We failed to send the message for some other reason. This - * appears impossible at the time of this writing. It would be - * nice if the returned error type were implemented in a way that we - * could identify this case at compile time (e.g., using an enum), - * but that's not currently the case. - */ + // If this is an asynchronous transition, notify the background task to + // simulate it. There are a few possible error cases: + // + // (1) We fail to send the message because the channel's buffer is full. + // All we need to guarantee in the first place is that the receiver + // will receive a message at least once after this function is + // invoked. If there's already a message in the buffer, we don't + // need to do anything else to achieve that. + // + // (2) We fail to send the message because the channel is disconnected. + // This would be a programmer error -- the contract between us and + // the receiver is that we shut down the channel first. As a + // result, we panic if we find this case. + // + // (3) We failed to send the message for some other reason. This + // appears impossible at the time of this writing. It would be + // nice if the returned error type were implemented in a way that we + // could identify this case at compile time (e.g., using an enum), + // but that's not currently the case. 
if self.object.desired().is_some() { if let Some(ref mut tx) = self.channel_tx { let result = tx.try_send(()); @@ -174,25 +160,23 @@ impl SimObject { } } -/** - * A collection of `Simulatable` objects, each represented by a `SimObject` - * - * This struct provides basic facilities for simulating SledAgent APIs for - * instances and disks. - */ +/// A collection of `Simulatable` objects, each represented by a `SimObject` +/// +/// This struct provides basic facilities for simulating SledAgent APIs for +/// instances and disks. pub struct SimCollection { - /** handle to the Nexus API, used to notify about async transitions */ + /// handle to the Nexus API, used to notify about async transitions nexus_client: Arc, - /** logger for this collection */ + /// logger for this collection log: Logger, - /** simulation mode: automatic (timer-based) or explicit (using an API) */ + /// simulation mode: automatic (timer-based) or explicit (using an API) sim_mode: SimMode, - /** list of objects being simulated */ + /// list of objects being simulated objects: Mutex>>, } impl SimCollection { - /** Returns a new collection of simulated objects. */ + /// Returns a new collection of simulated objects. pub fn new( nexus_client: Arc, log: Logger, @@ -206,14 +190,12 @@ impl SimCollection { } } - /** - * Body of the background task (one per `SimObject`) that simulates - * asynchronous transitions. Each time we read a message from the object's - * channel, we sleep for a bit and then invoke `poke()` to complete whatever - * transition is currently outstanding. - * - * This is only used for `SimMode::Auto`. - */ + /// Body of the background task (one per `SimObject`) that simulates + /// asynchronous transitions. Each time we read a message from the object's + /// channel, we sleep for a bit and then invoke `poke()` to complete whatever + /// transition is currently outstanding. + /// + /// This is only used for `SimMode::Auto`. 
     async fn sim_step(&self, id: Uuid, mut rx: Receiver<()>) {
         while rx.next().await.is_some() {
             tokio::time::sleep(Duration::from_millis(1500)).await;
@@ -221,24 +203,20 @@
         }
     }
 
-    /**
-     * Complete a desired asynchronous state transition for object `id`.
-     * This is invoked either by `sim_step()` (if the simulation mode is
-     * `SimMode::Auto`) or `instance_finish_transition` (if the simulation mode
-     * is `SimMode::Api).
-     */
+    /// Complete a desired asynchronous state transition for object `id`.
+    /// This is invoked either by `sim_step()` (if the simulation mode is
+    /// `SimMode::Auto`) or `instance_finish_transition` (if the simulation mode
+    /// is `SimMode::Api`).
     pub async fn sim_poke(&self, id: Uuid) {
         let (new_state, to_destroy) = {
-            /*
-             * The object must be present in `objects` because it only gets
-             * removed when it comes to rest in the "Destroyed" state, but we
-             * can only get here if there's an asynchronous state transition
-             * desired.
-             *
-             * We do as little as possible with the lock held. In particular,
-             * we want to finish this work before calling out to notify the
-             * nexus.
-             */
+            // The object must be present in `objects` because it only gets
+            // removed when it comes to rest in the "Destroyed" state, but we
+            // can only get here if there's an asynchronous state transition
+            // desired.
+            //
+            // We do as little as possible with the lock held. In particular,
+            // we want to finish this work before calling out to notify the
+            // nexus.
             let mut objects = self.objects.lock().await;
             let mut object = objects.remove(&id).unwrap();
             object.transition_finish();
@@ -253,22 +231,18 @@
             }
         };
 
-        /*
-         * Notify Nexus that the object's state has changed.
-         * TODO-robustness: If this fails, we need to put it on some list of
-         * updates to retry later.
-         */
+        // Notify Nexus that the object's state has changed.
+        // TODO-robustness: If this fails, we need to put it on some list of
+        // updates to retry later.
S::notify(&self.nexus_client, &id, new_state).await.unwrap(); - /* - * If the object came to rest destroyed, complete any async cleanup - * needed now. - * TODO-debug It would be nice to have visibility into objects that - * are cleaning up in case we have to debug resource leaks here. - * TODO-correctness Is it a problem that nobody waits on the background - * task? If we did it here, we'd deadlock, since we're invoked from the - * background task. - */ + // If the object came to rest destroyed, complete any async cleanup + // needed now. + // TODO-debug It would be nice to have visibility into objects that + // are cleaning up in case we have to debug resource leaks here. + // TODO-correctness Is it a problem that nobody waits on the background + // task? If we did it here, we'd deadlock, since we're invoked from the + // background task. if let Some(destroyed_object) = to_destroy { if let Some(mut tx) = destroyed_object.channel_tx { tx.close_channel(); @@ -276,22 +250,20 @@ impl SimCollection { } } - /** - * Move the object identified by `id` from its current state to the - * requested state `target`. The object does not need to exist already; if - * not, it will be created from `current`. (This is the only case where - * `current` is used.) - * - * This call is idempotent; it will take whatever actions are necessary - * (if any) to create the object and move it to the requested state. - * - * This function returns the updated state, but note that this may not be - * the requested state in the event that the transition is asynchronous. - * For example, if an Instance is "stopped", and the requested state is - * "running", the returned state will be "starting". Subsequent - * asynchronous state transitions are reported via the notify() functions on - * the `NexusClient` object. - */ + /// Move the object identified by `id` from its current state to the + /// requested state `target`. 
The object does not need to exist already; if + /// not, it will be created from `current`. (This is the only case where + /// `current` is used.) + /// + /// This call is idempotent; it will take whatever actions are necessary + /// (if any) to create the object and move it to the requested state. + /// + /// This function returns the updated state, but note that this may not be + /// the requested state in the event that the transition is asynchronous. + /// For example, if an Instance is "stopped", and the requested state is + /// "running", the returned state will be "starting". Subsequent + /// asynchronous state transitions are reported via the notify() functions on + /// the `NexusClient` object. pub async fn sim_ensure( self: &Arc, id: &Uuid, @@ -304,7 +276,7 @@ impl SimCollection { if let Some(current_object) = maybe_current_object { (current_object, false) } else { - /* Create a new SimObject */ + // Create a new SimObject let idc = *id; let log = self.log.new(o!("id" => idc.to_string())); @@ -401,10 +373,8 @@ mod test { assert_eq!(r1.run_state, InstanceState::Creating); assert_eq!(r1.gen, Generation::new()); - /* - * There's no asynchronous transition going on yet so a - * transition_finish() shouldn't change anything. - */ + // There's no asynchronous transition going on yet so a + // transition_finish() shouldn't change anything. assert!(instance.object.desired().is_none()); instance.transition_finish(); assert!(instance.object.desired().is_none()); @@ -413,11 +383,9 @@ mod test { assert_eq!(r1.gen, instance.object.current().gen); assert!(rx.try_next().is_err()); - /* - * We should be able to transition immediately to any other stopped - * state. We can't do this for "Creating" because transition() treats - * that as a transition to "Running". - */ + // We should be able to transition immediately to any other stopped + // state. We can't do this for "Creating" because transition() treats + // that as a transition to "Running". 
let stopped_states = vec![ InstanceStateRequested::Stopped, InstanceStateRequested::Destroyed, @@ -451,11 +419,9 @@ mod test { logctx.cleanup_successful(); } - /** - * Tests a SimInstance which transitions to running and is subsequently destroyed. - * This test observes an intermediate transition through "stopping" to - * accomplish this goal. - */ + /// Tests a SimInstance which transitions to running and is subsequently destroyed. + /// This test observes an intermediate transition through "stopping" to + /// accomplish this goal. #[tokio::test] async fn test_sim_instance_running_then_destroyed() { let logctx = test_setup_log("test_sim_instance_running_then_destroyed"); @@ -466,10 +432,8 @@ mod test { assert_eq!(r1.run_state, InstanceState::Creating); assert_eq!(r1.gen, Generation::new()); - /* - * There's no asynchronous transition going on yet so a - * transition_finish() shouldn't change anything. - */ + // There's no asynchronous transition going on yet so a + // transition_finish() shouldn't change anything. assert!(instance.object.desired().is_none()); instance.transition_finish(); assert!(instance.object.desired().is_none()); @@ -478,10 +442,8 @@ mod test { assert_eq!(r1.gen, instance.object.current().gen); assert!(rx.try_next().is_err()); - /* - * Now, if we transition to "Running", we must go through the async - * process. - */ + // Now, if we transition to "Running", we must go through the async + // process. let mut rprev = r1; assert!(rx.try_next().is_err()); let dropped = instance @@ -513,10 +475,8 @@ mod test { let rnext = instance.object.current().clone(); assert_eq!(rprev.gen, rnext.gen); - /* - * If we transition again to "Running", the process should complete - * immediately. - */ + // If we transition again to "Running", the process should complete + // immediately. 
assert!(!rprev.run_state.is_stopped()); let dropped = instance .transition(InstanceRuntimeStateRequested { @@ -533,10 +493,8 @@ mod test { assert_eq!(rnext.run_state, rprev.run_state); rprev = rnext; - /* - * If we go back to any stopped state, we go through the async process - * again. - */ + // If we go back to any stopped state, we go through the async process + // again. assert!(!rprev.run_state.is_stopped()); assert!(rx.try_next().is_err()); let dropped = instance @@ -578,10 +536,8 @@ mod test { assert_eq!(r1.run_state, InstanceState::Creating); assert_eq!(r1.gen, Generation::new()); - /* - * There's no asynchronous transition going on yet so a - * transition_finish() shouldn't change anything. - */ + // There's no asynchronous transition going on yet so a + // transition_finish() shouldn't change anything. assert!(instance.object.desired().is_none()); instance.transition_finish(); assert!(instance.object.desired().is_none()); @@ -590,18 +546,14 @@ mod test { assert_eq!(r1.gen, instance.object.current().gen); assert!(rx.try_next().is_err()); - /* - * Now, if we transition to "Running", we must go through the async - * process. - */ + // Now, if we transition to "Running", we must go through the async + // process. let mut rprev = r1; - /* - * Now let's test the behavior of dropping a transition. We'll start - * transitioning back to "Running". Then, while we're still in - * "Starting", will transition back to "Destroyed". We should - * immediately go to "Stopping", and completing the transition should - * take us to "Destroyed". - */ + // Now let's test the behavior of dropping a transition. We'll start + // transitioning back to "Running". Then, while we're still in + // "Starting", will transition back to "Destroyed". We should + // immediately go to "Stopping", and completing the transition should + // take us to "Destroyed". 
assert!(rprev.run_state.is_stopped()); let dropped = instance .transition(InstanceRuntimeStateRequested { @@ -618,9 +570,7 @@ mod test { assert!(!rnext.run_state.is_stopped()); rprev = rnext; - /* - * Interrupt the async transition with a new one. - */ + // Interrupt the async transition with a new one. let dropped = instance .transition(InstanceRuntimeStateRequested { run_state: InstanceStateRequested::Destroyed, @@ -634,9 +584,7 @@ mod test { assert_eq!(rnext.run_state, InstanceState::Stopping); rprev = rnext; - /* - * Finish the async transition. - */ + // Finish the async transition. instance.transition_finish(); let rnext = instance.object.current().clone(); assert!(rnext.gen > rprev.gen); @@ -652,16 +600,12 @@ mod test { logctx.cleanup_successful(); } - /* - * Test reboot-related transitions. - */ + // Test reboot-related transitions. #[tokio::test] async fn test_sim_instance_reboot() { let logctx = test_setup_log("test_sim_instance_reboot"); - /* - * Get an initial instance up to "Running". - */ + // Get an initial instance up to "Running". let (mut instance, _rx) = make_instance(&logctx); let r1 = instance.object.current().clone(); @@ -684,9 +628,7 @@ mod test { } assert!(rnext.gen > rprev.gen); - /* - * Now, take it through a reboot sequence. - */ + // Now, take it through a reboot sequence. assert!(instance .transition(InstanceRuntimeStateRequested { run_state: InstanceStateRequested::Reboot, @@ -724,11 +666,9 @@ mod test { assert_eq!(rnext.run_state, InstanceState::Running); assert!(instance.object.desired().is_none()); - /* - * Begin a reboot. Then, while it's still "Stopping", begin another - * reboot. This should go through exactly one reboot sequence, as the - * second reboot is totally superfluous. - */ + // Begin a reboot. Then, while it's still "Stopping", begin another + // reboot. This should go through exactly one reboot sequence, as the + // second reboot is totally superfluous. 
assert!(instance .transition(InstanceRuntimeStateRequested { run_state: InstanceStateRequested::Reboot, @@ -758,11 +698,9 @@ mod test { let (rprev, rnext) = (rnext, instance.object.current().clone()); assert_eq!(rprev.gen, rnext.gen); - /* - * Begin a reboot. Then, while it's "Starting" (on the way back up), - * begin another reboot. This should go through a second reboot - * sequence. - */ + // Begin a reboot. Then, while it's "Starting" (on the way back up), + // begin another reboot. This should go through a second reboot + // sequence. assert!(instance .transition(InstanceRuntimeStateRequested { run_state: InstanceStateRequested::Reboot, @@ -795,13 +733,11 @@ mod test { let (rprev, rnext) = (rnext, instance.object.current().clone()); assert_eq!(rprev.gen, rnext.gen); - /* - * At this point, we've exercised what happens when a reboot is issued - * from "Running", from "Starting" with a reboot in progress, from - * "Stopping" with a reboot in progress. All that's left is "Starting" - * with no reboot in progress. First, stop the instance. Then start - * it. Then, while it's starting, begin a reboot sequence. - */ + // At this point, we've exercised what happens when a reboot is issued + // from "Running", from "Starting" with a reboot in progress, from + // "Stopping" with a reboot in progress. All that's left is "Starting" + // with no reboot in progress. First, stop the instance. Then start + // it. Then, while it's starting, begin a reboot sequence. assert!(instance .transition(InstanceRuntimeStateRequested { run_state: InstanceStateRequested::Stopped, @@ -841,25 +777,21 @@ mod test { let (rprev, rnext) = (rnext, instance.object.current().clone()); assert_eq!(rprev.gen, rnext.gen); - /* - * Issuing a reboot from any other state is not defined, including from - * "Stopping" while not in the process of a reboot and from any - * "stopped" state. instance_ensure() will prevent this, while - * transition() will allow it. 
We don't test the behavior of - * transition() because it's subject to change. - */ + // Issuing a reboot from any other state is not defined, including from + // "Stopping" while not in the process of a reboot and from any + // "stopped" state. instance_ensure() will prevent this, while + // transition() will allow it. We don't test the behavior of + // transition() because it's subject to change. logctx.cleanup_successful(); } - /** - * Tests basic usage of `SimDisk`. This is somewhat less exhaustive than - * the analogous tests for `SimInstance` because much of that functionality - * is implemented in `SimObject`, common to both. So we don't bother - * verifying dropped state, messages sent to the background task, or some - * sanity checks around completion of async transitions when none is - * desired. - */ + /// Tests basic usage of `SimDisk`. This is somewhat less exhaustive than + /// the analogous tests for `SimInstance` because much of that functionality + /// is implemented in `SimObject`, common to both. So we don't bother + /// verifying dropped state, messages sent to the background task, or some + /// sanity checks around completion of async transitions when none is + /// desired. #[tokio::test] async fn test_sim_disk_transition_to_detached_states() { let logctx = @@ -871,9 +803,7 @@ mod test { assert_eq!(r1.disk_state, DiskState::Creating); assert_eq!(r1.gen, Generation::new()); - /* - * Try transitioning to every other detached state. - */ + // Try transitioning to every other detached state. let detached_states = vec![ (DiskStateRequested::Detached, DiskState::Detached), (DiskStateRequested::Destroyed, DiskState::Destroyed), @@ -931,7 +861,7 @@ mod test { assert!(rnext.disk_state.is_attached()); let rprev = rnext; - /* If we go straight to "Attached" again, there's nothing to do. */ + // If we go straight to "Attached" again, there's nothing to do. 
assert!(disk .transition(DiskStateRequested::Attached(id.clone())) .unwrap() @@ -940,9 +870,7 @@ mod test { assert_eq!(rnext.gen, rprev.gen); let rprev = rnext; - /* - * It's illegal to go straight to attached to a different instance. - */ + // It's illegal to go straight to attached to a different instance. let id2 = uuid::Uuid::new_v4(); assert_ne!(id, id2); let error = disk @@ -957,10 +885,8 @@ mod test { assert_eq!(rprev.gen, rnext.gen); let rprev = rnext; - /* - * If we go to a different detached state, we go through the async - * transition again. - */ + // If we go to a different detached state, we go through the async + // transition again. disk.transition(DiskStateRequested::Detached).unwrap(); let rnext = disk.object.current().clone(); assert!(rnext.gen > rprev.gen); @@ -973,10 +899,8 @@ mod test { assert_eq!(rnext.disk_state, DiskState::Detached); assert!(rnext.gen > rprev.gen); - /* - * Verify that it works fine to change directions in the middle of an - * async transition. - */ + // Verify that it works fine to change directions in the middle of an + // async transition. disk.transition(DiskStateRequested::Attached(id.clone())).unwrap(); assert_eq!( disk.object.current().disk_state, diff --git a/sled-agent/src/sim/config.rs b/sled-agent/src/sim/config.rs index 7c3f24266f..828ebead82 100644 --- a/sled-agent/src/sim/config.rs +++ b/sled-agent/src/sim/config.rs @@ -2,9 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! - * Interfaces for working with sled agent configuration - */ +//! 
Interfaces for working with sled agent configuration use dropshot::ConfigDropshot; use dropshot::ConfigLogging; @@ -13,23 +11,17 @@ use serde::Serialize; use std::net::{IpAddr, SocketAddr}; use uuid::Uuid; -/** - * How a [`SledAgent`](`super::sled_agent::SledAgent`) simulates object states and - * transitions - */ +/// How a [`SledAgent`](`super::sled_agent::SledAgent`) simulates object states and +/// transitions #[derive(Copy, Clone, Debug, Deserialize, PartialEq, Serialize)] pub enum SimMode { - /** - * Indicates that asynchronous state transitions should be simulated - * automatically using a timer to complete the transition a few seconds in - * the future. - */ + /// Indicates that asynchronous state transitions should be simulated + /// automatically using a timer to complete the transition a few seconds in + /// the future. Auto, - /** - * Indicates that asynchronous state transitions should be simulated - * explicitly, relying on calls through `sled_agent::TestInterfaces`. - */ + /// Indicates that asynchronous state transitions should be simulated + /// explicitly, relying on calls through `sled_agent::TestInterfaces`. 
Explicit, } @@ -49,21 +41,19 @@ pub struct ConfigStorage { pub ip: IpAddr, } -/** - * Configuration for a sled agent - */ +/// Configuration for a sled agent #[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] pub struct Config { - /** unique id for the sled */ + /// unique id for the sled pub id: Uuid, - /** how to simulate asynchronous Instance and Disk transitions */ + /// how to simulate asynchronous Instance and Disk transitions pub sim_mode: SimMode, - /** IP address and TCP port for Nexus instance to register with */ + /// IP address and TCP port for Nexus instance to register with pub nexus_address: SocketAddr, - /** configuration for the sled agent dropshot server */ + /// configuration for the sled agent dropshot server pub dropshot: ConfigDropshot, - /** configuration for the sled agent debug log */ + /// configuration for the sled agent debug log pub log: ConfigLogging, - /** configuration for the sled agent's storage */ + /// configuration for the sled agent's storage pub storage: ConfigStorage, } diff --git a/sled-agent/src/sim/disk.rs b/sled-agent/src/sim/disk.rs index 19927ebb1d..5d67b8decb 100644 --- a/sled-agent/src/sim/disk.rs +++ b/sled-agent/src/sim/disk.rs @@ -2,9 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! - * Simulated sled agent implementation - */ +//! Simulated sled agent implementation use crate::nexus::NexusClient; use crate::params::DiskStateRequested; @@ -20,11 +18,9 @@ use uuid::Uuid; use crate::common::disk::{Action as DiskAction, DiskStates}; -/** - * Simulated Disk (network block device), as created by the external Oxide API - * - * See `Simulatable` for how this works. - */ +/// Simulated Disk (network block device), as created by the external Oxide API +/// +/// See `Simulatable` for how this works. 
#[derive(Debug)] pub struct SimDisk { state: DiskStates, diff --git a/sled-agent/src/sim/http_entrypoints.rs b/sled-agent/src/sim/http_entrypoints.rs index d94836af62..ffd0d38a5b 100644 --- a/sled-agent/src/sim/http_entrypoints.rs +++ b/sled-agent/src/sim/http_entrypoints.rs @@ -2,9 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! - * HTTP entrypoint functions for the sled agent's exposed API - */ +//! HTTP entrypoint functions for the sled agent's exposed API use crate::params::{DiskEnsureBody, InstanceEnsureBody}; use dropshot::endpoint; @@ -27,9 +25,7 @@ use super::sled_agent::SledAgent; type SledApiDescription = ApiDescription>; -/** - * Returns a description of the sled agent API - */ +/// Returns a description of the sled agent API pub fn api() -> SledApiDescription { fn register_endpoints(api: &mut SledApiDescription) -> Result<(), String> { api.register(instance_put)?; @@ -47,9 +43,7 @@ pub fn api() -> SledApiDescription { api } -/** - * Path parameters for Instance requests (sled agent API) - */ +/// Path parameters for Instance requests (sled agent API) #[derive(Deserialize, JsonSchema)] struct InstancePathParam { instance_id: Uuid, @@ -87,9 +81,7 @@ async fn instance_poke_post( Ok(HttpResponseUpdatedNoContent()) } -/** - * Path parameters for Disk requests (sled agent API) - */ +/// Path parameters for Disk requests (sled agent API) #[derive(Deserialize, JsonSchema)] struct DiskPathParam { disk_id: Uuid, diff --git a/sled-agent/src/sim/instance.rs b/sled-agent/src/sim/instance.rs index 88392f1f97..428138348e 100644 --- a/sled-agent/src/sim/instance.rs +++ b/sled-agent/src/sim/instance.rs @@ -2,9 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! - * Simulated sled agent implementation - */ +//! 
Simulated sled agent implementation use super::simulatable::Simulatable; @@ -22,9 +20,7 @@ use uuid::Uuid; use crate::common::instance::{Action as InstanceAction, InstanceStates}; -/** - * Simulated Instance (virtual machine), as created by the external Oxide API - */ +/// Simulated Instance (virtual machine), as created by the external Oxide API #[derive(Debug)] pub struct SimInstance { state: InstanceStates, diff --git a/sled-agent/src/sim/mod.rs b/sled-agent/src/sim/mod.rs index 4ad22f0214..0e8a05eb89 100644 --- a/sled-agent/src/sim/mod.rs +++ b/sled-agent/src/sim/mod.rs @@ -2,9 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! - * Simulated sled agent implementation - */ +//! Simulated sled agent implementation mod collection; mod config; diff --git a/sled-agent/src/sim/server.rs b/sled-agent/src/sim/server.rs index c8a82291a8..6de9e82f09 100644 --- a/sled-agent/src/sim/server.rs +++ b/sled-agent/src/sim/server.rs @@ -2,9 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! -* Library interface to the sled agent - */ +//! 
Library interface to the sled agent use super::config::Config; use super::http_entrypoints::api as http_api; @@ -18,21 +16,17 @@ use omicron_common::backoff::{ use slog::{Drain, Logger}; use std::sync::Arc; -/** - * Packages up a [`SledAgent`], running the sled agent API under a Dropshot - * server wired up to the sled agent - */ +/// Packages up a [`SledAgent`], running the sled agent API under a Dropshot +/// server wired up to the sled agent pub struct Server { - /** underlying sled agent */ + /// underlying sled agent pub sled_agent: Arc, - /** dropshot server for the API */ + /// dropshot server for the API pub http_server: dropshot::HttpServer>, } impl Server { - /** - * Start a SledAgent server - */ + /// Start a SledAgent server pub async fn start( config: &Config, log: &Logger, @@ -66,14 +60,12 @@ impl Server { .map_err(|error| format!("initializing server: {}", error))? .start(); - /* - * Notify the control plane that we're up, and continue trying this - * until it succeeds. We retry with an randomized, capped exponential - * backoff. - * - * TODO-robustness if this returns a 400 error, we probably want to - * return a permanent error from the `notify_nexus` closure. - */ + // Notify the control plane that we're up, and continue trying this + // until it succeeds. We retry with an randomized, capped exponential + // backoff. + // + // TODO-robustness if this returns a 400 error, we probably want to + // return a permanent error from the `notify_nexus` closure. let sa_address = http_server.local_addr(); let notify_nexus = || async { debug!(log, "contacting server nexus"); @@ -120,21 +112,17 @@ impl Server { Ok(Server { sled_agent, http_server }) } - /** - * Wait for the given server to shut down - * - * Note that this doesn't initiate a graceful shutdown, so if you call this - * immediately after calling `start()`, the program will block indefinitely - * or until something else initiates a graceful shutdown. 
- */ + /// Wait for the given server to shut down + /// + /// Note that this doesn't initiate a graceful shutdown, so if you call this + /// immediately after calling `start()`, the program will block indefinitely + /// or until something else initiates a graceful shutdown. pub async fn wait_for_finish(self) -> Result<(), String> { self.http_server.await } } -/** - * Run an instance of the `Server` - */ +/// Run an instance of the `Server` pub async fn run_server(config: &Config) -> Result<(), String> { let (drain, registration) = slog_dtrace::with_drain( config diff --git a/sled-agent/src/sim/simulatable.rs b/sled-agent/src/sim/simulatable.rs index eaefc54fe0..ae94409dde 100644 --- a/sled-agent/src/sim/simulatable.rs +++ b/sled-agent/src/sim/simulatable.rs @@ -10,46 +10,44 @@ use std::fmt; use std::sync::Arc; use uuid::Uuid; -/** - * Describes Oxide API objects that can be simulated here in the sled agent - * - * We only simulate these objects from the perspective of an API consumer, which - * means for example accepting a request to boot it, reporting the current state - * as "starting", and then some time later reporting that the state is - * "running". - * - * The basic idea is that for any type that we want to simulate (e.g., - * Instances), there's a `CurrentState` (which you could think of as "stopped", - * "starting", "running", "stopping") and a `RequestedState` (which would only - * be "stopped" and "running" -- you can't ask an Instance to transition to - * "starting" or "stopping") - * - * (The term "state" here is a bit overloaded. `CurrentState` refers to the - * state of the object itself that's being simulated. This might be an Instance - * that is currently in state "running". `RequestedState` refers to a requested - * _change_ to the state of the object. The state of the _simulated_ object - * includes both of these: e.g., a "starting" Instance that is requested to be - * "running". 
So in most cases in the interface below, the state is represented - * by a tuple of `(CurrentState, Option)`.) - * - * Transitioning between states is always either synchronous (which means that - * we make the transition immediately) or asynchronous (which means that we - * first transition to some intermediate state and some time later finish the - * transition to the requested state). An Instance transition from "Stopped" to - * "Destroyed" is synchronous. An Instance transition from "Stopped" to - * "Running" is asynchronous; it first goes to "Starting" and some time later - * becomes "Running". - * - * It's expected that an object can begin another user-requested state - * transition no matter what state it's in, although some particular transitions - * may be disallowed (e.g., "reboot" from a stopped state). - * - * The implementor determines the set of possible states (via `CurrentState` and - * `RequestedState`) as well as what transitions are allowed. - * - * When an asynchronous state change completes, we notify the control plane via - * the `notify()` function. - */ +/// Describes Oxide API objects that can be simulated here in the sled agent +/// +/// We only simulate these objects from the perspective of an API consumer, which +/// means for example accepting a request to boot it, reporting the current state +/// as "starting", and then some time later reporting that the state is +/// "running". +/// +/// The basic idea is that for any type that we want to simulate (e.g., +/// Instances), there's a `CurrentState` (which you could think of as "stopped", +/// "starting", "running", "stopping") and a `RequestedState` (which would only +/// be "stopped" and "running" -- you can't ask an Instance to transition to +/// "starting" or "stopping") +/// +/// (The term "state" here is a bit overloaded. `CurrentState` refers to the +/// state of the object itself that's being simulated. This might be an Instance +/// that is currently in state "running". 
`RequestedState` refers to a requested +/// _change_ to the state of the object. The state of the _simulated_ object +/// includes both of these: e.g., a "starting" Instance that is requested to be +/// "running". So in most cases in the interface below, the state is represented +/// by a tuple of `(CurrentState, Option)`.) +/// +/// Transitioning between states is always either synchronous (which means that +/// we make the transition immediately) or asynchronous (which means that we +/// first transition to some intermediate state and some time later finish the +/// transition to the requested state). An Instance transition from "Stopped" to +/// "Destroyed" is synchronous. An Instance transition from "Stopped" to +/// "Running" is asynchronous; it first goes to "Starting" and some time later +/// becomes "Running". +/// +/// It's expected that an object can begin another user-requested state +/// transition no matter what state it's in, although some particular transitions +/// may be disallowed (e.g., "reboot" from a stopped state). +/// +/// The implementor determines the set of possible states (via `CurrentState` and +/// `RequestedState`) as well as what transitions are allowed. +/// +/// When an asynchronous state change completes, we notify the control plane via +/// the `notify()` function. #[async_trait] pub trait Simulatable: fmt::Debug + Send + Sync { /// Represents a possible current runtime state of the simulated object. diff --git a/sled-agent/src/sim/sled_agent.rs b/sled-agent/src/sim/sled_agent.rs index 8332cc4b6e..9e61173da9 100644 --- a/sled-agent/src/sim/sled_agent.rs +++ b/sled-agent/src/sim/sled_agent.rs @@ -2,9 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! - * Simulated sled agent implementation - */ +//! 
Simulated sled agent implementation use crate::nexus::NexusClient; use crate::params::{ @@ -24,30 +22,26 @@ use super::disk::SimDisk; use super::instance::SimInstance; use super::storage::{CrucibleData, Storage}; -/** - * Simulates management of the control plane on a sled - * - * The current implementation simulates a server directly in this program. - * **It's important to be careful about the interface exposed by this struct.** - * The intent is for it to eventually be implemented using requests to a remote - * server. The tighter the coupling that exists now, the harder this will be to - * move later. - */ +/// Simulates management of the control plane on a sled +/// +/// The current implementation simulates a server directly in this program. +/// **It's important to be careful about the interface exposed by this struct.** +/// The intent is for it to eventually be implemented using requests to a remote +/// server. The tighter the coupling that exists now, the harder this will be to +/// move later. pub struct SledAgent { - /** collection of simulated instances, indexed by instance uuid */ + /// collection of simulated instances, indexed by instance uuid instances: Arc>, - /** collection of simulated disks, indexed by disk uuid */ + /// collection of simulated disks, indexed by disk uuid disks: Arc>, storage: Mutex, pub nexus_client: Arc, } impl SledAgent { - /* - * TODO-cleanup should this instantiate the NexusClient it needs? - * Should it take a Config object instead of separate id, sim_mode, etc? - */ - /** Constructs a simulated SledAgent with the given uuid. */ + // TODO-cleanup should this instantiate the NexusClient it needs? + // Should it take a Config object instead of separate id, sim_mode, etc? + /// Constructs a simulated SledAgent with the given uuid. 
pub fn new_simulated_with_id( config: &Config, log: Logger, @@ -82,11 +76,9 @@ impl SledAgent { } } - /** - * Idempotently ensures that the given API Instance (described by - * `api_instance`) exists on this server in the given runtime state - * (described by `target`). - */ + /// Idempotently ensures that the given API Instance (described by + /// `api_instance`) exists on this server in the given runtime state + /// (described by `target`). pub async fn instance_ensure( self: &Arc, instance_id: Uuid, @@ -99,11 +91,9 @@ impl SledAgent { .await?) } - /** - * Idempotently ensures that the given API Disk (described by `api_disk`) - * is attached (or not) as specified. This simulates disk attach and - * detach, similar to instance boot and halt. - */ + /// Idempotently ensures that the given API Disk (described by `api_disk`) + /// is attached (or not) as specified. This simulates disk attach and + /// detach, similar to instance boot and halt. pub async fn disk_ensure( self: &Arc, disk_id: Uuid, diff --git a/sled-agent/src/sled_agent.rs b/sled-agent/src/sled_agent.rs index 1298afd212..99bb25ec6d 100644 --- a/sled-agent/src/sled_agent.rs +++ b/sled-agent/src/sled_agent.rs @@ -98,7 +98,8 @@ impl SledAgent { Mountpoint::Path(std::path::PathBuf::from( ZONE_ZFS_DATASET_MOUNTPOINT, )), - /* do_format= */ true, + // do_format= + true, )?; // Identify all existing zones which should be managed by the Sled diff --git a/sled-agent/src/storage_manager.rs b/sled-agent/src/storage_manager.rs index 4ebebf8916..00b19ce7cd 100644 --- a/sled-agent/src/storage_manager.rs +++ b/sled-agent/src/storage_manager.rs @@ -672,7 +672,8 @@ impl StorageWorker { .initialize_dataset_and_zone( pool, &dataset_info, - /* do_format= */ true, + // do_format= + true, ) .await?; @@ -714,7 +715,8 @@ impl StorageWorker { self.initialize_dataset_and_zone( pool, &dataset_info, - /* do_format= */ false, + // do_format= + false, ) .await?; diff --git a/sled-agent/tests/test_commands.rs 
b/sled-agent/tests/test_commands.rs index d216aaf85e..25960b5998 100644 --- a/sled-agent/tests/test_commands.rs +++ b/sled-agent/tests/test_commands.rs @@ -2,15 +2,11 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! - * Tests for the executable commands in this repo. Most functionality is tested - * elsewhere, so this really just sanity checks argument parsing, bad args, and - * the --openapi mode. - */ +//! Tests for the executable commands in this repo. Most functionality is tested +//! elsewhere, so this really just sanity checks argument parsing, bad args, and +//! the --openapi mode. -/* - * TODO-coverage: test success cases of sled-agent - */ +// TODO-coverage: test success cases of sled-agent use expectorate::assert_contents; use omicron_test_utils::dev::test_cmds::assert_exit_code; @@ -22,7 +18,7 @@ use openapiv3::OpenAPI; use std::path::PathBuf; use subprocess::Exec; -/** name of the "sled-agent-sim" executable */ +/// name of the "sled-agent-sim" executable const CMD_SLED_AGENT_SIM: &str = env!("CARGO_BIN_EXE_sled-agent-sim"); fn path_to_sled_agent_sim() -> PathBuf { @@ -43,7 +39,7 @@ fn test_sled_agent_sim_no_args() { &stderr_text, ); } -/** name of the "sled-agent" executable */ +/// name of the "sled-agent" executable const CMD_SLED_AGENT: &str = env!("CARGO_BIN_EXE_sled-agent"); fn path_to_sled_agent() -> PathBuf { @@ -72,17 +68,13 @@ fn test_sled_agent_openapi_bootagent() { let spec: OpenAPI = serde_json::from_str(&stdout_text) .expect("stdout was not valid OpenAPI"); - /* - * Check for lint errors. - */ + // Check for lint errors. let errors = openapi_lint::validate(&spec); assert!(errors.is_empty(), "{}", errors.join("\n\n")); - /* - * Confirm that the output hasn't changed. It's expected that we'll change - * this file as the API evolves, but pay attention to the diffs to ensure - * that the changes match your expectations. 
- */ + // Confirm that the output hasn't changed. It's expected that we'll change + // this file as the API evolves, but pay attention to the diffs to ensure + // that the changes match your expectations. assert_contents("../openapi/bootstrap-agent.json", &stdout_text); } @@ -99,16 +91,12 @@ fn test_sled_agent_openapi_sled() { let spec: OpenAPI = serde_json::from_str(&stdout_text) .expect("stdout was not valid OpenAPI"); - /* - * Check for lint errors. - */ + // Check for lint errors. let errors = openapi_lint::validate(&spec); assert!(errors.is_empty(), "{}", errors.join("\n\n")); - /* - * Confirm that the output hasn't changed. It's expected that we'll change - * this file as the API evolves, but pay attention to the diffs to ensure - * that the changes match your expectations. - */ + // Confirm that the output hasn't changed. It's expected that we'll change + // this file as the API evolves, but pay attention to the diffs to ensure + // that the changes match your expectations. assert_contents("../openapi/sled-agent.json", &stdout_text); } diff --git a/sp-sim/src/config.rs b/sp-sim/src/config.rs index ac9d57f6fd..b8c8adefb5 100644 --- a/sp-sim/src/config.rs +++ b/sp-sim/src/config.rs @@ -2,10 +2,8 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -//! //! Interfaces for parsing configuration files and working with a simulated SP //! configuration -//! use dropshot::ConfigLogging; use gateway_messages::SerialNumber; @@ -64,14 +62,12 @@ pub struct Config { pub simulated_sps: SimulatedSps, /// Server-wide logging configuration. pub log: ConfigLogging, - /* - /// Type of SP to simulate. - pub sp_type: SpType, - /// Components to simulate. - pub components: SpComponents, - /// UDP listen address. - pub bind_address: SocketAddr, - */ + // Type of SP to simulate. + // pub sp_type: SpType, + // Components to simulate. + // pub components: SpComponents, + // UDP listen address. 
+ // pub bind_address: SocketAddr, } impl Config { diff --git a/test-utils/src/bin/omicron-dev.rs b/test-utils/src/bin/omicron-dev.rs index 084afd2499..228ce350f7 100644 --- a/test-utils/src/bin/omicron-dev.rs +++ b/test-utils/src/bin/omicron-dev.rs @@ -2,9 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! - * Developer tool for setting up a local database for use by Omicron - */ +//! Developer tool for setting up a local database for use by Omicron use anyhow::bail; use anyhow::Context; @@ -70,45 +68,35 @@ struct DbRunArgs { store_dir: Option, /// Database (SQL) listen port. Use `0` to request any available port. - /* - * We choose an arbitrary default port that's different from the default - * CockroachDB port to avoid conflicting. We don't use 0 because this port - * is specified in a few other places, like the default Nexus config file. - * TODO We could load that file at compile time and use the value there. - */ + // We choose an arbitrary default port that's different from the default + // CockroachDB port to avoid conflicting. We don't use 0 because this port + // is specified in a few other places, like the default Nexus config file. + // TODO We could load that file at compile time and use the value there. #[structopt(long, default_value = "32221")] listen_port: u16, - /* - * This unusual structopt configuration makes "populate" default to true, - * allowing a --no-populate override on the CLI. - */ + // This unusual structopt configuration makes "populate" default to true, + // allowing a --no-populate override on the CLI. /// Do not populate the database with any schema #[structopt(long = "--no-populate", parse(from_flag = std::ops::Not::not))] populate: bool, } async fn cmd_db_run(args: &DbRunArgs) -> Result<(), anyhow::Error> { - /* - * Set ourselves up to wait for SIGINT. 
It's important to do this early, - * before we've created resources that we want to have cleaned up on SIGINT - * (e.g., the temporary directory created by the database starter). - */ + // Set ourselves up to wait for SIGINT. It's important to do this early, + // before we've created resources that we want to have cleaned up on SIGINT + // (e.g., the temporary directory created by the database starter). let signals = Signals::new(&[SIGINT]).expect("failed to wait for SIGINT"); let mut signal_stream = signals.fuse(); - /* - * Now start CockroachDB. This process looks bureaucratic (create arg - * builder, then create starter, then start it) because we want to be able - * to print what's happening before we do it. - */ + // Now start CockroachDB. This process looks bureaucratic (create arg + // builder, then create starter, then start it) because we want to be able + // to print what's happening before we do it. let mut db_arg_builder = dev::db::CockroachStarterBuilder::new().listen_port(args.listen_port); - /* - * NOTE: The stdout strings here are not intended to be stable, but they are - * used by the test suite. - */ + // NOTE: The stdout strings here are not intended to be stable, but they are + // used by the test suite. if let Some(store_dir) = &args.store_dir { println!( @@ -141,18 +129,14 @@ async fn cmd_db_run(args: &DbRunArgs) -> Result<(), anyhow::Error> { ); if args.populate { - /* - * Populate the database with our schema. - */ + // Populate the database with our schema. println!("omicron-dev: populating database"); db_instance.populate().await.context("populating database")?; println!("omicron-dev: populated database"); } - /* - * Wait for either the child process to shut down on its own or for us to - * receive SIGINT. - */ + // Wait for either the child process to shut down on its own or for us to + // receive SIGINT. tokio::select! 
{ _ = db_instance.wait_for_shutdown() => { db_instance.cleanup().await.context("clean up after shutdown")?; diff --git a/test-utils/src/dev/db.rs b/test-utils/src/dev/db.rs index 6a45c0e6b8..d4e8b8d099 100644 --- a/test-utils/src/dev/db.rs +++ b/test-utils/src/dev/db.rs @@ -26,25 +26,21 @@ use tokio_postgres::config::SslMode; /// Default for how long to wait for CockroachDB to report its listening URL const COCKROACHDB_START_TIMEOUT_DEFAULT: Duration = Duration::from_secs(30); -/* - * A default listen port of 0 allows the system to choose any available port. - * This is appropriate for the test suite and may be useful in some cases for - * omicron-dev. However, omicron-dev by default chooses a specific port so that - * we can ship a Nexus configuration that will use the same port. - */ +// A default listen port of 0 allows the system to choose any available port. +// This is appropriate for the test suite and may be useful in some cases for +// omicron-dev. However, omicron-dev by default chooses a specific port so that +// we can ship a Nexus configuration that will use the same port. const COCKROACHDB_DEFAULT_LISTEN_PORT: u16 = 0; -/** CockroachDB database name */ -/* This MUST be kept in sync with src/sql/dbinit.sql and src/sql/dbwipe.sql. */ +/// CockroachDB database name +// This MUST be kept in sync with src/sql/dbinit.sql and src/sql/dbwipe.sql. const COCKROACHDB_DATABASE: &'static str = "omicron"; -/** CockroachDB user name */ -/* - * TODO-security This should really use "omicron", which is created in - * src/sql/dbinit.sql. Doing that requires either hardcoding a password or - * (better) using `cockroach cert` to set up a CA and certificates for this - * user. We should modify the infrastructure here to do that rather than use - * "root" here. - */ +/// CockroachDB user name +// TODO-security This should really use "omicron", which is created in +// src/sql/dbinit.sql. 
Doing that requires either hardcoding a password or +// (better) using `cockroach cert` to set up a CA and certificates for this +// user. We should modify the infrastructure here to do that rather than use +// "root" here. const COCKROACHDB_USER: &'static str = "root"; /// Path to the CockroachDB binary @@ -54,20 +50,18 @@ const COCKROACHDB_BIN: &str = "cockroach"; const COCKROACHDB_VERSION: &str = include_str!("../../../tools/cockroachdb_version"); -/** - * Builder for [`CockroachStarter`] that supports setting some command-line - * arguments for the `cockroach start-single-node` command - * - * Without customizations, this will run `cockroach start-single-node --insecure - * --listen-addr=127.0.0.1:0 --http-addr=:0`. - * - * It's useful to support running this concurrently (as in the test suite). To - * support this, we allow CockroachDB to choose its listening ports. To figure - * out which ports it chose, we also use the --listening-url-file option to have - * it write the URL to a file in a temporary directory. The Drop - * implementations for `CockroachStarter` and `CockroachInstance` will ensure - * that this directory gets cleaned up as long as this program exits normally. - */ +/// Builder for [`CockroachStarter`] that supports setting some command-line +/// arguments for the `cockroach start-single-node` command +/// +/// Without customizations, this will run `cockroach start-single-node --insecure +/// --listen-addr=127.0.0.1:0 --http-addr=:0`. +/// +/// It's useful to support running this concurrently (as in the test suite). To +/// support this, we allow CockroachDB to choose its listening ports. To figure +/// out which ports it chose, we also use the --listening-url-file option to have +/// it write the URL to a file in a temporary directory. The Drop +/// implementations for `CockroachStarter` and `CockroachInstance` will ensure +/// that this directory gets cleaned up as long as this program exits normally. 
#[derive(Debug)] pub struct CockroachStarterBuilder { /// optional value for the --store-dir option @@ -99,18 +93,16 @@ impl CockroachStarterBuilder { redirect_stdio: false, }; - /* - * We use single-node insecure mode listening only on localhost. We - * consider this secure enough for development (including the test - * suite), though it does allow anybody on the system to do anything - * with this database (including fill up all disk space). (It wouldn't - * be unreasonable to secure this with certificates even though we're - * on localhost. - * - * If we decide to let callers customize various listening addresses, we - * should be careful about making it too easy to generate a more - * insecure configuration. - */ + // We use single-node insecure mode listening only on localhost. We + // consider this secure enough for development (including the test + // suite), though it does allow anybody on the system to do anything + // with this database (including fill up all disk space). (It wouldn't + // be unreasonable to secure this with certificates even though we're + // on localhost. + // + // If we decide to let callers customize various listening addresses, we + // should be careful about making it too easy to generate a more + // insecure configuration. builder .arg("start-single-node") .arg("--insecure") @@ -118,12 +110,10 @@ impl CockroachStarterBuilder { builder } - /** - * Redirect stdout and stderr for the "cockroach" process to files within - * the temporary directory. This is used by the test suite so that people - * don't get reams of irrelevant output when running `cargo test`. This - * will be cleaned up as usual on success. - */ + /// Redirect stdout and stderr for the "cockroach" process to files within + /// the temporary directory. This is used by the test suite so that people + /// don't get reams of irrelevant output when running `cargo test`. This + /// will be cleaned up as usual on success. 
pub fn redirect_stdio_to_files(&mut self) -> &mut Self { self.redirect_stdio = true; self @@ -134,24 +124,20 @@ impl CockroachStarterBuilder { self } - /** - * Sets the `--store-dir` command-line argument to `store_dir` - * - * This is where the database will store all of its on-disk data. If this - * isn't specified, CockroachDB will be configured to store data into a - * temporary directory that will be cleaned up on Drop of - * [`CockroachStarter`] or [`CockroachInstance`]. - */ + /// Sets the `--store-dir` command-line argument to `store_dir` + /// + /// This is where the database will store all of its on-disk data. If this + /// isn't specified, CockroachDB will be configured to store data into a + /// temporary directory that will be cleaned up on Drop of + /// [`CockroachStarter`] or [`CockroachInstance`]. pub fn store_dir>(mut self, store_dir: P) -> Self { self.store_dir.replace(store_dir.as_ref().to_owned()); self } - /** - * Sets the listening port for the PostgreSQL and CockroachDB protocols - * - * We always listen only on 127.0.0.1. - */ + /// Sets the listening port for the PostgreSQL and CockroachDB protocols + /// + /// We always listen only on 127.0.0.1. pub fn listen_port(mut self, listen_port: u16) -> Self { self.listen_port = listen_port; self @@ -170,26 +156,22 @@ impl CockroachStarterBuilder { .with_context(|| format!("open \"{}\"", out_path.display())) } - /** - * Starts CockroachDB using the configured command-line arguments - * - * This will create a temporary directory for the listening URL file (see - * above) and potentially the database store directory (if `store_dir()` - * was never called). - */ + /// Starts CockroachDB using the configured command-line arguments + /// + /// This will create a temporary directory for the listening URL file (see + /// above) and potentially the database store directory (if `store_dir()` + /// was never called). 
pub fn build(mut self) -> Result { - /* - * We always need a temporary directory, if for no other reason than to - * put the listen-url file. (It would be nice if the subprocess crate - * allowed us to open a pipe stream to the child other than stdout or - * stderr, although there may not be a portable means to identify it to - * CockroachDB on the command line.) - * - * TODO Maybe it would be more ergonomic to use a well-known temporary - * directory rather than a random one. That way, we can warn the user - * if they start up two of them, and we can also clean up after unclean - * shutdowns. - */ + // We always need a temporary directory, if for no other reason than to + // put the listen-url file. (It would be nice if the subprocess crate + // allowed us to open a pipe stream to the child other than stdout or + // stderr, although there may not be a portable means to identify it to + // CockroachDB on the command line.) + // + // TODO Maybe it would be more ergonomic to use a well-known temporary + // directory rather than a random one. That way, we can warn the user + // if they start up two of them, and we can also clean up after unclean + // shutdowns. 
let temp_dir = tempdir().with_context(|| "creating temporary directory")?; let store_dir = self @@ -230,10 +212,8 @@ impl CockroachStarterBuilder { }) } - /** - * Convenience wrapper for self.cmd_builder.arg() that records the arguments - * so that we can print out the command line before we run it - */ + /// Convenience wrapper for self.cmd_builder.arg() that records the arguments + /// so that we can print out the command line before we run it fn arg>(&mut self, arg: S) -> &mut Self { let arg = arg.as_ref(); self.args.push(arg.to_string_lossy().to_string()); @@ -241,9 +221,7 @@ impl CockroachStarterBuilder { self } - /** - * Convenience for constructing a path name in a given temporary directory - */ + /// Convenience for constructing a path name in a given temporary directory fn temp_path>(tempdir: &TempDir, file: S) -> PathBuf { let mut pathbuf = tempdir.path().to_owned(); pathbuf.push(file.as_ref()); @@ -251,12 +229,10 @@ impl CockroachStarterBuilder { } } -/** - * Manages execution of the `cockroach` command in order to start a CockroachDB - * instance - * - * To use this, see [`CockroachStarterBuilder`]. - */ +/// Manages execution of the `cockroach` command in order to start a CockroachDB +/// instance +/// +/// To use this, see [`CockroachStarterBuilder`]. 
#[derive(Debug)] pub struct CockroachStarter { /// temporary directory used for URL file and potentially data storage @@ -274,14 +250,12 @@ pub struct CockroachStarter { } impl CockroachStarter { - /** Returns a human-readable summary of the command line to be executed */ + /// Returns a human-readable summary of the command line to be executed pub fn cmdline(&self) -> impl fmt::Display { self.args.join(" ") } - /** - * Returns the path to the temporary directory created for this execution - */ + /// Returns the path to the temporary directory created for this execution pub fn temp_dir(&self) -> &Path { self.temp_dir.path() } @@ -291,13 +265,11 @@ impl CockroachStarter { self.store_dir.as_path() } - /** - * Spawns a new process to run the configured command - * - * This function waits up to a fixed timeout for CockroachDB to report its - * listening URL. This function fails if the child process exits before - * that happens or if the timeout expires. - */ + /// Spawns a new process to run the configured command + /// + /// This function waits up to a fixed timeout for CockroachDB to report its + /// listening URL. This function fails if the child process exits before + /// that happens or if the timeout expires. pub async fn start( mut self, ) -> Result { @@ -308,25 +280,21 @@ impl CockroachStarter { })?; let pid = child_process.id().unwrap(); - /* - * Wait for CockroachDB to write out its URL information. There's not a - * great way for us to know when this has happened, unfortunately. So - * we just poll for it up to some maximum timeout. - */ + // Wait for CockroachDB to write out its URL information. There's not a + // great way for us to know when this has happened, unfortunately. So + // we just poll for it up to some maximum timeout. let wait_result = poll::wait_for_condition( || { - /* - * If CockroachDB is not running at any point in this process, - * stop waiting for the file to become available. 
- * TODO-cleanup This nastiness is because we cannot allow the - * mutable reference to "child_process" to be part of the async - * block. However, we need the return value to be part of the - * async block. So we do the process_exited() bit outside the - * async block. We need to move "exited" into the async block, - * which means anything we reference gets moved into that block, - * which means we need a clone of listen_url_file to avoid - * referencing "self". - */ + // If CockroachDB is not running at any point in this process, + // stop waiting for the file to become available. + // TODO-cleanup This nastiness is because we cannot allow the + // mutable reference to "child_process" to be part of the async + // block. However, we need the return value to be part of the + // async block. So we do the process_exited() bit outside the + // async block. We need to move "exited" into the async block, + // which means anything we reference gets moved into that block, + // which means we need a clone of listen_url_file to avoid + // referencing "self". let exited = process_exited(&mut child_process); let listen_url_file = self.listen_url_file.clone(); async move { @@ -336,16 +304,14 @@ impl CockroachStarter { )); } - /* - * When ready, CockroachDB will write the URL on which it's - * listening to the specified file. Try to read this file. - * Note that its write is not necessarily atomic, so we wait - * for a newline before assuming that it's complete. - * TODO-robustness It would be nice if there were a version - * of tokio::fs::read_to_string() that accepted a maximum - * byte count so that this couldn't, say, use up all of - * memory. - */ + // When ready, CockroachDB will write the URL on which it's + // listening to the specified file. Try to read this file. + // Note that its write is not necessarily atomic, so we wait + // for a newline before assuming that it's complete. 
+ // TODO-robustness It would be nice if there were a version + // of tokio::fs::read_to_string() that accepted a maximum + // byte count so that this couldn't, say, use up all of + // memory. match tokio::fs::read_to_string(&listen_url_file).await { Ok(listen_url) if listen_url.contains('\n') => { let listen_url = listen_url.trim_end(); @@ -377,12 +343,10 @@ impl CockroachStarter { child_process: Some(child_process), }), Err(poll_error) => { - /* - * Abort and tell the user. We'll leave CockroachDB running so - * the user can debug if they want. We'll skip cleanup of the - * temporary directory for the same reason and also so that - * CockroachDB doesn't trip over its files being gone. - */ + // Abort and tell the user. We'll leave CockroachDB running so + // the user can debug if they want. We'll skip cleanup of the + // temporary directory for the same reason and also so that + // CockroachDB doesn't trip over its files being gone. self.temp_dir.into_path(); Err(match poll_error { @@ -426,12 +390,10 @@ pub enum CockroachStartError { TimedOut { pid: u32, time_waited: Duration }, } -/** - * Manages a CockroachDB process running as a single-node cluster - * - * You are **required** to invoke [`CockroachInstance::wait_for_shutdown()`] or - * [`CockroachInstance::cleanup()`] before this object is dropped. - */ +/// Manages a CockroachDB process running as a single-node cluster +/// +/// You are **required** to invoke [`CockroachInstance::wait_for_shutdown()`] or +/// [`CockroachInstance::cleanup()`] before this object is dropped. 
#[derive(Debug)] pub struct CockroachInstance { /// child process id @@ -447,65 +409,57 @@ pub struct CockroachInstance { } impl CockroachInstance { - /** Returns the pid of the child process running CockroachDB */ + /// Returns the pid of the child process running CockroachDB pub fn pid(&self) -> u32 { self.pid } - /** - * Returns a printable form of the PostgreSQL config provided by - * CockroachDB - * - * This is intended only for printing out. To actually connect to - * PostgreSQL, use [`CockroachInstance::pg_config()`]. (Ideally, that - * object would impl a to_url() or the like, but it does not appear to.) - */ + /// Returns a printable form of the PostgreSQL config provided by + /// CockroachDB + /// + /// This is intended only for printing out. To actually connect to + /// PostgreSQL, use [`CockroachInstance::pg_config()`]. (Ideally, that + /// object would impl a to_url() or the like, but it does not appear to.) pub fn listen_url(&self) -> impl fmt::Display + '_ { &self.pg_config } - /** - * Returns PostgreSQL client configuration suitable for connecting to the - * CockroachDB database - */ + /// Returns PostgreSQL client configuration suitable for connecting to the + /// CockroachDB database pub fn pg_config(&self) -> &PostgresConfigWithUrl { &self.pg_config } - /** - * Returns the path to the temporary directory created for this execution - */ + /// Returns the path to the temporary directory created for this execution pub fn temp_dir(&self) -> &Path { &self.temp_dir_path } - /** Returns a connection to the underlying database */ + /// Returns a connection to the underlying database pub async fn connect(&self) -> Result { Client::connect(self.pg_config(), tokio_postgres::NoTls).await } - /** Wrapper around [`wipe()`] using a connection to this database. */ + /// Wrapper around [`wipe()`] using a connection to this database. 
pub async fn wipe(&self) -> Result<(), anyhow::Error> { let client = self.connect().await.context("connect")?; wipe(&client).await.context("wipe")?; client.cleanup().await.context("cleaning up after wipe") } - /** Wrapper around [`populate()`] using a connection to this database. */ + /// Wrapper around [`populate()`] using a connection to this database. pub async fn populate(&self) -> Result<(), anyhow::Error> { let client = self.connect().await.context("connect")?; populate(&client).await.context("populate")?; client.cleanup().await.context("cleaning up after wipe") } - /** - * Waits for the child process to exit - * - * Note that CockroachDB will normally run forever unless the caller - * arranges for it to be shutdown. - */ + /// Waits for the child process to exit + /// + /// Note that CockroachDB will normally run forever unless the caller + /// arranges for it to be shutdown. pub async fn wait_for_shutdown(&mut self) -> Result<(), anyhow::Error> { - /* We do not care about the exit status of this process. */ + // We do not care about the exit status of this process. #[allow(unused_must_use)] { self.child_process @@ -518,19 +472,15 @@ impl CockroachInstance { self.cleanup().await } - /** - * Cleans up the child process and temporary directory - * - * If the child process is still running, it will be killed with SIGKILL and - * this function will wait for it to exit. Then the temporary directory - * will be cleaned up. - */ + /// Cleans up the child process and temporary directory + /// + /// If the child process is still running, it will be killed with SIGKILL and + /// this function will wait for it to exit. Then the temporary directory + /// will be cleaned up. pub async fn cleanup(&mut self) -> Result<(), anyhow::Error> { - /* - * SIGTERM the process and wait for it to exit so that we can remove the - * temporary directory that we may have used to store its data. We - * don't care what the result of the process was. 
- */ + // SIGTERM the process and wait for it to exit so that we can remove the + // temporary directory that we may have used to store its data. We + // don't care what the result of the process was. if let Some(child_process) = self.child_process.as_mut() { let pid = child_process.id().expect("Missing child PID") as i32; let success = @@ -552,15 +502,13 @@ impl CockroachInstance { impl Drop for CockroachInstance { fn drop(&mut self) { - /* - * TODO-cleanup Ideally at this point we would run self.cleanup() to - * kill the child process, wait for it to exit, and then clean up the - * temporary directory. However, we don't have an executor here with - * which to run async/await code. We could create one here, but it's - * not clear how safe or sketchy that would be. Instead, we expect that - * the caller has done the cleanup already. This won't always happen, - * particularly for ungraceful failures. - */ + // TODO-cleanup Ideally at this point we would run self.cleanup() to + // kill the child process, wait for it to exit, and then clean up the + // temporary directory. However, we don't have an executor here with + // which to run async/await code. We could create one here, but it's + // not clear how safe or sketchy that would be. Instead, we expect that + // the caller has done the cleanup already. This won't always happen, + // particularly for ungraceful failures. if self.child_process.is_some() || self.temp_dir.is_some() { eprintln!( "WARN: dropped CockroachInstance without cleaning it up first \ @@ -568,16 +516,14 @@ impl Drop for CockroachInstance { temporary directory leaked)" ); - /* Still, make a best effort. */ + // Still, make a best effort. #[allow(unused_must_use)] if let Some(child_process) = self.child_process.as_mut() { child_process.start_kill(); } #[allow(unused_must_use)] if let Some(temp_dir) = self.temp_dir.take() { - /* - * Do NOT clean up the temporary directory in this case. - */ + // Do NOT clean up the temporary directory in this case. 
let path = temp_dir.into_path(); eprintln!( "WARN: temporary directory leaked: {}", @@ -623,50 +569,42 @@ pub async fn check_db_version() -> Result<(), CockroachStartError> { Ok(()) } -/** - * Wrapper around tokio::process::Child::try_wait() so that we can unwrap() the - * result in one place with this explanatory comment. - * - * The semantics of that function aren't as clear as we'd like. The docs say: - * - * > If the child has exited, then `Ok(Some(status))` is returned. If the - * > exit status is not available at this time then `Ok(None)` is returned. - * > If an error occurs, then that error is returned. - * - * It seems we can infer that "the exit status is not available at this time" - * means that the process has not exited. After all, if it _had_ exited, we'd - * fall into the first case. It's not clear under what conditions this function - * could ever fail. It's not clear from the source that it's even possible. - */ +/// Wrapper around tokio::process::Child::try_wait() so that we can unwrap() the +/// result in one place with this explanatory comment. +/// +/// The semantics of that function aren't as clear as we'd like. The docs say: +/// +/// > If the child has exited, then `Ok(Some(status))` is returned. If the +/// > exit status is not available at this time then `Ok(None)` is returned. +/// > If an error occurs, then that error is returned. +/// +/// It seems we can infer that "the exit status is not available at this time" +/// means that the process has not exited. After all, if it _had_ exited, we'd +/// fall into the first case. It's not clear under what conditions this function +/// could ever fail. It's not clear from the source that it's even possible. fn process_exited(child_process: &mut tokio::process::Child) -> bool { child_process.try_wait().unwrap().is_some() } -/** - * Populate a database with the Omicron schema and any initial objects - * - * This is not idempotent. It will fail if the database or other objects - * already exist. 
- */ +/// Populate a database with the Omicron schema and any initial objects +/// +/// This is not idempotent. It will fail if the database or other objects +/// already exist. pub async fn populate( client: &tokio_postgres::Client, ) -> Result<(), anyhow::Error> { let sql = include_str!("../../../common/src/sql/dbinit.sql"); client.batch_execute(sql).await.context("populating Omicron database") - /* - * It's tempting to put hardcoded data in here (like builtin users). That - * probably belongs in Nexus initialization instead. Populating data here - * would work for initial setup, but not for rolling out new data (part of a - * new version of Nexus) to an existing deployment. - */ + // It's tempting to put hardcoded data in here (like builtin users). That + // probably belongs in Nexus initialization instead. Populating data here + // would work for initial setup, but not for rolling out new data (part of a + // new version of Nexus) to an existing deployment. } -/** - * Wipe an Omicron database from the remote database - * - * This is dangerous! Use carefully. - */ +/// Wipe an Omicron database from the remote database +/// +/// This is dangerous! Use carefully. pub async fn wipe( client: &tokio_postgres::Client, ) -> Result<(), anyhow::Error> { @@ -674,56 +612,50 @@ pub async fn wipe( client.batch_execute(sql).await.context("wiping Omicron database") } -/** - * Given a listen URL reported by CockroachDB, returns a parsed - * [`PostgresConfigWithUrl`] suitable for connecting to a database backed by a - * [`CockroachInstance`]. - */ +/// Given a listen URL reported by CockroachDB, returns a parsed +/// [`PostgresConfigWithUrl`] suitable for connecting to a database backed by a +/// [`CockroachInstance`]. fn make_pg_config( listen_url: &str, ) -> Result { - /* - * TODO-design This is really irritating. - * - * CockroachDB reports a listen URL that does not specify a database to - * connect to. (This makes sense.) 
But we want to expose a client URL that - * does specify a database (since `CockroachInstance` essentially hardcodes - * a specific database name (via dbinit.sql and has_omicron_schema())) and - * user. - * - * We can parse the listen URL here into a tokio_postgres::Config, then use - * methods on that struct to modify it as needed. But if we do that, we'd - * have no way to serialize it back into a URL. Recall that - * tokio_postgres::Config does not provide any way to serialize a config as - * a URL string, which is why PostgresConfigWithUrl exists. But the only - * way to construct a PostgresConfigWithUrl is by parsing a URL string, - * since that's the only way to be sure that the URL string matches the - * parsed config. - * - * Another option is to muck with the URL string directly to insert the user - * and database name. That's brittle and error prone. - * - * So we break down and do what we were trying to avoid when we built - * PostgresConfigWithUrl: we'll construct a URL by hand from the parsed - * representation. Then we'll parse that again. This is just to maintain - * the invariant that the parsed representation is known to match the saved - * URL string. - * - * TODO-correctness this might be better using the "url" package, but it's - * also not clear that PostgreSQL URLs conform to those URLs. - */ + // TODO-design This is really irritating. + // + // CockroachDB reports a listen URL that does not specify a database to + // connect to. (This makes sense.) But we want to expose a client URL that + // does specify a database (since `CockroachInstance` essentially hardcodes + // a specific database name (via dbinit.sql and has_omicron_schema())) and + // user. + // + // We can parse the listen URL here into a tokio_postgres::Config, then use + // methods on that struct to modify it as needed. But if we do that, we'd + // have no way to serialize it back into a URL. 
Recall that + // tokio_postgres::Config does not provide any way to serialize a config as + // a URL string, which is why PostgresConfigWithUrl exists. But the only + // way to construct a PostgresConfigWithUrl is by parsing a URL string, + // since that's the only way to be sure that the URL string matches the + // parsed config. + // + // Another option is to muck with the URL string directly to insert the user + // and database name. That's brittle and error prone. + // + // So we break down and do what we were trying to avoid when we built + // PostgresConfigWithUrl: we'll construct a URL by hand from the parsed + // representation. Then we'll parse that again. This is just to maintain + // the invariant that the parsed representation is known to match the saved + // URL string. + // + // TODO-correctness this might be better using the "url" package, but it's + // also not clear that PostgreSQL URLs conform to those URLs. let pg_config = listen_url.parse::().with_context(|| { format!("parse PostgreSQL config: {:?}", listen_url) })?; - /* - * Our URL construction makes a bunch of assumptions about the PostgreSQL - * config that we were given. Assert these here. (We do not expect any of - * this to change from CockroachDB itself, and if so, this whole thing is - * used by development tools and the test suite, so this failure mode seems - * okay for now.) - */ + // Our URL construction makes a bunch of assumptions about the PostgreSQL + // config that we were given. Assert these here. (We do not expect any of + // this to change from CockroachDB itself, and if so, this whole thing is + // used by development tools and the test suite, so this failure mode seems + // okay for now.) 
let check_unsupported = vec![ pg_config.get_application_name().map(|_| "application_name"), pg_config.get_connect_timeout().map(|_| "connect_timeout"), @@ -743,16 +675,14 @@ fn make_pg_config( ); } - /* - * As a side note: it's rather absurd that the default configuration enables - * keepalives with a two-hour timeout. In most networking stacks, - * keepalives are disabled by default. If you enable them and don't specify - * the idle time, you get a default two-hour idle time. That's a relic of - * simpler times that makes no sense in most systems today. It's fine to - * leave keepalives off unless configured by the consumer, but if one is - * going to enable them, one ought to at least provide a more useful default - * idle time. - */ + // As a side note: it's rather absurd that the default configuration enables + // keepalives with a two-hour timeout. In most networking stacks, + // keepalives are disabled by default. If you enable them and don't specify + // the idle time, you get a default two-hour idle time. That's a relic of + // simpler times that makes no sense in most systems today. It's fine to + // leave keepalives off unless configured by the consumer, but if one is + // going to enable them, one ought to at least provide a more useful default + // idle time. if !pg_config.get_keepalives() { bail!( "unsupported PostgreSQL listen URL (keepalives disabled): {:?}", @@ -797,13 +727,11 @@ fn make_pg_config( } } -/** - * Returns true if the database that this client is connected to contains - * the Omicron schema - * - * Panics if the attempt to run a query fails for any reason other than the - * schema not existing. (This is intended to be run from the test suite.) - */ +/// Returns true if the database that this client is connected to contains +/// the Omicron schema +/// +/// Panics if the attempt to run a query fails for any reason other than the +/// schema not existing. (This is intended to be run from the test suite.) 
pub async fn has_omicron_schema(client: &tokio_postgres::Client) -> bool { match client.batch_execute("SELECT id FROM Project").await { Ok(_) => true, @@ -819,28 +747,26 @@ pub async fn has_omicron_schema(client: &tokio_postgres::Client) -> bool { } } -/** - * Wraps a PostgreSQL connection and client as provided by - * `tokio_postgres::Config::connect()` - * - * Typically, callers of [`tokio_postgres::Config::connect()`] get back both a - * Client and a Connection. You must spawn a separate task to `await` on the - * connection in order for any database operations to happen. When the Client - * is dropped, the Connection is gracefully terminated, its Future completes, - * and the task should be cleaned up. This is awkward to use, particularly if - * you care to be sure that the task finished. - * - * This structure combines the Connection and Client. You can create one from a - * [`tokio_postgres::Config`] or from an existing ([`tokio_postgres::Client`], - * [`tokio_postgres::Connection`]) pair. You can use it just like a - * `tokio_postgres::Client`. When finished, you can call `cleanup()` to drop - * the Client and wait for the Connection's task. - * - * If you do not call `cleanup()`, then the underlying `tokio_postgres::Client` - * will be dropped when this object is dropped. If there has been no connection - * error, then the connection will be closed gracefully, but nothing will check - * for any error from the connection. - */ +/// Wraps a PostgreSQL connection and client as provided by +/// `tokio_postgres::Config::connect()` +/// +/// Typically, callers of [`tokio_postgres::Config::connect()`] get back both a +/// Client and a Connection. You must spawn a separate task to `await` on the +/// connection in order for any database operations to happen. When the Client +/// is dropped, the Connection is gracefully terminated, its Future completes, +/// and the task should be cleaned up. 
This is awkward to use, particularly if +/// you care to be sure that the task finished. +/// +/// This structure combines the Connection and Client. You can create one from a +/// [`tokio_postgres::Config`] or from an existing ([`tokio_postgres::Client`], +/// [`tokio_postgres::Connection`]) pair. You can use it just like a +/// `tokio_postgres::Client`. When finished, you can call `cleanup()` to drop +/// the Client and wait for the Connection's task. +/// +/// If you do not call `cleanup()`, then the underlying `tokio_postgres::Client` +/// will be dropped when this object is dropped. If there has been no connection +/// error, then the connection will be closed gracefully, but nothing will check +/// for any error from the connection. pub struct Client { client: tokio_postgres::Client, conn_task: tokio::task::JoinHandle>, @@ -867,9 +793,7 @@ impl Deref for Client { } impl Client { - /** - * Invokes `config.connect(tls)` and wraps the result in a `Client`. - */ + /// Invokes `config.connect(tls)` and wraps the result in a `Client`. pub async fn connect( config: &tokio_postgres::config::Config, tls: T, @@ -881,19 +805,15 @@ impl Client { Ok(Client::from(config.connect(tls).await?)) } - /** - * Closes the connection, waits for it to be cleaned up gracefully, and - * returns any error status. - */ + /// Closes the connection, waits for it to be cleaned up gracefully, and + /// returns any error status. pub async fn cleanup(self) -> Result<(), tokio_postgres::Error> { drop(self.client); self.conn_task.await.expect("failed to join on connection task") } } -/* - * These are more integration tests than unit tests. - */ +// These are more integration tests than unit tests. #[cfg(test)] mod test { use super::has_omicron_schema; @@ -916,12 +836,10 @@ mod test { builder } - /* - * Tests that we clean up the temporary directory correctly when the starter - * goes out of scope, even if we never started the instance. 
This is - * important to avoid leaking the directory if there's an error starting the - * instance, for example. - */ + // Tests that we clean up the temporary directory correctly when the starter + // goes out of scope, even if we never started the instance. This is + // important to avoid leaking the directory if there's an error starting the + // instance, for example. #[tokio::test] async fn test_starter_tmpdir() { let builder = new_builder(); @@ -942,20 +860,16 @@ mod test { ); } - /* - * Tests what happens if the "cockroach" command cannot be found. - */ + // Tests what happens if the "cockroach" command cannot be found. #[tokio::test] async fn test_bad_cmd() { let builder = CockroachStarterBuilder::new_with_cmd("/nonexistent"); let _ = test_database_start_failure(builder).await; } - /* - * Tests what happens if the "cockroach" command exits before writing the - * listening-url file. This looks the same to the caller (us), but - * internally requires different code paths. - */ + // Tests what happens if the "cockroach" command exits before writing the + // listening-url file. This looks the same to the caller (us), but + // internally requires different code paths. #[tokio::test] async fn test_cmd_fails() { let mut builder = new_builder(); @@ -981,12 +895,10 @@ mod test { .expect("failed to remove cockroachdb temp directory"); } - /* - * Helper function for testing cases where the database fails to start. - * Returns the temporary directory used by the failed attempt so that the - * caller can decide whether to check if it was cleaned up or not. The - * expected behavior depends on the failure mode. - */ + // Helper function for testing cases where the database fails to start. + // Returns the temporary directory used by the failed attempt so that the + // caller can decide whether to check if it was cleaned up or not. The + // expected behavior depends on the failure mode. 
async fn test_database_start_failure( builder: CockroachStarterBuilder, ) -> PathBuf { @@ -999,13 +911,11 @@ mod test { temp_dir } - /* - * Tests when CockroachDB hangs on startup by setting the start timeout - * absurdly short. This unfortunately doesn't cover all cases. By choosing - * a zero timeout, we're not letting the database get very far in its - * startup. But we at least ensure that the test suite does not hang or - * timeout at some very long value. - */ + // Tests when CockroachDB hangs on startup by setting the start timeout + // absurdly short. This unfortunately doesn't cover all cases. By choosing + // a zero timeout, we're not letting the database get very far in its + // startup. But we at least ensure that the test suite does not hang or + // timeout at some very long value. #[tokio::test] async fn test_database_start_hang() { let mut builder = new_builder(); @@ -1018,7 +928,7 @@ mod test { eprintln!("(expected) error starting database: {:?}", error); let pid = match error { CockroachStartError::TimedOut { pid, time_waited } => { - /* We ought to fire a 0-second timeout within 5 seconds. */ + // We ought to fire a 0-second timeout within 5 seconds. assert!(time_waited < Duration::from_secs(5)); pid } @@ -1027,21 +937,19 @@ mod test { other_error ), }; - /* The child process should still be running. */ + // The child process should still be running. assert!(process_running(pid)); - /* The temporary directory should still exist. */ + // The temporary directory should still exist. assert!(fs::metadata(&directory) .await .expect("temporary directory is missing") .is_dir()); - /* Kill the child process (to clean up after ourselves). */ + // Kill the child process (to clean up after ourselves). assert_eq!(0, unsafe { libc::kill(pid as libc::pid_t, libc::SIGKILL) }); - /* - * Wait for the process to exit so that we can reliably clean up the - * temporary directory. We don't have a great way to avoid polling - * here. 
- */ + // Wait for the process to exit so that we can reliably clean up the + // temporary directory. We don't have a great way to avoid polling + // here. poll::wait_for_condition::<(), std::convert::Infallible, _, _>( || async { if process_running(pid) { @@ -1064,15 +972,13 @@ mod test { }); assert!(!process_running(pid)); - /* - * The temporary directory is normally cleaned up automatically. In - * this case, it's deliberately left around. We need to clean it up - * here. Now, the directory is created with tempfile::TempDir, which - * puts it under std::env::temp_dir(). We assert that here as an - * ultra-conservative safety. We don't want to accidentally try to blow - * away some large directory tree if somebody modifies the code to use - * some other directory for the temporary directory. - */ + // The temporary directory is normally cleaned up automatically. In + // this case, it's deliberately left around. We need to clean it up + // here. Now, the directory is created with tempfile::TempDir, which + // puts it under std::env::temp_dir(). We assert that here as an + // ultra-conservative safety. We don't want to accidentally try to blow + // away some large directory tree if somebody modifies the code to use + // some other directory for the temporary directory. if !directory.starts_with(env::temp_dir()) { panic!( "refusing to remove temporary directory not under @@ -1090,29 +996,21 @@ mod test { }); } - /* - * Test the happy path using the default store directory. - */ + // Test the happy path using the default store directory. #[tokio::test] async fn test_setup_database_default_dir() { let starter = new_builder().build().unwrap(); - /* - * In this configuration, the database directory should exist within the - * starter's temporary directory. - */ + // In this configuration, the database directory should exist within the + // starter's temporary directory. 
let data_dir = starter.temp_dir().join("data"); - /* - * This common function will verify that the entire temporary directory - * is cleaned up. We do not need to check that again here. - */ + // This common function will verify that the entire temporary directory + // is cleaned up. We do not need to check that again here. test_setup_database(starter, &data_dir, true).await; } - /* - * Test the happy path using an overridden store directory. - */ + // Test the happy path using an overridden store directory. #[tokio::test] async fn test_setup_database_overridden_dir() { let extra_temp_dir = @@ -1120,22 +1018,18 @@ mod test { let data_dir = extra_temp_dir.path().join("custom_data"); let starter = new_builder().store_dir(&data_dir).build().unwrap(); - /* - * This common function will verify that the entire temporary directory - * is cleaned up. We do not need to check that again here. - */ + // This common function will verify that the entire temporary directory + // is cleaned up. We do not need to check that again here. test_setup_database(starter, &data_dir, false).await; - /* - * At this point, our extra temporary directory should still exist. - * This is important -- the library should not clean up a data directory - * that was specified by the user. - */ + // At this point, our extra temporary directory should still exist. + // This is important -- the library should not clean up a data directory + // that was specified by the user. assert!(fs::metadata(&data_dir) .await .expect("CockroachDB data directory is missing") .is_dir()); - /* Clean it up. */ + // Clean it up. let extra_temp_dir_path = extra_temp_dir.path().to_owned(); extra_temp_dir .close() @@ -1150,16 +1044,14 @@ mod test { ); } - /* - * Test the happy path: start the database, run a query against the URL we - * found, and then shut it down cleanly. - */ + // Test the happy path: start the database, run a query against the URL we + // found, and then shut it down cleanly. 
async fn test_setup_database>( starter: CockroachStarter, data_dir: P, test_populate: bool, ) { - /* Start the database. */ + // Start the database. eprintln!("will run: {}", starter.cmdline()); let mut database = starter.start().await.expect("failed to start database"); @@ -1171,17 +1063,15 @@ mod test { database.listen_url() ); - /* - * The database process should be running and the database's store - * directory should exist. - */ + // The database process should be running and the database's store + // directory should exist. assert!(process_running(pid)); assert!(fs::metadata(data_dir.as_ref()) .await .expect("CockroachDB data directory is missing") .is_dir()); - /* Try to connect to it and run a query. */ + // Try to connect to it and run a query. eprintln!("connecting to database"); let client = database .connect() @@ -1195,27 +1085,23 @@ mod test { assert_eq!(row.len(), 1); assert_eq!(row.get::<'_, _, i64>(0), 12345); - /* - * Run some tests using populate() and wipe(). - */ + // Run some tests using populate() and wipe(). if test_populate { assert!(!has_omicron_schema(&client).await); eprintln!("populating database (1)"); database.populate().await.expect("populating database (1)"); assert!(has_omicron_schema(&client).await); - /* - * populate() fails if the database is already populated. We don't - * want to accidentally destroy data by wiping it first - * automatically. - */ + // populate() fails if the database is already populated. We don't + // want to accidentally destroy data by wiping it first + // automatically. database.populate().await.expect_err("populated database twice"); eprintln!("wiping database (1)"); database.wipe().await.expect("wiping database (1)"); assert!(!has_omicron_schema(&client).await); - /* On the other hand, wipe() is idempotent. */ + // On the other hand, wipe() is idempotent. database.wipe().await.expect("wiping database (2)"); assert!(!has_omicron_schema(&client).await); - /* populate() should work again after a wipe(). 
*/ + // populate() should work again after a wipe(). eprintln!("populating database (2)"); database.populate().await.expect("populating database (2)"); assert!(has_omicron_schema(&client).await); @@ -1224,13 +1110,11 @@ mod test { client.cleanup().await.expect("connection unexpectedly failed"); database.cleanup().await.expect("failed to clean up database"); - /* Check that the database process is no longer running. */ + // Check that the database process is no longer running. assert!(!process_running(pid)); - /* - * Check that the temporary directory used by the starter has been - * cleaned up. - */ + // Check that the temporary directory used by the starter has been + // cleaned up. assert_eq!( libc::ENOENT, fs::metadata(&temp_dir) @@ -1243,10 +1127,8 @@ mod test { eprintln!("cleaned up database and temporary directory"); } - /* - * Test that you can run the database twice concurrently (and have different - * databases!). - */ + // Test that you can run the database twice concurrently (and have different + // databases!). 
#[tokio::test] async fn test_database_concurrent() { let mut db1 = new_builder() @@ -1289,7 +1171,7 @@ mod test { db2.cleanup().await.expect("failed to clean up second database"); } - /* Success case for make_pg_config() */ + // Success case for make_pg_config() #[test] fn test_make_pg_config_ok() { let url = "postgresql://root@127.0.0.1:45913?sslmode=disable"; @@ -1303,12 +1185,12 @@ mod test { #[test] fn test_make_pg_config_fail() { - /* failure to parse initial listen URL */ + // failure to parse initial listen URL let error = make_pg_config("").unwrap_err().to_string(); eprintln!("found error: {}", error); assert!(error.contains("unsupported PostgreSQL listen URL")); - /* unexpected contents in initial listen URL */ + // unexpected contents in initial listen URL let error = make_pg_config( "postgresql://root@127.0.0.1:45913/foobar?sslmode=disable", ) diff --git a/test-utils/src/dev/mod.rs b/test-utils/src/dev/mod.rs index 3a08caad44..182c1abe1b 100644 --- a/test-utils/src/dev/mod.rs +++ b/test-utils/src/dev/mod.rs @@ -2,10 +2,8 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! - * Facilities intended for development tools and the test suite. These should - * not be used in production code. - */ +//! Facilities intended for development tools and the test suite. These should +//! not be used in production code. pub mod clickhouse; pub mod db; @@ -58,13 +56,11 @@ fn copy_dir( Ok(()) } -/** - * Set up a [`dropshot::test_util::LogContext`] appropriate for a test named - * `test_name` - * - * This function is currently only used by unit tests. (We want the dead code - * warning if it's removed from unit tests, but not during a normal build.) - */ +/// Set up a [`dropshot::test_util::LogContext`] appropriate for a test named +/// `test_name` +/// +/// This function is currently only used by unit tests. 
(We want the dead code +/// warning if it's removed from unit tests, but not during a normal build.) pub fn test_setup_log(test_name: &str) -> LogContext { let log_config = ConfigLogging::File { level: ConfigLoggingLevel::Debug, @@ -157,13 +153,9 @@ async fn setup_database( database } -/** - * Returns whether the given process is currently running - */ +/// Returns whether the given process is currently running pub fn process_running(pid: u32) -> bool { - /* - * It should be okay to invoke this syscall with these arguments. This - * only checks whether the process is running. - */ + // It should be okay to invoke this syscall with these arguments. This + // only checks whether the process is running. 0 == (unsafe { libc::kill(pid as libc::pid_t, 0) }) } diff --git a/test-utils/src/dev/poll.rs b/test-utils/src/dev/poll.rs index bdfde514ac..a77ad4d1dc 100644 --- a/test-utils/src/dev/poll.rs +++ b/test-utils/src/dev/poll.rs @@ -2,71 +2,65 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! - * Quick-and-dirty polling within a test suite - */ +//! 
Quick-and-dirty polling within a test suite use std::future::Future; use std::time::Duration; use std::time::Instant; use thiserror::Error; -/** - * Result of one attempt to check a condition (see [`wait_for_condition()`]) - */ +/// Result of one attempt to check a condition (see [`wait_for_condition()`]) #[derive(Debug, Error)] pub enum CondCheckError { - /** the condition we're waiting for is not true */ + /// the condition we're waiting for is not true #[error("poll condition not yet ready")] NotYet, #[error("non-retryable error while polling on condition")] Failed(#[from] E), } -/** Result of [`wait_for_condition()`] */ +/// Result of [`wait_for_condition()`] #[derive(Debug, Error)] pub enum Error { - /** operation timed out before succeeding or failing permanently */ + /// operation timed out before succeeding or failing permanently #[error("timed out after {0:?}")] TimedOut(Duration), #[error("non-retryable error while polling on condition")] PermanentError(E), } -/** - * Poll the given closure until it succeeds, returns a permanent error, or - * a given time has expired - * - * This is intended in the test suite and developer tools for situations where - * you've taken some action and want to wait for its effects to be observable - * _and_ you have no way to directly wait for the observable event. **This - * approach is generally not applicable for production code. See crate::backoff - * for that.** This is similar to the exponential backoff facility provided by - * the "backoff" crate, but this is a non-randomized, constant-interval retry, - * so it's not really a "backoff". - * - * Note that `poll_max` is not a bound on how long this function can take. - * Rather, it's the time beyond which this function will stop trying to check - * `cond`. If `cond` takes an arbitrary amount of time, this function will too. 
- * - * This function is intended for those situations where it's tempting to sleep - * for "long enough" (often people pick 1, 5, or 10 seconds) and then either - * assume the thing happened or check the condition at that point. We must - * remember Clulow's lament: - * - * Timeouts, timeouts: always wrong! - * Some too short and some too long. - * - * In fact, in trying to balance shorter test execution time against spurious - * timeouts, people often choose a timeout value that is both too long _and_ too - * short, resulting in both long test runs and spurious failures. A better - * pattern is provided here: check the condition relatively frequently with a - * much longer maximum timeout -- long enough that timeout expiration - * essentially reflects incorrect behavior. - * - * But again: this mechanism is a last resort when no mechanism exists to - * wait directly for the condition. - */ +/// Poll the given closure until it succeeds, returns a permanent error, or +/// a given time has expired +/// +/// This is intended in the test suite and developer tools for situations where +/// you've taken some action and want to wait for its effects to be observable +/// _and_ you have no way to directly wait for the observable event. **This +/// approach is generally not applicable for production code. See crate::backoff +/// for that.** This is similar to the exponential backoff facility provided by +/// the "backoff" crate, but this is a non-randomized, constant-interval retry, +/// so it's not really a "backoff". +/// +/// Note that `poll_max` is not a bound on how long this function can take. +/// Rather, it's the time beyond which this function will stop trying to check +/// `cond`. If `cond` takes an arbitrary amount of time, this function will too. 
+/// +/// This function is intended for those situations where it's tempting to sleep +/// for "long enough" (often people pick 1, 5, or 10 seconds) and then either +/// assume the thing happened or check the condition at that point. We must +/// remember Clulow's lament: +/// +/// Timeouts, timeouts: always wrong! +/// Some too short and some too long. +/// +/// In fact, in trying to balance shorter test execution time against spurious +/// timeouts, people often choose a timeout value that is both too long _and_ too +/// short, resulting in both long test runs and spurious failures. A better +/// pattern is provided here: check the condition relatively frequently with a +/// much longer maximum timeout -- long enough that timeout expiration +/// essentially reflects incorrect behavior. +/// +/// But again: this mechanism is a last resort when no mechanism exists to +/// wait directly for the condition. pub async fn wait_for_condition( mut cond: Func, poll_interval: &Duration, diff --git a/test-utils/src/dev/test_cmds.rs b/test-utils/src/dev/test_cmds.rs index 3421d9dffc..e5d78c4841 100644 --- a/test-utils/src/dev/test_cmds.rs +++ b/test-utils/src/dev/test_cmds.rs @@ -2,9 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! - * Functions used for automated testing of command-line programs - */ +//! Functions used for automated testing of command-line programs use std::env::temp_dir; use std::fs; @@ -19,27 +17,21 @@ use subprocess::ExitStatus; use subprocess::NullFile; use subprocess::Redirection; -/* - * Standard exit codes - */ +// Standard exit codes pub const EXIT_SUCCESS: u32 = libc::EXIT_SUCCESS as u32; pub const EXIT_FAILURE: u32 = libc::EXIT_FAILURE as u32; pub const EXIT_USAGE: u32 = 2; -/** - * maximum time to wait for any command - * - * This is important because a bug might actually cause this test to start one - * of the servers and run it indefinitely. 
- */ +/// maximum time to wait for any command +/// +/// This is important because a bug might actually cause this test to start one +/// of the servers and run it indefinitely. const TIMEOUT: Duration = Duration::from_millis(60000); pub fn path_to_executable(cmd_name: &str) -> PathBuf { let mut rv = PathBuf::from(cmd_name); - /* - * Drop the ".exe" extension on Windows. Otherwise, this appears in stderr - * output, which then differs across platforms. - */ + // Drop the ".exe" extension on Windows. Otherwise, this appears in stderr + // output, which then differs across platforms. rv.set_extension(""); rv } @@ -56,13 +48,11 @@ pub fn assert_exit_code(exit_status: ExitStatus, code: u32) { } } -/** - * Run the given command to completion or up to a hardcoded timeout, whichever - * is shorter. The caller provides a `subprocess::Exec` object that's already - * had its program, arguments, environment, etc. configured, but hasn't been - * started. Stdin will be empty, and both stdout and stderr will be buffered to - * disk and returned as strings. - */ +/// Run the given command to completion or up to a hardcoded timeout, whichever +/// is shorter. The caller provides a `subprocess::Exec` object that's already +/// had its program, arguments, environment, etc. configured, but hasn't been +/// started. Stdin will be empty, and both stdout and stderr will be buffered to +/// disk and returned as strings. pub fn run_command(exec: Exec) -> (ExitStatus, String, String) { let cmdline = exec.to_cmdline_lossy(); let timeout = TIMEOUT; @@ -99,9 +89,7 @@ pub fn run_command(exec: Exec) -> (ExitStatus, String, String) { (exit_status, stdout_text, stderr_text) } -/** - * Create a new temporary file. - */ +/// Create a new temporary file. 
fn temp_file_create(label: &str) -> (PathBuf, fs::File) { let file_path = temp_file_path(label); let file = fs::OpenOptions::new() @@ -114,9 +102,7 @@ fn temp_file_create(label: &str) -> (PathBuf, fs::File) { static FILE_COUNTER: AtomicU32 = AtomicU32::new(0); -/** - * Create a new temporary file name. - */ +/// Create a new temporary file name. pub fn temp_file_path(label: &str) -> PathBuf { let mut file_path = temp_dir(); let file_name = format!( @@ -129,10 +115,8 @@ pub fn temp_file_path(label: &str) -> PathBuf { file_path } -/** - * Returns the OS-specific error message for the case where a file was not - * found. - */ +/// Returns the OS-specific error message for the case where a file was not +/// found. pub fn error_for_enoent() -> String { io::Error::from_raw_os_error(libc::ENOENT).to_string() } diff --git a/test-utils/tests/test_omicron_dev.rs b/test-utils/tests/test_omicron_dev.rs index 73e693badc..7e069244de 100644 --- a/test-utils/tests/test_omicron_dev.rs +++ b/test-utils/tests/test_omicron_dev.rs @@ -2,9 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -/*! - * Smoke tests for the omicron-dev command-line tool - */ +//! Smoke tests for the omicron-dev command-line tool use expectorate::assert_contents; use omicron_test_utils::dev::db::has_omicron_schema; @@ -20,20 +18,18 @@ use subprocess::Exec; use subprocess::ExitStatus; use subprocess::Redirection; -/** name of the "omicron-dev" executable */ +/// name of the "omicron-dev" executable const CMD_OMICRON_DEV: &str = env!("CARGO_BIN_EXE_omicron-dev"); -/** timeout used for various things that should be pretty quick */ +/// timeout used for various things that should be pretty quick const TIMEOUT: Duration = Duration::from_secs(15); fn path_to_omicron_dev() -> PathBuf { path_to_executable(CMD_OMICRON_DEV) } -/** - * Encapsulates the information we need from a running `omicron-dev db-run` - * command. 
- */ +/// Encapsulates the information we need from a running `omicron-dev db-run` +/// command. #[derive(Debug)] struct DbRun { subproc: subprocess::Popen, @@ -44,12 +40,10 @@ struct DbRun { temp_dir: PathBuf, } -/** - * Starts the "omicron-dev db-run" command and runs it for long enough to parse - * the child pid, listen URL, and temporary directory. Returns these, along - * with a handle to the child process. - * TODO-robustness It would be great to put a timeout on this. - */ +/// Starts the "omicron-dev db-run" command and runs it for long enough to parse +/// the child pid, listen URL, and temporary directory. Returns these, along +/// with a handle to the child process. +/// TODO-robustness It would be great to put a timeout on this. fn run_db_run(exec: Exec, wait_for_populate: bool) -> DbRun { let cmdline = exec.to_cmdline_lossy(); eprintln!("will run: {}", cmdline); @@ -130,13 +124,11 @@ fn run_db_run(exec: Exec, wait_for_populate: bool) -> DbRun { } } -/** - * Waits for the subprocess to exit and returns status information - * - * This assumes the caller has arranged for the processes to terminate. This - * function verifies that both the omicron-dev and CockroachDB processes are - * gone and that the temporary directory has been cleaned up. - */ +/// Waits for the subprocess to exit and returns status information +/// +/// This assumes the caller has arranged for the processes to terminate. This +/// function verifies that both the omicron-dev and CockroachDB processes are +/// gone and that the temporary directory has been cleaned up. 
fn verify_graceful_exit(mut dbrun: DbRun) -> subprocess::ExitStatus { let wait_result = dbrun .subproc @@ -159,39 +151,35 @@ fn verify_graceful_exit(mut dbrun: DbRun) -> subprocess::ExitStatus { wait_result } -/* - * Exercises the normal use case of `omicron-dev db-run`: the database starts - * up, we can connect to it and query it, then we simulate the user typing ^C at - * the shell, and then it cleans up its temporary directory. - */ +// Exercises the normal use case of `omicron-dev db-run`: the database starts +// up, we can connect to it and query it, then we simulate the user typing ^C at +// the shell, and then it cleans up its temporary directory. #[tokio::test] async fn test_db_run() { let cmd_path = path_to_omicron_dev(); - /* - * Rather than invoke the command directly, we'll use the shell to run the - * command in a subshell with monitor mode active. This puts the child - * process into a separate process group, which allows us to send the whole - * group SIGINT, which simulates what would happen if this were run - * interactively from the shell (which is what we want to test). Maybe - * there's a better way to do this. (Ideally, we would fork, use - * setpgid(2) in the child, then exec our command. The standard library - * does not provide facilities to do this. Maybe we could use the `libc` - * crate to do this?) - * - * Note that it's not a good test to just send SIGINT to the CockroachDB - * process. In the real-world case we're trying to test, omicron-dev gets - * SIGINT as well. If it doesn't handle it explicitly, the process will be - * terminated and temporary directories will be leaked. However, the test - * would pass because in the test case omicron-dev would never have gotten - * the SIGINT. - * - * We also redirect stderr to stdout just so that it doesn't get dumped to - * the user's terminal during regular `cargo test` runs. - * - * Finally, we set listen-port=0 to avoid conflicting with concurrent - * invocations. 
- */ + // Rather than invoke the command directly, we'll use the shell to run the + // command in a subshell with monitor mode active. This puts the child + // process into a separate process group, which allows us to send the whole + // group SIGINT, which simulates what would happen if this were run + // interactively from the shell (which is what we want to test). Maybe + // there's a better way to do this. (Ideally, we would fork, use + // setpgid(2) in the child, then exec our command. The standard library + // does not provide facilities to do this. Maybe we could use the `libc` + // crate to do this?) + // + // Note that it's not a good test to just send SIGINT to the CockroachDB + // process. In the real-world case we're trying to test, omicron-dev gets + // SIGINT as well. If it doesn't handle it explicitly, the process will be + // terminated and temporary directories will be leaked. However, the test + // would pass because in the test case omicron-dev would never have gotten + // the SIGINT. + // + // We also redirect stderr to stdout just so that it doesn't get dumped to + // the user's terminal during regular `cargo test` runs. + // + // Finally, we set listen-port=0 to avoid conflicting with concurrent + // invocations. let cmdstr = format!( "( set -o monitor; {} db-run --listen-port 0)", cmd_path.display() @@ -208,10 +196,8 @@ async fn test_db_run() { assert!(has_omicron_schema(&client).await); - /* - * Now run db-populate. It should fail because the database is already - * populated. - */ + // Now run db-populate. It should fail because the database is already + // populated. eprintln!("running db-populate"); let populate_result = Exec::cmd(&cmd_path) .arg("db-populate") @@ -230,7 +216,7 @@ async fn test_db_run() { .contains("database \"omicron\" already exists")); assert!(has_omicron_schema(&client).await); - /* Try again, but with the --wipe flag. */ + // Try again, but with the --wipe flag. 
eprintln!("running db-populate --wipe"); let populate_result = Exec::cmd(&cmd_path) .arg("db-populate") @@ -242,7 +228,7 @@ async fn test_db_run() { assert!(matches!(populate_result.exit_status, ExitStatus::Exited(0))); assert!(has_omicron_schema(&client).await); - /* Now run db-wipe. This should work. */ + // Now run db-wipe. This should work. eprintln!("running db-wipe"); let wipe_result = Exec::cmd(&cmd_path) .arg("db-wipe") @@ -253,9 +239,7 @@ async fn test_db_run() { assert!(matches!(wipe_result.exit_status, ExitStatus::Exited(0))); assert!(!has_omicron_schema(&client).await); - /* - * The rest of the populate()/wipe() behavior is tested elsewhere. - */ + // The rest of the populate()/wipe() behavior is tested elsewhere. drop(client); conn_task @@ -264,18 +248,14 @@ async fn test_db_run() { .expect("connection failed with an error"); eprintln!("cleaned up connection"); - /* - * Figure out what process group our child processes are in. (That won't be - * the child's pid because the immediate shell will be in our process group, - * and its the omicron-dev command that's the process group leader.) - */ + // Figure out what process group our child processes are in. (That won't be + // the child's pid because the immediate shell will be in our process group, + // and its the omicron-dev command that's the process group leader.) let pgid = unsafe { libc::getpgid(dbrun.db_pid as libc::pid_t) }; assert_ne!(pgid, -1); - /* - * Send SIGINT to that process group. This simulates an interactive session - * where the user hits ^C. Make sure everything is cleaned up gracefully. - */ + // Send SIGINT to that process group. This simulates an interactive session + // where the user hits ^C. Make sure everything is cleaned up gracefully. 
eprintln!("sending SIGINT to process group {}", pgid); assert_eq!(0, unsafe { libc::kill(-pgid, libc::SIGINT) }); @@ -284,30 +264,24 @@ async fn test_db_run() { assert!(matches!(wait, subprocess::ExitStatus::Exited(0))); } -/* - * Exercises the unusual case of `omicron-dev db-run` where the database shuts - * down unexpectedly. - */ +// Exercises the unusual case of `omicron-dev db-run` where the database shuts +// down unexpectedly. #[tokio::test] async fn test_db_killed() { - /* - * Redirect stderr to stdout just so that it doesn't get dumped to the - * user's terminal during regular `cargo test` runs. - */ + // Redirect stderr to stdout just so that it doesn't get dumped to the + // user's terminal during regular `cargo test` runs. let exec = Exec::cmd(&path_to_omicron_dev()) .arg("db-run") .arg("--listen-port=0") .stderr(Redirection::Merge); - /* - * Although it doesn't seem necessary, we wait for "db-run" to finish - * populating the database before we kill CockroachDB. The main reason is - * that we're trying to verify that if CockroachDB exits under normal - * conditions, then db-run notices. If we don't wait for populate() to - * finish, then we might fail during populate(), and that's a different - * failure path. In particular, that path does _not_ necessarily wait for - * CockroachDB to exit. It arguably should, but this is considerably more - * of an edge case than we're testing here. - */ + // Although it doesn't seem necessary, we wait for "db-run" to finish + // populating the database before we kill CockroachDB. The main reason is + // that we're trying to verify that if CockroachDB exits under normal + // conditions, then db-run notices. If we don't wait for populate() to + // finish, then we might fail during populate(), and that's a different + // failure path. In particular, that path does _not_ necessarily wait for + // CockroachDB to exit. It arguably should, but this is considerably more + // of an edge case than we're testing here. 
let dbrun = run_db_run(exec, true); assert_eq!(0, unsafe { libc::kill(dbrun.db_pid as libc::pid_t, libc::SIGKILL)