diff --git a/.gitignore b/.gitignore index ae432828..88b8a53e 100644 --- a/.gitignore +++ b/.gitignore @@ -13,6 +13,9 @@ Cargo.lock # MSVC Windows builds of rustc generate these, which store debugging information *.pdb +# Some rustc backtrace file +rustc-ice-* + /frontend/dist /.cargo/.build /.cargo/tmp diff --git a/CHANGELOG.md b/CHANGELOG.md index 4aff2475..941aeef4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,3 +13,5 @@ Bob Management GUI changelog - Logger Initialization (#14) - Login Page, backend (#16) - Login Page, frontend (#17) +- Home page, backend (#18) +- Home page, frontend (#22) diff --git a/api/openapi.yaml b/api/openapi.yaml index baeb1a55..ebcc52a0 100644 --- a/api/openapi.yaml +++ b/api/openapi.yaml @@ -5,13 +5,31 @@ info: contact: name: Romanov Simeon ArchArcheoss@proton.me license: - name: '' + name: "" version: 0.0.0 paths: + /api/v1/disks/count: + get: + tags: + - services::api + summary: Returns count of Physical Disks per status + description: Returns count of Physical Disks per status + operationId: get_disks_count + responses: + "200": + description: Returns a list with count of physical disks per status + content: + application/json: + schema: + $ref: "#/components/schemas/DiskCount" + "401": + description: Unauthorized + security: + - api_key: [] /api/v1/login: post: tags: - - services::auth + - services::auth summary: Login to a BOB cluster description: | Login to a BOB cluster @@ -29,77 +47,123 @@ paths: The client couldn't authorize on the host operationId: login parameters: - - name: hostname - in: path - description: Address to connect to - required: true - schema: - $ref: '#/components/schemas/Hostname' - - name: credentials - in: path - description: '[Optional] Credentials used for BOB authentication' - required: true - schema: - allOf: - - $ref: '#/components/schemas/Credentials' - nullable: true + - name: hostname + in: path + description: Address to connect to + required: true + schema: + $ref: 
"#/components/schemas/Hostname" + - name: credentials + in: path + description: "[Optional] Credentials used for BOB authentication" + required: true + schema: + allOf: + - $ref: "#/components/schemas/Credentials" + nullable: true requestBody: - description: '' + description: "" content: application/json: schema: - $ref: '#/components/schemas/BobConnectionData' + $ref: "#/components/schemas/BobConnectionData" required: true responses: - '200': + "200": description: Successful authorization - '400': + "400": description: Bad Hostname - '401': + "401": description: Bad Credentials - '404': + "404": description: Can't reach specified hostname /api/v1/logout: post: tags: - - services::auth + - services::auth operationId: logout responses: - '200': + "200": description: Logged out - /api/v1/root: + /api/v1/nodes/count: + get: + tags: + - services::api + summary: Get Nodes count per Status + description: Get Nodes count per Status + operationId: get_nodes_count + responses: + "200": + description: Node count list per status + content: + application/json: + schema: + $ref: "#/components/schemas/NodeCount" + "401": + description: Unauthorized + security: + - api_key: [] + /api/v1/nodes/rps: + get: + tags: + - services::api + summary: Returns Total RPS on cluster + description: Returns Total RPS on cluster + operationId: get_rps + responses: + "200": + description: RPS list per operation on all nodes + content: + application/json: + schema: + $ref: "#/components/schemas/RPS" + "401": + description: Unauthorized + security: + - api_key: [] + /api/v1/nodes/space: get: tags: - - crate - operationId: root + - services::api + summary: Return inforamtion about space on cluster + description: Return inforamtion about space on cluster + operationId: get_space responses: - '200': - description: Hello Bob! 
+ "200": + description: Cluster Space Information + content: + application/json: + schema: + $ref: "#/components/schemas/SpaceInfo" + "401": + description: Unauthorized + security: + - api_key: [] components: schemas: BobConnectionData: type: object description: Data needed to connect to a BOB cluster required: - - hostname + - hostname properties: credentials: allOf: - - $ref: '#/components/schemas/Credentials' + - $ref: "#/components/schemas/Credentials" nullable: true hostname: - $ref: '#/components/schemas/Hostname' + $ref: "#/components/schemas/Hostname" example: credentials: login: archeoss - password: '12345' + password: "12345" hostname: 0.0.0.0:7000 Credentials: type: object description: Optional auth credentials for a BOB cluster required: - - login - - password + - login + - password properties: login: type: string @@ -109,9 +173,668 @@ components: description: Password used during auth example: login: archeoss - password: '12345' + password: "12345" + DiskCount: + type: object + description: Disk count by their status + required: + - good + - bad + - offline + properties: + bad: + type: integer + format: int64 + minimum: 0 + good: + type: integer + format: int64 + minimum: 0 + offline: + type: integer + format: int64 + minimum: 0 + example: + bad: 0 + good: 0 + offline: 0 + DiskProblem: + type: string + description: Defines kind of problem on disk + enum: + - freeSpaceRunningOut + DiskStatus: + oneOf: + - type: object + required: + - status + properties: + status: + type: string + enum: + - good + - type: object + required: + - status + - problems + properties: + problems: + type: array + items: + $ref: "#/components/schemas/DiskProblem" + status: + type: string + enum: + - bad + - type: object + required: + - status + properties: + status: + type: string + enum: + - offline + description: |- + Defines disk status + + Variant - Disk Status + Content - List of problems on disk. 
'null' if status != 'bad' + discriminator: + propertyName: status + DiskStatusName: + type: string + description: Defines disk status names + enum: + - good + - bad + - offline Hostname: - $ref: '#/components/schemas/Uri' + type: string + MetricsEntryModel: + type: object + required: + - value + - timestamp + properties: + timestamp: + type: integer + format: int64 + minimum: 0 + value: + type: integer + format: int64 + minimum: 0 + MetricsSnapshotModel: + type: object + required: + - metrics + properties: + metrics: + type: object + additionalProperties: + $ref: "#/components/schemas/MetricsEntryModel" + NodeConfiguration: + type: object + properties: + blob_file_name_prefix: + type: string + nullable: true + root_dir_name: + type: string + nullable: true + NodeCount: + type: object + description: Node count by their status + required: + - good + - bad + - offline + properties: + bad: + type: integer + format: int64 + minimum: 0 + good: + type: integer + format: int64 + minimum: 0 + offline: + type: integer + format: int64 + minimum: 0 + example: + bad: 0 + good: 0 + offline: 0 + NodeProblem: + type: string + description: Defines kind of problem on Node + enum: + - aliensExists + - corruptedExists + - freeSpaceRunningOut + - virtualMemLargerThanRAM + - highCPULoad + NodeStatus: + oneOf: + - type: object + required: + - status + properties: + status: + type: string + enum: + - good + - type: object + required: + - status + - problems + properties: + problems: + type: array + items: + $ref: "#/components/schemas/NodeProblem" + status: + type: string + enum: + - bad + - type: object + required: + - status + properties: + status: + type: string + enum: + - offline + description: |- + Defines status of node + + Variants - Node status + + Content - List of problems on node. 
'null' if status != 'bad' + discriminator: + propertyName: status + NodeStatusName: + type: string + description: Defines node status names + enum: + - good + - bad + - offline + Operation: + type: string + description: Types of operations on BOB cluster + enum: + - put + - get + - exist + - delete + RPS: + type: object + description: Requests per second by operation + required: + - put + - get + - exist + - delete + properties: + delete: + type: integer + format: int64 + minimum: 0 + exist: + type: integer + format: int64 + minimum: 0 + get: + type: integer + format: int64 + minimum: 0 + put: + type: integer + format: int64 + minimum: 0 + example: + delete: 0 + exist: 0 + get: 0 + put: 0 + RawMetricEntry: + type: string + enum: + - cluster_grinder.get_count_rate + - cluster_grinder.put_count_rate + - cluster_grinder.exist_count_rate + - cluster_grinder.delete_count_rate + - pearl.exist_count_rate + - pearl.get_count_rate + - pearl.put_count_rate + - pearl.delete_count_rate + - backend.alien_count + - backend.corrupted_blob_count + - hardware.bob_virtual_ram + - hardware.total_ram + - hardware.used_ram + - hardware.bob_cpu_load + - hardware.free_space + - hardware.total_space + - hardware.descr_amount + ReplicaProblem: + type: string + description: Reasons why Replica is offline + enum: + - nodeUnavailable + - diskUnavailable + ReplicaStatus: + oneOf: + - type: object + required: + - status + properties: + status: + type: string + enum: + - good + - type: object + required: + - status + - problems + properties: + problems: + type: array + items: + $ref: "#/components/schemas/ReplicaProblem" + status: + type: string + enum: + - offline + description: |- + Replica status. It's either good or offline with the reasons why it is offline + + Variants - Replica status + + Content - List of problems on replica. 
'null' if status != 'offline' + discriminator: + propertyName: status + SpaceInfo: + type: object + description: Disk space information in bytes + required: + - total_disk + - free_disk + - used_disk + - occupied_disk + properties: + free_disk: + type: integer + format: int64 + description: The amount of free disk space + minimum: 0 + occupied_disk: + type: integer + format: int64 + description: Disk space occupied only by BOB. occupied_disk should be lesser than used_disk + minimum: 0 + total_disk: + type: integer + format: int64 + description: Total disk space amount + minimum: 0 + used_disk: + type: integer + format: int64 + description: Used disk space amount + minimum: 0 + TypedMetrics: + type: object + description: Raw metrics information + required: + - cluster_grinder.get_count_rate + - cluster_grinder.put_count_rate + - cluster_grinder.exist_count_rate + - cluster_grinder.delete_count_rate + - pearl.exist_count_rate + - pearl.get_count_rate + - pearl.put_count_rate + - pearl.delete_count_rate + - backend.alien_count + - backend.corrupted_blob_count + - hardware.bob_virtual_ram + - hardware.total_ram + - hardware.used_ram + - hardware.bob_cpu_load + - hardware.free_space + - hardware.total_space + - hardware.descr_amount + properties: + backend.alien_count: + type: object + required: + - value + - timestamp + properties: + timestamp: + type: integer + format: int64 + minimum: 0 + value: + type: integer + format: int64 + minimum: 0 + backend.corrupted_blob_count: + type: object + required: + - value + - timestamp + properties: + timestamp: + type: integer + format: int64 + minimum: 0 + value: + type: integer + format: int64 + minimum: 0 + cluster_grinder.delete_count_rate: + type: object + required: + - value + - timestamp + properties: + timestamp: + type: integer + format: int64 + minimum: 0 + value: + type: integer + format: int64 + minimum: 0 + cluster_grinder.exist_count_rate: + type: object + required: + - value + - timestamp + properties: + timestamp: 
+ type: integer + format: int64 + minimum: 0 + value: + type: integer + format: int64 + minimum: 0 + cluster_grinder.get_count_rate: + type: object + required: + - value + - timestamp + properties: + timestamp: + type: integer + format: int64 + minimum: 0 + value: + type: integer + format: int64 + minimum: 0 + cluster_grinder.put_count_rate: + type: object + required: + - value + - timestamp + properties: + timestamp: + type: integer + format: int64 + minimum: 0 + value: + type: integer + format: int64 + minimum: 0 + hardware.bob_cpu_load: + type: object + required: + - value + - timestamp + properties: + timestamp: + type: integer + format: int64 + minimum: 0 + value: + type: integer + format: int64 + minimum: 0 + hardware.bob_virtual_ram: + type: object + required: + - value + - timestamp + properties: + timestamp: + type: integer + format: int64 + minimum: 0 + value: + type: integer + format: int64 + minimum: 0 + hardware.descr_amount: + type: object + required: + - value + - timestamp + properties: + timestamp: + type: integer + format: int64 + minimum: 0 + value: + type: integer + format: int64 + minimum: 0 + hardware.free_space: + type: object + required: + - value + - timestamp + properties: + timestamp: + type: integer + format: int64 + minimum: 0 + value: + type: integer + format: int64 + minimum: 0 + hardware.total_ram: + type: object + required: + - value + - timestamp + properties: + timestamp: + type: integer + format: int64 + minimum: 0 + value: + type: integer + format: int64 + minimum: 0 + hardware.total_space: + type: object + required: + - value + - timestamp + properties: + timestamp: + type: integer + format: int64 + minimum: 0 + value: + type: integer + format: int64 + minimum: 0 + hardware.used_ram: + type: object + required: + - value + - timestamp + properties: + timestamp: + type: integer + format: int64 + minimum: 0 + value: + type: integer + format: int64 + minimum: 0 + pearl.delete_count_rate: + type: object + required: + - value + - 
timestamp + properties: + timestamp: + type: integer + format: int64 + minimum: 0 + value: + type: integer + format: int64 + minimum: 0 + pearl.exist_count_rate: + type: object + required: + - value + - timestamp + properties: + timestamp: + type: integer + format: int64 + minimum: 0 + value: + type: integer + format: int64 + minimum: 0 + pearl.get_count_rate: + type: object + required: + - value + - timestamp + properties: + timestamp: + type: integer + format: int64 + minimum: 0 + value: + type: integer + format: int64 + minimum: 0 + pearl.put_count_rate: + type: object + required: + - value + - timestamp + properties: + timestamp: + type: integer + format: int64 + minimum: 0 + value: + type: integer + format: int64 + minimum: 0 + example: + backend.alien_count: + timestamp: 0 + value: 0 + backend.corrupted_blob_count: + timestamp: 0 + value: 0 + cluster_grinder.delete_count_rate: + timestamp: 0 + value: 0 + cluster_grinder.exist_count_rate: + timestamp: 0 + value: 0 + cluster_grinder.get_count_rate: + timestamp: 0 + value: 0 + cluster_grinder.put_count_rate: + timestamp: 0 + value: 0 + hardware.bob_cpu_load: + timestamp: 0 + value: 0 + hardware.bob_virtual_ram: + timestamp: 0 + value: 0 + hardware.descr_amount: + timestamp: 0 + value: 0 + hardware.free_space: + timestamp: 0 + value: 0 + hardware.total_ram: + timestamp: 0 + value: 0 + hardware.total_space: + timestamp: 0 + value: 0 + hardware.used_ram: + timestamp: 0 + value: 0 + pearl.delete_count_rate: + timestamp: 0 + value: 0 + pearl.exist_count_rate: + timestamp: 0 + value: 0 + pearl.get_count_rate: + timestamp: 0 + value: 0 + pearl.put_count_rate: + timestamp: 0 + value: 0 + VDiskStatus: + oneOf: + - type: object + required: + - status + properties: + status: + type: string + enum: + - good + - type: object + required: + - status + properties: + status: + type: string + enum: + - bad + - type: object + required: + - status + properties: + status: + type: string + enum: + - offline + description: |- + 
Virtual disk status. + + Variants - Virtual Disk status + status == 'bad' when at least one of its replicas has problems + example: + status: good + securitySchemes: + api_key: + type: apiKey + in: header + name: bob_apikey tags: -- name: bob - description: BOB management API + - name: bob + description: BOB management API diff --git a/backend/Cargo.toml b/backend/Cargo.toml index bf1e104e..8c62d6af 100644 --- a/backend/Cargo.toml +++ b/backend/Cargo.toml @@ -32,12 +32,14 @@ thiserror = "1.0" ## General tokio = { version = "1.32", features = ["rt", "macros", "rt-multi-thread" ] } -hyper = { version = "0.14", features = ["http2", "client"] } +# TODO: Move to hyper 1.0 +hyper = { version = "0.14", features = ["http2", "client"] } hyper_serde = "0.13" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" uuid = { version = "1.4", features = ["v4", "serde", "fast-rng"] } futures = "0.3" +strum = { version = "0.25", features = ["derive"] } ## OpenAPI + Swagger utoipa = { version = "4.0", features = ["yaml", "axum_extras", "chrono", "openapi_extensions"], optional = true } @@ -60,5 +62,4 @@ utoipa = { version = "4.0", features = ["yaml", "axum_extras", "chrono", "opena [features] default = [ "swagger" ] swagger = [ "dep:utoipa", "dep:utoipa-swagger-ui" , "dep:utoipa-redoc", "dep:utoipa-rapidoc" ] -gen_api = [ "dep:utoipa" ] diff --git a/backend/src/connector/api.rs b/backend/src/connector/api.rs index 3c9b6a1f..14976b35 100644 --- a/backend/src/connector/api.rs +++ b/backend/src/connector/api.rs @@ -6,7 +6,6 @@ //! //! 
-use super::dto::{self}; use super::prelude::*; pub type ServiceError = Box; @@ -22,6 +21,17 @@ pub enum APIError { ResponseError, } +impl IntoResponse for APIError { + fn into_response(self) -> axum::response::Response { + match self { + Self::RequestFailed => StatusCode::NOT_FOUND, + Self::InvalidStatusCode(code) => code, + Self::ResponseError => StatusCode::INTERNAL_SERVER_ERROR, + } + .into_response() + } +} + #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] pub enum GetAlienResponse { /// Alien Node name diff --git a/backend/src/connector/client.rs b/backend/src/connector/client.rs index ed9d64a4..bc866ee3 100644 --- a/backend/src/connector/client.rs +++ b/backend/src/connector/client.rs @@ -99,7 +99,10 @@ where pub fn try_new(base_path: &str) -> Result { let uri = Uri::from_str(base_path).change_context(ClientInitError::InvalidUri)?; - let scheme = uri.scheme_str().ok_or(ClientInitError::InvalidScheme)?; + let scheme = uri.scheme_str().unwrap_or_else(|| { + tracing::info!("couldn't locate URI scheme... Fallback to http"); + "http" + }); let scheme = scheme.to_ascii_lowercase(); let connector = Connector::builder(); @@ -133,7 +136,10 @@ fn into_base_path( .try_into() .change_context(ClientInitError::InvalidUri)?; - let scheme = uri.scheme_str().ok_or(ClientInitError::InvalidScheme)?; + let scheme = uri.scheme_str().unwrap_or_else(|| { + tracing::info!("couldn't locate URI scheme... 
Fallback to http"); + "http" + }); // Check the scheme if necessary // if let Some(correct_scheme) = correct_scheme { @@ -308,19 +314,68 @@ where /// Return directory of alien #[must_use] async fn get_alien_dir(&self, context: &C) -> Result { - todo!() + let request = self + .form_request("/alien/dir", Method::GET, context) + .change_context(APIError::RequestFailed)?; + let response = self.call(request, context).await?; + + match response.status().as_u16() { + 200 => Ok(self + .handle_response_json(response, |body: Dir| GetAlienDirResponse::Directory(body)) + .await?), + 403 => Ok(self + .handle_response_json(response, |body: StatusExt| { + GetAlienDirResponse::PermissionDenied(body) + }) + .await?), + 406 => Ok(self + .handle_response_json(response, |body: StatusExt| { + GetAlienDirResponse::NotAcceptableBackend(body) + }) + .await?), + _ => Err(APIError::from(response))?, + } } /// Returns the list of disks with their states #[must_use] async fn get_disks(&self, context: &C) -> Result { - todo!() + let request = self + .form_request("/disks/list", Method::GET, context) + .change_context(APIError::RequestFailed)?; + let response = self.call(request, context).await?; + + match response.status().as_u16() { + 200 => Ok(self + .handle_response_json(response, |body: Vec| { + GetDisksResponse::AJSONArrayWithDisksAndTheirStates(body) + }) + .await?), + 403 => Ok(self + .handle_response_json(response, |body: StatusExt| { + GetDisksResponse::PermissionDenied(body) + }) + .await?), + _ => Err(APIError::from(response))?, + } } /// Get metrics #[must_use] async fn get_metrics(&self, context: &C) -> Result { - todo!() + let request = self + .form_request("/metrics", Method::GET, context) + .change_context(APIError::RequestFailed)?; + let response = self.call(request, context).await?; + + match response.status().as_u16() { + 200 => Ok(self + .handle_response_json(response, |body: MetricsSnapshotModel| { + GetMetricsResponse::Metrics(body) + }) + .await?), + _ => 
Err(APIError::from(response))?, + } } /// Returns a list of known nodes @@ -335,7 +390,7 @@ where match response.status().as_u16() { 200 => Ok(self - .handle_response_json(response, |body: Vec| { + .handle_response_json(response, |body: Vec| { GetNodesResponse::AJSONArrayOfNodesInfoAndVdisksOnThem(body) }) .await?), @@ -348,77 +403,269 @@ where #[must_use] async fn get_partition( &self, - v_disk_id: i32, - partition_id: String, + param_v_disk_id: i32, + param_partition_id: String, context: &C, ) -> Result { - todo!() + let request = self + .form_request( + &format!("/vdisks/{param_v_disk_id}/partitions/{param_partition_id}"), + Method::GET, + context, + ) + .change_context(APIError::RequestFailed)?; + let response = self.call(request, context).await?; + + match response.status().as_u16() { + 200 => Ok(self + .handle_response_json(response, |body: Partition| { + GetPartitionResponse::AJSONWithPartitionInfo(body) + }) + .await?), + 403 => Ok(self + .handle_response_json(response, |body: StatusExt| { + GetPartitionResponse::PermissionDenied(body) + }) + .await?), + 404 => Ok(self + .handle_response_json(response, |body: StatusExt| { + GetPartitionResponse::NotFound(body) + }) + .await?), + _ => Err(APIError::from(response))?, + } } /// Returns a list of partitions #[must_use] async fn get_partitions( &self, - v_disk_id: i32, + param_v_disk_id: i32, context: &C, ) -> Result { - todo!() + let request = self + .form_request( + &format!("/vdisks/{param_v_disk_id}/partitions"), + Method::GET, + context, + ) + .change_context(APIError::RequestFailed)?; + let response = self.call(request, context).await?; + + match response.status().as_u16() { + 200 => Ok(self + .handle_response_json(response, |body: VDiskPartitions| { + GetPartitionsResponse::NodeInfoAndJSONArrayWithPartitionsInfo(body) + }) + .await?), + 403 => Ok(self + .handle_response_json(response, |body: StatusExt| { + GetPartitionsResponse::PermissionDenied(body) + }) + .await?), + + 404 => Ok(self + 
.handle_response_json(response, |body: StatusExt| { + GetPartitionsResponse::NotFound(body) + }) + .await?), + _ => Err(APIError::from(response))?, + } } /// Returns count of records of this on node #[must_use] async fn get_records( &self, - v_disk_id: i32, + param_v_disk_id: i32, context: &C, ) -> Result { - todo!() + let request = self + .form_request( + &format!("/vdisks/{param_v_disk_id}/records/count"), + Method::GET, + context, + ) + .change_context(APIError::RequestFailed)?; + let response = self.call(request, context).await?; + + match response.status().as_u16() { + 200 => Ok(self + .handle_response_json(response, GetRecordsResponse::RecordsCount) + .await?), + 403 => Ok(self + .handle_response_json(response, |body: StatusExt| { + GetRecordsResponse::PermissionDenied(body) + }) + .await?), + 404 => Ok(self + .handle_response_json(response, |body: StatusExt| { + GetRecordsResponse::NotFound(body) + }) + .await?), + _ => Err(APIError::from(response))?, + } } /// Returns directories of local replicas of vdisk #[must_use] async fn get_replicas_local_dirs( &self, - v_disk_id: i32, + param_v_disk_id: i32, context: &C, ) -> Result { - todo!() + let request = self + .form_request( + &format!("/vdisks/{param_v_disk_id}/replicas/local/dirs"), + Method::GET, + context, + ) + .change_context(APIError::RequestFailed)?; + let response = self.call(request, context).await?; + + match response.status().as_u16() { + 200 => Ok(self + .handle_response_json(response, |body: Vec| { + GetReplicasLocalDirsResponse::AJSONArrayWithDirs(body) + }) + .await?), + 403 => Ok(self + .handle_response_json(response, |body: StatusExt| { + GetReplicasLocalDirsResponse::PermissionDenied(body) + }) + .await?), + 404 => Ok(self + .handle_response_json(response, |body: StatusExt| { + GetReplicasLocalDirsResponse::NotFound(body) + }) + .await?), + _ => Err(APIError::from(response))?, + } } /// Get space info #[must_use] async fn get_space_info(&self, context: &C) -> Result { - todo!() + let 
request = self + .form_request("/status/space", Method::GET, context) + .change_context(APIError::RequestFailed)?; + let response = self.call(request, context).await?; + + match response.status().as_u16() { + 200 => Ok(self + .handle_response_json(response, |body: SpaceInfo| { + GetSpaceInfoResponse::SpaceInfo(body) + }) + .await?), + _ => Err(APIError::from(response))?, + } } /// Returns information about self #[must_use] async fn get_status(&self, context: &C) -> Result { - todo!() + let request = self + .form_request("/status", Method::GET, context) + .change_context(APIError::RequestFailed)?; + let response = self.call(request, context).await?; + + match response.status().as_u16() { + 200 => Ok(self + .handle_response_json(response, |body: Node| { + GetStatusResponse::AJSONWithNodeInfo(body) + }) + .await?), + _ => Err(APIError::from(response))?, + } } /// Returns a vdisk info by ID #[must_use] - async fn get_v_disk(&self, v_disk_id: i32, context: &C) -> Result { - todo!() + async fn get_v_disk( + &self, + param_v_disk_id: i32, + context: &C, + ) -> Result { + let request = self + .form_request(&format!("/vdisks/{param_v_disk_id}"), Method::GET, context) + .change_context(APIError::RequestFailed)?; + let response = self.call(request, context).await?; + + match response.status().as_u16() { + 200 => Ok(self + .handle_response_json(response, |body: VDisk| { + GetVDiskResponse::AJSONWithVdiskInfo(body) + }) + .await?), + 403 => Ok(self + .handle_response_json(response, |body: StatusExt| { + GetVDiskResponse::PermissionDenied(body) + }) + .await?), + 404 => Ok(self + .handle_response_json(response, |body: StatusExt| GetVDiskResponse::NotFound(body)) + .await?), + _ => Err(APIError::from(response))?, + } } /// Returns a list of vdisks #[must_use] async fn get_v_disks(&self, context: &C) -> Result { - todo!() + let request = self + .form_request("/vdisks", Method::GET, context) + .change_context(APIError::RequestFailed)?; + let response = self.call(request, 
context).await?; + + match response.status().as_u16() { + 200 => Ok(self + .handle_response_json(response, |body: Vec| { + GetVDisksResponse::AJSONArrayOfVdisksInfo(body) + }) + .await?), + 403 => Ok(GetVDisksResponse::PermissionDenied), + + _ => Err(APIError::from(response))?, + } } /// Returns server version #[must_use] async fn get_version(&self, context: &C) -> Result { - todo!() + let request = self + .form_request("/version", Method::GET, context) + .change_context(APIError::RequestFailed)?; + let response = self.call(request, context).await?; + + match response.status().as_u16() { + 200 => Ok(self + .handle_response_json(response, |body: VersionInfo| { + GetVersionResponse::VersionInfo(body) + }) + .await?), + + _ => Err(APIError::from(response))?, + } } /// Returns configuration of the node #[must_use] async fn get_configuration(&self, context: &C) -> Result { - todo!() + let request = self + .form_request("/configuration", Method::GET, context) + .change_context(APIError::RequestFailed)?; + let response = self.call(request, context).await?; + + match response.status().as_u16() { + 200 => Ok(self + .handle_response_json(response, |body: NodeConfiguration| { + GetConfigurationResponse::ConfigurationObject(body) + }) + .await?), + 403 => Ok(GetConfigurationResponse::PermissionDenied), + + _ => Err(APIError::from(response))?, + } } } diff --git a/backend/src/connector/dto.rs b/backend/src/connector/dto.rs index f66d5bed..9ccc1f34 100644 --- a/backend/src/connector/dto.rs +++ b/backend/src/connector/dto.rs @@ -12,6 +12,8 @@ //! 
use std::collections::HashMap; +#[cfg(all(feature = "swagger", debug_assertions))] +use utoipa::ToSchema; type StdError = dyn std::error::Error; @@ -362,6 +364,7 @@ impl std::str::FromStr for Error { #[derive(Debug, Default, Clone, serde::Serialize, serde::Deserialize)] #[cfg_attr(feature = "conversion", derive(frunk::LabelledGeneric))] +#[cfg_attr(all(feature = "swagger", debug_assertions), derive(ToSchema))] pub struct MetricsEntryModel { #[serde(rename = "value")] pub value: u64, @@ -370,8 +373,16 @@ pub struct MetricsEntryModel { pub timestamp: u64, } +#[cfg(all(feature = "swagger", debug_assertions))] +impl utoipa::PartialSchema for MetricsEntryModel { + fn schema() -> utoipa::openapi::RefOr { + ::schema().1 + } +} + #[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)] #[cfg_attr(feature = "conversion", derive(frunk::LabelledGeneric))] +#[cfg_attr(all(feature = "swagger", debug_assertions), derive(ToSchema))] pub struct MetricsSnapshotModel { #[serde(rename = "metrics")] pub metrics: HashMap, @@ -488,6 +499,7 @@ impl std::str::FromStr for Node { #[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)] #[cfg_attr(feature = "conversion", derive(frunk::LabelledGeneric))] +#[cfg_attr(all(feature = "swagger", debug_assertions), derive(ToSchema))] pub struct NodeConfiguration { #[serde(rename = "blob_file_name_prefix")] #[serde(skip_serializing_if = "Option::is_none")] @@ -774,7 +786,7 @@ impl std::str::FromStr for Replica { } } -#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)] +#[derive(Default, Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)] #[cfg_attr(feature = "conversion", derive(frunk::LabelledGeneric))] pub struct SpaceInfo { #[serde(rename = "total_disk_space_bytes")] diff --git a/backend/src/connector/mod.rs b/backend/src/connector/mod.rs index 114be2cb..7bd7e695 100644 --- a/backend/src/connector/mod.rs +++ b/backend/src/connector/mod.rs @@ -4,6 +4,7 @@ mod prelude 
{ context::{ContextWrapper, DropContextService, Has}, ClientError, Connector, }; + pub use crate::connector::dto::*; pub use crate::{models::shared::XSpanIdString, prelude::*, services::auth::HttpClient}; pub use axum::{ headers::{authorization::Credentials, Authorization, HeaderMapExt}, @@ -31,7 +32,7 @@ pub mod context; pub mod dto; pub mod error; -pub type ApiInterface = dyn ApiNoContext + Send + Sync; +// pub type ApiInterface = dyn ApiNoContext + Send + Sync; #[derive(Debug, Error)] pub enum ClientError { @@ -92,7 +93,7 @@ impl HttpsBuilder { } #[derive(Clone)] -pub struct BobClient + Send + Sync> { +pub struct BobClient + Send + Sync> { /// Unique Identifier id: Uuid, @@ -105,17 +106,21 @@ pub struct BobClient + Send + Sync> { /// Clients for all known nodes cluster: HashMap>, + + context_marker: PhantomData, } #[allow(clippy::missing_fields_in_debug)] -impl + Send + Sync + Clone> std::fmt::Debug - for BobClient +impl< + Context: Send + Sync + Has>>, + Client: ApiNoContext + Send + Sync + Clone, + > std::fmt::Debug for BobClient { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let user = &self .main .context() - .auth_data + .get() .as_ref() .map_or("Unknown", |cred| cred.username()); f.debug_struct("BobClient") @@ -125,7 +130,9 @@ impl + Send + Sync + Clone> std::fmt::Debug } } -impl + Send + Sync> BobClient { +impl + Send + Sync> + BobClient +{ /// Creates new [`BobClient`] from [`BobConnectionData`] /// /// # Errors @@ -171,6 +178,7 @@ impl + Send + Sync> BobClient + Send + Sync> BobClient &ClientContext { + pub fn context(&self) -> &Context { self.main.context() } @@ -299,11 +307,18 @@ impl HttpClient { }, |hostname| Some((name, Client::try_new_http(&hostname.to_string()))), ); - if let Some((name, Ok(client))) = client { - Some((name.clone(), Arc::new(client.with_context(context)))) - } else { - tracing::warn!("couldn't create client for {name}"); - None + match client { + Some((name, Ok(client))) => { + Some((name.clone(), 
Arc::new(client.with_context(context)))) + } + Some((_, Err(e))) => { + tracing::warn!("couldn't create client: {e}"); + None + } + None => { + tracing::warn!("couldn't create client for {name}"); + None + } } } } diff --git a/backend/src/lib.rs b/backend/src/lib.rs index d562f11d..89a20739 100644 --- a/backend/src/lib.rs +++ b/backend/src/lib.rs @@ -17,32 +17,61 @@ pub mod models; pub mod router; pub mod services; +#[cfg(all(feature = "swagger", debug_assertions))] +struct SecurityAddon; + +#[cfg(all(feature = "swagger", debug_assertions))] +impl Modify for SecurityAddon { + fn modify(&self, openapi: &mut utoipa::openapi::OpenApi) { + if let Some(components) = openapi.components.as_mut() { + components.add_security_scheme( + "api_key", + SecurityScheme::ApiKey(ApiKey::Header(ApiKeyValue::new("bob_apikey"))), + ); + } + } +} + #[cfg_attr(all(feature = "swagger", debug_assertions), derive(OpenApi))] #[cfg_attr(all(feature = "swagger", debug_assertions), openapi( - paths(root, services::auth::login, services::auth::logout), + paths( + services::auth::login, + services::auth::logout, + services::api::get_disks_count, + services::api::get_nodes_count, + services::api::get_rps, + services::api::get_space, + ), components( - schemas(models::shared::Credentials, models::shared::Hostname, models::shared::BobConnectionData) + schemas(models::shared::Credentials, models::shared::Hostname, models::shared::BobConnectionData, + models::api::DiskProblem, + models::api::DiskStatus, + models::api::DiskStatusName, + models::api::DiskCount, + models::api::NodeProblem, + models::api::NodeStatus, + models::api::NodeStatusName, + models::api::NodeCount, + models::api::ReplicaProblem, + models::api::ReplicaStatus, + models::api::SpaceInfo, + models::api::VDiskStatus, + models::api::Operation, + models::api::RPS, + models::api::RawMetricEntry, + models::api::TypedMetrics, + connector::dto::MetricsEntryModel, + connector::dto::MetricsSnapshotModel, + connector::dto::NodeConfiguration + ) 
), tags( (name = "bob", description = "BOB management API") - ) + ), + modifiers(&SecurityAddon) ))] pub struct ApiDoc; -// [TEMP] -// TODO: Remove when the actual API will be implemented -#[allow(clippy::unused_async)] -#[cfg_attr(all(feature = "swagger", debug_assertions), utoipa::path( - get, - context_path = ApiV1::to_path(), - path = "/root", - responses( - (status = 200, description = "Hello Bob!") - ) - ))] -pub async fn root() -> &'static str { - "Hello Bob!" -} /// Generate openapi documentation for the project /// /// # Panics @@ -78,11 +107,12 @@ pub fn openapi_doc() -> Router { } pub mod prelude { + pub use crate::ApiDoc; pub use crate::{ connector::{ client::Client, context::{ClientContext, ContextWrapper, DropContextService}, - BobClient, + dto, BobClient, }, error::AppError, models::{ @@ -91,7 +121,6 @@ pub mod prelude { }, router::{ApiV1, ApiVersion, RouteError, RouterApiExt}, services::auth::HttpBobClient, - ApiDoc, }; pub use axum::{ async_trait, @@ -102,26 +131,34 @@ pub mod prelude { pub use error_stack::{Context, Report, Result, ResultExt}; pub use hyper::{client::HttpConnector, Body, Method, Request, StatusCode}; pub use serde::{Deserialize, Serialize}; - pub use std::{collections::HashMap, hash::Hash, marker::PhantomData, str::FromStr}; + pub use std::{ + collections::{HashMap, HashSet}, + hash::Hash, + marker::PhantomData, + str::FromStr, + sync::Arc, + }; pub use thiserror::Error; #[cfg(all(feature = "swagger", debug_assertions))] - pub use utoipa::{IntoParams, OpenApi, ToSchema}; + pub use utoipa::{ + openapi::security::{ApiKey, ApiKeyValue, SecurityScheme}, + IntoParams, Modify, OpenApi, PartialSchema, ToSchema, + }; pub use uuid::Uuid; } pub mod main { pub mod prelude { + pub use crate::ApiDoc; pub use crate::{ config::{ConfigExt, LoggerExt}, models::shared::RequestTimeout, prelude::*, - root, router::{ApiV1, ApiVersion, NoApi, RouterApiExt}, services::{ api_router_v1, auth::{require_auth, AuthState, BobUser, HttpBobClient, 
InMemorySessionStore}, }, - ApiDoc, }; pub use axum::{ error_handling::HandleErrorLayer, middleware::from_fn_with_state, BoxError, Extension, diff --git a/backend/src/main.rs b/backend/src/main.rs index 9c1c3a9b..890918e4 100644 --- a/backend/src/main.rs +++ b/backend/src/main.rs @@ -50,8 +50,10 @@ fn router(config: &Config) -> Router { .with_http_only(false), ); - let user_store: InMemorySessionStore = InMemorySessionStore::default(); - let auth_state = AuthState::new(user_store); + let auth_state = AuthState::new( + InMemorySessionStore::default(), + InMemorySessionStore::default(), + ); let mut frontend = env::current_exe().expect("Couldn't get current executable path."); frontend.pop(); @@ -80,8 +82,10 @@ mod tests { use bob_management::services::api_router_v1; #[test] fn register_routes() { - let user_store: InMemorySessionStore = InMemorySessionStore::default(); - let auth_state = AuthState::new(user_store); + let auth_state = AuthState::new( + InMemorySessionStore::default(), + InMemorySessionStore::default(), + ); let _ = api_router_v1(auth_state).expect("Router has invalid API methods"); } } diff --git a/backend/src/models/api.rs b/backend/src/models/api.rs index 8b137891..8acc77d1 100644 --- a/backend/src/models/api.rs +++ b/backend/src/models/api.rs @@ -1 +1,559 @@ +#![allow(unused_qualifications)] +use super::prelude::*; + +pub const DEFAULT_MAX_CPU: u64 = 90; +pub const DEFAULT_MIN_FREE_SPACE_PERCENTAGE: f64 = 0.1; + +/// Connection Data +pub use crate::models::shared::{BobConnectionData, Credentials}; + +/// Defines kind of problem on disk +#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Serialize, Deserialize, Hash)] +#[cfg_attr(all(feature = "swagger", debug_assertions), derive(ToSchema))] +#[tsync] +pub enum DiskProblem { + #[serde(rename = "freeSpaceRunningOut")] + FreeSpaceRunningOut, +} + +/// Defines disk status +/// +/// Variant - Disk Status +/// Content - List of problems on disk. 
'null' if status != 'bad' +#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize, Hash)] +#[serde(tag = "status", content = "problems")] +#[cfg_attr(all(feature = "swagger", debug_assertions), derive(ToSchema))] +#[tsync] +pub enum DiskStatus { + #[serde(rename = "good")] + Good, + #[serde(rename = "bad")] + Bad(Vec), + #[serde(rename = "offline")] + Offline, +} + +impl DiskStatus { + #[must_use] + pub fn from_space_info(space: &dto::SpaceInfo, disk_name: &str) -> Self { + if let Some(&occupied_space) = space.occupied_disk_space_by_disk.get(disk_name) { + #[allow(clippy::cast_precision_loss)] + if ((space.total_disk_space_bytes - occupied_space) as f64 + / space.total_disk_space_bytes as f64) + < DEFAULT_MIN_FREE_SPACE_PERCENTAGE + { + Self::Bad(vec![DiskProblem::FreeSpaceRunningOut]) + } else { + Self::Good + } + } else { + Self::Offline + } + } +} + +/// Defines disk status names +#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize, Hash, EnumIter)] +#[serde(rename_all = "camelCase")] +#[cfg_attr(all(feature = "swagger", debug_assertions), derive(ToSchema))] +#[tsync] +pub enum DiskStatusName { + Good, + Bad, + Offline, +} + +/// Defines kind of problem on Node +#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Serialize, Deserialize, Hash)] +#[cfg_attr(all(feature = "swagger", debug_assertions), derive(ToSchema))] +#[tsync] +pub enum NodeProblem { + #[serde(rename = "aliensExists")] + AliensExists, + #[serde(rename = "corruptedExists")] + CorruptedExists, + #[serde(rename = "freeSpaceRunningOut")] + FreeSpaceRunningOut, + #[serde(rename = "virtualMemLargerThanRAM")] + VirtualMemLargerThanRAM, + #[serde(rename = "highCPULoad")] + HighCPULoad, +} + +impl NodeProblem { + #[must_use] + pub fn default_from_metrics(node_metrics: &TypedMetrics) -> Vec { + Self::from_metrics( + node_metrics, + DEFAULT_MAX_CPU, + DEFAULT_MIN_FREE_SPACE_PERCENTAGE, + ) + } + + #[must_use] + #[allow(clippy::cast_precision_loss)] + pub fn from_metrics( + node_metrics: 
&TypedMetrics, + max_cpu: u64, + min_free_space_perc: f64, + ) -> Vec { + let mut res = vec![]; + if node_metrics[RawMetricEntry::BackendAlienCount].value != 0 { + res.push(Self::AliensExists); + } + if node_metrics[RawMetricEntry::BackendCorruptedBlobCount].value != 0 { + res.push(Self::CorruptedExists); + } + if node_metrics[RawMetricEntry::HardwareBobCpuLoad].value >= max_cpu { + res.push(Self::HighCPULoad); + } + if (1. + - (node_metrics[RawMetricEntry::HardwareTotalSpace].value + - node_metrics[RawMetricEntry::HardwareFreeSpace].value) as f64 + / node_metrics[RawMetricEntry::HardwareTotalSpace].value as f64) + < min_free_space_perc + { + res.push(Self::FreeSpaceRunningOut); + } + if node_metrics[RawMetricEntry::HardwareBobVirtualRam] + > node_metrics[RawMetricEntry::HardwareTotalRam] + { + res.push(Self::VirtualMemLargerThanRAM); + } + + res + } +} + +/// Defines status of node +/// +/// Variants - Node status +/// +/// Content - List of problems on node. 'null' if status != 'bad' +#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize, Hash)] +#[serde(tag = "status", content = "problems")] +#[cfg_attr(all(feature = "swagger", debug_assertions), derive(ToSchema))] +#[tsync] +pub enum NodeStatus { + #[serde(rename = "good")] + Good, + #[serde(rename = "bad")] + Bad(Vec), + #[serde(rename = "offline")] + Offline, +} + +impl NodeStatus { + #[must_use] + pub fn from_problems(problems: Vec) -> Self { + if problems.is_empty() { + Self::Good + } else { + Self::Bad(problems) + } + } +} + +impl TypedMetrics { + #[allow(clippy::cast_precision_loss)] + #[must_use] + pub fn is_bad_node(&self) -> bool { + self[RawMetricEntry::BackendAlienCount].value != 0 + || self[RawMetricEntry::BackendCorruptedBlobCount].value != 0 + || self[RawMetricEntry::HardwareBobCpuLoad].value >= DEFAULT_MAX_CPU + || (1. 
+ - (self[RawMetricEntry::HardwareTotalSpace].value + - self[RawMetricEntry::HardwareFreeSpace].value) as f64 + / self[RawMetricEntry::HardwareTotalSpace].value as f64) + < DEFAULT_MIN_FREE_SPACE_PERCENTAGE + || self[RawMetricEntry::HardwareBobVirtualRam] > self[RawMetricEntry::HardwareTotalRam] + } +} + +/// Defines node status names +#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize, Hash, EnumIter)] +#[serde(rename_all = "camelCase")] +#[cfg_attr(all(feature = "swagger", debug_assertions), derive(ToSchema))] +#[tsync] +pub enum NodeStatusName { + Good, + Bad, + Offline, +} + +/// Reasons why Replica is offline +#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Serialize, Deserialize)] +#[cfg_attr(all(feature = "swagger", debug_assertions), derive(ToSchema))] +#[tsync] +pub enum ReplicaProblem { + #[serde(rename = "nodeUnavailable")] + NodeUnavailable, + #[serde(rename = "diskUnavailable")] + DiskUnavailable, +} + +/// Replica status. It's either good or offline with the reasons why it is offline +/// +/// Variants - Replica status +/// +/// Content - List of problems on replica. 'null' if status != 'offline' +#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] +#[serde(tag = "status", content = "problems")] +#[cfg_attr(all(feature = "swagger", debug_assertions), derive(ToSchema))] +#[tsync] +pub enum ReplicaStatus { + #[serde(rename = "good")] + Good, + #[serde(rename = "offline")] + Offline(Vec), +} + +/// Disk space information in bytes +#[derive(Debug, Default, Clone, Eq, PartialEq, Serialize, Deserialize)] +#[cfg_attr(all(feature = "swagger", debug_assertions), derive(ToSchema))] +#[tsync] +pub struct SpaceInfo { + /// Total disk space amount + pub total_disk: u64, + + /// The amount of free disk space + pub free_disk: u64, + + /// Used disk space amount + pub used_disk: u64, + + /// Disk space occupied only by BOB. occupied_disk should be lesser than used_disk + pub occupied_disk: u64, +} + +/// Virtual disk status. 
+/// +/// Variants - Virtual Disk status +/// status == 'bad' when at least one of its replicas has problems +#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] +#[serde(tag = "status")] +#[cfg_attr(all(feature = "swagger", debug_assertions), derive(ToSchema))] +#[cfg_attr(all(feature = "swagger", debug_assertions), + schema(example = json!({"status": "good"})))] +#[tsync] +pub enum VDiskStatus { + #[serde(rename = "good")] + Good, + #[serde(rename = "bad")] + Bad, + #[serde(rename = "offline")] + Offline, +} + +/// Types of operations on BOB cluster +#[derive(Debug, Clone, Serialize, Deserialize, Hash, Eq, PartialEq, PartialOrd, Ord, EnumIter)] +#[cfg_attr(all(feature = "swagger", debug_assertions), derive(ToSchema))] +#[serde(rename_all = "camelCase")] +#[tsync] +pub enum Operation { + Put, + Get, + Exist, + Delete, +} + +#[derive(Clone, Debug, Serialize, Deserialize, Hash, Eq, PartialEq, PartialOrd, Ord, EnumIter)] +#[cfg_attr(all(feature = "swagger", debug_assertions), derive(ToSchema))] +#[tsync] +pub enum RawMetricEntry { + #[serde(rename = "cluster_grinder.get_count_rate")] + ClusterGrinderGetCountRate, + #[serde(rename = "cluster_grinder.put_count_rate")] + ClusterGrinderPutCountRate, + #[serde(rename = "cluster_grinder.exist_count_rate")] + ClusterGrinderExistCountRate, + #[serde(rename = "cluster_grinder.delete_count_rate")] + ClusterGrinderDeleteCountRate, + #[serde(rename = "pearl.exist_count_rate")] + PearlExistCountRate, + #[serde(rename = "pearl.get_count_rate")] + PearlGetCountRate, + #[serde(rename = "pearl.put_count_rate")] + PearlPutCountRate, + #[serde(rename = "pearl.delete_count_rate")] + PearlDeleteCountRate, + #[serde(rename = "backend.alien_count")] + BackendAlienCount, + #[serde(rename = "backend.corrupted_blob_count")] + BackendCorruptedBlobCount, + #[serde(rename = "hardware.bob_virtual_ram")] + HardwareBobVirtualRam, + #[serde(rename = "hardware.total_ram")] + HardwareTotalRam, + #[serde(rename = "hardware.used_ram")] + 
HardwareUsedRam, + #[serde(rename = "hardware.bob_cpu_load")] + HardwareBobCpuLoad, + #[serde(rename = "hardware.free_space")] + HardwareFreeSpace, + #[serde(rename = "hardware.total_space")] + HardwareTotalSpace, + #[serde(rename = "hardware.descr_amount")] + HardwareDescrAmount, +} + +#[allow(dead_code, clippy::expect_used)] +#[cfg(all(feature = "swagger", debug_assertions))] +fn get_map_schema( +) -> Object { + let mut res = ObjectBuilder::new(); + let mut example = serde_json::Map::new(); + for key in Id::iter() { + let key = serde_json::to_string(&key).expect("infallible"); + let key = key.trim_matches('"'); + res = res.required(key).property(key, V::schema()); + example.insert( + key.to_string(), + serde_json::to_value(V::default()).expect("infallible"), + ); + } + res.example(serde_json::to_value(example).ok()).build() +} + +// #[cfg(not(all(feature = "swagger", debug_assertions)))] +#[tsync] +pub type RPS = TypedMap; +// #[cfg(not(all(feature = "swagger", debug_assertions)))] +#[tsync] +pub type TypedMetrics = TypedMap; +// #[cfg(not(all(feature = "swagger", debug_assertions)))] +#[tsync] +pub type NodeCount = TypedMap; +// #[cfg(not(all(feature = "swagger", debug_assertions)))] +#[tsync] +pub type DiskCount = TypedMap; + +impl RPS { + #[must_use] + pub fn from_metrics(metrics: &TypedMetrics) -> Self { + let mut rps = Self::new(); + rps[Operation::Get] += metrics[RawMetricEntry::ClusterGrinderGetCountRate].value; + rps[Operation::Delete] += metrics[RawMetricEntry::ClusterGrinderDeleteCountRate].value; + rps[Operation::Exist] += metrics[RawMetricEntry::ClusterGrinderExistCountRate].value; + rps[Operation::Put] += metrics[RawMetricEntry::ClusterGrinderPutCountRate].value; + + rps + } +} + +impl AddAssign for RPS { + fn add_assign(&mut self, rhs: Self) { + self[Operation::Get] += rhs[Operation::Get]; + self[Operation::Delete] += rhs[Operation::Delete]; + self[Operation::Exist] += rhs[Operation::Exist]; + self[Operation::Put] += rhs[Operation::Put]; + } +} + 
+impl Add for RPS { + type Output = Self; + + fn add(mut self, rhs: Self) -> Self::Output { + self += rhs; + + self + } +} + +#[derive(Debug, Serialize, Clone)] +// #[cfg_attr(all(feature = "swagger", debug_assertions), derive(ToSchema))] +// #[cfg_attr(all(feature = "swagger", debug_assertions), +// aliases( +// RPS = TypedMap, +// TypedMetrics = TypedMap, +// NodeCount = TypedMap, +// DiskCount = TypedMap +// ) +// )] +// #[cfg_attr(all(feature = "swagger", debug_assertions), +// schema(example = json!({"put": 7, "get": 8, "delete": 2, "exist": 3})))] +#[tsync] +pub struct TypedMap { + // FIXME: Bugged; Remove manual impl's of `ToSchema` and uncomment when fixed + // See -> https://github.com/juhaku/utoipa/issues/644 + // #[schema(schema_with = get_map_schema::)] + // #[serde(flatten)] // NOTE: tsync doesn't respect serde flatten directive. Removed for compatibility + map: HashMap, +} + +// FIXME: Remove this when utoipa's bug fixed +#[cfg(all(feature = "swagger", debug_assertions))] +impl< + 'a, + Id: IntoEnumIterator + Eq + Hash + Serialize, + Value: PartialSchema + Default + Serialize, + > utoipa::ToSchema<'a> for TypedMap +{ + fn schema() -> ( + &'a str, + utoipa::openapi::RefOr, + ) { + ( + std::any::type_name::(), + get_map_schema::().into(), + ) + } + + fn aliases() -> Vec<(&'a str, utoipa::openapi::schema::Schema)> { + vec![ + ("RPS", { + let mut schema = get_map_schema::(); + let _ = schema + .description + .insert("Requests per second by operation".to_string()); + schema.into() + }), + ("TypedMetrics", { + let mut schema = get_map_schema::(); + let _ = schema + .description + .insert("Raw metrics information".to_string()); + schema.into() + }), + ("NodeCount", { + let mut schema = get_map_schema::(); + let _ = schema + .description + .insert("Node count by their status".to_string()); + schema.into() + }), + ("DiskCount", { + let mut schema = get_map_schema::(); + let _ = schema + .description + .insert("Disk count by their status".to_string()); + 
schema.into() + }), + ] + } +} + +// pub type TypedMetrics = TypedMap; + +impl std::ops::Index for TypedMap { + type Output = V; + + fn index(&self, index: Id) -> &Self::Output { + self.map.index(&index) + } +} + +#[allow(clippy::expect_used)] +impl std::ops::IndexMut for TypedMap { + fn index_mut(&mut self, index: Id) -> &mut Self::Output { + self.map.get_mut(&index).expect("infallible") + } +} + +impl Default for TypedMap { + fn default() -> Self { + let mut map = HashMap::new(); + for key in Id::iter() { + map.insert(key, V::default()); + } + + Self { map } + } +} + +impl TypedMap { + #[must_use] + pub fn new() -> Self { + Self::default() + } +} + +pub trait Util { + fn key_iter() -> Id::Iterator; +} + +impl Util for TypedMap { + fn key_iter() -> Id::Iterator { + Id::iter() + } +} + +#[allow(clippy::expect_used)] +impl From for TypedMetrics { + fn from(value: dto::MetricsSnapshotModel) -> Self { + let mut map = HashMap::new(); + let mut value = value.metrics; + for key in RawMetricEntry::iter() { + let value = value + .remove(&serde_json::to_string(&key).expect("infallible")) + .unwrap_or_default(); + map.insert(key, value); + } + + Self { map } + } +} + +#[cfg(test)] +mod tests { + use super::{ + DiskCount, DiskStatusName, NodeCount, NodeStatusName, Operation, RawMetricEntry, + TypedMetrics, RPS, + }; + use crate::connector::dto::MetricsEntryModel; + use strum::IntoEnumIterator; + + #[test] + fn raw_metrics_entry_iter() { + for key in RawMetricEntry::iter() { + assert!(serde_json::to_string(&key).is_ok()); + } + } + + #[test] + fn disk_status_iter() { + for key in DiskStatusName::iter() { + assert!(serde_json::to_string(&key).is_ok()); + } + } + + #[test] + fn node_status_iter() { + for key in NodeStatusName::iter() { + assert!(serde_json::to_string(&key).is_ok()); + } + } + + #[test] + fn metrics_index() { + let metrics = TypedMetrics::default(); + for key in RawMetricEntry::iter() { + assert_eq!(metrics[key], MetricsEntryModel::default()); + } + } + + #[test] 
+ fn node_count_index() { + let node_count = NodeCount::default(); + for key in NodeStatusName::iter() { + assert_eq!(node_count[key], 0); + } + } + + #[test] + fn disk_count_index() { + let disk_count = DiskCount::default(); + for key in DiskStatusName::iter() { + assert_eq!(disk_count[key], 0); + } + } + + #[test] + fn rps_index() { + let rps = RPS::default(); + for key in Operation::iter() { + assert_eq!(rps[key], 0); + } + } +} diff --git a/backend/src/models/mod.rs b/backend/src/models/mod.rs index 1f832bdb..77251483 100644 --- a/backend/src/models/mod.rs +++ b/backend/src/models/mod.rs @@ -5,6 +5,13 @@ pub mod shared; pub mod prelude { pub use crate::prelude::*; pub use hyper::Uri; - pub use std::{net::SocketAddr, time::Duration}; + pub use std::{ + net::SocketAddr, + ops::{Add, AddAssign}, + time::Duration, + }; + pub use strum::{EnumIter, IntoEnumIterator}; pub use tsync::tsync; + #[cfg(all(feature = "swagger", debug_assertions))] + pub use utoipa::openapi::{Object, ObjectBuilder}; } diff --git a/backend/src/models/shared.rs b/backend/src/models/shared.rs index b57627cb..b625feb7 100644 --- a/backend/src/models/shared.rs +++ b/backend/src/models/shared.rs @@ -3,6 +3,7 @@ use std::result::Result; #[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] #[cfg_attr(all(feature = "swagger", debug_assertions), derive(ToSchema))] +#[cfg_attr(all(feature = "swagger", debug_assertions), schema(value_type = String))] pub struct Hostname( #[serde( deserialize_with = "hyper_serde::deserialize", @@ -77,14 +78,18 @@ pub struct BobConnectionData { pub hostname: Hostname, /// [Optional] Credentials used for BOB authentication - #[serde(skip_serializing_if = "Option::is_none")] + // #[serde(skip_serializing_if = "Option::is_none")] pub credentials: Option, } /// Optional auth credentials for a BOB cluster #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Default, Serialize, Deserialize)] -#[cfg_attr(all(feature = "swagger", debug_assertions), 
derive(ToSchema))] -#[cfg_attr(all(feature = "swagger", debug_assertions), schema(example = json!({"login": "archeoss", "password": "12345"})))] +#[cfg_attr( + all(feature = "swagger", debug_assertions), + derive(IntoParams, ToSchema) +)] +#[cfg_attr(all(feature = "swagger", debug_assertions), + schema(example = json!({"login": "archeoss", "password": "12345"})))] #[tsync] pub struct Credentials { /// Login used during auth diff --git a/backend/src/services/api.rs b/backend/src/services/api.rs new file mode 100644 index 00000000..cd04edaa --- /dev/null +++ b/backend/src/services/api.rs @@ -0,0 +1,227 @@ +use super::prelude::*; + +/// Returns count of Physical Disks per status +#[cfg_attr(all(feature = "swagger", debug_assertions), + utoipa::path( + get, + context_path = ApiV1::to_path(), + path = "/disks/count", + responses( + ( + status = 200, body = DiskCount, + content_type = "application/json", + description = "Returns a list with count of physical disks per status" + ), + (status = 401, description = "Unauthorized") + ), + security(("api_key" = [])) +))] +#[tracing::instrument(ret, skip(client), level = "info", fields(method = "GET"))] +pub async fn get_disks_count(Extension(client): Extension) -> Json { + tracing::info!("get /disks/count : {:?}", client); + + let mut space: FuturesUnordered<_> = client + .cluster() + .map(move |node| { + let handle = node.clone(); + tokio::spawn(async move { (handle.get_disks().await, handle.get_space_info().await) }) + }) + .collect(); + + let mut count = DiskCount::new(); + + while let Some(res) = space.next().await { + let Ok((disks, space)) = res else { + tracing::warn!("couldn't finish request: tokio task failed. Err: {res:?}"); + continue; + }; + let Ok(GetSpaceInfoResponse::SpaceInfo(space)) = space else { + tracing::warn!("couldn't finish getSpace request. 
{space:?}"); + continue; + }; + let disks = match disks { + Ok(GetDisksResponse::AJSONArrayWithDisksAndTheirStates(disks)) => disks, + Ok(GetDisksResponse::PermissionDenied(err)) => { + count[DiskStatusName::Offline] += 1; + tracing::warn!("Permission Denied. Err: {err:?}"); + continue; + } + Err(err) => { + count[DiskStatusName::Offline] += 1; + tracing::warn!("couldn't finish getDisks request. Err: {err}"); + continue; + } + }; + let mut active = 0; + disks.iter().filter(|disk| disk.is_active).for_each(|disk| { + active += 1; + match DiskStatus::from_space_info(&space, &disk.name) { + DiskStatus::Good => count[DiskStatusName::Good] += 1, + DiskStatus::Offline => count[DiskStatusName::Offline] += 1, + DiskStatus::Bad(_) => count[DiskStatusName::Bad] += 1, + } + }); + count[DiskStatusName::Offline] = (disks.len() - active) as u64; + } + tracing::info!("total disks count: {count:?}"); + + Json(count) +} + +/// Get Nodes count per Status +#[cfg_attr(all(feature = "swagger", debug_assertions), + utoipa::path( + get, + context_path = ApiV1::to_path(), + path = "/nodes/count", + responses( + ( + status = 200, body = NodeCount, + content_type = "application/json", + description = "Node count list per status" + ), + (status = 401, description = "Unauthorized") + ), + security(("api_key" = [])) +))] +#[tracing::instrument(ret, skip(client), level = "info", fields(method = "GET"))] +pub async fn get_nodes_count(Extension(client): Extension) -> Json { + tracing::info!("get /nodes/count : {:?}", client); + + let mut metrics: FuturesUnordered<_> = client + .cluster() + .map(move |node| { + let handle = node.clone(); + tokio::spawn(async move { handle.get_metrics().await }) + }) + .collect(); + + let mut count = NodeCount::new(); + + while let Some(res) = metrics.next().await { + if let Ok(Ok(GetMetricsResponse::Metrics(metrics))) = res { + tracing::trace!("metrics received successfully"); + if Into::::into(metrics).is_bad_node() { + count[NodeStatusName::Bad] += 1; + } else { 
+ count[NodeStatusName::Good] += 1; + } + } else { + tracing::warn!("couldn't receive metrics from node"); // TODO: Some better message + count[NodeStatusName::Offline] += 1; + } + } + tracing::info!("total nodes per status count: {count:?}"); + + Json(count) +} + +/// Returns Total RPS on cluster +#[cfg_attr(all(feature = "swagger", debug_assertions), + utoipa::path( + get, + context_path = ApiV1::to_path(), + path = "/nodes/rps", + responses( + ( + status = 200, body = RPS, + content_type = "application/json", + description = "RPS list per operation on all nodes" + ), + (status = 401, description = "Unauthorized") + ), + security(("api_key" = [])) +))] +#[tracing::instrument(ret, skip(client), level = "info", fields(method = "GET"))] +pub async fn get_rps(Extension(client): Extension) -> Json { + tracing::info!("get /nodes/rps : {:?}", client); + + let mut metrics: FuturesUnordered<_> = client + .cluster() + .map(move |node| { + let handle = node.clone(); + tokio::spawn(async move { handle.get_metrics().await }) + }) + .collect(); + + let mut rps = RPS::new(); + while let Some(res) = metrics.next().await { + if let Ok(Ok(metrics)) = res { + tracing::info!("metrics received successfully"); + let GetMetricsResponse::Metrics(metrics) = metrics; + rps += RPS::from_metrics(&metrics.into()); + } else { + tracing::warn!("couldn't receive metrics from node"); // TODO: Some better message + } + } + tracing::info!("total rps: {rps:?}"); + + Json(rps) +} + +/// Return inforamtion about space on cluster +#[cfg_attr(all(feature = "swagger", debug_assertions), + utoipa::path( + get, + context_path = ApiV1::to_path(), + path = "/nodes/space", + responses( + (status = 200, body = SpaceInfo, content_type = "application/json", description = "Cluster Space Information"), + (status = 401, description = "Unauthorized") + ), + security(("api_key" = [])) +))] +#[tracing::instrument(ret, skip(client), level = "info", fields(method = "GET"))] +pub async fn get_space(Extension(client): 
Extension) -> Json { + tracing::info!("get /space : {:?}", client); + let mut spaces: FuturesUnordered<_> = client + .cluster() + .map(move |node| { + let handle = node.clone(); + tokio::spawn(async move { handle.get_space_info().await }) + }) + .collect(); + + let mut total_space = SpaceInfo::default(); + while let Some(res) = spaces.next().await { + if let Ok(Ok(space)) = res { + tracing::info!("space info received successfully"); + let GetSpaceInfoResponse::SpaceInfo(space) = space; + total_space.total_disk += space.total_disk_space_bytes; + total_space.free_disk += space.free_disk_space_bytes; + total_space.used_disk += space.total_disk_space_bytes - space.free_disk_space_bytes; + total_space.occupied_disk += space.occupied_disk_space_bytes; + } else { + tracing::warn!("couldn't receive space info from node"); // Some better message + } + } + tracing::trace!("send response: {total_space:?}"); + + Json(total_space) +} + +#[allow(clippy::cast_precision_loss)] +fn is_bad_node(node_metrics: &TypedMetrics) -> bool { + node_metrics[RawMetricEntry::BackendAlienCount].value != 0 + || node_metrics[RawMetricEntry::BackendCorruptedBlobCount].value != 0 + || node_metrics[RawMetricEntry::HardwareBobCpuLoad].value >= DEFAULT_MAX_CPU + || (1. 
+ - (node_metrics[RawMetricEntry::HardwareTotalSpace].value + - node_metrics[RawMetricEntry::HardwareFreeSpace].value) as f64 + / node_metrics[RawMetricEntry::HardwareTotalSpace].value as f64) + < DEFAULT_MIN_FREE_SPACE_PERCENTAGE + || node_metrics[RawMetricEntry::HardwareBobVirtualRam] + > node_metrics[RawMetricEntry::HardwareTotalRam] +} + +#[allow(clippy::cast_precision_loss)] +fn disk_status_from_space(space: &dto::SpaceInfo, occupied_space: u64) -> DiskStatus { + if ((space.total_disk_space_bytes - occupied_space) as f64 + / space.total_disk_space_bytes as f64) + < DEFAULT_MIN_FREE_SPACE_PERCENTAGE + { + DiskStatus::Bad(vec![DiskProblem::FreeSpaceRunningOut]) + } else { + DiskStatus::Good + } +} diff --git a/backend/src/services/auth.rs b/backend/src/services/auth.rs index 772ffb71..d5d5822a 100644 --- a/backend/src/services/auth.rs +++ b/backend/src/services/auth.rs @@ -8,7 +8,7 @@ pub type HttpClient = ContextWrapper< >, ClientContext, >; -pub type HttpBobClient = BobClient; +pub type HttpBobClient = BobClient; #[derive(Debug, Error)] pub enum AuthError { @@ -68,7 +68,7 @@ pub async fn login( Extension(request_timeout): Extension, Json(bob): Json, ) -> AxumResult { - let bob_client = BobClient::::try_new(bob.clone(), request_timeout) + let bob_client = BobClient::<_, HttpClient>::try_new(bob.clone(), request_timeout) .await .map_err(|err| { tracing::error!("{err:?}"); @@ -109,7 +109,13 @@ pub async fn login( tracing::error!("{err:?}"); StatusCode::UNAUTHORIZED })?; - auth.client_store.insert(*bob_client.id(), bob_client); + auth.client_store + .save(*bob_client.id(), bob_client) + .await + .map_err(|err| { + tracing::error!("{err:?}"); + StatusCode::INTERNAL_SERVER_ERROR + })?; } Ok(res) @@ -147,38 +153,43 @@ pub struct BobUser { } #[derive(Debug, Clone)] -pub struct AuthState { +pub struct AuthState { user_store: UserStore, - client_store: HashMap, + client_store: ClientStore, _user: PhantomData, + _id: PhantomData, } -impl AuthState { - pub fn 
new(user_store: UserStore) -> Self { +impl AuthState { + pub const fn new(user_store: UserStore, client_store: ClientStore) -> Self { Self { user_store, _user: PhantomData, - client_store: HashMap::new(), + client_store, + _id: PhantomData, } } } #[derive(Debug, Clone)] -pub struct AuthStore +pub struct AuthStore where SessionStore: Store, { session: Session, auth_data: AuthData, user_store: SessionStore, - client_store: HashMap, + client_store: ClientStore, + _client: PhantomData, } -impl AuthStore +impl + AuthStore where User: Clone + Serialize + for<'a> Deserialize<'a> + Sync + Send, Id: Clone + Serialize + for<'a> Deserialize<'a> + Sync + Send, SessionStore: Store + Sync + Send, + ClientStore: Store + Sync + Send, Client: Send, { async fn login(&mut self, user_id: &Id) -> Result<(), AuthError> { @@ -209,11 +220,13 @@ where } } -impl AuthStore +impl + AuthStore where User: Clone + Serialize + for<'a> Deserialize<'a>, Id: Clone + Serialize + for<'a> Deserialize<'a>, SessionStore: Store, + ClientStore: Store, { const AUTH_DATA_KEY: &'static str = "_auth_data"; /// Update session of this [`AuthStore`]. 
@@ -230,13 +243,15 @@ where // NOTE: async_trait is used in `FromRequestParts` declaration, so we still need to use it here #[async_trait] -impl FromRequestParts for AuthStore +impl FromRequestParts + for AuthStore where S: Send + Sync, User: Serialize + for<'a> Deserialize<'a> + Clone + Send, Id: Serialize + for<'a> Deserialize<'a> + Clone + Send + Sync, UserStore: Store + Send + Sync, - AuthState: FromRef, + ClientStore: Store + Send + Sync, + AuthState: FromRef, Client: Send, { type Rejection = (StatusCode, &'static str); @@ -280,6 +295,7 @@ where auth_data, user_store, client_store, + _client: PhantomData, }) } } @@ -289,19 +305,30 @@ where /// # Errors /// /// This function will return an error if a protected route was called from unauthorized context -pub async fn require_auth( - auth: AuthStore, - request: Request, +pub async fn require_auth( + auth: AuthStore, + mut request: Request, next: Next, ) -> std::result::Result where User: Serialize + for<'a> Deserialize<'a> + Clone + Send + Sync, - Id: Serialize + for<'a> Deserialize<'a> + Clone + Send + Sync, + Id: Serialize + for<'a> Deserialize<'a> + Clone + Send + Sync + Hash + Eq, UserStore: Store + Send + Sync, - Client: Send, + ClientStore: Store + Send + Sync, + Client: Send + Sync + Clone + 'static, Body: Send + Sync, { - if auth.user().is_some() { + if let Some(id) = &auth.auth_data.user_id { + request.extensions_mut().insert( + auth.client_store + .load(id) + .await + .map_err(|err| { + tracing::error!("{err:?}"); + StatusCode::UNAUTHORIZED + })? 
+ .ok_or(StatusCode::UNAUTHORIZED)?, + ); let response = next.run(request).await; Ok(response) } else { @@ -335,9 +362,16 @@ where } } -pub type BobAuth = AuthStore>; +pub type BobAuth = AuthStore< + BobUser, + Uuid, + Client, + InMemorySessionStore, + InMemorySessionStore, +>; -#[cfg_attr(feature = "swagger", utoipa::path( +#[cfg_attr(all(feature = "swagger", debug_assertions), + utoipa::path( post, context_path = ApiV1::to_path(), path = "/logout", diff --git a/backend/src/services/methods.rs b/backend/src/services/methods.rs new file mode 100644 index 00000000..9bfc23d2 --- /dev/null +++ b/backend/src/services/methods.rs @@ -0,0 +1,163 @@ +use super::prelude::*; + +/// Fetches metrics from `ApiNoContext` instance. +/// +/// # Errors +/// +/// This function will return an error if the request to the specified client failed +pub async fn fetch_metrics< + Context: Send + Sync, + ApiInterface: ApiNoContext + Send + Sync, +>( + client: &ApiInterface, +) -> AxumResult { + let GetMetricsResponse::Metrics(metrics) = client.get_metrics().await.map_err(|err| { + tracing::error!("{err}"); + APIError::RequestFailed + })?; + + Ok(metrics) +} + +/// Fetches vdisks information from `ApiNoContext` instance. +/// +/// # Errors +/// +/// This function will return an error if the request to the specified client failed or the invalid +/// status code was received +pub async fn fetch_vdisks< + Context: Send + Sync, + ApiInterface: ApiNoContext + Send + Sync, +>( + client: &ApiInterface, +) -> AxumResult> { + let GetVDisksResponse::AJSONArrayOfVdisksInfo(virt_disks) = + client.get_v_disks().await.map_err(|err| { + tracing::error!("{err}"); + APIError::RequestFailed + })? + else { + return Err(APIError::InvalidStatusCode(StatusCode::FORBIDDEN).into()); + }; + + Ok(virt_disks) +} + +/// Fetches space information from `ApiNoContext` instance. +/// +/// # Errors +/// +/// This function will return an error if . 
+/// This function will return an error if the request to the specified client failed +pub async fn fetch_space_info< + Context: Send + Sync, + ApiInterface: ApiNoContext + Send + Sync, +>( + client: &ApiInterface, +) -> AxumResult { + let GetSpaceInfoResponse::SpaceInfo(space) = client.get_space_info().await.map_err(|err| { + tracing::error!("{err}"); + APIError::RequestFailed + })?; + + Ok(space) +} + +/// Fetches node status information from `ApiNoContext` instance. +/// +/// # Errors +/// +/// This function will return an error if . +/// This function will return an error if the request to the specified client failed +pub async fn fetch_node_status< + Context: Send + Sync, + ApiInterface: ApiNoContext + Send + Sync, +>( + client: &ApiInterface, +) -> AxumResult { + let GetStatusResponse::AJSONWithNodeInfo(node_status) = + client.get_status().await.map_err(|err| { + tracing::error!("{err}"); + APIError::RequestFailed + })?; + + Ok(node_status) +} + +/// Fetches disk information on some node from `ApiNoContext` instance. +/// +/// # Errors +/// +/// This function will return an error if the request to the specified client failed or the invalid +/// status code was received +pub async fn fetch_disks< + Context: Send + Sync, + ApiInterface: ApiNoContext + Send + Sync, +>( + client: &ApiInterface, +) -> AxumResult> { + let GetDisksResponse::AJSONArrayWithDisksAndTheirStates(disks) = + client.get_disks().await.map_err(|err| { + tracing::error!("{err}"); + APIError::RequestFailed + })? + else { + tracing::error!( + "client received invalid status code: {}", + StatusCode::FORBIDDEN + ); + return Err(APIError::InvalidStatusCode(StatusCode::FORBIDDEN).into()); + }; + + Ok(disks) +} + +/// Fetches configuration from `ApiNoContext` instance. 
+/// +/// # Errors +/// +/// This function will return an error if the request to the specified client failed or the invalid +/// status code was received +pub async fn fetch_configuration< + Context: Send + Sync, + ApiInterface: ApiNoContext + Send + Sync, +>( + client: &ApiInterface, +) -> AxumResult { + let GetConfigurationResponse::ConfigurationObject(configuration) = + client.get_configuration().await.map_err(|err| { + tracing::error!("couldn't get node's configuration: {err}"); + APIError::RequestFailed + })? + else { + tracing::error!("received invalid status code: {}", StatusCode::FORBIDDEN); + return Err(APIError::InvalidStatusCode(StatusCode::FORBIDDEN).into()); + }; + + Ok(configuration) +} + +/// Fetches all known nodes information from `ApiNoContext` instance. +/// +/// # Errors +/// +/// This function will return an error if the request to the specified client failed or the invalid +/// status code was received +pub async fn fetch_nodes< + Context: Send + Sync, + ApiInterface: ApiNoContext + Send + Sync, +>( + client: &ApiInterface, +) -> AxumResult> { + let GetNodesResponse::AJSONArrayOfNodesInfoAndVdisksOnThem(nodes) = + client.get_nodes().await.map_err(|err| { + tracing::error!("couldn't get nodes list from bob: {err}"); + APIError::RequestFailed + })? 
+ else { + tracing::error!("received invalid status code: {}", StatusCode::FORBIDDEN); + return Err(APIError::InvalidStatusCode(StatusCode::FORBIDDEN).into()); + }; + + Ok(nodes) +} diff --git a/backend/src/services/mod.rs b/backend/src/services/mod.rs index a22cd7c7..bbdf977c 100644 --- a/backend/src/services/mod.rs +++ b/backend/src/services/mod.rs @@ -1,25 +1,38 @@ mod prelude { - pub use crate::connector::ClientError; - pub use crate::prelude::*; - pub use axum::middleware::from_fn_with_state; + pub use crate::{ + connector::{ + api::{prelude::*, ApiNoContext}, + ClientError, + }, + models::api::*, + prelude::*, + }; pub use axum::{ extract::{FromRef, FromRequestParts}, http::request::Parts, - middleware::Next, + middleware::{from_fn_with_state, Next}, Router, }; + pub use futures::{stream::FuturesUnordered, StreamExt}; pub use std::sync::Arc; pub use tokio::sync::Mutex; pub use tower_sessions::Session; } +pub mod api; pub mod auth; +pub mod methods; -use crate::root; +use api::{get_disks_count, get_nodes_count, get_rps, get_space}; use auth::{login, logout, require_auth, AuthState, BobUser, HttpBobClient, InMemorySessionStore}; use prelude::*; -type BobAuthState = AuthState, HttpBobClient>; +type BobAuthState = AuthState< + BobUser, + Uuid, + InMemorySessionStore, + InMemorySessionStore, +>; /// Export all secured API routes /// @@ -30,7 +43,10 @@ type BobAuthState = AuthState pub fn api_router_v1(auth_state: BobAuthState) -> Result, RouteError> { Router::new() .with_context::() - .api_route("/root", &Method::GET, root) + .api_route("/disks/count", &Method::GET, get_disks_count) + .api_route("/nodes/count", &Method::GET, get_nodes_count) + .api_route("/nodes/rps", &Method::GET, get_rps) + .api_route("/nodes/space", &Method::GET, get_space) + .unwrap()? 
.route_layer(from_fn_with_state(auth_state, require_auth)) .with_context::() diff --git a/frontend/.eslintrc.cjs b/frontend/.eslintrc.cjs index b819085c..e40a2346 100644 --- a/frontend/.eslintrc.cjs +++ b/frontend/.eslintrc.cjs @@ -25,7 +25,7 @@ module.exports = { 'plugin:astro/recommended', ], plugins: ['react-refresh'], - ignorePatterns: ['dist', '.eslintrc.cjs'], + ignorePatterns: ['dist', '.eslintrc.cjs', 'rust.d.ts'], rules: { 'react-refresh/only-export-components': ['warn', { allowConstantExport: true }], }, diff --git a/frontend/astro.config.mjs b/frontend/astro.config.mjs index 780fea90..0befc1fb 100644 --- a/frontend/astro.config.mjs +++ b/frontend/astro.config.mjs @@ -6,6 +6,9 @@ import tailwind from '@astrojs/tailwind'; // https://astro.build/config export default defineConfig({ // output: 'server', + redirects: { + '/': '/dashboard', + }, integrations: [ react({ experimentalReactChildren: true, diff --git a/frontend/build.rs b/frontend/build.rs index eb102e52..2320761b 100644 --- a/frontend/build.rs +++ b/frontend/build.rs @@ -62,7 +62,7 @@ pub fn build_types() { inputs[0].push("backend"); inputs[1].push("frontend"); - inputs[1].push("frontend.rs"); + inputs[1].push("bindings.rs"); output.push("src/types/rust.d.ts"); tsync::generate_typescript_defs(inputs, output, false); diff --git a/frontend/package.json b/frontend/package.json index 16266b3e..4035dd67 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -33,10 +33,13 @@ "@types/react-dom": "^18.0.6", "@types/react-router-dom": "^5.3.3", "astro": "^3.0.2", + "axios": "^1.6.7", + "chart.js": "^4.4.1", "mui-image": "^1.0.7", "nanostores": "^0.9.5", "prop-types": "^15.8.1", "react": "^18.0.0", + "react-chartjs-2": "^5.2.0", "react-dom": "^18.0.0", "styled-components": "^6.1.1", "tailwindcss": "^3.0.24", diff --git a/frontend/src/components/backdrop/backdrop.tsx b/frontend/src/components/backdrop/backdrop.tsx new file mode 100644 index 00000000..75ae4cc9 --- /dev/null +++ 
b/frontend/src/components/backdrop/backdrop.tsx @@ -0,0 +1,23 @@ +import { Backdrop, CircularProgress } from '@mui/material'; +import { Box } from '@mui/system'; +import React from 'react'; + +const FetchingBackdrop = () => { + return ( + theme.zIndex.drawer + 1 }} open={true}> + +

Data is fetching. Please, wait...

+ +
+
+ ); +}; + +export default FetchingBackdrop; diff --git a/frontend/src/components/clusterRpsChart/clusterRpsChart.tsx b/frontend/src/components/clusterRpsChart/clusterRpsChart.tsx new file mode 100644 index 00000000..6513d2f7 --- /dev/null +++ b/frontend/src/components/clusterRpsChart/clusterRpsChart.tsx @@ -0,0 +1,84 @@ +import { + CategoryScale, + Chart as ChartJS, + Legend, + LinearScale, + LineElement, + PointElement, + Title, + Tooltip, +} from 'chart.js'; +import React from 'react'; +import { Line } from 'react-chartjs-2'; +ChartJS.register(CategoryScale, LinearScale, PointElement, LineElement, Title, Tooltip, Legend); + +const options = { + maintainAspectRatio: false, + responsive: true, + + plugins: { + legend: { + position: 'top' as const, + align: 'end' as 'end' | 'start' | 'center' | undefined, + labels: { + color: '#efefef', + font: { + size: 14, + }, + usePointStyle: true, + }, + }, + title: { + display: true, + text: 'Общий RPS кластера', + align: 'start' as 'end' | 'start' | 'center' | undefined, + color: '#efefef', + font: { + size: 16, + }, + }, + + scales: { + yAxes: [ + { + min: 0, + ticks: { + beginAtZero: true, + }, + gridLines: { + color: '#ffffff', + }, + }, + ], + xAxes: [ + { + type: 'time', + ticks: { + source: 'labels', + }, + gridLines: { + color: '#ffffff', + }, + }, + ], + }, + }, +}; + +const ClusterRpsChart = ({ timex, rpsy }: { timex: string[]; rpsy: number[] }) => { + const data = { + labels: timex, + datasets: [ + { + label: 'RPS', + data: rpsy, + borderColor: '#ED7246', + backgroundColor: '#ED7246', + }, + ], + }; + + return ; +}; + +export default ClusterRpsChart; diff --git a/frontend/src/components/crudChart/crudChart.tsx b/frontend/src/components/crudChart/crudChart.tsx new file mode 100644 index 00000000..70d05016 --- /dev/null +++ b/frontend/src/components/crudChart/crudChart.tsx @@ -0,0 +1,110 @@ +import { + CategoryScale, + Chart as ChartJS, + Legend, + LinearScale, + LineElement, + PointElement, + Title, + Tooltip, +} 
from 'chart.js'; +import React from 'react'; +import { Line } from 'react-chartjs-2'; + +ChartJS.register(CategoryScale, LinearScale, PointElement, LineElement, Title, Tooltip, Legend); + +const options = { + maintainAspectRatio: false, + responsive: true, + + plugins: { + legend: { + position: 'top' as const, + align: 'end' as 'end' | 'start' | 'center' | undefined, + labels: { + color: '#efefef', + font: { + size: 14, + }, + usePointStyle: true, + }, + }, + title: { + display: true, + text: 'RPS breakdown', + align: 'start' as 'end' | 'start' | 'center' | undefined, + color: '#efefef', + font: { + size: 16, + }, + }, + + scales: { + yAxes: [ + { + ticks: { + beginAtZero: true, + }, + gridLines: { + color: '#ffffff', + }, + }, + ], + xAxes: [ + { + type: 'time', + gridLines: { + color: '#ffffff', + }, + }, + ], + }, + }, +}; + +const CrudChart = ({ + time, + get, + put, + exist, + del, +}: { + time: string[]; + get: number[]; + put: number[]; + exist: number[]; + del: number[]; +}) => { + const data = { + labels: time, + datasets: [ + { + label: 'Get', + data: get, + borderColor: '#EC7146', + backgroundColor: '#EC7146', + }, + { + label: 'Put', + data: put, + borderColor: '#A12F45', + backgroundColor: '#A12F45', + }, + { + label: 'Exist', + data: exist, + borderColor: '#7C817E', + backgroundColor: '#7C817E', + }, + { + label: 'Delete', + data: del, + borderColor: '#5EB46B', + backgroundColor: '#5EB46B', + }, + ], + }; + return ; +}; + +export default CrudChart; diff --git a/frontend/src/components/currentRps/currentRps.module.css b/frontend/src/components/currentRps/currentRps.module.css new file mode 100644 index 00000000..67e4dad7 --- /dev/null +++ b/frontend/src/components/currentRps/currentRps.module.css @@ -0,0 +1,31 @@ +.greendot { + height: 16px; + width: 16px; + background-color: #34b663; + border-radius: 50%; + display: inline-block; +} + +.reddot { + height: 16px; + width: 16px; + background-color: #c3234b; + border-radius: 50%; + display: inline-block; 
+} + +.graydot { + height: 16px; + width: 16px; + background-color: #7b817e; + border-radius: 50%; + display: inline-block; +} + +.orangedot { + height: 16px; + width: 16px; + background-color: #ff6936; + border-radius: 50%; + display: inline-block; +} diff --git a/frontend/src/components/currentRps/currentRps.tsx b/frontend/src/components/currentRps/currentRps.tsx new file mode 100644 index 00000000..a3c87a16 --- /dev/null +++ b/frontend/src/components/currentRps/currentRps.tsx @@ -0,0 +1,63 @@ +import { Box } from '@mui/material'; +import React from 'react'; + +import style from './page.module.css'; + +const OperationColor: Record = { + put: style.reddot, + get: style.orangedot, + exist: style.graydot, + delete: style.greendot, +}; + +const RpsLine = ({ type, rps }: { type: Operation; rps: number }) => { + return ( + + + {type.toUpperCase()} + + {rps} + + + ); +}; + +const CurrentRps = ({ map: rps }: RPS) => { + return ( + + Total RPS per operation: + + + + + + ); +}; + +export default CurrentRps; diff --git a/frontend/src/components/dashboard/Dashboard.tsx b/frontend/src/components/dashboard/Dashboard.tsx new file mode 100644 index 00000000..b4a2099c --- /dev/null +++ b/frontend/src/components/dashboard/Dashboard.tsx @@ -0,0 +1,204 @@ +import { Context } from '@appTypes/context.ts'; +import defaultTheme from '@layouts/DefaultTheme.ts'; +import { Box, Grid, ThemeProvider } from '@mui/material'; +import { useStore } from '@nanostores/react'; +import axios from 'axios'; +import React, { useEffect, useMemo, useState } from 'react'; + +import FetchingBackdrop from '../backdrop/backdrop.tsx'; +import ClusterRpsChart from '../clusterRpsChart/clusterRpsChart.tsx'; +import CrudChart from '../crudChart/crudChart.tsx'; +import DiskPie from '../diskPie/diskPie.tsx'; +import DiskBreakdown from '../diskState/diskState.tsx'; +import TotalNodes from '../totalNodes/totalNodes.tsx'; + +interface DashboardState { + diskSpace: SpaceInfo; + nodeCount: NodeCount; + disksCount: 
DiskCount; + rps: RPS; + timeList: string[]; + rpsTotalList: number[]; + rpsBreakdownList: RPSList; + dataLoaded: boolean; +} + +const initialDashboard: DashboardState = { + diskSpace: {} as SpaceInfo, + nodeCount: {} as NodeCount, + disksCount: {} as DiskCount, + rps: {} as RPS, + timeList: [] as string[], + rpsTotalList: [] as number[], + rpsBreakdownList: { + map: { + get: [], + put: [], + exist: [], + delete: [], + }, + } as RPSList, + dataLoaded: false, +}; + +const Dashboard = () => { + const [isPageLoaded, setIsPageLoaded] = useState(false); + const context = useStore(Context); + + const [dashboard, setDashboard] = useState(initialDashboard); + + window.onload = () => { + const loadedDashboard = window.sessionStorage.getItem('dashboard'); + if (loadedDashboard) { + const parsedDasboard: DashboardState = JSON.parse(loadedDashboard); + parsedDasboard.dataLoaded = true; + setDashboard(parsedDasboard); + } + }; + window.onbeforeunload = () => { + window.sessionStorage.setItem('dashboard', JSON.stringify(dashboard)); + }; + + // let time = 0; + + const fetchData = useMemo( + () => async () => { + try { + const [space, disksCount, nodesCount, rps] = await Promise.all([ + axios.get('/api/v1/nodes/space'), + axios.get('/api/v1/disks/count'), + axios.get('/api/v1/nodes/count'), + axios.get('/api/v1/nodes/rps'), + ]); + + /// Don't think we need to preserve time state?.. 
+ + // time += Context.get().refreshTime * 60; + + dashboard.timeList.push(new Date().toLocaleTimeString()); + dashboard.rpsTotalList.push( + rps.data.map.put + rps.data.map.get + rps.data.map.exist + rps.data.map.delete, + ); + dashboard.rpsBreakdownList.map.get.push(rps.data.map.get); + dashboard.rpsBreakdownList.map.put.push(rps.data.map.put); + dashboard.rpsBreakdownList.map.exist.push(rps.data.map.exist); + dashboard.rpsBreakdownList.map.delete.push(rps.data.map.delete); + + setDashboard({ + ...dashboard, + diskSpace: space.data, + nodeCount: nodesCount.data, + disksCount: disksCount.data, + rps: rps.data, + dataLoaded: true, + }); + } catch (err) { + setDashboard({ + ...dashboard, + dataLoaded: false, + }); + // location.assign('/login'); + } + }, + [dashboard], + ); + + useEffect(() => { + if (!isPageLoaded) { + fetchData(); + } + + setIsPageLoaded(true); + }, [fetchData, isPageLoaded]); + + useEffect(() => { + const interval = setInterval(() => { + if (context.enabled) { + fetchData(); + } + }, context.refreshTime * 1000); + + return () => clearInterval(interval); + }, [context.enabled, context.refreshTime, fetchData]); + + if (!isPageLoaded) { + return null; + } + + if (!dashboard.dataLoaded) { + return ; + } + + return ( + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ); +}; + +export default Dashboard; diff --git a/frontend/src/components/dashboard/dashboard.module.css b/frontend/src/components/dashboard/dashboard.module.css new file mode 100644 index 00000000..02c16920 --- /dev/null +++ b/frontend/src/components/dashboard/dashboard.module.css @@ -0,0 +1,12 @@ +.main { + display: flex; + justify-content: center; + align-items: center; + min-height: 100vh; + flex-direction: column; + background-color: rgb(26, 28, 33); +} + +.form { + background-color: rgb(33, 35, 40); +} diff --git a/frontend/src/components/diskPie/diskPie.tsx b/frontend/src/components/diskPie/diskPie.tsx new file mode 100644 index 00000000..d2cb67d9 --- /dev/null +++ 
b/frontend/src/components/diskPie/diskPie.tsx @@ -0,0 +1,80 @@ +import { ArcElement, Chart as ChartJS, Legend, Tooltip } from 'chart.js'; +import React from 'react'; +import { Doughnut } from 'react-chartjs-2'; + +ChartJS.register(ArcElement, Tooltip, Legend); + +const config = { + maintainAspectRatio: false, + responsive: true, + cutout: 170, + plugins: { + title: { + display: true, + text: 'Total occupied space on disks', + align: 'center' as 'end' | 'start' | 'center' | undefined, + color: '#efefef', + font: { + size: 16, + }, + }, + }, +}; + +function formatBytes(bytes: number, decimals = 0) { + if (!+bytes) return '0 Bytes'; + + const k = 1024; + const dm = decimals < 0 ? 0 : decimals; + const sizes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']; + + const i = Math.floor(Math.log(bytes) / Math.log(k)); + + return `${parseFloat((bytes / Math.pow(k, i)).toFixed(dm))}${sizes[i]}`; +} + +const DiskPie = ({ spaceInfo: { used_disk: usedSpace, total_disk: totalSpace } }: { spaceInfo: SpaceInfo }) => { + const textCenter = { + id: 'textCenter', + afterDraw(chart: ChartJS<'doughnut', number[], unknown>) { + const { ctx } = chart; + ctx.save(); + ctx.font = 'bolder 50px sans-serif'; + ctx.fillStyle = 'rgb(255, 104, 54)'; + ctx.textAlign = 'center'; + const numb = usedSpace / totalSpace; + ctx.fillText( + (+numb.toFixed(3) * 100).toFixed(0) + '%', + chart.getDatasetMeta(0).data[0].x, + chart.getDatasetMeta(0).data[0].y - 20, + ); + ctx.font = 'bolder 30px sans-serif'; + ctx.fillStyle = 'white'; + ctx.fillText( + formatBytes(usedSpace) + '/' + formatBytes(totalSpace), + chart.getDatasetMeta(0).data[0].x, + chart.getDatasetMeta(0).data[0].y + 25, + ); + }, + }; + + const f = (cur: number, total: number): number[] => { + return [cur, total - cur]; + }; + + const data = { + datasets: [ + { + label: 'Occupied space', + data: f(usedSpace, totalSpace), + backgroundColor: ['#FF6936', '#282A2F'], + borderColor: ['#FF6936', '#282A2F'], + borderWidth: 20, + }, + ], + }; + 
+ return ; +}; + +export default DiskPie; diff --git a/frontend/src/components/diskState/diskState.module.css b/frontend/src/components/diskState/diskState.module.css new file mode 100644 index 00000000..7d39af67 --- /dev/null +++ b/frontend/src/components/diskState/diskState.module.css @@ -0,0 +1,34 @@ +.main { + display: flex; + width: 500px; + height: 250px; + padding: 14px 24px 16px 24px; + flex-direction: column; + align-items: flex-start; + flex-shrink: 0; +} + +.totalLabelPercent { + font-size: 16px; + min-width: 40px; +} + +.totalGoodDisksLabel { + font-size: 48px; + color: #5eb36b; +} + +.titleLabel { + max-width: 131px; + font-size: 12px; +} + +.totalBadDisksLabel { + font-size: 48px; + color: #7c817e; +} + +.totalOfflineDisksLabel { + font-size: 48px; + color: #b3344d; +} diff --git a/frontend/src/components/diskState/diskState.tsx b/frontend/src/components/diskState/diskState.tsx new file mode 100644 index 00000000..ba3b4140 --- /dev/null +++ b/frontend/src/components/diskState/diskState.tsx @@ -0,0 +1,77 @@ +import { Box, LinearProgress } from '@mui/material'; +import React from 'react'; + +import style from './diskState.module.css'; + +const BarColor: Record = { + good: '#5EB36B', + bad: '#7C817E', + offline: '#B3344D', +}; + +const BarLabelColor: Record = { + good: style.totalGoodDisksLabel, + bad: style.totalBadDisksLabel, + offline: style.totalOfflineDisksLabel, +}; + +const DiskState = ({ diskCount, status }: { diskCount: Record; status: DiskStatusName }) => { + const total = diskCount.good + diskCount.bad + diskCount.offline; + const percent = Math.floor((diskCount[status] / total) * 100) || 0; + return ( + +

{diskCount[status]}

+

{status.charAt(0).toUpperCase() + status.slice(1)}

+ +

{percent}%

+
+ +
+ ); +}; + +const DiskBreakdown = ({ diskCount: { map: count } }: { diskCount: DiskCount }) => { + return ( + +

State of the physical disks in the cluster

+ + + +
+ ); +}; + +export default DiskBreakdown; diff --git a/frontend/src/components/login/Login.tsx b/frontend/src/components/login/Login.tsx index 1310864d..0a6aa462 100644 --- a/frontend/src/components/login/Login.tsx +++ b/frontend/src/components/login/Login.tsx @@ -1,4 +1,4 @@ -import { removeEmpty } from '@components/common.ts'; +import { removeEmpty } from '@appTypes/common.ts'; import defaultTheme from '@layouts/DefaultTheme.ts'; import ThemeRegistry from '@layouts/ThemeRegistry.tsx'; import { Alert, Box, Button, Grid, Snackbar, TextField } from '@mui/material'; @@ -39,7 +39,7 @@ const LoginPage = ({ redirectTo }: { redirectTo: string }) => { ), }); if (response.ok) { - location.assign(redirectTo); + location.replace(redirectTo); } else { setSnackbarMessage('Wrong data'); setOpenSnackbar(true); diff --git a/frontend/src/components/navbar/Navbar.tsx b/frontend/src/components/navbar/Navbar.tsx index b24270e2..5f921460 100644 --- a/frontend/src/components/navbar/Navbar.tsx +++ b/frontend/src/components/navbar/Navbar.tsx @@ -1,5 +1,6 @@ -import { cookieAuthId, eraseCookie, getCookie, refreshTimes } from '@components/common.ts'; -import { Context } from '@components/Context.ts'; +import { cookieAuthId, eraseCookie, getCookie } from '@appTypes/common.ts'; +import { Context, refreshTimes } from '@appTypes/context.ts'; +import { isLocation, type NavLocation } from '@appTypes/navigation.ts'; import defaultTheme from '@layouts/DefaultTheme.ts'; import { ExitToApp } from '@mui/icons-material'; import { @@ -21,38 +22,28 @@ import { ThemeProvider } from '@mui/material/styles'; import { useStore } from '@nanostores/react'; import BrandMark from 'public/brandmark.svg'; import React, { useEffect, useState } from 'react'; -// import type { RefreshTimes } from '../../env.d.ts'; const Navbar = ({ logoutRedirectTo }: { logoutRedirectTo: string }) => { const context = useStore(Context); - const [value, setValue] = useState(0); + const [tab, setTab] = useState('/dashboard' as 
NavLocation); const [refresh, setRefresh] = useState(context.refreshTime); + // FIXME: button's render is not on the same state on page refresh as the context (always on) + // I hate react.... const [switchButton, setSwitchButton] = useState(context.enabled); + const path = window.location.pathname.replace(/\/$/, ''); useEffect(() => { - const path = window.location.pathname; - if (path === '/dashboard') { - setValue(0); - } else if (path === '/nodelist') { - setValue(1); - } else if (path === '/vdisklist') { - setValue(2); + // setSwitchButton(Context.get().enabled); + if (isLocation(path)) { + setTab(path); } - }, []); + }, [setSwitchButton, context, switchButton, path]); - const updateContext = () => { - Context.set({ - refreshTime: refresh, - enabled: switchButton, - }); - }; - - const pathname = window.location.pathname.replace(/\/$/, ''); - if (getCookie('id') === '' && pathname !== '/login') { - location.assign(logoutRedirectTo); + if (getCookie('id') === '' && path !== '/login') { + location.replace(logoutRedirectTo); } - if (pathname === '/login') { + if (path === '/login') { return
; } @@ -61,7 +52,7 @@ const Navbar = ({ logoutRedirectTo }: { logoutRedirectTo: string }) => { method: 'POST', }); eraseCookie(cookieAuthId); - location.assign(logoutRedirectTo); + location.replace(logoutRedirectTo); } return ( @@ -87,28 +78,30 @@ const Navbar = ({ logoutRedirectTo }: { logoutRedirectTo: string }) => { { - setValue(val); - updateContext(); + setTab(val); }} > @@ -124,16 +117,13 @@ const Navbar = ({ logoutRedirectTo }: { logoutRedirectTo: string }) => { }} > { - setSwitchButton(c); - updateContext(); - }} - /> - } - label="STOP" + value={switchButton} + control={switchButton ? : } + onChange={() => { + setSwitchButton(!switchButton); + Context.setKey('enabled', !switchButton); + }} + label={'STOP: ' + switchButton} labelPlacement="start" sx={{ '&.MuiFormControlLabel-labelPlacementStart': { @@ -156,10 +146,11 @@ const Navbar = ({ logoutRedirectTo }: { logoutRedirectTo: string }) => { labelId="polling-select-label-id" id="pollint-select-id" label="min" - value={refresh} + // value={refresh} + defaultValue={refresh} onChange={(e) => { setRefresh(e.target.value); - updateContext(); + Context.setKey('refreshTime', e.target.value); }} > {refreshTimes.map((val: string) => ( @@ -179,7 +170,6 @@ const Navbar = ({ logoutRedirectTo }: { logoutRedirectTo: string }) => { { handleLogout(); - updateContext(); }} color="inherit" > diff --git a/frontend/src/components/totalNodes/totalNodes.module.css b/frontend/src/components/totalNodes/totalNodes.module.css new file mode 100644 index 00000000..f935cdcd --- /dev/null +++ b/frontend/src/components/totalNodes/totalNodes.module.css @@ -0,0 +1,34 @@ +.main { + display: flex; + width: 500px; + height: 250px; + padding: 14px 24px 16px 24px; + flex-direction: column; + align-items: flex-start; + flex-shrink: 0; +} + +.totalLabelPercent { + font-size: 16px; + min-width: 40px; +} + +.totalGoodNodesLabel { + font-size: 48px; + color: #5eb36b; +} + +.titleLabel { + max-width: 120px; + font-size: 12px; +} + +.totalBadNodesLabel 
{ + font-size: 48px; + color: #7c817e; +} + +.totalOfflineNodesLabel { + font-size: 48px; + color: #b3344d; +} diff --git a/frontend/src/components/totalNodes/totalNodes.tsx b/frontend/src/components/totalNodes/totalNodes.tsx new file mode 100644 index 00000000..6b6ad0f9 --- /dev/null +++ b/frontend/src/components/totalNodes/totalNodes.tsx @@ -0,0 +1,77 @@ +import { Box, LinearProgress } from '@mui/material'; +import React from 'react'; + +import style from './totalNodes.module.css'; + +const NodeColor: Record = { + good: '#5EB36B', + bad: '#7C817E', + offline: '#B3344D', +}; + +const NodeLabelColor: Record = { + good: style.totalGoodNodesLabel, + bad: style.totalBadNodesLabel, + offline: style.totalOfflineNodesLabel, +}; + +const NodeState = ({ nodeCount, status }: { nodeCount: Record; status: NodeStatusName }) => { + const total = nodeCount.good + nodeCount.bad + nodeCount.offline; + const percent = Math.floor((nodeCount[status] / total) * 100) || 0; + return ( + +

{nodeCount[status]}

+

{status.charAt(0).toUpperCase() + status.slice(1)}

+ +

{percent}%

+
+ +
+ ); +}; + +const TotalNodes = ({ nodeCount: { map: count } }: { nodeCount: NodeCount }) => { + return ( + +

State of the nodes in the cluster

+ + + +
+ ); +}; + +export default TotalNodes; diff --git a/frontend/src/layouts/Layout.astro b/frontend/src/layouts/Layout.astro index 1f05d429..4629fc00 100644 --- a/frontend/src/layouts/Layout.astro +++ b/frontend/src/layouts/Layout.astro @@ -24,7 +24,7 @@ const { title } = Astro.props; /> {title} - +