diff --git a/CHANGELOG.md b/CHANGELOG.md
index 941aeef4..12716910 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -14,4 +14,6 @@ Bob Management GUI changelog
 - Login Page, backend (#16)
 - Login Page, frontend (#17)
 - Home page, backend (#18)
+- Node list page, backend (#19)
 - Home page, frontend (#22)
+- Node list page, frontend (#23)
diff --git a/backend/src/connector/dto.rs b/backend/src/connector/dto.rs
index 9ccc1f34..c8eaae3c 100644
--- a/backend/src/connector/dto.rs
+++ b/backend/src/connector/dto.rs
@@ -409,6 +409,8 @@ impl Ord for MetricsEntryModel {
 impl Eq for MetricsEntryModel {}
 #[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
 #[cfg_attr(feature = "conversion", derive(frunk::LabelledGeneric))]
+#[cfg_attr(all(feature = "swagger", debug_assertions), derive(ToSchema))]
+#[cfg_attr(all(feature = "swagger", debug_assertions), schema(as = dto::Node))]
 pub struct Node {
     #[serde(rename = "name")]
     pub name: String,
@@ -418,6 +420,7 @@ pub struct Node {
 
     #[serde(rename = "vdisks")]
     #[serde(skip_serializing_if = "Option::is_none")]
+    #[cfg_attr(all(feature = "swagger", debug_assertions), schema(value_type = Option<Vec<dto::VDisk>>))]
     pub vdisks: Option<Vec<VDisk>>,
 }
 
@@ -697,6 +700,8 @@ impl std::str::FromStr for Partition {
 
 #[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
 #[cfg_attr(feature = "conversion", derive(frunk::LabelledGeneric))]
+#[cfg_attr(all(feature = "swagger", debug_assertions), derive(ToSchema))]
+#[cfg_attr(all(feature = "swagger", debug_assertions), schema(as = dto::Replica))]
 pub struct Replica {
     #[serde(rename = "node")]
     pub node: String,
@@ -896,12 +901,15 @@ impl std::str::FromStr for StatusExt {
 
 #[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
 #[cfg_attr(feature = "conversion", derive(frunk::LabelledGeneric))]
+#[cfg_attr(all(feature = "swagger", debug_assertions), derive(ToSchema))]
+#[cfg_attr(all(feature = "swagger", debug_assertions), schema(as = dto::VDisk))]
 pub struct VDisk {
     #[serde(rename = "id")]
     pub id: i32,
 
     #[serde(rename = "replicas")]
     #[serde(skip_serializing_if = "Option::is_none")]
+    #[cfg_attr(all(feature = "swagger", debug_assertions), schema(value_type = Option<Vec<dto::Replica>>))]
     pub replicas: Option<Vec<Replica>>,
 }
 
diff --git a/backend/src/connector/mod.rs b/backend/src/connector/mod.rs
index 7bd7e695..f0462bb8 100644
--- a/backend/src/connector/mod.rs
+++ b/backend/src/connector/mod.rs
@@ -10,8 +10,9 @@ mod prelude {
         headers::{authorization::Credentials, Authorization, HeaderMapExt},
         http::{HeaderName, HeaderValue},
     };
-    pub use futures::StreamExt;
-    pub use hyper::{service::Service, Response, Uri};
+    pub use futures::{Stream, StreamExt};
+    pub use hyper::{body::Bytes, service::Service, Response, Uri};
+    pub use std::collections::BTreeMap;
     pub use std::{
         str::FromStr,
         sync::Arc,
@@ -105,7 +106,7 @@ pub struct BobClient<Context: Send + Sync, Client: ApiNoContext<Context> + Send
     main: Arc<Client>,
 
     /// Clients for all known nodes
-    cluster: HashMap<NodeName, Arc<Client>>,
+    cluster: BTreeMap<NodeName, Arc<Client>>,
 
     context_marker: PhantomData<fn(Context)>,
 }
@@ -168,7 +169,7 @@ impl<Context: Send + Sync, ApiInterface: ApiNoContext<Context> + Send + Sync>
                 .attach_printable(format!("Hostname: {}", hostname.to_string()))?
         };
 
-        let cluster: HashMap<NodeName, Arc<_>> = nodes
+        let cluster: BTreeMap<NodeName, Arc<_>> = nodes
             .iter()
             .filter_map(|node| HttpClient::from_node(node, &bob_data.hostname, context.clone()))
             .collect();
@@ -275,7 +277,7 @@ impl<Context: Send + Sync, ApiInterface: ApiNoContext<Context> + Send + Sync>
     }
 
     #[must_use]
-    pub const fn cluster_with_addr(&self) -> &HashMap<NodeName, Arc<ApiInterface>> {
+    pub const fn cluster_with_addr(&self) -> &BTreeMap<NodeName, Arc<ApiInterface>> {
         &self.cluster
     }
 
diff --git a/backend/src/lib.rs b/backend/src/lib.rs
index 89a20739..21fb1523 100644
--- a/backend/src/lib.rs
+++ b/backend/src/lib.rs
@@ -41,25 +41,36 @@ impl Modify for SecurityAddon {
         services::api::get_nodes_count,
         services::api::get_rps,
         services::api::get_space,
+        services::api::raw_metrics_by_node,
+        services::api::raw_configuration_by_node,
+        services::api::get_node_info,
+        services::api::get_nodes_list,
     ),
     components(
         schemas(models::shared::Credentials, models::shared::Hostname, models::shared::BobConnectionData,
+            models::api::Disk,
             models::api::DiskProblem,
             models::api::DiskStatus,
             models::api::DiskStatusName,
             models::api::DiskCount,
+            models::api::NodeInfo,
             models::api::NodeProblem,
             models::api::NodeStatus,
             models::api::NodeStatusName,
             models::api::NodeCount,
+            models::api::Replica,
             models::api::ReplicaProblem,
             models::api::ReplicaStatus,
             models::api::SpaceInfo,
+            models::api::VDisk,
             models::api::VDiskStatus,
             models::api::Operation,
             models::api::RPS,
             models::api::RawMetricEntry,
             models::api::TypedMetrics,
+            connector::dto::Node,
+            connector::dto::VDisk,
+            connector::dto::Replica,
             connector::dto::MetricsEntryModel,
             connector::dto::MetricsSnapshotModel,
             connector::dto::NodeConfiguration
diff --git a/backend/src/models/api.rs b/backend/src/models/api.rs
index 8acc77d1..a2eb1a33 100644
--- a/backend/src/models/api.rs
+++ b/backend/src/models/api.rs
@@ -8,8 +8,33 @@ pub const DEFAULT_MIN_FREE_SPACE_PERCENTAGE: f64 = 0.1;
 /// Connection Data
 pub use crate::models::shared::{BobConnectionData, Credentials};
 
+/// Physical disk definition
+#[derive(Debug, Clone, Eq, PartialEq, Serialize)]
+#[cfg_attr(all(feature = "swagger", debug_assertions), derive(ToSchema))]
+#[serde(rename_all = "camelCase")]
+#[tsync]
+pub struct Disk {
+    /// Disk name
+    pub name: String,
+
+    /// Disk path
+    pub path: String,
+
+    /// Disk status
+    // #[serde(flatten)]
+    pub status: DiskStatus,
+
+    #[serde(rename = "totalSpace")]
+    pub total_space: u64,
+
+    #[serde(rename = "usedSpace")]
+    pub used_space: u64,
+
+    pub iops: u64,
+}
+
 /// Defines kind of problem on disk
-#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Serialize, Deserialize, Hash)]
+#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Serialize, Hash)]
 #[cfg_attr(all(feature = "swagger", debug_assertions), derive(ToSchema))]
 #[tsync]
 pub enum DiskProblem {
@@ -21,7 +46,7 @@ pub enum DiskProblem {
 ///
 /// Variant - Disk Status
 /// Content - List of problems on disk. 'null' if status != 'bad'
-#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize, Hash)]
+#[derive(Debug, Clone, Eq, PartialEq, Serialize, Hash)]
 #[serde(tag = "status", content = "problems")]
 #[cfg_attr(all(feature = "swagger", debug_assertions), derive(ToSchema))]
 #[tsync]
@@ -29,7 +54,7 @@ pub enum DiskStatus {
     #[serde(rename = "good")]
     Good,
     #[serde(rename = "bad")]
-    Bad(Vec<DiskProblem>),
+    Bad { problems: Vec<DiskProblem> },
     #[serde(rename = "offline")]
     Offline,
 }
@@ -43,7 +68,9 @@ impl DiskStatus {
                 / space.total_disk_space_bytes as f64)
                 < DEFAULT_MIN_FREE_SPACE_PERCENTAGE
             {
-                Self::Bad(vec![DiskProblem::FreeSpaceRunningOut])
+                Self::Bad {
+                    problems: vec![DiskProblem::FreeSpaceRunningOut],
+                }
             } else {
                 Self::Good
             }
@@ -54,7 +81,7 @@ impl DiskStatus {
 }
 
 /// Defines disk status names
-#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize, Hash, EnumIter)]
+#[derive(Debug, Clone, Eq, PartialEq, Serialize, Hash, EnumIter)]
 #[serde(rename_all = "camelCase")]
 #[cfg_attr(all(feature = "swagger", debug_assertions), derive(ToSchema))]
 #[tsync]
@@ -64,8 +91,37 @@ pub enum DiskStatusName {
     Offline,
 }
 
+#[derive(Debug, Clone, Serialize)]
+#[cfg_attr(all(feature = "swagger", debug_assertions), derive(ToSchema))]
+#[serde(rename_all = "camelCase")]
+#[tsync]
+pub struct NodeInfo {
+    pub name: String,
+
+    pub hostname: String,
+
+    pub vdisks: Vec<VDisk>,
+    // #[serde(flatten)]
+    pub status: NodeStatus,
+
+    #[serde(rename = "rps")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub rps: Option<RPS>,
+
+    #[serde(rename = "alienCount")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub alien_count: Option<u64>,
+
+    #[serde(rename = "corruptedCount")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub corrupted_count: Option<u64>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub space: Option<SpaceInfo>,
+}
+
 /// Defines kind of problem on Node
-#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Serialize, Deserialize, Hash)]
+#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Serialize, Hash)]
 #[cfg_attr(all(feature = "swagger", debug_assertions), derive(ToSchema))]
 #[tsync]
 pub enum NodeProblem {
@@ -108,10 +164,8 @@ impl NodeProblem {
         if node_metrics[RawMetricEntry::HardwareBobCpuLoad].value >= max_cpu {
             res.push(Self::HighCPULoad);
         }
-        if (1.
-            - (node_metrics[RawMetricEntry::HardwareTotalSpace].value
-                - node_metrics[RawMetricEntry::HardwareFreeSpace].value) as f64
-                / node_metrics[RawMetricEntry::HardwareTotalSpace].value as f64)
+        if (node_metrics[RawMetricEntry::HardwareFreeSpace].value as f64
+            / node_metrics[RawMetricEntry::HardwareTotalSpace].value as f64)
             < min_free_space_perc
         {
             res.push(Self::FreeSpaceRunningOut);
@@ -131,7 +185,7 @@ impl NodeProblem {
 /// Variants - Node status
 ///
 /// Content - List of problems on node. 'null' if status != 'bad'
-#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize, Hash)]
+#[derive(Debug, Clone, Eq, PartialEq, Serialize, Hash)]
 #[serde(tag = "status", content = "problems")]
 #[cfg_attr(all(feature = "swagger", debug_assertions), derive(ToSchema))]
 #[tsync]
@@ -139,7 +193,7 @@ pub enum NodeStatus {
     #[serde(rename = "good")]
     Good,
     #[serde(rename = "bad")]
-    Bad(Vec<NodeProblem>),
+    Bad { problems: Vec<NodeProblem> },
     #[serde(rename = "offline")]
     Offline,
 }
@@ -150,7 +204,7 @@ impl NodeStatus {
         if problems.is_empty() {
             Self::Good
         } else {
-            Self::Bad(problems)
+            Self::Bad { problems }
         }
     }
 }
@@ -172,7 +226,7 @@ impl TypedMetrics {
 }
 
 /// Defines node status names
-#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize, Hash, EnumIter)]
+#[derive(Debug, Clone, Eq, PartialEq, Serialize, Hash, EnumIter)]
 #[serde(rename_all = "camelCase")]
 #[cfg_attr(all(feature = "swagger", debug_assertions), derive(ToSchema))]
 #[tsync]
@@ -182,8 +236,24 @@ pub enum NodeStatusName {
     Offline,
 }
 
+/// [`VDisk`]'s replicas
+#[derive(Debug, Clone, Eq, PartialEq, Serialize)]
+#[cfg_attr(all(feature = "swagger", debug_assertions), derive(ToSchema))]
+#[serde(rename_all = "camelCase")]
+#[tsync]
+pub struct Replica {
+    pub node: String,
+
+    pub disk: String,
+
+    pub path: String,
+
+    // #[serde(flatten)]
+    pub status: ReplicaStatus,
+}
+
 /// Reasons why Replica is offline
-#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Serialize, Deserialize)]
+#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Serialize)]
 #[cfg_attr(all(feature = "swagger", debug_assertions), derive(ToSchema))]
 #[tsync]
 pub enum ReplicaProblem {
@@ -198,7 +268,7 @@ pub enum ReplicaProblem {
 /// Variants - Replica status
 ///
 /// Content - List of problems on replica. 'null' if status != 'offline'
-#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
+#[derive(Debug, Clone, Eq, PartialEq, Serialize)]
 #[serde(tag = "status", content = "problems")]
 #[cfg_attr(all(feature = "swagger", debug_assertions), derive(ToSchema))]
 #[tsync]
@@ -206,11 +276,11 @@ pub enum ReplicaStatus {
     #[serde(rename = "good")]
     Good,
     #[serde(rename = "offline")]
-    Offline(Vec<ReplicaProblem>),
+    Offline { problems: Vec<ReplicaProblem> },
 }
 
 /// Disk space information in bytes
-#[derive(Debug, Default, Clone, Eq, PartialEq, Serialize, Deserialize)]
+#[derive(Debug, Default, Clone, Eq, PartialEq, Serialize)]
 #[cfg_attr(all(feature = "swagger", debug_assertions), derive(ToSchema))]
 #[tsync]
 pub struct SpaceInfo {
@@ -227,16 +297,61 @@ pub struct SpaceInfo {
     pub occupied_disk: u64,
 }
 
+impl From<dto::SpaceInfo> for SpaceInfo {
+    fn from(space: dto::SpaceInfo) -> Self {
+        Self {
+            total_disk: space.total_disk_space_bytes,
+            free_disk: space.total_disk_space_bytes - space.used_disk_space_bytes,
+            used_disk: space.used_disk_space_bytes,
+            occupied_disk: space.occupied_disk_space_bytes,
+        }
+    }
+}
+
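+// Element-wise accumulation, presumably used to sum space info across nodes or disks.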
+impl AddAssign for SpaceInfo {
+    fn add_assign(&mut self, rhs: Self) {
+        self.total_disk += rhs.total_disk;
+        self.free_disk += rhs.free_disk;
+        self.used_disk += rhs.used_disk;
+        self.occupied_disk += rhs.occupied_disk;
+    }
+}
+
+impl Add for SpaceInfo {
+    type Output = Self;
+
+    fn add(mut self, rhs: Self) -> Self::Output {
+        self += rhs;
+
+        self
+    }
+}
+
+/// Virtual disk Component
+#[derive(Debug, Clone, Eq, PartialEq, Serialize)]
+#[cfg_attr(all(feature = "swagger", debug_assertions), derive(ToSchema))]
+pub struct VDisk {
+    pub id: u64,
+
+    // #[serde(flatten)]
+    pub status: VDiskStatus,
+
+    #[serde(rename = "partitionCount")]
+    pub partition_count: u64,
+
+    pub replicas: Vec<Replica>,
+}
+
 /// Virtual disk status.
 ///
 /// Variants - Virtual Disk status
 /// status == 'bad' when at least one of its replicas has problems
-#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
+#[derive(Debug, Clone, Eq, PartialEq, Serialize)]
 #[serde(tag = "status")]
 #[cfg_attr(all(feature = "swagger", debug_assertions), derive(ToSchema))]
-#[cfg_attr(all(feature = "swagger", debug_assertions),
-    schema(example = json!({"status": "good"})))]
 #[tsync]
+// #[cfg_attr(all(feature = "swagger", debug_assertions),
+//     schema(example = json!({"status": "good"})))]
 pub enum VDiskStatus {
     #[serde(rename = "good")]
     Good,
@@ -247,7 +362,7 @@ pub enum VDiskStatus {
 }
 
 /// Types of operations on BOB cluster
-#[derive(Debug, Clone, Serialize, Deserialize, Hash, Eq, PartialEq, PartialOrd, Ord, EnumIter)]
+#[derive(Debug, Clone, Serialize, Hash, Eq, PartialEq, PartialOrd, Ord, EnumIter)]
 #[cfg_attr(all(feature = "swagger", debug_assertions), derive(ToSchema))]
 #[serde(rename_all = "camelCase")]
 #[tsync]
@@ -258,7 +373,7 @@ pub enum Operation {
     Delete,
 }
 
-#[derive(Clone, Debug, Serialize, Deserialize, Hash, Eq, PartialEq, PartialOrd, Ord, EnumIter)]
+#[derive(Clone, Debug, Serialize, Hash, Eq, PartialEq, PartialOrd, Ord, EnumIter)]
 #[cfg_attr(all(feature = "swagger", debug_assertions), derive(ToSchema))]
 #[tsync]
 pub enum RawMetricEntry {
@@ -434,8 +549,6 @@ impl<
     }
 }
 
-// pub type TypedMetrics = TypedMap<RawMetricEntry, MetricsEntryModel>;
-
 impl<Id: IntoEnumIterator + Eq + Hash, V> std::ops::Index<Id> for TypedMap<Id, V> {
     type Output = V;
 
@@ -486,7 +599,11 @@ impl From<dto::MetricsSnapshotModel> for TypedMetrics {
         let mut value = value.metrics;
         for key in RawMetricEntry::iter() {
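+            // serde_json::to_string wraps the serialized variant name in literal '"' characters,
+            // so they are trimmed below to match the plain string keys of the metrics map.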
             let value = value
-                .remove(&serde_json::to_string(&key).expect("infallible"))
+                .remove(
+                    serde_json::to_string(&key)
+                        .expect("infallible")
+                        .trim_matches('"'),
+                )
                 .unwrap_or_default();
             map.insert(key, value);
         }
diff --git a/backend/src/services/api.rs b/backend/src/services/api.rs
index cd04edaa..fee57f4d 100644
--- a/backend/src/services/api.rs
+++ b/backend/src/services/api.rs
@@ -1,5 +1,9 @@
 use super::prelude::*;
 
+// TODO: For methods that require information from all nodes (/disks/count, /nodes/rps, etc.),
+// think of a better way of returning the info,
+// e.g. a separate task that periodically refreshes and caches the results?
+
 /// Returns count of Physical Disks per status
 #[cfg_attr(all(feature = "swagger", debug_assertions),
     utoipa::path(
@@ -58,7 +62,7 @@ pub async fn get_disks_count(Extension(client): Extension<HttpBobClient>) -> Jso
             match DiskStatus::from_space_info(&space, &disk.name) {
                 DiskStatus::Good => count[DiskStatusName::Good] += 1,
                 DiskStatus::Offline => count[DiskStatusName::Offline] += 1,
-                DiskStatus::Bad(_) => count[DiskStatusName::Bad] += 1,
+                DiskStatus::Bad { .. } => count[DiskStatusName::Bad] += 1,
             }
         });
         count[DiskStatusName::Offline] = (disks.len() - active) as u64;
@@ -200,28 +204,257 @@ pub async fn get_space(Extension(client): Extension<HttpBobClient>) -> Json<Spac
     Json(total_space)
 }
 
-#[allow(clippy::cast_precision_loss)]
-fn is_bad_node(node_metrics: &TypedMetrics) -> bool {
-    node_metrics[RawMetricEntry::BackendAlienCount].value != 0
-        || node_metrics[RawMetricEntry::BackendCorruptedBlobCount].value != 0
-        || node_metrics[RawMetricEntry::HardwareBobCpuLoad].value >= DEFAULT_MAX_CPU
-        || (1.
-            - (node_metrics[RawMetricEntry::HardwareTotalSpace].value
-                - node_metrics[RawMetricEntry::HardwareFreeSpace].value) as f64
-                / node_metrics[RawMetricEntry::HardwareTotalSpace].value as f64)
-            < DEFAULT_MIN_FREE_SPACE_PERCENTAGE
-        || node_metrics[RawMetricEntry::HardwareBobVirtualRam]
-            > node_metrics[RawMetricEntry::HardwareTotalRam]
+/// Returns simple list of all known nodes
+///
+/// # Errors
+///
+/// This function will return an error if a call to the primary node fails
+#[cfg_attr(feature = "swagger", utoipa::path(
+        get,
+        context_path = ApiV1::to_path(),
+        path = "/nodes/list",
+        responses(
+            (
+                status = 200, body = Vec<dto::Node>,
+                content_type = "application/json",
+                description = "Simple Node List"
+            ),
+            (status = 401, description = "Unauthorized")
+        ),
+        security(("api_key" = []))
+    ))]
+pub async fn get_nodes_list(
+    Extension(client): Extension<HttpBobClient>,
+) -> AxumResult<Json<Vec<dto::Node>>> {
+    tracing::info!("get /nodes/list : {client:?}");
+    fetch_nodes(client.api_main()).await.map(Json)
 }
 
-#[allow(clippy::cast_precision_loss)]
-fn disk_status_from_space(space: &dto::SpaceInfo, occupied_space: u64) -> DiskStatus {
-    if ((space.total_disk_space_bytes - occupied_space) as f64
-        / space.total_disk_space_bytes as f64)
-        < DEFAULT_MIN_FREE_SPACE_PERCENTAGE
+/// Returns simple list of all known vdisks
+///
+/// # Errors
+///
+/// This function will return an error if a call to the primary node fails
+#[cfg_attr(feature = "swagger", utoipa::path(
+        get,
+        context_path = ApiV1::to_path(),
+        path = "/vdisks/list",
+        responses(
+            (
+                status = 200, body = Vec<dto::VDisk>,
+                content_type = "application/json",
+                description = "Simple Node List"
+            ),
+            (status = 401, description = "Unauthorized")
+        ),
+        security(("api_key" = []))
+    ))]
+pub async fn get_vdisks_list(
+    Extension(client): Extension<HttpBobClient>,
+) -> AxumResult<Json<Vec<dto::VDisk>>> {
+    tracing::info!("get /vdisks/list : {client:?}");
+    fetch_vdisks(client.api_main()).await.map(Json)
+}
+
+/// Returns vdisk information by its id
+///
+/// # Errors
+///
+/// This function will return an error if a call to the main node fails or no vdisk with
+/// the specified id is found
+#[cfg_attr(feature = "swagger", utoipa::path(
+        get,
+        context_path = ApiV1::to_path(),
+        path = "/vdisks/{vdisk_id}",
+        responses(
+            (
+                status = 200, body = VDisk,
+                content_type = "application/json",
+                description = "VDisk Inforamtion"
+            ),
+            (status = 401, description = "Unauthorized"),
+            (status = 404, description = "VDisk not found"),
+        ),
+        security(("api_key" = []))
+    ))]
+pub async fn get_vdisk_info(
+    Extension(client): Extension<HttpBobClient>,
+    Path(vdisk_id): Path<u64>,
+) -> AxumResult<Json<VDisk>> {
+    tracing::info!("get /vdisks/{vdisk_id} : {client:?}");
+    get_vdisk_by_id(&client, vdisk_id).await.map(Json)
+}
+
+/// Returns node information by its name
+///
+/// # Errors
+///
+/// This function will return an error if a call to the specified node fails or no node with
+/// the specified name is found
+#[cfg_attr(feature = "swagger", utoipa::path(
+        get,
+        context_path = ApiV1::to_path(),
+        path = "/nodes/{node_name}",
+        responses(
+            (
+                status = 200, body = NodeInfo,
+                content_type = "application/json",
+                description = "Node Inforamtion"
+            ),
+            (status = 401, description = "Unauthorized"),
+            (status = 404, description = "Node not found"),
+        ),
+        security(("api_key" = []))
+    ))]
+pub async fn get_node_info(
+    Extension(client): Extension<HttpBobClient>,
+    Path(node_name): Path<NodeName>,
+) -> AxumResult<Json<NodeInfo>> {
+    tracing::info!("get /nodes/{node_name} : {client:?}");
+    let handle = Arc::new(
+        client
+            .api_secondary(&node_name)
+            .cloned()
+            .ok_or(StatusCode::NOT_FOUND)?,
+    );
+
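+    // Fan out the independent requests (node list, status, metrics, space info) as
+    // separate tasks; the results are awaited and combined below.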
+    let nodes = {
+        let handle = client.api_main().clone();
+        tokio::spawn(async move { fetch_nodes(&handle).await })
+    };
+    let status = {
+        let handle = handle.clone();
+        tokio::spawn(async move { handle.get_status().await })
+    };
+    let metrics = {
+        let handle = handle.clone();
+        tokio::spawn(async move { handle.get_metrics().await })
+    };
+    let space_info = {
+        let handle = handle.clone();
+        tokio::spawn(async move { handle.get_space_info().await })
+    };
+
+    let mut node = NodeInfo {
+        name: node_name.clone(),
+        hostname: String::new(),
+        vdisks: vec![],
+        status: NodeStatus::Offline,
+        rps: None,
+        alien_count: None,
+        corrupted_count: None,
+        space: None,
+    };
+
+    let Ok(Ok(GetStatusResponse::AJSONWithNodeInfo(status))) = status.await else {
+        // Fallback to general info
+        node.hostname = if let Ok(Ok(nodes)) = nodes.await {
+            nodes
+                .iter()
+                .find(|node| node.name == node_name)
+                .ok_or(StatusCode::NOT_FOUND)?
+                .address
+                .clone()
+        } else {
+            String::new()
+        };
+        return Ok(Json(node));
+    };
+    node.hostname = status.address;
+
+    let mut vdisks: FuturesUnordered<_> = status
+        .vdisks
+        .iter()
+        .flatten()
+        .map(|vdisk| {
+            let handle = client.clone();
+            let id = vdisk.id as u64;
+            tokio::spawn(async move { get_vdisk_by_id(&handle, id).await })
+        })
+        .collect();
+
+    if let (
+        Ok(Ok(GetMetricsResponse::Metrics(metric))),
+        Ok(Ok(GetSpaceInfoResponse::SpaceInfo(space))),
+    ) = (metrics.await, space_info.await)
     {
-        DiskStatus::Bad(vec![DiskProblem::FreeSpaceRunningOut])
-    } else {
-        DiskStatus::Good
+        let metric = Into::<TypedMetrics>::into(metric);
+        node.status = NodeStatus::from_problems(NodeProblem::default_from_metrics(&metric));
+        node.rps = Some(RPS::from_metrics(&metric));
+        node.alien_count = Some(metric[RawMetricEntry::BackendAlienCount].value);
+        node.corrupted_count = Some(metric[RawMetricEntry::BackendCorruptedBlobCount].value);
+        node.space = Some(SpaceInfo::from(space));
     }
+
+    while let Some(vdisk) = vdisks.next().await {
+        if let Ok(Ok(vdisk)) = vdisk {
+            node.vdisks.push(vdisk);
+        } else {
+            tracing::warn!("some warning"); //TODO
+        }
+    }
+
+    Ok(Json(node))
+}
+
+/// Get Raw Metrics from Node
+///
+/// # Errors
+///
+/// This function will return an error if the server was unable to get the node's client or the request to get metrics fails
+#[cfg_attr(feature = "swagger", utoipa::path(
+        get,
+        context_path = ApiV1::to_path(),
+        path = "/nodes/{node_name}/metrics",
+        responses(
+            (status = 200, body = TypedMetrics, content_type = "application/json", description = "Node's metrics"),
+            (status = 401, description = "Unauthorized"),
+            (status = 404, description = "Node Not Found")
+        ),
+        security(("api_key" = []))
+    ))]
+pub async fn raw_metrics_by_node(
+    Extension(client): Extension<HttpBobClient>,
+    Path(node_name): Path<NodeName>,
+) -> AxumResult<Json<TypedMetrics>> {
+    Ok(Json(
+        fetch_metrics(
+            &client
+                .api_secondary(&node_name)
+                .cloned()
+                .ok_or(StatusCode::NOT_FOUND)?,
+        )
+        .await?
+        .into(),
+    ))
+}
+
+/// Get Configuration from Node
+///
+/// # Errors
+///
+/// This function will return an error if the server was unable to get the node's client or the request to get configuration fails
+#[cfg_attr(feature = "swagger", utoipa::path(
+        get,
+        context_path = ApiV1::to_path(),
+        path = "/nodes/{node_name}/configuration",
+        responses(
+            (status = 200, body = NodeConfiguration, content_type = "application/json", description = "Node's configuration"),
+            (status = 401, description = "Unauthorized"),
+            (status = 404, description = "Node Not Found")
+        ),
+        security(("api_key" = []))
+    ))]
+pub async fn raw_configuration_by_node(
+    Extension(client): Extension<HttpBobClient>,
+    Path(node_name): Path<NodeName>,
+) -> AxumResult<Json<dto::NodeConfiguration>> {
+    Ok(Json(
+        fetch_configuration(
+            &client
+                .api_secondary(&node_name)
+                .cloned()
+                .ok_or(StatusCode::NOT_FOUND)?,
+        )
+        .await?,
+    ))
 }
diff --git a/backend/src/services/methods.rs b/backend/src/services/methods.rs
index 9bfc23d2..922dbe60 100644
--- a/backend/src/services/methods.rs
+++ b/backend/src/services/methods.rs
@@ -67,7 +67,6 @@ pub async fn fetch_space_info<
 ///
 /// # Errors
 ///
-/// This function will return an error if .
 /// This function will return an error if the request to the specified client failed
 pub async fn fetch_node_status<
     Context: Send + Sync,
@@ -161,3 +160,113 @@ pub async fn fetch_nodes<
 
     Ok(nodes)
 }
+
+/// Return `VDisk` information by id
+///
+/// # Errors
+///
+/// This function will return an error if vdisks information couldn't be fetched or no vdisk with
+/// provided id was found
+pub async fn get_vdisk_by_id(client: &HttpBobClient, vdisk_id: u64) -> AxumResult<VDisk> {
+    let virtual_disks = fetch_vdisks(client.api_main()).await?;
+    let virtual_disks = virtual_disks
+        .iter()
+        .find(|vdisk| vdisk.id as u64 == vdisk_id)
+        .ok_or_else(|| StatusCode::NOT_FOUND.into_response())?;
+    let clients = virtual_disks
+        .replicas
+        .iter()
+        .flatten()
+        .map(|replica| replica.node.clone())
+        .collect::<HashSet<_>>()
+        .iter()
+        .filter_map(|node_name| client.api_secondary(node_name))
+        .collect::<Vec<_>>();
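+    // The partition count is taken from the first node holding a replica; if no client
+    // is available or the request fails, it falls back to 0.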
+    let partition_count = if let Some(handle) = clients.first() {
+        handle.get_partitions(vdisk_id as i32).await.map_or_else(
+            |_err| 0,
+            |parts| {
+                if let GetPartitionsResponse::NodeInfoAndJSONArrayWithPartitionsInfo(parts) = parts
+                {
+                    parts.partitions.unwrap_or_default().len()
+                } else {
+                    0
+                }
+            },
+        )
+    } else {
+        0
+    };
+    let mut disks: FuturesUnordered<_> = clients
+        .iter()
+        .map(move |&node| {
+            let handle = node.clone();
+            tokio::spawn(async move { (handle.get_status().await, handle.get_disks().await) })
+        })
+        .collect();
+    let mut replicas: HashMap<_, _> = virtual_disks
+        .replicas
+        .clone()
+        .into_iter()
+        .flatten()
+        .map(|replica| {
+            (
+                (replica.disk.clone(), replica.node.clone()),
+                Replica {
+                    node: replica.node,
+                    disk: replica.disk,
+                    path: replica.path,
+                    status: ReplicaStatus::Offline {
+                        problems: vec![ReplicaProblem::NodeUnavailable],
+                    },
+                },
+            )
+        })
+        .collect();
+    while let Some(res) = disks.next().await {
+        if let Ok((
+            Ok(GetStatusResponse::AJSONWithNodeInfo(status)),
+            Ok(GetDisksResponse::AJSONArrayWithDisksAndTheirStates(disks)),
+        )) = res
+        {
+            for disk in disks {
+                replicas.insert(
+                    (disk.name.clone(), status.name.clone()),
+                    Replica {
+                        node: status.name.clone(),
+                        disk: disk.name,
+                        path: disk.path,
+                        status: disk
+                            .is_active
+                            .then_some(ReplicaStatus::Good)
+                            .unwrap_or_else(|| ReplicaStatus::Offline {
+                                problems: vec![ReplicaProblem::DiskUnavailable],
+                            }),
+                    },
+                );
+            }
+        } else {
+            tracing::warn!("couldn't receive node's space info");
+        }
+    }
+
+    let replicas: Vec<_> = replicas.into_values().collect();
+    let count = replicas
+        .iter()
+        .filter(|replica| matches!(replica.status, ReplicaStatus::Offline { .. }))
+        .count();
+    let status = if count == 0 {
+        VDiskStatus::Good
+    } else if count == replicas.len() {
+        VDiskStatus::Offline
+    } else {
+        VDiskStatus::Bad
+    };
+
+    Ok(VDisk {
+        id: vdisk_id,
+        status,
+        partition_count: partition_count as u64,
+        replicas,
+    })
+}
diff --git a/backend/src/services/mod.rs b/backend/src/services/mod.rs
index bbdf977c..7ac21ae8 100644
--- a/backend/src/services/mod.rs
+++ b/backend/src/services/mod.rs
@@ -1,4 +1,7 @@
 mod prelude {
+    pub use super::methods::{
+        fetch_configuration, fetch_metrics, fetch_nodes, fetch_vdisks, get_vdisk_by_id,
+    };
     pub use crate::{
         connector::{
             api::{prelude::*, ApiNoContext},
@@ -8,13 +11,12 @@ mod prelude {
         prelude::*,
     };
     pub use axum::{
-        extract::{FromRef, FromRequestParts},
+        extract::{FromRef, FromRequestParts, Path},
         http::request::Parts,
         middleware::{from_fn_with_state, Next},
         Router,
     };
     pub use futures::{stream::FuturesUnordered, StreamExt};
-    pub use std::sync::Arc;
     pub use tokio::sync::Mutex;
     pub use tower_sessions::Session;
 }
@@ -23,7 +25,10 @@ pub mod api;
 pub mod auth;
 pub mod methods;
 
-use api::{get_disks_count, get_nodes_count, get_rps, get_space};
+use api::{
+    get_disks_count, get_node_info, get_nodes_count, get_nodes_list, get_rps, get_space,
+    raw_configuration_by_node, raw_metrics_by_node,
+};
 use auth::{login, logout, require_auth, AuthState, BobUser, HttpBobClient, InMemorySessionStore};
 use prelude::*;
 
@@ -47,6 +52,18 @@ pub fn api_router_v1(auth_state: BobAuthState) -> Result<Router<BobAuthState>, R
         .api_route("/nodes/count", &Method::GET, get_nodes_count)
         .api_route("/nodes/rps", &Method::GET, get_rps)
         .api_route("/nodes/space", &Method::GET, get_space)
+        .api_route("/nodes/list", &Method::GET, get_nodes_list)
+        .api_route("/nodes/:node_name", &Method::GET, get_node_info)
+        .api_route(
+            "/nodes/:node_name/metrics",
+            &Method::GET,
+            raw_metrics_by_node,
+        )
+        .api_route(
+            "/nodes/:node_name/configuration",
+            &Method::GET,
+            raw_configuration_by_node,
+        )
         .unwrap()?
         .route_layer(from_fn_with_state(auth_state, require_auth))
         .with_context::<ApiV1, ApiDoc>()
diff --git a/frontend/bindings.rs b/frontend/bindings.rs
index 5514ff90..090b0b17 100644
--- a/frontend/bindings.rs
+++ b/frontend/bindings.rs
@@ -4,3 +4,42 @@ use tsync::tsync;
 
 #[tsync]
 pub type Hostname = String;
+
+// Same as in `backend/src/connector/dto.rs`
+// Imported for bindings generation (tsync doesn't respect serde(rename))
+
+/// BOB's Node interface
+#[tsync]
+pub struct DTONode {
+    pub name: String,
+
+    pub address: String,
+
+    pub vdisks: Option<Vec<DTOVDisk>>,
+}
+
+/// BOB's Node Configuration interface
+#[tsync]
+pub struct DTONodeConfiguration {
+    pub blob_file_name_prefix: Option<String>,
+
+    pub root_dir_name: Option<String>,
+}
+
+/// BOB's VDisk interface
+#[tsync]
+pub struct DTOVDisk {
+    pub id: i32,
+
+    pub replicas: Option<Vec<DTOReplica>>,
+}
+
+/// BOB's Replica interface
+#[tsync]
+pub struct DTOReplica {
+    pub node: String,
+
+    pub disk: String,
+
+    pub path: String,
+}
diff --git a/frontend/src/components/nodeList/nodeList.module.css b/frontend/src/components/nodeList/nodeList.module.css
new file mode 100644
index 00000000..5ca0c0f5
--- /dev/null
+++ b/frontend/src/components/nodeList/nodeList.module.css
@@ -0,0 +1,27 @@
+.greendot {
+    height: 16px;
+    width: 16px;
+    background-color: #34b663;
+    border-radius: 50%;
+    display: inline-block;
+}
+
+.reddot {
+    height: 16px;
+    width: 16px;
+    background-color: #c3234b;
+    border-radius: 50%;
+    display: inline-block;
+}
+
+.graydot {
+    height: 16px;
+    width: 16px;
+    background-color: #7b817e;
+    border-radius: 50%;
+    display: inline-block;
+}
+
+.totalspace {
+    color: #7b817e;
+}
diff --git a/frontend/src/components/nodeList/nodeList.tsx b/frontend/src/components/nodeList/nodeList.tsx
new file mode 100644
index 00000000..db1fb187
--- /dev/null
+++ b/frontend/src/components/nodeList/nodeList.tsx
@@ -0,0 +1,131 @@
+import { Context } from '@appTypes/context.ts';
+import defaultTheme from '@layouts/DefaultTheme.ts';
+import { Box, ThemeProvider } from '@mui/system';
+import { useStore } from '@nanostores/react';
+import axios from 'axios';
+import React, { useCallback, useEffect, useMemo, useState } from 'react';
+
+import FetchingBackdrop from '../backdrop/backdrop.tsx';
+import NodeTable from '../nodeTable/nodeTable.tsx';
+
+const stubNode: NodeInfo = {
+    name: 'loading...',
+    hostname: 'loading...',
+    vdisks: [],
+    status: {
+        status: 'Offline',
+    },
+    rps: {
+        map: {
+            put: 0,
+            get: 0,
+            exist: 0,
+            delete: 0,
+        },
+    },
+    alienCount: 0,
+    corruptedCount: 0,
+    space: {
+        total_disk: 0,
+        free_disk: 0,
+        used_disk: 0,
+        occupied_disk: 0,
+    },
+};
+
+const NodeListPage = () => {
+    const [nodes, setNodes] = useState<NodeInfo[]>([]);
+    const [nodeList, setNodeList] = useState<DTONode[]>([]);
+    const [isPageLoaded, setIsPageLoaded] = useState(false);
+    const context = useStore(Context);
+
+    const fetchNodeList = useMemo(
+        () => async () => {
+            try {
+                const res = await axios.get<DTONode[]>('/api/v1/nodes/list');
+                setNodes(
+                    res.data
+                        .map((dtoNode: DTONode) => {
+                            return {
+                                ...stubNode,
+                                name: dtoNode.name,
+                                hostname: dtoNode.address,
+                            } as NodeInfo;
+                        })
+                        .sort((a, b) => (a.name < b.name ? -1 : 1)),
+                );
+                setNodeList(res.data);
+            } catch (err) {
+                console.log(err);
+            }
+        },
+        [],
+    );
+
+    const fetchNode = useCallback(
+        (nodeName: string) => async () => {
+            try {
+                const res = await axios.get<NodeInfo>('/api/v1/nodes/' + nodeName);
+                return res.data;
+            } catch (err) {
+                console.log(err);
+            }
+        },
+        [],
+    );
+
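+    // Once the node list is known, fetch per-node details immediately, then poll every
+    // `context.refreshTime` seconds; nodes that fail to refresh keep their previous data.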
+    useEffect(() => {
+        const fetchNodes = async () => {
+            const res = (
+                await Promise.all(
+                    nodeList.map(async (node) => {
+                        return fetchNode(node.name)()
+                            .catch(console.error)
+                            .then((resultNode) => resultNode);
+                    }),
+                )
+            ).filter((node): node is NodeInfo => {
+                return node !== undefined;
+            });
+            setNodes(res.concat(nodes.filter((item) => !res.find((n) => (n?.name || '') == item.name))));
+        };
+        if (!isPageLoaded && nodeList.length !== 0) {
+            fetchNodes();
+            setIsPageLoaded(true);
+        }
+        const interval = setInterval(() => {
+            fetchNodes();
+        }, context.refreshTime * 1000);
+
+        return () => clearInterval(interval);
+    }, [fetchNode, context.enabled, context.refreshTime, nodeList, nodes, isPageLoaded]);
+
+    useEffect(() => {
+        fetchNodeList();
+    }, [fetchNodeList]);
+    if (!isPageLoaded) {
+        return <FetchingBackdrop />;
+    }
+    return (
+        <ThemeProvider theme={defaultTheme}>
+            <Box
+                sx={{
+                    marginLeft: '52px',
+                    marginRight: '52px',
+                    marginTop: '38px',
+                    '&:hover': {
+                        color: '#282A2F',
+                    },
+                    height: '820px',
+                    backgroundColor: '#1F2125',
+                    borderColor: '#2E2E33',
+                    border: '1',
+                }}
+            >
+                <NodeTable nodes={nodes} />
+            </Box>
+        </ThemeProvider>
+    );
+};
+
+export default NodeListPage;
diff --git a/frontend/src/components/nodeTable/nodeTable.module.css b/frontend/src/components/nodeTable/nodeTable.module.css
new file mode 100644
index 00000000..7217ac99
--- /dev/null
+++ b/frontend/src/components/nodeTable/nodeTable.module.css
@@ -0,0 +1,31 @@
+.greendot {
+    height: 16px;
+    width: 16px;
+    background-color: #34b663;
+    border-radius: 50%;
+    display: inline-block;
+}
+
+.reddot {
+    height: 16px;
+    width: 16px;
+    background-color: #c3234b;
+    border-radius: 50%;
+    display: inline-block;
+}
+
+.graydot {
+    height: 16px;
+    width: 16px;
+    background-color: #7b817e;
+    border-radius: 50%;
+    display: inline-block;
+}
+
+.totalspace {
+    color: #7b817e;
+}
+
+.greyHeader {
+    background-color: #35373c;
+}
diff --git a/frontend/src/components/nodeTable/nodeTable.tsx b/frontend/src/components/nodeTable/nodeTable.tsx
new file mode 100644
index 00000000..c8609f86
--- /dev/null
+++ b/frontend/src/components/nodeTable/nodeTable.tsx
@@ -0,0 +1,183 @@
+import { formatBytes } from '@appTypes/common.ts';
+import { Link } from '@mui/material';
+import { Box } from '@mui/system';
+import type { GridColDef, GridRenderCellParams, GridValidRowModel } from '@mui/x-data-grid';
+import { DataGrid, GridToolbar } from '@mui/x-data-grid';
+import axios from 'axios';
+import React from 'react';
+
+import style from './nodeTable.module.css';
+
+axios.defaults.withCredentials = true;
+
+const DotMap: Record<NodeStatusName, string> = {
+    good: style.greendot,
+    bad: style.graydot,
+    offline: style.reddot,
+};
+
+const defaultRps: RPS = {
+    map: {
+        put: 0,
+        exist: 0,
+        get: 0,
+        delete: 0,
+    },
+};
+
+const defaultSpace: SpaceInfo = {
+    total_disk: 0,
+    used_disk: 0,
+    occupied_disk: 0,
+    free_disk: 0,
+};
+
+const columns: GridColDef[] = [
+    {
+        field: 'nodename',
+        headerName: 'Node Name',
+        flex: 1,
+        width: 200,
+        align: 'center',
+        headerAlign: 'center',
+        headerClassName: style.greyHeader,
+        sortable: false,
+        renderCell: (params: GridRenderCellParams<GridValidRowModel, string>) => {
+            return (
+                <Link href={params.value}>
+                    <b>{params.value}</b>
+                </Link>
+            );
+        },
+    },
+    {
+        field: 'hostname',
+        headerName: 'Hostname',
+        flex: 1,
+        width: 200,
+        align: 'center',
+        headerAlign: 'center',
+        headerClassName: style.greyHeader,
+    },
+    {
+        field: 'status',
+        headerName: 'Status',
+        flex: 1,
+        width: 200,
+        align: 'left',
+        headerAlign: 'center',
+        headerClassName: style.greyHeader,
+        renderCell: (params: GridRenderCellParams<GridValidRowModel, NodeStatusName>) => {
+            const status = params.value || 'offline';
+            return (
+                <Box
+                    sx={{
+                        display: 'flex',
+                        flexDirection: 'row',
+                        alignItems: 'center',
+                        gap: '18px',
+                    }}
+                >
+                    <span className={DotMap[status]}></span>
+                    {status.charAt(0).toUpperCase() + status.slice(1)}
+                </Box>
+            );
+        },
+    },
+    {
+        field: 'space',
+        headerName: 'Occupied Space',
+        flex: 1,
+        width: 150,
+        align: 'center',
+        headerAlign: 'center',
+        headerClassName: style.greyHeader,
+        renderCell: (params: GridRenderCellParams<GridValidRowModel, SpaceInfo>) => {
+            const space = params.value || defaultSpace;
+            return (
+                <div>
+                    {formatBytes(space.used_disk)} /{' '}
+                    <span className={style.totalspace}>{formatBytes(space.total_disk)}</span>
+                </div>
+            );
+        },
+    },
+    {
+        field: 'rps',
+        headerName: 'RPS',
+        flex: 1,
+        width: 150,
+        align: 'center',
+        headerAlign: 'center',
+        headerClassName: style.greyHeader,
+        renderCell: (params: GridRenderCellParams<GridValidRowModel, RPS>) => {
+            const rps = (params.value || defaultRps).map;
+            return <div>{rps.get + rps.put + rps.exist + rps.delete}</div>;
+        },
+    },
+    {
+        field: 'aliens',
+        headerName: 'Aliens',
+        flex: 1,
+        width: 200,
+        align: 'center',
+        headerAlign: 'center',
+        headerClassName: style.greyHeader,
+    },
+    {
+        field: 'corruptedBlobs',
+        headerName: 'Corrupted BLOBs',
+        flex: 1,
+        width: 200,
+        align: 'center',
+        headerAlign: 'center',
+        headerClassName: style.greyHeader,
+    },
+];
+
+const NodeTable = ({ nodes }: { nodes: NodeInfo[] }) => {
+    const data = nodes.map((node, i) => {
+        return {
+            id: i,
+            nodename: node.name,
+            hostname: node.hostname,
+            status: node.status.status.toLowerCase(),
+            space: node.space,
+            rps: node.rps,
+            aliens: node.alienCount || 0,
+            corruptedBlobs: node.corruptedCount || 0,
+        } as NodeTableCols;
+    });
+    return (
+        <DataGrid
+            rows={data}
+            columns={columns}
+            initialState={{
+                filter: {
+                    filterModel: {
+                        items: [],
+                        quickFilterValues: [],
+                        quickFilterExcludeHiddenColumns: true,
+                    },
+                },
+            }}
+            disableColumnFilter
+            disableColumnSelector
+            disableDensitySelector
+            slots={{ toolbar: GridToolbar }}
+            slotProps={{
+                toolbar: {
+                    showQuickFilter: true,
+                    quickFilterProps: {
+                        debounceMs: 500,
+                        quickFilterParser: (searchInput) => searchInput.split(',').map((value) => value.trim()),
+                    },
+                },
+            }}
+        />
+    );
+};
+
+export default NodeTable;
diff --git a/frontend/src/pages/nodelist/index.astro b/frontend/src/pages/nodelist/index.astro
index 1ff9feb3..54e101a1 100644
--- a/frontend/src/pages/nodelist/index.astro
+++ b/frontend/src/pages/nodelist/index.astro
@@ -1,5 +1,10 @@
 ---
 import Layout from '@layouts/Layout.astro';
+import NodeListPage from '@components/nodeList/nodeList.tsx';
 ---
 
-<Layout title="BOB: Node List" />
+<Layout title="BOB: Dashboard">
+    <main>
+        <NodeListPage client:only />
+    </main>
+</Layout>
diff --git a/frontend/src/types/common.ts b/frontend/src/types/common.ts
index 01fc75b9..961fdb8b 100644
--- a/frontend/src/types/common.ts
+++ b/frontend/src/types/common.ts
@@ -31,3 +31,29 @@ export function getCookie(field: string) {
 export function eraseCookie(name: string) {
     document.cookie = name + '=; Max-Age=-99999999;';
 }
+
+export function formatBytes(bytes: number, decimals = 0) {
+    if (!+bytes) return '0B';
+
+    const k = 1024;
+    const dm = decimals < 0 ? 0 : decimals;
+    const sizes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'];
+
+    const i = Math.floor(Math.log(bytes) / Math.log(k));
+
+    return `${parseFloat((bytes / Math.pow(k, i)).toFixed(dm))}${sizes[i]}`;
+}
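+// e.g. formatBytes(1536, 1) -> '1.5KB'; formatBytes(0) -> '0B'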
+
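+// Property access on the returned proxy echoes the property name, e.g.
+// proxiedPropertiesOf<NodeInfo>().hostname returns 'hostname'; handy for
+// referring to field names in a type-safe way.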
+export function proxiedPropertiesOf<TObj>() {
+    return new Proxy(
+        {},
+        {
+            get: (_, prop) => prop,
+            set: () => {
+                throw Error('Set not supported');
+            },
+        },
+    ) as {
+        [P in keyof TObj]?: P;
+    };
+}
diff --git a/frontend/src/types/data.d.ts b/frontend/src/types/data.d.ts
index 439ab1e8..07e37272 100644
--- a/frontend/src/types/data.d.ts
+++ b/frontend/src/types/data.d.ts
@@ -11,3 +11,14 @@ interface DashboardState {
     rpsBreakdownList: RPSList;
     dataLoaded: boolean;
 }
+
+interface NodeTableCols {
+    id: number;
+    nodename: string;
+    hostname: string;
+    status: NodeStatusName;
+    space?: SpaceInfo;
+    rps?: RPS;
+    aliens?: number;
+    corruptedBlobs?: number;
+}
diff --git a/frontend/src/types/rust.d.ts b/frontend/src/types/rust.d.ts
index 1b4483d0..294e15d9 100644
--- a/frontend/src/types/rust.d.ts
+++ b/frontend/src/types/rust.d.ts
@@ -1,5 +1,18 @@
 /* This file is generated and managed by tsync */
 
+/** Physical disk definition */
+interface Disk {
+  /** Disk name */
+  name: string;
+  /** Disk path */
+  path: string;
+  /** Disk status */
+  status: DiskStatus;
+  totalSpace: number;
+  usedSpace: number;
+  iops: number;
+}
+
 /** Defines kind of problem on disk */
 type DiskProblem =
   | "FreeSpaceRunningOut";
@@ -12,11 +25,16 @@ type DiskProblem =
  */
 type DiskStatus =
   | DiskStatus__Good
+  | DiskStatus__Bad
   | DiskStatus__Offline;
 
 type DiskStatus__Good = {
   status: "Good";
 };
+type DiskStatus__Bad = {
+  status: "Bad";
+  problems: Array<DiskProblem>;
+};
 type DiskStatus__Offline = {
   status: "Offline";
 };
@@ -25,6 +43,17 @@ type DiskStatus__Offline = {
 type DiskStatusName =
   | "good" | "bad" | "offline";
 
+interface NodeInfo {
+  name: string;
+  hostname: string;
+  vdisks: Array<VDisk>;
+  status: NodeStatus;
+  rps?: RPS;
+  alienCount?: number;
+  corruptedCount?: number;
+  space?: SpaceInfo;
+}
+
 /** Defines kind of problem on Node */
 type NodeProblem =
   | "AliensExists" | "CorruptedExists" | "FreeSpaceRunningOut" | "VirtualMemLargerThanRAM" | "HighCPULoad";
@@ -38,11 +67,16 @@ type NodeProblem =
  */
 type NodeStatus =
   | NodeStatus__Good
+  | NodeStatus__Bad
   | NodeStatus__Offline;
 
 type NodeStatus__Good = {
   status: "Good";
 };
+type NodeStatus__Bad = {
+  status: "Bad";
+  problems: Array<NodeProblem>;
+};
 type NodeStatus__Offline = {
   status: "Offline";
 };
@@ -51,6 +85,14 @@ type NodeStatus__Offline = {
 type NodeStatusName =
   | "good" | "bad" | "offline";
 
+/** [`VDisk`]'s replicas */
+interface Replica {
+  node: string;
+  disk: string;
+  path: string;
+  status: ReplicaStatus;
+}
+
 /** Reasons why Replica is offline */
 type ReplicaProblem =
   | "NodeUnavailable" | "DiskUnavailable";
@@ -63,11 +105,16 @@ type ReplicaProblem =
  * Content - List of problems on replica. 'null' if status != 'offline'
  */
 type ReplicaStatus =
-  | ReplicaStatus__Good;
+  | ReplicaStatus__Good
+  | ReplicaStatus__Offline;
 
 type ReplicaStatus__Good = {
   status: "Good";
 };
+type ReplicaStatus__Offline = {
+  status: "Offline";
+  problems: Array<ReplicaProblem>;
+};
 
 /** Disk space information in bytes */
 interface SpaceInfo {
@@ -126,3 +173,29 @@ interface Credentials {
 }
 
 type Hostname = string
+
+/** BOB's Node interface */
+interface DTONode {
+  name: string;
+  address: string;
+  vdisks?: Array<DTOVDisk>;
+}
+
+/** BOB's Node Configuration interface */
+interface DTONodeConfiguration {
+  blob_file_name_prefix?: string;
+  root_dir_name?: string;
+}
+
+/** BOB's VDisk interface */
+interface DTOVDisk {
+  id: number;
+  replicas?: Array<DTOReplica>;
+}
+
+/** BOB's Replica interface */
+interface DTOReplica {
+  node: string;
+  disk: string;
+  path: string;
+}