From ea46291a1a25492eccc134a8c6efe7ae5a1cd614 Mon Sep 17 00:00:00 2001 From: Javier Viola Date: Tue, 8 Aug 2023 12:13:29 -0300 Subject: [PATCH 01/69] wip, refactor provider trait --- crates/configuration/src/lib.rs | 3 + crates/provider/src/lib.rs | 114 +++-- crates/provider/src/native.rs | 701 ++++++++-------------------- crates/provider/src/shared/types.rs | 5 +- 4 files changed, 256 insertions(+), 567 deletions(-) diff --git a/crates/configuration/src/lib.rs b/crates/configuration/src/lib.rs index dc7158f44..f71d7212a 100644 --- a/crates/configuration/src/lib.rs +++ b/crates/configuration/src/lib.rs @@ -12,3 +12,6 @@ pub use hrmp_channel::{HrmpChannelConfig, HrmpChannelConfigBuilder}; pub use network::{NetworkConfig, NetworkConfigBuilder}; pub use parachain::{ParachainConfig, ParachainConfigBuilder}; pub use relaychain::{RelaychainConfig, RelaychainConfigBuilder}; + +// re-export shared +pub use shared::{types, node::NodeConfig}; \ No newline at end of file diff --git a/crates/provider/src/lib.rs b/crates/provider/src/lib.rs index 636ee240f..b4731a0e8 100644 --- a/crates/provider/src/lib.rs +++ b/crates/provider/src/lib.rs @@ -6,88 +6,80 @@ use std::{net::IpAddr, path::PathBuf}; use async_trait::async_trait; use errors::ProviderError; -use shared::types::{FileMap, NativeRunCommandOptions, PodDef, Port, RunCommandResponse}; +use shared::types::{FileMap, NativeRunCommandOptions, Port, RunCommandResponse}; #[async_trait] pub trait Provider { + // TODO(team): I think we should require that the `Node` impl some `ProviderNode trait` + // Provider Node + type Node; + + /// Does the provider require an image (e.g k8s, podman) + fn require_image() -> bool; + /// Create namespace async fn create_namespace(&mut self) -> Result<(), ProviderError>; - async fn get_node_ip(&self) -> Result; - async fn get_port_mapping( + /// Destroy namespace (and inner resources). + async fn destroy_namespace(&self) -> Result<(), ProviderError>; + /// Spawn a long live node/process. + async fn spawn_node( + &self, + node: Self::Node, + // Files to inject, `before` we run the provider command. + files_inject: Vec, + // TODO: keystore logic should live in the orchestrator + keystore: &str, + // chain_spec_id: String, + // TODO: abstract logic for download and uncompress + db_snapshot: &str, + ) -> Result<(), ProviderError>; + /// Spawn a temporary node, will be shutodown after `get` the desired files or output. + async fn spawn_temp( + &self, + node: Self::Node, + // Files to inject, `before` we run the provider command. + files_inject: Vec, + // Files to get, `after` we run the provider command. + files_get: Vec, + ) -> Result<(), ProviderError>; + /// Copy a single file from node to local filesystem. + async fn copy_file_from_node( &mut self, - port: Port, - pod_name: String, - ) -> Result; - async fn get_node_info(&mut self, pod_name: String) -> Result<(IpAddr, Port), ProviderError>; + node_file_path: PathBuf, + local_file_path: PathBuf, + ) -> Result<(), ProviderError>; + /// Run a command inside the node. async fn run_command( &self, args: Vec, opts: NativeRunCommandOptions, ) -> Result; + /// Run a script inside the node, should be a shell script. + /// zombienet will upload the content first. 
async fn run_script( &mut self, identifier: String, script_path: String, args: Vec, ) -> Result; - async fn spawn_from_def( + async fn get_node_logs(&mut self, node_name: &str) -> Result; + async fn dump_logs(&mut self, path: String, node_name: String) -> Result<(), ProviderError>; + async fn get_logs_command(&self, node_name: &str) -> Result; + async fn pause(&self, node_name: &str) -> Result<(), ProviderError>; + async fn resume(&self, node_name: &str) -> Result<(), ProviderError>; + async fn restart( &mut self, - pod_def: PodDef, - files_to_copy: Vec, - keystore: String, - chain_spec_id: String, - db_snapshot: String, - ) -> Result<(), ProviderError>; - async fn copy_file_from_pod( - &mut self, - pod_file_path: PathBuf, - local_file_path: PathBuf, - ) -> Result<(), ProviderError>; - async fn create_resource( - &mut self, - resource_def: PodDef, - scoped: bool, - wait_ready: bool, - ) -> Result<(), ProviderError>; - async fn wait_node_ready(&mut self, node_name: String) -> Result<(), ProviderError>; - async fn get_node_logs(&mut self, node_name: String) -> Result; - async fn dump_logs(&mut self, path: String, pod_name: String) -> Result<(), ProviderError>; - fn get_pause_args(&mut self, name: String) -> Vec; - fn get_resume_args(&mut self, name: String) -> Vec; - async fn restart_node(&mut self, name: String, timeout: u64) -> Result; - async fn get_help_info(&mut self) -> Result<(), ProviderError>; - async fn destroy_namespace(&mut self) -> Result<(), ProviderError>; - async fn get_logs_command(&mut self, name: String) -> Result; - async fn put_local_magic_file( - &self, - _name: String, - _container: Option, - ) -> Result<(), ProviderError> { - Ok(()) - } - fn is_pod_monitor_available() -> Result { - Ok(false) - } - async fn spawn_introspector() -> Result<(), ProviderError> { - Ok(()) - } - - async fn static_setup() -> Result<(), ProviderError> { - Ok(()) - } + node_name: &str, + after_sec: Option, + ) -> Result; + async fn get_node_info(&self, node_name: &str) -> Result<(IpAddr, Port), ProviderError>; + async fn get_node_ip(&self, node_name: &str) -> Result; + async fn get_port_mapping(&self, port: Port, node_name: &str) -> Result; + async fn static_setup(&mut self) -> Result<(), ProviderError>; async fn create_static_resource() -> Result<(), ProviderError> { Ok(()) } - async fn create_pod_monitor() -> Result<(), ProviderError> { - Ok(()) - } - async fn setup_cleaner() -> Result<(), ProviderError> { - Ok(()) - } - #[allow(clippy::diverging_sub_expression)] - async fn upsert_cron_job() -> Result<(), ProviderError> { - unimplemented!(); - } + // TODO(team): Do we need at this point to handle cleanner/pod-monitor? 
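+
+    // Illustrative only: a rough sketch of how an orchestrator could drive this
+    // refactored trait once a concrete `Node` type exists. `provider`, `node_spec`
+    // and `files_to_inject` below are placeholders, not part of this change:
+    //
+    //   provider.create_namespace().await?;
+    //   provider.spawn_node(node_spec, files_to_inject, "", "").await?;
+    //   let (ip, port) = provider.get_node_info("alice").await?;
+    //   provider.destroy_namespace().await?;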
} // re-exports diff --git a/crates/provider/src/native.rs b/crates/provider/src/native.rs index 817a455f9..e2bdaeb76 100644 --- a/crates/provider/src/native.rs +++ b/crates/provider/src/native.rs @@ -1,47 +1,38 @@ use std::{ self, - collections::{ - hash_map::Entry::{Occupied, Vacant}, - HashMap, - }, + collections::HashMap, fmt::Debug, net::IpAddr, path::{Path, PathBuf}, }; use async_trait::async_trait; -use serde::Serialize; -use support::{fs::FileSystem, net::download_file}; +use support::fs::FileSystem; use tokio::{ - process::Command, + process::{Child, Command}, time::{sleep, Duration}, }; +use configuration::types::Port; use super::Provider; use crate::{ errors::ProviderError, shared::{ constants::{DEFAULT_DATA_DIR, DEFAULT_REMOTE_DIR, LOCALHOST, P2P_PORT}, - types::{ - FileMap, NativeRunCommandOptions, PodDef, Port, Process, RunCommandResponse, ZombieRole, - }, + types::{FileMap, NativeRunCommandOptions, Process, RunCommandResponse}, }, }; -#[derive(Debug, Serialize, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq)] pub struct NativeProvider { // Namespace of the client (isolation directory) namespace: String, + // TODO: re-iterate, since we are creating the config with the sdk // Path where configuration relies, all the `files` are accessed relative to this. - config_path: String, - // Variable that shows if debug is activated - is_debug: bool, - // The timeout for start the node - timeout: u32, + // config_path: String, // Command to use, e.g "bash" command: String, // Temporary directory, root directory for the network tmp_dir: String, - local_magic_file_path: String, remote_dir: String, data_dir: String, process_map: HashMap, @@ -51,37 +42,51 @@ pub struct NativeProvider { impl NativeProvider { /// Zombienet `native` provider allows to run the nodes as a local process in the local environment /// params: - /// namespace: Namespace of the clien + /// namespace: Namespace of the client /// config_path: Path where configuration relies /// tmp_dir: Temporary directory where files will be placed /// filesystem: Filesystem to use (std::fs::FileSystem, mock etc.) pub fn new( namespace: impl Into, - config_path: impl Into, + //config_path: impl Into, tmp_dir: impl Into, filesystem: T, ) -> Self { - let tmp_dir: String = tmp_dir.into(); + let tmp_dir = tmp_dir.into(); let process_map: HashMap = HashMap::new(); Self { namespace: namespace.into(), - config_path: config_path.into(), - is_debug: true, - timeout: 60, // seconds - local_magic_file_path: format!("{}/finished.txt", &tmp_dir), + // config_path: config_path.into(), remote_dir: format!("{}{}", &tmp_dir, DEFAULT_REMOTE_DIR), - data_dir: format!("{}{}", &tmp_dir, DEFAULT_DATA_DIR), + data_dir: format!("{}{}", &tmp_dir,DEFAULT_DATA_DIR), command: "bash".into(), tmp_dir, process_map, filesystem, } } + + fn get_process_by_node_name(&self, node_name: &str) -> Result<&Process, ProviderError> { + self.process_map + .get(node_name) + .ok_or(ProviderError::MissingNodeInfo( + node_name.to_owned(), + "process".into(), + )) + } } +pub struct Node {} + #[async_trait] -impl Provider for NativeProvider { +impl Provider for NativeProvider where T: FileSystem + Send + Sync { + type Node = Node; + + fn require_image() -> bool { + false + } + async fn create_namespace(&mut self) -> Result<(), ProviderError> { // Native provider don't have the `namespace` isolation. 
// but we create the `remoteDir` to place files @@ -92,29 +97,92 @@ impl Provider for NativeProvider { Ok(()) } - async fn get_port_mapping( - &mut self, - port: Port, - pod_name: String, - ) -> Result { - let r = match self.process_map.get(&pod_name) { - Some(process) => match process.port_mapping.get(&port) { - Some(port) => Ok(*port), - None => Err(ProviderError::MissingNodeInfo(pod_name, "port".into())), - }, - None => Err(ProviderError::MissingNodeInfo(pod_name, "process".into())), - }; + async fn destroy_namespace(&self) -> Result<(), ProviderError> { + // get pids to kill all related process + let pids: Vec = self + .process_map + .iter() + .filter(|(_, process)| process.pid != 0) + .map(|(_, process)| process.pid.to_string()) + .collect(); - return r; + // TODO: use a crate (or even std) to get this info instead of relying on bash + let result = self + .run_command( + [format!( + "ps ax| awk '{{print $1}}'| grep -E '{}'", + pids.join("|") + )] + .to_vec(), + NativeRunCommandOptions { + is_failure_allowed: true, + }, + ) + .await + .unwrap(); + + if result.exit_code.code().unwrap() == 0 { + let pids_to_kill: Vec = result + .std_out + .split(|c| c == '\n') + .map(|s| s.into()) + .collect(); + + let _ = self + .run_command( + [format!("kill -9 {}", pids_to_kill.join(" "))].to_vec(), + NativeRunCommandOptions { + is_failure_allowed: true, + }, + ) + .await?; + } + Ok(()) } - async fn get_node_info(&mut self, pod_name: String) -> Result<(IpAddr, Port), ProviderError> { - let host_port = self.get_port_mapping(P2P_PORT, pod_name).await?; - Ok((LOCALHOST, host_port)) + async fn static_setup(&mut self) -> Result<(), ProviderError> { + Ok(()) } - async fn get_node_ip(&self) -> Result { - Ok(LOCALHOST) + async fn spawn_node( + &self, + _node: Node, + _files_inject: Vec, + _keystore: &str, + _db_snapshot: &str, + ) -> Result<(), ProviderError> { + // TODO: We should implement the logic to go from the `Node` (nodeSpec) + // to the running node, since we will no expose anymore the underline `Def`. + // We can follow the logic of the spawn_from_def later. + + Ok(()) + } + + async fn spawn_temp( + &self, + _node: Node, + _files_inject: Vec, + _files_get: Vec, + ) -> Result<(), ProviderError> { + // TODO: We should implement the logic to go from the `Node` (nodeSpec) + // to the running node, since we will no expose anymore the underline `Def`. + // We can follow the logic of the spawn_from_def later. + + Ok(()) + } + + async fn copy_file_from_node( + &mut self, + pod_file_path: PathBuf, + local_file_path: PathBuf, + ) -> Result<(), ProviderError> { + //log::debug!("cp {} {}", pod_file_path.to_string_lossy(), local_file_path.to_string_lossy()); + + self.filesystem + .copy(&pod_file_path, &local_file_path) + .await + .map_err(|e| ProviderError::FSError(Box::new(e)))?; + Ok(()) } async fn run_command( @@ -209,266 +277,7 @@ impl Provider for NativeProvider { } // TODO: Add test - async fn spawn_from_def( - &mut self, - pod_def: PodDef, - files_to_copy: Vec, - keystore: String, - chain_spec_id: String, - // TODO: add logic to download the snapshot - db_snapshot: String, - ) -> Result<(), ProviderError> { - let name = pod_def.metadata.name.clone(); - // TODO: log::debug!(format!("{}", serde_json::to_string(&pod_def))); - - // keep this in the client. 
- self.process_map.entry(name.clone()).and_modify(|p| { - p.logs = format!("{}/{}.log", self.tmp_dir, name); - p.port_mapping = pod_def - .spec - .ports - .iter() - .map(|item| (item.container_port, item.host_port)) - .collect(); - }); - - // TODO: check how we will log with tables - // let logTable = new CreateLogTable({ - // colWidths: [25, 100], - // }); - - // const logs = [ - // [decorators.cyan("Pod"), decorators.green(name)], - // [decorators.cyan("Status"), decorators.green("Launching")], - // [ - // decorators.cyan("Command"), - // decorators.white(podDef.spec.command.join(" ")), - // ], - // ]; - // if (dbSnapshot) { - // logs.push([decorators.cyan("DB Snapshot"), decorators.green(dbSnapshot)]); - // } - // logTable.pushToPrint(logs); - - // we need to get the snapshot from a public access - // and extract to /data - let _ = self - .filesystem - .create_dir(pod_def.spec.data_path.clone()) - .await; - - let _ = download_file(db_snapshot, format!("{}/db.tgz", pod_def.spec.data_path)).await; - let command = format!("cd {}/.. && tar -xzvf data/db.tgz", pod_def.spec.data_path); - - self.run_command(vec![command], NativeRunCommandOptions::default()) - .await?; - - if !keystore.is_empty() { - // initialize keystore - let keystore_remote_dir = format!( - "{}/chains/{}/keystore", - pod_def.spec.data_path, chain_spec_id - ); - - let _ = self - .filesystem - .create_dir(keystore_remote_dir.clone()) - .await; - - let _ = self.filesystem.copy(&keystore, &keystore_remote_dir).await; - } - - let files_to_copy_iter = files_to_copy.iter(); - - for file in files_to_copy_iter { - // log::debug!(format!("file.local_file_path: {}", file.local_file_path)); - // log::debug!(format!("file.remote_file_path: {}", file.remote_file_path)); - - // log::debug!(format!("self.remote_dir: {}", self.remote_dir); - // log::debug!(format!("self.data_dir: {}", self.data_dir); - - let remote_file_path_str: String = file - .clone() - .remote_file_path - .into_os_string() - .into_string() - .unwrap(); - - let resolved_remote_file_path = if remote_file_path_str.contains(&self.remote_dir) { - format!( - "{}/{}", - &pod_def.spec.cfg_path, - remote_file_path_str.replace(&self.remote_dir, "") - ) - } else { - format!( - "{}/{}", - &pod_def.spec.data_path, - remote_file_path_str.replace(&self.data_dir, "") - ) - }; - - let _ = self - .filesystem - .copy( - file.clone() - .local_file_path - .into_os_string() - .into_string() - .unwrap(), - resolved_remote_file_path, - ) - .await; - } - - self.create_resource(pod_def, false, true).await?; - - // TODO: check how we will log with tables - // logTable = new CreateLogTable({ - // colWidths: [40, 80], - // }); - // logTable.pushToPrint([ - // [decorators.cyan("Pod"), decorators.green(name)], - // [decorators.cyan("Status"), decorators.green("Ready")], - // ]); - Ok(()) - } - - async fn copy_file_from_pod( - &mut self, - pod_file_path: PathBuf, - local_file_path: PathBuf, - ) -> Result<(), ProviderError> { - // TODO: log::debug!(format!("cp {} {}", pod_file_path, local_file_path)); - - self.filesystem - .copy(&pod_file_path, &local_file_path) - .await - .map_err(|e| ProviderError::FSError(Box::new(e)))?; - Ok(()) - } - - async fn create_resource( - &mut self, - mut resource_def: PodDef, - _scoped: bool, - wait_ready: bool, - ) -> Result<(), ProviderError> { - let name: String = resource_def.metadata.name.clone(); - let local_file_path: String = format!("{}/{}.yaml", &self.tmp_dir, name); - let content: String = serde_json::to_string(&resource_def)?; - - self.filesystem - 
.write(&local_file_path, content) - .await - .map_err(|e| ProviderError::FSError(Box::new(e)))?; - - if resource_def.spec.command.get(0) == Some(&"bash".into()) { - resource_def.spec.command.remove(0); - } - - if resource_def.metadata.labels.zombie_role == ZombieRole::Temp { - // for temp we run some short living cmds - self.run_command( - resource_def.spec.command, - NativeRunCommandOptions { - is_failure_allowed: Some(true).is_some(), - }, - ) - .await?; - } else { - // Allow others are spawned. - let logs = format!("{}/{}.log", self.tmp_dir, name); - let file_handler = self - .filesystem - .create(logs.clone()) - .await - .map_err(|e| ProviderError::FSError(Box::new(e)))?; - - let final_command = resource_def.spec.command.join(" "); - let child_process = std::process::Command::new(&self.command) - .arg("-c") - .arg(final_command.clone()) - .stdout(file_handler) - // TODO: redirect stderr to the same stdout - //.stderr() - .spawn()?; - - // TODO: log::debug!(node_process.id()); - // nodeProcess.stdout.pipe(log); - // nodeProcess.stderr.pipe(log); - - match self.process_map.entry(name.clone()) { - Occupied(_) => return Err(ProviderError::DuplicatedNodeName(name)), - Vacant(slot) => { - slot.insert(Process { - pid: child_process.id(), - logs, - port_mapping: resource_def.spec.ports.iter().fold( - HashMap::new(), - |mut memo: HashMap, item| { - memo.insert(item.container_port, item.host_port); - memo - }, - ), - command: final_command, - }); - }, - } - - if wait_ready { - self.wait_node_ready(name).await?; - } - } - Ok(()) - } - - // TODO: Add test - async fn destroy_namespace(&mut self) -> Result<(), ProviderError> { - // get pids to kill all related process - let pids: Vec = self - .process_map - .iter() - .filter(|(_, process)| process.pid != 0) - .map(|(_, process)| process.pid.to_string()) - .collect(); - - // TODO: use a crate (or even std) to get this info instead of relying on bash - let result = self - .run_command( - [format!( - "ps ax| awk '{{print $1}}'| grep -E '{}'", - pids.join("|") - )] - .to_vec(), - NativeRunCommandOptions { - is_failure_allowed: true, - }, - ) - .await - .unwrap(); - - if result.exit_code.code().unwrap() == 0 { - let pids_to_kill: Vec = result - .std_out - .split(|c| c == '\n') - .map(|s| s.into()) - .collect(); - - let _ = self - .run_command( - [format!("kill -9 {}", pids_to_kill.join(" "))].to_vec(), - NativeRunCommandOptions { - is_failure_allowed: true, - }, - ) - .await?; - } - Ok(()) - } - - // TODO: Add test - async fn get_node_logs(&mut self, name: String) -> Result { + async fn get_node_logs(&mut self, name: &str) -> Result { // For now in native let's just return all the logs let result = self .filesystem @@ -490,157 +299,111 @@ impl Provider for NativeProvider { Ok(()) } - async fn wait_node_ready(&mut self, node_name: String) -> Result<(), ProviderError> { - // check if the process is alive after 1 seconds - sleep(Duration::from_millis(1000)).await; + async fn get_logs_command(&self, name: &str) -> Result { + Ok(format!("tail -f {}/{}.log", self.tmp_dir, name)) + } - let Some(process_node) = self.process_map.get(&node_name) else { - return Err(ProviderError::MissingNodeInfo(node_name, "process".into())); - }; + // TODO: Add test + async fn pause(&self, node_name: &str) -> Result<(), ProviderError> { + let process = self.get_process_by_node_name(node_name)?; - let result = self + let _ = self .run_command( - vec![format!("ps {}", process_node.pid)], + vec![format!("kill -STOP {}", process.pid)], NativeRunCommandOptions { is_failure_allowed: 
true, }, ) .await?; - - if result.exit_code.code().unwrap() > 0 { - let lines: String = self.get_node_logs(node_name).await?; - // TODO: check how we will log with tables - // TODO: Log with a log table - // const logTable = new CreateLogTable({ - // colWidths: [20, 100], - // }); - // logTable.pushToPrint([ - // [decorators.cyan("Pod"), decorators.green(nodeName)], - // [ - // decorators.cyan("Status"), - // decorators.reverse(decorators.red("Error")), - // ], - // [ - // decorators.cyan("Message"), - // decorators.white(`Process: ${pid}, for node: ${nodeName} dies.`), - // ], - // [decorators.cyan("Output"), decorators.white(lines)], - // ]); - - return Err(ProviderError::NodeNotReady(lines)); - } - - // Process pid is - // check log lines grow between 2/6/12 secs - let lines_intial: RunCommandResponse = self - .run_command( - vec![format!("wc -l {}", process_node.logs)], - NativeRunCommandOptions::default(), - ) - .await?; - - for i in [2000, 6000, 12000] { - sleep(Duration::from_millis(i)).await; - let lines_now = self - .run_command( - vec![format!("wc -l {}", process_node.logs)], - NativeRunCommandOptions::default(), - ) - .await?; - if lines_now.std_out > lines_intial.std_out { - return Ok(()); - }; - } - - let error_string = format!( - "Log lines of process: {} ( node: {} ) doesn't grow, please check logs at {}", - process_node.pid, node_name, process_node.logs - ); - - Err(ProviderError::NodeNotReady(error_string)) - } - - // TODO: Add test - fn get_pause_args(&mut self, name: String) -> Vec { - let command = format!("kill -STOP {}", self.process_map[&name].pid); - vec![command] + Ok(()) } // TODO: Add test - fn get_resume_args(&mut self, name: String) -> Vec { - let command = format!("kill -CONT {}", self.process_map[&name].pid); - vec![command] - } + async fn resume(&self, node_name: &str) -> Result<(), ProviderError> { + let process = self.get_process_by_node_name(node_name)?; - async fn restart_node(&mut self, name: String, timeout: u64) -> Result { - let command = format!("kill -9 {}", self.process_map[&name].pid); - let result = self + let _ = self .run_command( - vec![command], + vec![format!("kill -CONT {}", process.pid)], NativeRunCommandOptions { is_failure_allowed: true, }, ) .await?; + Ok(()) + } + + // TODO: Add test + async fn restart( + &mut self, + node_name: &str, + after_secs: Option, + ) -> Result { + let process = self.get_process_by_node_name(node_name)?; - if result.exit_code.code().unwrap() > 0 { - return Ok(false); + let _resp = self.run_command( + vec![format!("kill -9 {:?}", process.pid)], + NativeRunCommandOptions { + is_failure_allowed: true, + }, + ) + .await?; + + //log::debug!("{:?}", &resp); + + if let Some(secs) = after_secs { + sleep(Duration::from_secs(secs.into())).await; } - sleep(Duration::from_millis(timeout * 1000)).await; + let process: &mut Process = + self.process_map + .get_mut(node_name) + .ok_or(ProviderError::MissingNodeInfo( + node_name.to_owned(), + "process".into(), + ))?; - let logs = self.process_map[&name].logs.clone(); + let mapped_env: HashMap<&str, &str> = process.env.iter().map(|env_var| { + (env_var.name.as_str(), env_var.value.as_str()) + }).collect(); - // log::debug!("Command: {}", self.process_map[&name].cmd.join(" ")); + let child_process: Child = Command::new(self.command.clone()) + .arg("-c") + .arg(process.command.clone()) + .envs(&mapped_env) + .spawn() + .map_err(|e| ProviderError::ErrorSpawningNode(e.to_string()))?; - let file_handler = self - .filesystem - .create(logs.clone()) - .await - .map_err(|e| 
ProviderError::FSError(Box::new(e)))?; - let final_command = self.process_map[&name].command.clone(); - - let child_process = std::process::Command::new(&self.command) - .arg("-c") - .arg(final_command.clone()) - // TODO: set env - .stdout(file_handler) - // TODO: redirect stderr to the same stdout - //.stderr() - .spawn()?; - - match self.process_map.entry(name.clone()) { - Occupied(_) => return Err(ProviderError::DuplicatedNodeName(name)), - Vacant(slot) => { - slot.insert(Process { - pid: child_process.id(), - // TODO: complete this field - logs, - // TODO: complete this field - port_mapping: HashMap::default(), - command: final_command, - }); - }, - } - self.wait_node_ready(name).await?; + process.pid = child_process.id().ok_or(ProviderError::ErrorSpawningNode( + "Failed to get pid".to_string(), + ))?; Ok(true) } - async fn get_logs_command(&mut self, name: String) -> Result { - Ok(format!("tail -f {}/{}.log", self.tmp_dir, name)) + async fn get_node_info(&self, node_name: &str) -> Result<(IpAddr, Port), ProviderError> { + let host_port = self.get_port_mapping(P2P_PORT, node_name).await?; + Ok((LOCALHOST, host_port)) } - // TODO: Add test - async fn get_help_info(&mut self) -> Result<(), ProviderError> { - let _ = self - .run_command( - vec!["--help".to_owned()], - NativeRunCommandOptions::default(), - ) - .await?; + async fn get_node_ip(&self, _node_name: &str) -> Result { + Ok(LOCALHOST) + } - Ok(()) + async fn get_port_mapping(&self, port: Port, node_name: &str) -> Result { + match self.process_map.get(node_name) { + Some(process) => match process.port_mapping.get(&port) { + Some(port) => Ok(*port), + None => Err(ProviderError::MissingNodeInfo( + node_name.to_owned(), + "port".into(), + )), + }, + None => Err(ProviderError::MissingNodeInfo( + node_name.to_owned(), + "process".into(), + )), + } } } @@ -651,20 +414,15 @@ mod tests { use support::fs::mock::{MockError, MockFilesystem, Operation}; use super::*; - use crate::shared::types::{PodLabels, PodMetadata, PodSpec}; #[test] fn new_native_provider() { let native_provider: NativeProvider = - NativeProvider::new("something", "./", "/tmp", MockFilesystem::new()); + NativeProvider::new("something", "/tmp", MockFilesystem::new()); assert_eq!(native_provider.namespace, "something"); - assert_eq!(native_provider.config_path, "./"); - assert!(native_provider.is_debug); - assert_eq!(native_provider.timeout, 60); assert_eq!(native_provider.tmp_dir, "/tmp"); assert_eq!(native_provider.command, "bash"); - assert_eq!(native_provider.local_magic_file_path, "/tmp/finished.txt"); assert_eq!(native_provider.remote_dir, "/tmp/cfg"); assert_eq!(native_provider.data_dir, "/tmp/data"); } @@ -672,7 +430,7 @@ mod tests { #[tokio::test] async fn test_fielsystem_usage() { let mut native_provider: NativeProvider = - NativeProvider::new("something", "./", "/tmp", MockFilesystem::new()); + NativeProvider::new("something", "/tmp", MockFilesystem::new()); native_provider.create_namespace().await.unwrap(); @@ -691,7 +449,6 @@ mod tests { async fn test_fielsystem_usage_fails() { let mut native_provider: NativeProvider = NativeProvider::new( "something", - "./", "/tmp", MockFilesystem::with_create_dir_error(MockError::OpError("create".into())), ); @@ -702,15 +459,18 @@ mod tests { #[tokio::test] async fn test_get_node_ip() { let native_provider: NativeProvider = - NativeProvider::new("something", "./", "/tmp", MockFilesystem::new()); + NativeProvider::new("something", "/tmp", MockFilesystem::new()); - assert_eq!(native_provider.get_node_ip().await.unwrap(), 
LOCALHOST); + assert_eq!( + native_provider.get_node_ip("some").await.unwrap(), + LOCALHOST + ); } #[tokio::test] async fn test_run_command_when_bash_is_removed() { let native_provider: NativeProvider = - NativeProvider::new("something", "./", "/tmp", MockFilesystem::new()); + NativeProvider::new("something", "/tmp", MockFilesystem::new()); let result: RunCommandResponse = native_provider .run_command( @@ -732,7 +492,7 @@ mod tests { #[tokio::test] async fn test_run_command_when_dash_c_is_provided() { - let native_provider = NativeProvider::new("something", "./", "/tmp", MockFilesystem::new()); + let native_provider = NativeProvider::new("something", "/tmp", MockFilesystem::new()); let result = native_provider.run_command( vec!["-c".into(), "ls".into()], @@ -745,7 +505,7 @@ mod tests { #[tokio::test] async fn test_run_command_when_error_return_error() { - let native_provider = NativeProvider::new("something", "./", "/tmp", MockFilesystem::new()); + let native_provider = NativeProvider::new("something", "/tmp", MockFilesystem::new()); let mut some = native_provider.run_command( vec!["ls".into(), "ls".into()], @@ -763,71 +523,4 @@ mod tests { assert!(some.await.is_ok()); } - - #[tokio::test] - async fn test_create_resource() { - let mut native_provider: NativeProvider = - NativeProvider::new("something", "./", "/tmp", MockFilesystem::new()); - - let resource_def: PodDef = PodDef { - metadata: PodMetadata { - name: "string".to_owned(), - namespace: "string".to_owned(), - labels: PodLabels { - app: "String".to_owned(), - zombie_ns: "String".to_owned(), - name: "String".to_owned(), - instance: "String".to_owned(), - zombie_role: ZombieRole::Node, - }, - }, - spec: PodSpec { - cfg_path: "string".to_owned(), - data_path: "string".to_owned(), - ports: vec![], - command: vec!["ls".to_owned()], - env: vec![], - }, - }; - - native_provider - .create_resource(resource_def, false, false) - .await - .unwrap(); - - assert_eq!(native_provider.process_map.len(), 1); - } - #[tokio::test] - async fn test_create_resource_wait_ready() { - let mut native_provider: NativeProvider = - NativeProvider::new("something", "./", "/tmp", MockFilesystem::new()); - - let resource_def: PodDef = PodDef { - metadata: PodMetadata { - name: "string".to_owned(), - namespace: "string".to_owned(), - labels: PodLabels { - app: "String".to_owned(), - zombie_ns: "String".to_owned(), - name: "String".to_owned(), - instance: "String".to_owned(), - zombie_role: ZombieRole::Node, - }, - }, - spec: PodSpec { - cfg_path: "string".to_owned(), - data_path: "string".to_owned(), - ports: vec![], - command: vec!["for i in $(seq 1 10); do echo $i;sleep 1;done".into()], - env: vec![], - }, - }; - - native_provider - .create_resource(resource_def, false, true) - .await - .unwrap(); - - assert_eq!(native_provider.process_map.len(), 1); - } } diff --git a/crates/provider/src/shared/types.rs b/crates/provider/src/shared/types.rs index 7ab5e8a41..6817678b8 100644 --- a/crates/provider/src/shared/types.rs +++ b/crates/provider/src/shared/types.rs @@ -115,8 +115,8 @@ pub struct PodDef { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct EnvVar { - name: String, - value: String, + pub(crate) name: String, + pub(crate) value: String, } impl From<(&str, &str)> for EnvVar { @@ -181,4 +181,5 @@ pub struct Process { pub logs: String, pub port_mapping: HashMap, pub command: String, + pub env: ProcessEnvironment, } From 346308f3a310a900810619209e16232f5d3027d4 Mon Sep 17 00:00:00 2001 From: Javier Viola Date: Tue, 8 Aug 2023 12:55:41 -0300 
Subject: [PATCH 02/69] fmt --- crates/configuration/src/lib.rs | 3 +-- crates/provider/src/native.rs | 38 +++++++++++++++++++-------------- 2 files changed, 23 insertions(+), 18 deletions(-) diff --git a/crates/configuration/src/lib.rs b/crates/configuration/src/lib.rs index f71d7212a..3908b3691 100644 --- a/crates/configuration/src/lib.rs +++ b/crates/configuration/src/lib.rs @@ -12,6 +12,5 @@ pub use hrmp_channel::{HrmpChannelConfig, HrmpChannelConfigBuilder}; pub use network::{NetworkConfig, NetworkConfigBuilder}; pub use parachain::{ParachainConfig, ParachainConfigBuilder}; pub use relaychain::{RelaychainConfig, RelaychainConfigBuilder}; - // re-export shared -pub use shared::{types, node::NodeConfig}; \ No newline at end of file +pub use shared::{node::NodeConfig, types}; diff --git a/crates/provider/src/native.rs b/crates/provider/src/native.rs index e2bdaeb76..d37ac80e9 100644 --- a/crates/provider/src/native.rs +++ b/crates/provider/src/native.rs @@ -7,12 +7,12 @@ use std::{ }; use async_trait::async_trait; +use configuration::types::Port; use support::fs::FileSystem; use tokio::{ process::{Child, Command}, time::{sleep, Duration}, }; -use configuration::types::Port; use super::Provider; use crate::{ @@ -48,7 +48,7 @@ impl NativeProvider { /// filesystem: Filesystem to use (std::fs::FileSystem, mock etc.) pub fn new( namespace: impl Into, - //config_path: impl Into, + // config_path: impl Into, tmp_dir: impl Into, filesystem: T, ) -> Self { @@ -59,7 +59,7 @@ impl NativeProvider { namespace: namespace.into(), // config_path: config_path.into(), remote_dir: format!("{}{}", &tmp_dir, DEFAULT_REMOTE_DIR), - data_dir: format!("{}{}", &tmp_dir,DEFAULT_DATA_DIR), + data_dir: format!("{}{}", &tmp_dir, DEFAULT_DATA_DIR), command: "bash".into(), tmp_dir, process_map, @@ -80,7 +80,10 @@ impl NativeProvider { pub struct Node {} #[async_trait] -impl Provider for NativeProvider where T: FileSystem + Send + Sync { +impl Provider for NativeProvider +where + T: FileSystem + Send + Sync, +{ type Node = Node; fn require_image() -> bool { @@ -176,7 +179,7 @@ impl Provider for NativeProvider where T: FileSystem + Send + Sync { pod_file_path: PathBuf, local_file_path: PathBuf, ) -> Result<(), ProviderError> { - //log::debug!("cp {} {}", pod_file_path.to_string_lossy(), local_file_path.to_string_lossy()); + // log::debug!("cp {} {}", pod_file_path.to_string_lossy(), local_file_path.to_string_lossy()); self.filesystem .copy(&pod_file_path, &local_file_path) @@ -341,15 +344,16 @@ impl Provider for NativeProvider where T: FileSystem + Send + Sync { ) -> Result { let process = self.get_process_by_node_name(node_name)?; - let _resp = self.run_command( - vec![format!("kill -9 {:?}", process.pid)], - NativeRunCommandOptions { - is_failure_allowed: true, - }, - ) - .await?; + let _resp = self + .run_command( + vec![format!("kill -9 {:?}", process.pid)], + NativeRunCommandOptions { + is_failure_allowed: true, + }, + ) + .await?; - //log::debug!("{:?}", &resp); + // log::debug!("{:?}", &resp); if let Some(secs) = after_secs { sleep(Duration::from_secs(secs.into())).await; @@ -363,9 +367,11 @@ impl Provider for NativeProvider where T: FileSystem + Send + Sync { "process".into(), ))?; - let mapped_env: HashMap<&str, &str> = process.env.iter().map(|env_var| { - (env_var.name.as_str(), env_var.value.as_str()) - }).collect(); + let mapped_env: HashMap<&str, &str> = process + .env + .iter() + .map(|env_var| (env_var.name.as_str(), env_var.value.as_str())) + .collect(); let child_process: Child = 
Command::new(self.command.clone()) .arg("-c") From 80aaf9e395488895e2bebe4d8587d6671231f8b4 Mon Sep 17 00:00:00 2001 From: l0r1s Date: Tue, 15 Aug 2023 00:36:11 +0300 Subject: [PATCH 03/69] wip --- Cargo.toml | 1 + crates/provider/Cargo.toml | 17 +- crates/provider/src/errors.rs | 3 + crates/provider/src/lib.rs | 193 ++++-- crates/provider/src/native.rs | 1123 +++++++++++++++++++------------ crates/support/Cargo.toml | 2 + crates/support/src/fs.rs | 57 +- crates/support/src/fs/errors.rs | 11 - crates/support/src/fs/mock.rs | 385 +++++++---- 9 files changed, 1111 insertions(+), 681 deletions(-) delete mode 100644 crates/support/src/fs/errors.rs diff --git a/Cargo.toml b/Cargo.toml index d56376d75..6bc2ea379 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -23,3 +23,4 @@ regex = "1.8" lazy_static = "1.4" multiaddr = "0.18" url = "2.3" +uuid = "1.4" \ No newline at end of file diff --git a/crates/provider/Cargo.toml b/crates/provider/Cargo.toml index bbc268302..f6d7d5b91 100644 --- a/crates/provider/Cargo.toml +++ b/crates/provider/Cargo.toml @@ -8,11 +8,18 @@ edition = "2021" [dependencies] support = { path = "../support" } configuration = { path = "../configuration" } -async-trait = {workspace = true } -futures = {workspace = true } +async-trait = { workspace = true } +futures = { workspace = true } #napi = { version="2.12.7", features=["async"]} #napi-derive = "2.12.5" serde = { workspace = true, features = ["derive"] } -serde_json = {workspace = true} -tokio = { workspace = true, features = ["process", "macros", "fs", "time", "rt"] } -thiserror = {workspace = true} +serde_json = { workspace = true } +tokio = { workspace = true, features = [ + "process", + "macros", + "fs", + "time", + "rt", +] } +thiserror = { workspace = true } +uuid = { workspace = true, features = ["v4"] } diff --git a/crates/provider/src/errors.rs b/crates/provider/src/errors.rs index 9bd1e34f0..51bb05cb7 100644 --- a/crates/provider/src/errors.rs +++ b/crates/provider/src/errors.rs @@ -13,6 +13,9 @@ macro_rules! 
from_error { #[derive(Debug, thiserror::Error)] #[allow(missing_docs)] pub enum ProviderError { + #[error("Namespace ID already exists: {0}")] + ConflictingNamespaceId(String), + #[error("Invalid network configuration field {0}")] InvalidConfig(String), #[error("Can recover node: {0} info, field: {1}")] diff --git a/crates/provider/src/lib.rs b/crates/provider/src/lib.rs index b4731a0e8..65c8e3929 100644 --- a/crates/provider/src/lib.rs +++ b/crates/provider/src/lib.rs @@ -2,85 +2,152 @@ mod errors; mod native; mod shared; -use std::{net::IpAddr, path::PathBuf}; +use std::{net::IpAddr, path::PathBuf, process::ExitStatus, sync::Arc}; use async_trait::async_trait; -use errors::ProviderError; -use shared::types::{FileMap, NativeRunCommandOptions, Port, RunCommandResponse}; +use tokio::sync::RwLock; + +use crate::{ + errors::ProviderError, + shared::types::{FileMap, Port}, +}; + +#[derive(Clone)] +pub struct ProviderCapabilities { + pub requires_image: bool, +} + +pub struct CreateNamespaceOptions { + pub root_dir: String, + pub config_dir: String, + pub data_dir: String, +} + +impl Default for CreateNamespaceOptions { + fn default() -> Self { + Self { + root_dir: "/tmp".to_string(), + config_dir: "/cfg".to_string(), + data_dir: "/data".to_string(), + } + } +} + +impl CreateNamespaceOptions { + pub fn new() -> Self { + Self::default() + } + + pub fn root_dir(mut self, root_dir: &str) -> Self { + self.root_dir = root_dir.to_string(); + self + } + + pub fn config_dir(mut self, config_dir: &str) -> Self { + self.config_dir = config_dir.to_string(); + self + } + + pub fn data_dir(mut self, data_dir: &str) -> Self { + self.data_dir = data_dir.to_string(); + self + } +} #[async_trait] pub trait Provider { - // TODO(team): I think we should require that the `Node` impl some `ProviderNode trait` - // Provider Node - type Node; - - /// Does the provider require an image (e.g k8s, podman) - fn require_image() -> bool; - /// Create namespace - async fn create_namespace(&mut self) -> Result<(), ProviderError>; - /// Destroy namespace (and inner resources). - async fn destroy_namespace(&self) -> Result<(), ProviderError>; - /// Spawn a long live node/process. - async fn spawn_node( + fn capabilities(&self) -> &ProviderCapabilities; + async fn create_namespace( &self, - node: Self::Node, - // Files to inject, `before` we run the provider command. - files_inject: Vec, - // TODO: keystore logic should live in the orchestrator - keystore: &str, - // chain_spec_id: String, - // TODO: abstract logic for download and uncompress - db_snapshot: &str, - ) -> Result<(), ProviderError>; - /// Spawn a temporary node, will be shutodown after `get` the desired files or output. - async fn spawn_temp( + options: Option, + ) -> Result; + // TODO(team): Do we need at this point to handle cleanner/pod-monitor? +} + +pub type DynProvider = Arc>; + +pub struct SpawnNodeOptions { + pub name: String, + pub node: (), + // Files to inject, `before` we run the provider command. + pub files_inject: Vec, + // TODO: keystore logic should live in the orchestrator + pub keystore: String, + // chain_spec_id: String, + // TODO: abstract logic for download and uncompress + pub db_snapshot: String, +} + +pub struct SpawnTempOptions { + pub node: (), + pub injected_files: Vec, + pub files_to_retrieve: Vec, +} + +#[async_trait] +pub trait ProviderNamespace { + fn id(&self) -> &str; + /// Spawn a long live node/process. 
+ async fn spawn_node(&self, options: SpawnNodeOptions) -> Result<(), ProviderError>; + /// Spawn a temporary node, will be shutdown after `get` the desired files or output. + async fn spawn_temp(&self, options: SpawnTempOptions) -> Result<(), ProviderError>; + /// Destroy namespace (and inner resources). + async fn destroy(&self) -> Result<(), ProviderError>; + async fn static_setup(&self) -> Result<(), ProviderError>; +} + +pub type DynNamespace = Arc>; + +pub struct RunCommandOptions { + pub args: Vec, + pub is_failure_allowed: bool, +} + +pub struct RunScriptOptions { + pub identifier: String, + pub script_path: String, + pub args: Vec, +} + +type ExecutionResult = Result)>; + +#[async_trait] +pub trait ProviderNode { + fn name(&self) -> &str; + + async fn endpoint(&self) -> Result<(IpAddr, Port), ProviderError>; + + async fn mapped_port(&self, port: Port) -> Result; + + async fn logs(&self) -> Result; + + async fn dump_logs(&self, dest: PathBuf) -> Result<(), ProviderError>; + + async fn run_command( &self, - node: Self::Node, - // Files to inject, `before` we run the provider command. - files_inject: Vec, - // Files to get, `after` we run the provider command. - files_get: Vec, - ) -> Result<(), ProviderError>; - /// Copy a single file from node to local filesystem. + options: RunCommandOptions, + ) -> Result; + + async fn run_script(&self, options: RunScriptOptions) + -> Result; + async fn copy_file_from_node( - &mut self, - node_file_path: PathBuf, - local_file_path: PathBuf, - ) -> Result<(), ProviderError>; - /// Run a command inside the node. - async fn run_command( &self, - args: Vec, - opts: NativeRunCommandOptions, - ) -> Result; - /// Run a script inside the node, should be a shell script. - /// zombienet will upload the content first. - async fn run_script( - &mut self, - identifier: String, - script_path: String, - args: Vec, - ) -> Result; - async fn get_node_logs(&mut self, node_name: &str) -> Result; - async fn dump_logs(&mut self, path: String, node_name: String) -> Result<(), ProviderError>; - async fn get_logs_command(&self, node_name: &str) -> Result; + remote_src: PathBuf, + local_dest: PathBuf, + ) -> Result<(), ProviderError>; + async fn pause(&self, node_name: &str) -> Result<(), ProviderError>; + async fn resume(&self, node_name: &str) -> Result<(), ProviderError>; + async fn restart( &mut self, node_name: &str, after_sec: Option, ) -> Result; - async fn get_node_info(&self, node_name: &str) -> Result<(IpAddr, Port), ProviderError>; - async fn get_node_ip(&self, node_name: &str) -> Result; - async fn get_port_mapping(&self, port: Port, node_name: &str) -> Result; - async fn static_setup(&mut self) -> Result<(), ProviderError>; - async fn create_static_resource() -> Result<(), ProviderError> { - Ok(()) - } - // TODO(team): Do we need at this point to handle cleanner/pod-monitor? 
+ async fn destroy(&self) -> Result<(), ProviderError>; } -// re-exports -pub use native::NativeProvider; +pub type DynNode = Arc>; diff --git a/crates/provider/src/native.rs b/crates/provider/src/native.rs index d37ac80e9..f9749f4ac 100644 --- a/crates/provider/src/native.rs +++ b/crates/provider/src/native.rs @@ -4,6 +4,7 @@ use std::{ fmt::Debug, net::IpAddr, path::{Path, PathBuf}, + sync::{Arc, Weak}, }; use async_trait::async_trait; @@ -11,522 +12,748 @@ use configuration::types::Port; use support::fs::FileSystem; use tokio::{ process::{Child, Command}, + sync::RwLock, time::{sleep, Duration}, }; +use uuid::Uuid; -use super::Provider; use crate::{ errors::ProviderError, shared::{ constants::{DEFAULT_DATA_DIR, DEFAULT_REMOTE_DIR, LOCALHOST, P2P_PORT}, types::{FileMap, NativeRunCommandOptions, Process, RunCommandResponse}, }, + CreateNamespaceOptions, DynNamespace, ExecutionResult, Provider, ProviderCapabilities, + ProviderNamespace, ProviderNode, RunCommandOptions, RunScriptOptions, SpawnNodeOptions, + SpawnTempOptions, }; -#[derive(Debug, Clone, PartialEq)] -pub struct NativeProvider { - // Namespace of the client (isolation directory) - namespace: String, - // TODO: re-iterate, since we are creating the config with the sdk - // Path where configuration relies, all the `files` are accessed relative to this. - // config_path: String, - // Command to use, e.g "bash" - command: String, - // Temporary directory, root directory for the network - tmp_dir: String, - remote_dir: String, - data_dir: String, - process_map: HashMap, - filesystem: T, + +pub struct NativeProviderOptions +where + FS: FileSystem + Send + Sync, +{ + filesystem: FS, } -impl NativeProvider { - /// Zombienet `native` provider allows to run the nodes as a local process in the local environment - /// params: - /// namespace: Namespace of the client - /// config_path: Path where configuration relies - /// tmp_dir: Temporary directory where files will be placed - /// filesystem: Filesystem to use (std::fs::FileSystem, mock etc.) 
- pub fn new( - namespace: impl Into, - // config_path: impl Into, - tmp_dir: impl Into, - filesystem: T, - ) -> Self { - let tmp_dir = tmp_dir.into(); - let process_map: HashMap = HashMap::new(); - - Self { - namespace: namespace.into(), - // config_path: config_path.into(), - remote_dir: format!("{}{}", &tmp_dir, DEFAULT_REMOTE_DIR), - data_dir: format!("{}{}", &tmp_dir, DEFAULT_DATA_DIR), - command: "bash".into(), - tmp_dir, - process_map, - filesystem, - } +#[derive(Clone)] +pub struct NativeProvider +where + FS: FileSystem + Send + Sync, +{ + capabilities: ProviderCapabilities, + namespaces: Arc>>>>>, + filesystem: FS, + weak: Weak, +} + +impl NativeProvider +where + FS: FileSystem + Send + Sync, +{ + pub fn new(options: NativeProviderOptions) -> Arc { + Arc::new_cyclic(|weak| Self { + capabilities: ProviderCapabilities { + requires_image: false, + }, + filesystem: options.filesystem, + namespaces: Default::default(), + weak: weak.clone(), + }) } +} - fn get_process_by_node_name(&self, node_name: &str) -> Result<&Process, ProviderError> { - self.process_map - .get(node_name) - .ok_or(ProviderError::MissingNodeInfo( - node_name.to_owned(), - "process".into(), - )) +#[async_trait] +impl Provider for Arc> +where + FS: FileSystem + Send + Sync + Clone + 'static, +{ + fn capabilities(&self) -> &ProviderCapabilities { + &self.capabilities + } + + async fn create_namespace( + &self, + options: Option, + ) -> Result { + let options = options.unwrap_or(CreateNamespaceOptions::new()); + let id = format!("zombie_{}", Uuid::new_v4()); + let mut namespaces = self.namespaces.write().await; + + if namespaces.contains_key(&id) { + return Err(ProviderError::ConflictingNamespaceId(id)); + } + + let base_dir = format!("{}/{}", &options.root_dir, &id); + let config_dir = format!("{}/{}", &base_dir, &options.config_dir); + let data_dir = format!("{}/{}", &base_dir, &options.data_dir); + + // self.filesystem.create_dir(&config_dir).await.unwrap(); + // self.filesystem.create_dir(&data_dir).await.unwrap(); + + let namespace = Arc::new_cyclic(|weak| { + RwLock::new(NativeNamespace { + id: id.clone(), + config_dir, + data_dir, + nodes: Default::default(), + filesystem: self.filesystem.clone(), + provider: self.weak.clone(), + weak: weak.clone(), + }) + }); + + namespaces.insert(id, namespace.clone()); + + Ok(namespace) } } -pub struct Node {} +#[derive(Debug, Clone)] +pub struct NativeNamespace +where + FS: FileSystem + Send + Sync, +{ + id: String, + config_dir: String, + data_dir: String, + nodes: HashMap>>>, + filesystem: FS, + provider: Weak>, + weak: Weak>, +} #[async_trait] -impl Provider for NativeProvider +impl ProviderNamespace for NativeNamespace where - T: FileSystem + Send + Sync, + FS: FileSystem + Send + Sync, { - type Node = Node; + fn id(&self) -> &str { + &self.id + } - fn require_image() -> bool { - false + async fn spawn_node(&self, options: SpawnNodeOptions) -> Result<(), ProviderError> { + Err(ProviderError::DuplicatedNodeName("test".to_string())) } - async fn create_namespace(&mut self) -> Result<(), ProviderError> { - // Native provider don't have the `namespace` isolation. 
- // but we create the `remoteDir` to place files - self.filesystem - .create_dir(&self.remote_dir) - .await - .map_err(|e| ProviderError::FSError(Box::new(e)))?; - Ok(()) + async fn spawn_temp(&self, options: SpawnTempOptions) -> Result<(), ProviderError> { + Err(ProviderError::DuplicatedNodeName("test".to_string())) } - async fn destroy_namespace(&self) -> Result<(), ProviderError> { - // get pids to kill all related process - let pids: Vec = self - .process_map + async fn static_setup(&self) -> Result<(), ProviderError> { + Err(ProviderError::DuplicatedNodeName("test".to_string())) + } + + async fn destroy(&self) -> Result<(), ProviderError> { + let nodes = self + .nodes .iter() - .filter(|(_, process)| process.pid != 0) - .map(|(_, process)| process.pid.to_string()) - .collect(); - - // TODO: use a crate (or even std) to get this info instead of relying on bash - let result = self - .run_command( - [format!( - "ps ax| awk '{{print $1}}'| grep -E '{}'", - pids.join("|") - )] - .to_vec(), - NativeRunCommandOptions { - is_failure_allowed: true, - }, - ) - .await - .unwrap(); - - if result.exit_code.code().unwrap() == 0 { - let pids_to_kill: Vec = result - .std_out - .split(|c| c == '\n') - .map(|s| s.into()) - .collect(); - - let _ = self - .run_command( - [format!("kill -9 {}", pids_to_kill.join(" "))].to_vec(), - NativeRunCommandOptions { - is_failure_allowed: true, - }, - ) - .await?; + .map(|(_, node)| node.clone()) + .collect::>>>>(); + + for node in nodes { + node.read().await.destroy(); } - Ok(()) - } - async fn static_setup(&mut self) -> Result<(), ProviderError> { - Ok(()) + Err(ProviderError::DuplicatedNodeName("test".to_string())) } +} - async fn spawn_node( - &self, - _node: Node, - _files_inject: Vec, - _keystore: &str, - _db_snapshot: &str, - ) -> Result<(), ProviderError> { - // TODO: We should implement the logic to go from the `Node` (nodeSpec) - // to the running node, since we will no expose anymore the underline `Def`. - // We can follow the logic of the spawn_from_def later. +#[derive(Debug, Clone)] +pub struct NativeNode +where + FS: FileSystem + Send + Sync, +{ + name: String, + filesystem: FS, + namespace: Weak>>, +} - Ok(()) +#[async_trait] +impl ProviderNode for NativeNode +where + T: FileSystem + Send + Sync, +{ + fn name(&self) -> &str { + "" } - async fn spawn_temp( - &self, - _node: Node, - _files_inject: Vec, - _files_get: Vec, - ) -> Result<(), ProviderError> { - // TODO: We should implement the logic to go from the `Node` (nodeSpec) - // to the running node, since we will no expose anymore the underline `Def`. - // We can follow the logic of the spawn_from_def later. 
+ async fn endpoint(&self) -> Result<(IpAddr, Port), ProviderError> { + Err(ProviderError::DuplicatedNodeName("test".to_string())) + } - Ok(()) + async fn mapped_port(&self, port: Port) -> Result { + Err(ProviderError::DuplicatedNodeName("test".to_string())) } - async fn copy_file_from_node( - &mut self, - pod_file_path: PathBuf, - local_file_path: PathBuf, - ) -> Result<(), ProviderError> { - // log::debug!("cp {} {}", pod_file_path.to_string_lossy(), local_file_path.to_string_lossy()); + async fn logs(&self) -> Result { + Err(ProviderError::DuplicatedNodeName("test".to_string())) + } - self.filesystem - .copy(&pod_file_path, &local_file_path) - .await - .map_err(|e| ProviderError::FSError(Box::new(e)))?; - Ok(()) + async fn dump_logs(&self, dest: PathBuf) -> Result<(), ProviderError> { + Err(ProviderError::DuplicatedNodeName("test".to_string())) } async fn run_command( &self, - mut args: Vec, - opts: NativeRunCommandOptions, - ) -> Result { - if let Some(arg) = args.get(0) { - if arg == "bash" { - args.remove(0); - } - } - - // -c is already used in the process::Command to execute the command thus - // needs to be removed in case provided - if let Some(arg) = args.get(0) { - if arg == "-c" { - args.remove(0); - } - } - - let result = Command::new(&self.command) - .arg("-c") - .arg(args.join(" ")) - .output() - .await?; - - if !result.status.success() && !opts.is_failure_allowed { - return Err(ProviderError::RunCommandError(args.join(" "))); - } else { - // cmd success or we allow to fail - // in either case we return Ok - Ok(RunCommandResponse { - exit_code: result.status, - std_out: String::from_utf8_lossy(&result.stdout).into(), - std_err: if result.stderr.is_empty() { - None - } else { - Some(String::from_utf8_lossy(&result.stderr).into()) - }, - }) - } + options: RunCommandOptions, + ) -> Result { + Err(ProviderError::DuplicatedNodeName("test".to_string())) } - // TODO: Add test async fn run_script( - &mut self, - identifier: String, - script_path: String, - args: Vec, - ) -> Result { - let script_filename = Path::new(&script_path) - .file_name() - .ok_or(ProviderError::InvalidScriptPath(script_path.clone()))? 
- .to_str() - .ok_or(ProviderError::InvalidScriptPath(script_path.clone()))?; - let script_path_in_pod = format!("{}/{}/{}", self.tmp_dir, identifier, script_filename); - - // upload the script - self.filesystem - .copy(&script_path, &script_path_in_pod) - .await - .map_err(|e| ProviderError::FSError(Box::new(e)))?; - - // set as executable - self.run_command( - vec![ - "chmod".to_owned(), - "+x".to_owned(), - script_path_in_pod.clone(), - ], - NativeRunCommandOptions::default(), - ) - .await?; - - let command = format!( - "cd {}/{} && {} {}", - self.tmp_dir, - identifier, - script_path_in_pod, - args.join(" ") - ); - let result = self - .run_command(vec![command], NativeRunCommandOptions::default()) - .await?; - - Ok(RunCommandResponse { - exit_code: result.exit_code, - std_out: result.std_out, - std_err: result.std_err, - }) - } - - // TODO: Add test - async fn get_node_logs(&mut self, name: &str) -> Result { - // For now in native let's just return all the logs - let result = self - .filesystem - .read_file(&format!("{}/{}.log", self.tmp_dir, name)) - .await - .map_err(|e| ProviderError::FSError(Box::new(e)))?; - return Ok(result); + &self, + options: RunScriptOptions, + ) -> Result { + Err(ProviderError::DuplicatedNodeName("test".to_string())) } - async fn dump_logs(&mut self, path: String, pod_name: String) -> Result<(), ProviderError> { - let dst_file_name: String = format!("{}/logs/{}.log", path, pod_name); - let _ = self - .filesystem - .copy( - &format!("{}/{}.log", self.tmp_dir, pod_name), - &dst_file_name, - ) - .await; + async fn copy_file_from_node( + &self, + remote_src: PathBuf, + local_dest: PathBuf, + ) -> Result<(), ProviderError> { Ok(()) } - async fn get_logs_command(&self, name: &str) -> Result { - Ok(format!("tail -f {}/{}.log", self.tmp_dir, name)) - } - - // TODO: Add test async fn pause(&self, node_name: &str) -> Result<(), ProviderError> { - let process = self.get_process_by_node_name(node_name)?; - - let _ = self - .run_command( - vec![format!("kill -STOP {}", process.pid)], - NativeRunCommandOptions { - is_failure_allowed: true, - }, - ) - .await?; Ok(()) } - // TODO: Add test async fn resume(&self, node_name: &str) -> Result<(), ProviderError> { - let process = self.get_process_by_node_name(node_name)?; - - let _ = self - .run_command( - vec![format!("kill -CONT {}", process.pid)], - NativeRunCommandOptions { - is_failure_allowed: true, - }, - ) - .await?; Ok(()) } - // TODO: Add test async fn restart( &mut self, node_name: &str, - after_secs: Option, + after_sec: Option, ) -> Result { - let process = self.get_process_by_node_name(node_name)?; - - let _resp = self - .run_command( - vec![format!("kill -9 {:?}", process.pid)], - NativeRunCommandOptions { - is_failure_allowed: true, - }, - ) - .await?; - - // log::debug!("{:?}", &resp); - - if let Some(secs) = after_secs { - sleep(Duration::from_secs(secs.into())).await; - } - - let process: &mut Process = - self.process_map - .get_mut(node_name) - .ok_or(ProviderError::MissingNodeInfo( - node_name.to_owned(), - "process".into(), - ))?; - - let mapped_env: HashMap<&str, &str> = process - .env - .iter() - .map(|env_var| (env_var.name.as_str(), env_var.value.as_str())) - .collect(); - - let child_process: Child = Command::new(self.command.clone()) - .arg("-c") - .arg(process.command.clone()) - .envs(&mapped_env) - .spawn() - .map_err(|e| ProviderError::ErrorSpawningNode(e.to_string()))?; - - process.pid = child_process.id().ok_or(ProviderError::ErrorSpawningNode( - "Failed to get pid".to_string(), - ))?; - - 
Ok(true) - } - - async fn get_node_info(&self, node_name: &str) -> Result<(IpAddr, Port), ProviderError> { - let host_port = self.get_port_mapping(P2P_PORT, node_name).await?; - Ok((LOCALHOST, host_port)) + Ok(false) } - async fn get_node_ip(&self, _node_name: &str) -> Result { - Ok(LOCALHOST) - } - - async fn get_port_mapping(&self, port: Port, node_name: &str) -> Result { - match self.process_map.get(node_name) { - Some(process) => match process.port_mapping.get(&port) { - Some(port) => Ok(*port), - None => Err(ProviderError::MissingNodeInfo( - node_name.to_owned(), - "port".into(), - )), - }, - None => Err(ProviderError::MissingNodeInfo( - node_name.to_owned(), - "process".into(), - )), - } + async fn destroy(&self) -> Result<(), ProviderError> { + self.namespace + .upgrade() + .expect("node should be destroyed if namespace is dropped") + .write() + .await + .nodes + .remove(&self.name); + Ok(()) } } #[cfg(test)] mod tests { - use std::{os::unix::process::ExitStatusExt, process::ExitStatus}; - - use support::fs::mock::{MockError, MockFilesystem, Operation}; - - use super::*; - - #[test] - fn new_native_provider() { - let native_provider: NativeProvider = - NativeProvider::new("something", "/tmp", MockFilesystem::new()); - - assert_eq!(native_provider.namespace, "something"); - assert_eq!(native_provider.tmp_dir, "/tmp"); - assert_eq!(native_provider.command, "bash"); - assert_eq!(native_provider.remote_dir, "/tmp/cfg"); - assert_eq!(native_provider.data_dir, "/tmp/data"); - } - - #[tokio::test] - async fn test_fielsystem_usage() { - let mut native_provider: NativeProvider = - NativeProvider::new("something", "/tmp", MockFilesystem::new()); - - native_provider.create_namespace().await.unwrap(); - - assert!(native_provider.filesystem.operations.len() == 1); - - assert_eq!( - native_provider.filesystem.operations[0], - Operation::CreateDir { - path: "/tmp/cfg".into(), - } - ); - } - - #[tokio::test] - #[should_panic(expected = "FSError(OpError(\"create\"))")] - async fn test_fielsystem_usage_fails() { - let mut native_provider: NativeProvider = NativeProvider::new( - "something", - "/tmp", - MockFilesystem::with_create_dir_error(MockError::OpError("create".into())), - ); - - native_provider.create_namespace().await.unwrap(); - } - - #[tokio::test] - async fn test_get_node_ip() { - let native_provider: NativeProvider = - NativeProvider::new("something", "/tmp", MockFilesystem::new()); - - assert_eq!( - native_provider.get_node_ip("some").await.unwrap(), - LOCALHOST - ); - } - #[tokio::test] - async fn test_run_command_when_bash_is_removed() { - let native_provider: NativeProvider = - NativeProvider::new("something", "/tmp", MockFilesystem::new()); - - let result: RunCommandResponse = native_provider - .run_command( - vec!["bash".into(), "ls".into()], - NativeRunCommandOptions::default(), - ) - .await - .unwrap(); - - assert_eq!( - result, - RunCommandResponse { - exit_code: ExitStatus::from_raw(0), - std_out: "Cargo.toml\nsrc\n".into(), - std_err: None, - } - ); - } - - #[tokio::test] - async fn test_run_command_when_dash_c_is_provided() { - let native_provider = NativeProvider::new("something", "/tmp", MockFilesystem::new()); - - let result = native_provider.run_command( - vec!["-c".into(), "ls".into()], - NativeRunCommandOptions::default(), - ); - - let a = result.await; - assert!(a.is_ok()); - } - - #[tokio::test] - async fn test_run_command_when_error_return_error() { - let native_provider = NativeProvider::new("something", "/tmp", MockFilesystem::new()); - - let mut some = 
native_provider.run_command( - vec!["ls".into(), "ls".into()], - NativeRunCommandOptions::default(), - ); - - assert!(some.await.is_err()); - - some = native_provider.run_command( - vec!["ls".into(), "ls".into()], - NativeRunCommandOptions { - is_failure_allowed: true, - }, - ); - - assert!(some.await.is_ok()); - } + async fn it_should_works() {} } + +// #[derive(Debug, Clone, PartialEq)] +// pub struct NativeProvider { +// // Namespace of the client (isolation directory) +// namespace: String, +// // TODO: re-iterate, since we are creating the config with the sdk +// // Path where configuration relies, all the `files` are accessed relative to this. +// // config_path: String, +// // Command to use, e.g "bash" +// command: String, +// // Temporary directory, root directory for the network +// tmp_dir: String, +// remote_dir: String, +// data_dir: String, +// process_map: HashMap, +// filesystem: T, +// } + +// impl NativeProvider { +// /// Zombienet `native` provider allows to run the nodes as a local process in the local environment +// /// params: +// /// namespace: Namespace of the client +// /// config_path: Path where configuration relies +// /// tmp_dir: Temporary directory where files will be placed +// /// filesystem: Filesystem to use (std::fs::FileSystem, mock etc.) +// pub fn new( +// namespace: impl Into, +// // config_path: impl Into, +// tmp_dir: impl Into, +// filesystem: T, +// ) -> Self { +// let tmp_dir = tmp_dir.into(); +// let process_map: HashMap = HashMap::new(); + +// Self { +// namespace: namespace.into(), +// // config_path: config_path.into(), +// remote_dir: format!("{}{}", &tmp_dir, DEFAULT_REMOTE_DIR), +// data_dir: format!("{}{}", &tmp_dir, DEFAULT_DATA_DIR), +// command: "bash".into(), +// tmp_dir, +// process_map, +// filesystem, +// } +// } + +// fn get_process_by_node_name(&self, node_name: &str) -> Result<&Process, ProviderError> { +// self.process_map +// .get(node_name) +// .ok_or(ProviderError::MissingNodeInfo( +// node_name.to_owned(), +// "process".into(), +// )) +// } +// } + +// pub struct Node {} + +// #[async_trait] +// impl Provider for NativeProvider +// where +// T: FileSystem + Send + Sync, +// { +// type Node = Node; + +// fn require_image() -> bool { +// false +// } + +// async fn create_namespace(&mut self) -> Result<(), ProviderError> { +// // Native provider don't have the `namespace` isolation. 
+// // but we create the `remoteDir` to place files +// self.filesystem +// .create_dir(&self.remote_dir) +// .await +// .map_err(|e| ProviderError::FSError(Box::new(e)))?; +// Ok(()) +// } + +// async fn destroy_namespace(&self) -> Result<(), ProviderError> { +// // get pids to kill all related process +// let pids: Vec = self +// .process_map +// .iter() +// .filter(|(_, process)| process.pid != 0) +// .map(|(_, process)| process.pid.to_string()) +// .collect(); + +// // TODO: use a crate (or even std) to get this info instead of relying on bash +// let result = self +// .run_command( +// [format!( +// "ps ax| awk '{{print $1}}'| grep -E '{}'", +// pids.join("|") +// )] +// .to_vec(), +// NativeRunCommandOptions { +// is_failure_allowed: true, +// }, +// ) +// .await +// .unwrap(); + +// if result.exit_code.code().unwrap() == 0 { +// let pids_to_kill: Vec = result +// .std_out +// .split(|c| c == '\n') +// .map(|s| s.into()) +// .collect(); + +// let _ = self +// .run_command( +// [format!("kill -9 {}", pids_to_kill.join(" "))].to_vec(), +// NativeRunCommandOptions { +// is_failure_allowed: true, +// }, +// ) +// .await?; +// } +// Ok(()) +// } + +// async fn static_setup(&mut self) -> Result<(), ProviderError> { +// Ok(()) +// } + +// async fn spawn_node( +// &self, +// _node: Node, +// _files_inject: Vec, +// _keystore: &str, +// _db_snapshot: &str, +// ) -> Result<(), ProviderError> { +// // TODO: We should implement the logic to go from the `Node` (nodeSpec) +// // to the running node, since we will no expose anymore the underline `Def`. +// // We can follow the logic of the spawn_from_def later. + +// Ok(()) +// } + +// async fn spawn_temp( +// &self, +// _node: Node, +// _files_inject: Vec, +// _files_get: Vec, +// ) -> Result<(), ProviderError> { +// // TODO: We should implement the logic to go from the `Node` (nodeSpec) +// // to the running node, since we will no expose anymore the underline `Def`. +// // We can follow the logic of the spawn_from_def later. 
+ +// Ok(()) +// } + +// async fn copy_file_from_node( +// &mut self, +// pod_file_path: PathBuf, +// local_file_path: PathBuf, +// ) -> Result<(), ProviderError> { +// // log::debug!("cp {} {}", pod_file_path.to_string_lossy(), local_file_path.to_string_lossy()); + +// self.filesystem +// .copy(&pod_file_path, &local_file_path) +// .await +// .map_err(|e| ProviderError::FSError(Box::new(e)))?; +// Ok(()) +// } + +// async fn run_command( +// &self, +// mut args: Vec, +// opts: NativeRunCommandOptions, +// ) -> Result { +// if let Some(arg) = args.get(0) { +// if arg == "bash" { +// args.remove(0); +// } +// } + +// // -c is already used in the process::Command to execute the command thus +// // needs to be removed in case provided +// if let Some(arg) = args.get(0) { +// if arg == "-c" { +// args.remove(0); +// } +// } + +// let result = Command::new(&self.command) +// .arg("-c") +// .arg(args.join(" ")) +// .output() +// .await?; + +// if !result.status.success() && !opts.is_failure_allowed { +// return Err(ProviderError::RunCommandError(args.join(" "))); +// } else { +// // cmd success or we allow to fail +// // in either case we return Ok +// Ok(RunCommandResponse { +// exit_code: result.status, +// std_out: String::from_utf8_lossy(&result.stdout).into(), +// std_err: if result.stderr.is_empty() { +// None +// } else { +// Some(String::from_utf8_lossy(&result.stderr).into()) +// }, +// }) +// } +// } + +// // TODO: Add test +// async fn run_script( +// &mut self, +// identifier: String, +// script_path: String, +// args: Vec, +// ) -> Result { +// let script_filename = Path::new(&script_path) +// .file_name() +// .ok_or(ProviderError::InvalidScriptPath(script_path.clone()))? +// .to_str() +// .ok_or(ProviderError::InvalidScriptPath(script_path.clone()))?; +// let script_path_in_pod = format!("{}/{}/{}", self.tmp_dir, identifier, script_filename); + +// // upload the script +// self.filesystem +// .copy(&script_path, &script_path_in_pod) +// .await +// .map_err(|e| ProviderError::FSError(Box::new(e)))?; + +// // set as executable +// self.run_command( +// vec![ +// "chmod".to_owned(), +// "+x".to_owned(), +// script_path_in_pod.clone(), +// ], +// NativeRunCommandOptions::default(), +// ) +// .await?; + +// let command = format!( +// "cd {}/{} && {} {}", +// self.tmp_dir, +// identifier, +// script_path_in_pod, +// args.join(" ") +// ); +// let result = self +// .run_command(vec![command], NativeRunCommandOptions::default()) +// .await?; + +// Ok(RunCommandResponse { +// exit_code: result.exit_code, +// std_out: result.std_out, +// std_err: result.std_err, +// }) +// } + +// // TODO: Add test +// async fn get_node_logs(&mut self, name: &str) -> Result { +// // For now in native let's just return all the logs +// let result = self +// .filesystem +// .read_file(&format!("{}/{}.log", self.tmp_dir, name)) +// .await +// .map_err(|e| ProviderError::FSError(Box::new(e)))?; +// return Ok(result); +// } + +// async fn dump_logs(&mut self, path: String, pod_name: String) -> Result<(), ProviderError> { +// let dst_file_name: String = format!("{}/logs/{}.log", path, pod_name); +// let _ = self +// .filesystem +// .copy( +// &format!("{}/{}.log", self.tmp_dir, pod_name), +// &dst_file_name, +// ) +// .await; +// Ok(()) +// } + +// async fn get_logs_command(&self, name: &str) -> Result { +// Ok(format!("tail -f {}/{}.log", self.tmp_dir, name)) +// } + +// // TODO: Add test +// async fn pause(&self, node_name: &str) -> Result<(), ProviderError> { +// let process = 
self.get_process_by_node_name(node_name)?; + +// let _ = self +// .run_command( +// vec![format!("kill -STOP {}", process.pid)], +// NativeRunCommandOptions { +// is_failure_allowed: true, +// }, +// ) +// .await?; +// Ok(()) +// } + +// // TODO: Add test +// async fn resume(&self, node_name: &str) -> Result<(), ProviderError> { +// let process = self.get_process_by_node_name(node_name)?; + +// let _ = self +// .run_command( +// vec![format!("kill -CONT {}", process.pid)], +// NativeRunCommandOptions { +// is_failure_allowed: true, +// }, +// ) +// .await?; +// Ok(()) +// } + +// // TODO: Add test +// async fn restart( +// &mut self, +// node_name: &str, +// after_secs: Option, +// ) -> Result { +// let process = self.get_process_by_node_name(node_name)?; + +// let _resp = self +// .run_command( +// vec![format!("kill -9 {:?}", process.pid)], +// NativeRunCommandOptions { +// is_failure_allowed: true, +// }, +// ) +// .await?; + +// // log::debug!("{:?}", &resp); + +// if let Some(secs) = after_secs { +// sleep(Duration::from_secs(secs.into())).await; +// } + +// let process: &mut Process = +// self.process_map +// .get_mut(node_name) +// .ok_or(ProviderError::MissingNodeInfo( +// node_name.to_owned(), +// "process".into(), +// ))?; + +// let mapped_env: HashMap<&str, &str> = process +// .env +// .iter() +// .map(|env_var| (env_var.name.as_str(), env_var.value.as_str())) +// .collect(); + +// let child_process: Child = Command::new(self.command.clone()) +// .arg("-c") +// .arg(process.command.clone()) +// .envs(&mapped_env) +// .spawn() +// .map_err(|e| ProviderError::ErrorSpawningNode(e.to_string()))?; + +// process.pid = child_process.id().ok_or(ProviderError::ErrorSpawningNode( +// "Failed to get pid".to_string(), +// ))?; + +// Ok(true) +// } + +// async fn get_node_info(&self, node_name: &str) -> Result<(IpAddr, Port), ProviderError> { +// let host_port = self.get_port_mapping(P2P_PORT, node_name).await?; +// Ok((LOCALHOST, host_port)) +// } + +// async fn get_node_ip(&self, _node_name: &str) -> Result { +// Ok(LOCALHOST) +// } + +// async fn get_port_mapping(&self, port: Port, node_name: &str) -> Result { +// match self.process_map.get(node_name) { +// Some(process) => match process.port_mapping.get(&port) { +// Some(port) => Ok(*port), +// None => Err(ProviderError::MissingNodeInfo( +// node_name.to_owned(), +// "port".into(), +// )), +// }, +// None => Err(ProviderError::MissingNodeInfo( +// node_name.to_owned(), +// "process".into(), +// )), +// } +// } +// } + +// #[cfg(test)] +// mod tests { +// use std::{os::unix::process::ExitStatusExt, process::ExitStatus}; + +// use support::fs::mock::{MockError, MockFilesystem, Operation}; + +// use super::*; + +// #[test] +// fn new_native_provider() { +// let native_provider: NativeProvider = +// NativeProvider::new("something", "/tmp", MockFilesystem::new()); + +// assert_eq!(native_provider.namespace, "something"); +// assert_eq!(native_provider.tmp_dir, "/tmp"); +// assert_eq!(native_provider.command, "bash"); +// assert_eq!(native_provider.remote_dir, "/tmp/cfg"); +// assert_eq!(native_provider.data_dir, "/tmp/data"); +// } + +// #[tokio::test] +// async fn test_fielsystem_usage() { +// let mut native_provider: NativeProvider = +// NativeProvider::new("something", "/tmp", MockFilesystem::new()); + +// native_provider.create_namespace().await.unwrap(); + +// assert!(native_provider.filesystem.operations.len() == 1); + +// assert_eq!( +// native_provider.filesystem.operations[0], +// Operation::CreateDir { +// path: "/tmp/cfg".into(), 
+// } +// ); +// } + +// #[tokio::test] +// #[should_panic(expected = "FSError(OpError(\"create\"))")] +// async fn test_fielsystem_usage_fails() { +// let mut native_provider: NativeProvider = NativeProvider::new( +// "something", +// "/tmp", +// MockFilesystem::with_create_dir_error(MockError::OpError("create".into())), +// ); + +// native_provider.create_namespace().await.unwrap(); +// } + +// #[tokio::test] +// async fn test_get_node_ip() { +// let native_provider: NativeProvider = +// NativeProvider::new("something", "/tmp", MockFilesystem::new()); + +// assert_eq!( +// native_provider.get_node_ip("some").await.unwrap(), +// LOCALHOST +// ); +// } + +// #[tokio::test] +// async fn test_run_command_when_bash_is_removed() { +// let native_provider: NativeProvider = +// NativeProvider::new("something", "/tmp", MockFilesystem::new()); + +// let result: RunCommandResponse = native_provider +// .run_command( +// vec!["bash".into(), "ls".into()], +// NativeRunCommandOptions::default(), +// ) +// .await +// .unwrap(); + +// assert_eq!( +// result, +// RunCommandResponse { +// exit_code: ExitStatus::from_raw(0), +// std_out: "Cargo.toml\nsrc\n".into(), +// std_err: None, +// } +// ); +// } + +// #[tokio::test] +// async fn test_run_command_when_dash_c_is_provided() { +// let native_provider = NativeProvider::new("something", "/tmp", MockFilesystem::new()); + +// let result = native_provider.run_command( +// vec!["-c".into(), "ls".into()], +// NativeRunCommandOptions::default(), +// ); + +// let a = result.await; +// assert!(a.is_ok()); +// } + +// #[tokio::test] +// async fn test_run_command_when_error_return_error() { +// let native_provider = NativeProvider::new("something", "/tmp", MockFilesystem::new()); + +// let mut some = native_provider.run_command( +// vec!["ls".into(), "ls".into()], +// NativeRunCommandOptions::default(), +// ); + +// assert!(some.await.is_err()); + +// some = native_provider.run_command( +// vec!["ls".into(), "ls".into()], +// NativeRunCommandOptions { +// is_failure_allowed: true, +// }, +// ); + +// assert!(some.await.is_ok()); +// } +// } diff --git a/crates/support/Cargo.toml b/crates/support/Cargo.toml index 226ba257d..ec6bf9fd7 100644 --- a/crates/support/Cargo.toml +++ b/crates/support/Cargo.toml @@ -11,3 +11,5 @@ async-trait = { workspace = true } futures = { workspace = true } reqwest = { workspace = true } +[dev-dependencies] +tokio = { workspace = true, features = ["full"] } diff --git a/crates/support/src/fs.rs b/crates/support/src/fs.rs index 9d8a1a36a..dc48c1dce 100644 --- a/crates/support/src/fs.rs +++ b/crates/support/src/fs.rs @@ -1,29 +1,46 @@ -use std::{ - io::{Read, Write}, - path::Path, - process::Stdio, -}; +use std::{ffi::OsString, path::Path}; use async_trait::async_trait; -pub mod errors; mod local_file; +#[cfg(test)] pub mod mock; +#[derive(Debug, thiserror::Error)] +pub enum FileSystemError { + #[error("File path '{0:?}' doesn't contains UTF-8")] + InvalidUtf8Path(OsString), + #[error("File '{0}' doesn't contains UTF-8")] + InvalidUtf8File(String), + #[error("File '{0}' already exists")] + FileAlreadyExists(String), + #[error("File '{0}' not found")] + FileNotFound(String), + #[error("File '{0}' is a directory")] + FileIsDirectory(String), +} + +pub type FileSystemResult = Result; + #[async_trait] pub trait FileSystem { - type File: Read + Write + Into + Send + Sync; - type FSError: std::error::Error + Send + Sync + 'static; - - async fn copy + Send>(&mut self, from: P, to: P) -> Result<(), Self::FSError>; - async fn create + Send>(&mut 
self, path: P) - -> Result; - async fn create_dir + Send>(&mut self, path: P) -> Result<(), Self::FSError>; - async fn open_file + Send>(&mut self, path: P) -> Result<(), Self::FSError>; - async fn read_file + Send>(&mut self, path: P) -> Result; - async fn write + Send>( - &mut self, - path: P, - content: impl Into + Send, - ) -> Result<(), Self::FSError>; + async fn copy( + &self, + from: impl AsRef + Send, + to: impl AsRef + Send, + ) -> FileSystemResult<()>; + + async fn create_dir(&self, path: impl AsRef + Send) -> FileSystemResult<()>; + + async fn create_dir_all(&self, path: impl AsRef + Send) -> FileSystemResult<()>; + + async fn read(&self, path: impl AsRef + Send) -> FileSystemResult>>; + + async fn read_to_string(&self, path: impl AsRef + Send) -> FileSystemResult; + + async fn write( + &self, + path: impl AsRef + Send, + content: impl AsRef<[u8]> + Send, + ) -> FileSystemResult<()>; } diff --git a/crates/support/src/fs/errors.rs b/crates/support/src/fs/errors.rs deleted file mode 100644 index 2d567344a..000000000 --- a/crates/support/src/fs/errors.rs +++ /dev/null @@ -1,11 +0,0 @@ -//! Zombienet Provider error definitions. - -#[derive(Debug, thiserror::Error)] -pub enum FileSystemError { - // TODO: we need more specifc error - #[error("Generic FileSystem error")] - GenericFileSystemError, - /// Some other error. - #[error(transparent)] - Other(#[from] Box), -} diff --git a/crates/support/src/fs/mock.rs b/crates/support/src/fs/mock.rs index 6b2771fae..7b18b1522 100644 --- a/crates/support/src/fs/mock.rs +++ b/crates/support/src/fs/mock.rs @@ -1,173 +1,290 @@ -use std::{ - fs::File, - path::{Path, PathBuf}, -}; +use std::{collections::HashMap, path::Path, ffi::OsString}; +use super::{FileSystem, FileSystemError, FileSystemResult}; use async_trait::async_trait; +use tokio::sync::RwLock; -use super::{local_file::LocalFile, FileSystem}; - -#[derive(Debug, PartialEq)] -pub enum Operation { - Copy { from: PathBuf, to: PathBuf }, - ReadFile { path: PathBuf }, - CreateFile { path: PathBuf }, - CreateDir { path: PathBuf }, - OpenFile { path: PathBuf }, - Write { path: PathBuf, content: String }, +enum InMemoryFileType { + File, + Directory, } -#[derive(Debug, thiserror::Error)] -pub enum MockError { - #[error("Operation error: {0}")] - OpError(String), - #[error(transparent)] - Other(#[from] Box), -} -#[derive(Debug, Default)] -pub struct MockFilesystem { - copy_error: Option, - create_dir_error: Option, - create_file_error: Option, - open_file_error: Option, - read_file_error: Option, - write_error: Option, - pub operations: Vec, +struct InMemoryFile { + r#type: InMemoryFileType, + content: Option>, } -impl MockFilesystem { - pub fn new() -> Self { - Self::default() - } - - pub fn with_create_dir_error(error: MockError) -> Self { +impl InMemoryFile { + fn dir() -> Self { Self { - create_dir_error: Some(error), - ..Self::default() + r#type: InMemoryFileType::Directory, + content: None, } } - // TODO: add test - #[allow(dead_code)] - fn with_create_file_error(error: MockError) -> Self { + fn file(content: Option>) -> Self { Self { - create_file_error: Some(error), - ..Self::default() + r#type: InMemoryFileType::File, + content, } } +} - // TODO: add test - #[allow(dead_code)] - fn with_read_file_error(error: MockError) -> Self { - Self { - read_file_error: Some(error), - ..Self::default() - } - } +struct InMemoryFileSystem { + files: RwLock>, +} - // TODO: add test - #[allow(dead_code)] - fn with_copy_error(error: MockError) -> Self { - Self { - copy_error: Some(error), - 
..Self::default() - } - } +#[async_trait] +impl FileSystem for InMemoryFileSystem { + async fn copy( + &self, + from: impl AsRef + Send, + to: impl AsRef + Send, + ) -> FileSystemResult<()> { + let from = from.as_ref().to_owned(); + let to = to.as_ref().to_owned(); - // TODO: add test - #[allow(dead_code)] - fn with_write_error(error: MockError) -> Self { - Self { - write_error: Some(error), - ..Self::default() - } + from.as_os_str() } -} -#[async_trait] -impl FileSystem for MockFilesystem { - type FSError = MockError; - type File = LocalFile; + async fn create_dir(&self, path: impl AsRef + Send) -> FileSystemResult<()> {} - async fn create_dir + Send>(&mut self, path: P) -> Result<(), Self::FSError> { - if let Some(err) = self.create_dir_error.take() { - return Err(err); - } + async fn create_dir_all(&self, path: impl AsRef + Send) -> FileSystemResult<()> { + let ancestors = path.as_ref().to_owned().ancestors(); + let files = self.files.write().await; - self.operations.push(Operation::CreateDir { - path: path.as_ref().to_path_buf(), - }); - Ok(()) - } + while let Some(path) = ancestors.next() { + let path = path + .to_str() + .ok_or(FileSystemError::InvalidUtf8Path( + path.as_os_str().to_owned(), + ))? + .to_string(); + + if files.contains_key(&path) { + return Err(FileSystemError::FileAlreadyExists(path.clone())); + } - async fn write + Send>( - &mut self, - path: P, - content: impl Into + Send, - ) -> Result<(), Self::FSError> { - if let Some(err) = self.write_error.take() { - return Err(err); + files.insert(path, InMemoryFile::dir()); } - self.operations.push(Operation::Write { - path: path.as_ref().to_path_buf(), - content: content.into(), - }); Ok(()) } - async fn create + Send>( - &mut self, - path: P, - ) -> Result { - if let Some(err) = self.create_file_error.take() { - return Err(err); + async fn read(&self, path: impl AsRef + Send) -> FileSystemResult>> { + let path = path.as_ref().to_owned(); + let path = path + .to_str() + .ok_or(FileSystemError::InvalidUtf8Path( + path.as_os_str().to_owned(), + ))? + .to_string(); + let file = self + .files + .read() + .await + .get(&path) + .ok_or(FileSystemError::FileNotFound(path))?; + + if let InMemoryFileType::Directory = file.r#type { + return Err(FileSystemError::FileIsDirectory(path)); } - let p = path.as_ref().to_path_buf(); + Ok(file.content.clone()) + } - self.operations - .push(Operation::CreateFile { path: p.clone() }); + async fn read_to_string(&self, path: impl AsRef + Send) -> FileSystemResult { + let content = self.read(path).await?; + let path = path.as_ref().to_str().unwrap().to_string(); - let file = File::create(p).expect("not created"); - Ok(LocalFile::from(file)) + Ok(match content { + Some(content) => { + String::from_utf8(content).map_err(|_| FileSystemError::InvalidUtf8File(path))? 
+ }, + None => String::from(""), + }) } - async fn open_file + Send>(&mut self, path: P) -> Result<(), Self::FSError> { - if let Some(err) = self.open_file_error.take() { - return Err(err); - } + async fn write( + &self, + path: impl AsRef + Send, + contents: impl AsRef<[u8]> + Send, + ) -> FileSystemResult<()> { + let files = self.files.write().await; - self.operations.push(Operation::OpenFile { - path: path.as_ref().to_path_buf(), - }); - Ok(()) + if !files.contains_key(&path) { + return Err(FileSystemError::FileNotFound(path)); + } } +} - async fn read_file + Send>(&mut self, path: P) -> Result { - if let Some(err) = self.read_file_error.take() { - return Err(err); - } +#[cfg(test)] +mod tests { + #[tokio::test] + async fn it_works() {} +} - self.operations.push(Operation::ReadFile { - path: path.as_ref().to_path_buf(), - }); - Ok("This is a test".to_owned()) - } +// #[derive(Debug, PartialEq)] +// pub enum Operation { +// Copy { from: PathBuf, to: PathBuf }, +// ReadFile { path: PathBuf }, +// CreateFile { path: PathBuf }, +// CreateDir { path: PathBuf }, +// OpenFile { path: PathBuf }, +// Write { path: PathBuf, content: String }, +// } - async fn copy + Send>( - &mut self, - from: P, - to: P, - ) -> std::result::Result<(), Self::FSError> { - if let Some(err) = self.copy_error.take() { - return Err(err); - } +// #[derive(Debug, thiserror::Error)] +// pub enum MockError { +// #[error("Operation error: {0}")] +// OpError(String), +// #[error(transparent)] +// Other(#[from] Box), +// } +// #[derive(Debug, Default)] +// pub struct MockFilesystem { +// copy_error: Option, +// create_dir_error: Option, +// create_file_error: Option, +// open_file_error: Option, +// read_file_error: Option, +// write_error: Option, +// pub operations: Vec, +// } - self.operations.push(Operation::Copy { - from: from.as_ref().to_path_buf(), - to: to.as_ref().to_path_buf(), - }); - Ok(()) - } -} +// impl MockFilesystem { +// pub fn new() -> Self { +// Self::default() +// } + +// pub fn with_create_dir_error(error: MockError) -> Self { +// Self { +// create_dir_error: Some(error), +// ..Self::default() +// } +// } + +// // TODO: add test +// #[allow(dead_code)] +// fn with_create_file_error(error: MockError) -> Self { +// Self { +// create_file_error: Some(error), +// ..Self::default() +// } +// } + +// // TODO: add test +// #[allow(dead_code)] +// fn with_read_file_error(error: MockError) -> Self { +// Self { +// read_file_error: Some(error), +// ..Self::default() +// } +// } + +// // TODO: add test +// #[allow(dead_code)] +// fn with_copy_error(error: MockError) -> Self { +// Self { +// copy_error: Some(error), +// ..Self::default() +// } +// } + +// // TODO: add test +// #[allow(dead_code)] +// fn with_write_error(error: MockError) -> Self { +// Self { +// write_error: Some(error), +// ..Self::default() +// } +// } +// } + +// #[async_trait] +// impl FileSystem for MockFilesystem { +// type FSError = MockError; +// type File = LocalFile; + +// async fn create_dir + Send>(&mut self, path: P) -> Result<(), Self::FSError> { +// if let Some(err) = self.create_dir_error.take() { +// return Err(err); +// } + +// self.operations.push(Operation::CreateDir { +// path: path.as_ref().to_path_buf(), +// }); +// Ok(()) +// } + +// async fn write + Send>( +// &mut self, +// path: P, +// content: impl Into + Send, +// ) -> Result<(), Self::FSError> { +// if let Some(err) = self.write_error.take() { +// return Err(err); +// } + +// self.operations.push(Operation::Write { +// path: path.as_ref().to_path_buf(), +// content: 
content.into(), +// }); +// Ok(()) +// } + +// async fn create + Send>( +// &mut self, +// path: P, +// ) -> Result { +// if let Some(err) = self.create_file_error.take() { +// return Err(err); +// } + +// let p = path.as_ref().to_path_buf(); + +// self.operations +// .push(Operation::CreateFile { path: p.clone() }); + +// let file = File::create(p).expect("not created"); +// Ok(LocalFile::from(file)) +// } + +// async fn open_file + Send>(&mut self, path: P) -> Result<(), Self::FSError> { +// if let Some(err) = self.open_file_error.take() { +// return Err(err); +// } + +// self.operations.push(Operation::OpenFile { +// path: path.as_ref().to_path_buf(), +// }); +// Ok(()) +// } + +// async fn read_file + Send>(&mut self, path: P) -> Result { +// if let Some(err) = self.read_file_error.take() { +// return Err(err); +// } + +// self.operations.push(Operation::ReadFile { +// path: path.as_ref().to_path_buf(), +// }); +// Ok("This is a test".to_owned()) +// } + +// async fn copy + Send>( +// &mut self, +// from: P, +// to: P, +// ) -> std::result::Result<(), Self::FSError> { +// if let Some(err) = self.copy_error.take() { +// return Err(err); +// } + +// self.operations.push(Operation::Copy { +// from: from.as_ref().to_path_buf(), +// to: to.as_ref().to_path_buf(), +// }); +// Ok(()) +// } +// } From d08b6d41f83b7aefb9beb65fa37dc507203a99ff Mon Sep 17 00:00:00 2001 From: l0r1s Date: Mon, 28 Aug 2023 15:43:55 +0300 Subject: [PATCH 04/69] feat: refactored FileSystem trait and errors --- crates/support/src/fs.rs | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/crates/support/src/fs.rs b/crates/support/src/fs.rs index dc48c1dce..f54655277 100644 --- a/crates/support/src/fs.rs +++ b/crates/support/src/fs.rs @@ -2,22 +2,25 @@ use std::{ffi::OsString, path::Path}; use async_trait::async_trait; -mod local_file; #[cfg(test)] -pub mod mock; +pub mod in_memory; #[derive(Debug, thiserror::Error)] pub enum FileSystemError { - #[error("File path '{0:?}' doesn't contains UTF-8")] - InvalidUtf8Path(OsString), - #[error("File '{0}' doesn't contains UTF-8")] - InvalidUtf8File(String), - #[error("File '{0}' already exists")] - FileAlreadyExists(String), - #[error("File '{0}' not found")] - FileNotFound(String), - #[error("File '{0}' is a directory")] - FileIsDirectory(String), + #[error("File {0:?} already exists")] + FileAlreadyExists(OsString), + #[error("Directory {0:?} already exists")] + DirectoryAlreadyExists(OsString), + #[error("Ancestor {0:?} doesn't exists")] + AncestorDoesntExists(OsString), + #[error("Ancestor {0:?} is not a directory")] + AncestorNotDirectory(OsString), + #[error("File {0:?} not found")] + FileNotFound(OsString), + #[error("File {0:?} is a directory")] + FileIsDirectory(OsString), + #[error("Invalid UTF-8 encoding for file {0:?}")] + InvalidUtf8FileEncoding(OsString), } pub type FileSystemResult = Result; @@ -34,7 +37,7 @@ pub trait FileSystem { async fn create_dir_all(&self, path: impl AsRef + Send) -> FileSystemResult<()>; - async fn read(&self, path: impl AsRef + Send) -> FileSystemResult>>; + async fn read(&self, path: impl AsRef + Send) -> FileSystemResult>; async fn read_to_string(&self, path: impl AsRef + Send) -> FileSystemResult; From 13e4d9ab4e3e6ef521f995e771d71f0917f44a57 Mon Sep 17 00:00:00 2001 From: l0r1s Date: Mon, 28 Aug 2023 15:45:08 +0300 Subject: [PATCH 05/69] chore: removed unused local_file, stdout will be hardcoded in provider at the moment --- crates/support/src/fs/local_file.rs | 35 
----------------------------- 1 file changed, 35 deletions(-) delete mode 100644 crates/support/src/fs/local_file.rs diff --git a/crates/support/src/fs/local_file.rs b/crates/support/src/fs/local_file.rs deleted file mode 100644 index 9c76bf36e..000000000 --- a/crates/support/src/fs/local_file.rs +++ /dev/null @@ -1,35 +0,0 @@ -use std::{ - fs::File, - io::{Read, Write}, - process::Stdio, -}; - -pub struct LocalFile(File); - -impl From for LocalFile { - fn from(file: File) -> Self { - LocalFile(file) - } -} - -impl From for Stdio { - fn from(value: LocalFile) -> Self { - value.0.into() - } -} - -impl Write for LocalFile { - fn write(&mut self, buf: &[u8]) -> Result { - self.0.write(buf) - } - - fn flush(&mut self) -> Result<(), std::io::Error> { - self.0.flush() - } -} - -impl Read for LocalFile { - fn read(&mut self, buf: &mut [u8]) -> std::io::Result { - self.0.read(buf) - } -} From 424d57882891747a26ee1dcc83b8770b037ba817 Mon Sep 17 00:00:00 2001 From: l0r1s Date: Mon, 28 Aug 2023 15:46:12 +0300 Subject: [PATCH 06/69] feat: moved MockFileSystem to InMemoryFileSystem, files too, and added mirror implementation of tokio::fs with tests --- crates/support/src/fs/in_memory.rs | 716 +++++++++++++++++++++++++++++ crates/support/src/fs/mock.rs | 290 ------------ 2 files changed, 716 insertions(+), 290 deletions(-) create mode 100644 crates/support/src/fs/in_memory.rs delete mode 100644 crates/support/src/fs/mock.rs diff --git a/crates/support/src/fs/in_memory.rs b/crates/support/src/fs/in_memory.rs new file mode 100644 index 000000000..a5eb54450 --- /dev/null +++ b/crates/support/src/fs/in_memory.rs @@ -0,0 +1,716 @@ +use std::{collections::HashMap, ffi::OsString, path::Path, str::FromStr}; + +use super::{FileSystem, FileSystemError, FileSystemResult}; +use async_trait::async_trait; +use tokio::sync::RwLock; + +#[derive(Debug)] +enum InMemoryFile { + File(Vec), + Directory, +} + +#[derive(Default, Debug)] +struct InMemoryFileSystem { + files: RwLock>, +} + +impl InMemoryFileSystem { + fn new(files: HashMap) -> Self { + Self { + files: RwLock::new(files), + } + } +} + +#[async_trait] +impl FileSystem for InMemoryFileSystem { + async fn create_dir(&self, path: impl AsRef + Send) -> FileSystemResult<()> { + let path = path.as_ref(); + let os_path = path.as_os_str(); + + match self.files.read().await.get(os_path) { + Some(InMemoryFile::File(_)) => { + Err(FileSystemError::FileAlreadyExists(os_path.to_owned()))? + }, + Some(InMemoryFile::Directory) => { + Err(FileSystemError::DirectoryAlreadyExists(os_path.to_owned()))? 
+ }, + None => {}, + }; + + let mut ancestors = path.ancestors().skip(1); + while let Some(path) = ancestors.next() { + match self.files.read().await.get(path.as_os_str()) { + Some(InMemoryFile::Directory) => continue, + Some(InMemoryFile::File(_)) => Err(FileSystemError::AncestorNotDirectory( + path.as_os_str().to_owned(), + ))?, + None => Err(FileSystemError::AncestorDoesntExists( + path.as_os_str().to_owned(), + ))?, + }; + } + + self.files + .write() + .await + .insert(os_path.to_owned(), InMemoryFile::Directory); + + Ok(()) + } + + async fn create_dir_all(&self, path: impl AsRef + Send) -> FileSystemResult<()> { + let path = path.as_ref(); + let mut files = self.files.write().await; + let mut ancestors = path + .ancestors() + .collect::>() + .into_iter() + .rev() + .skip(1); + + while let Some(path) = ancestors.next() { + match files.get(path.as_os_str()) { + Some(InMemoryFile::Directory) => continue, + Some(InMemoryFile::File(_)) => Err(FileSystemError::AncestorNotDirectory( + path.as_os_str().to_owned(), + ))?, + None => files.insert(path.as_os_str().to_owned(), InMemoryFile::Directory), + }; + } + + Ok(()) + } + + async fn read(&self, path: impl AsRef + Send) -> FileSystemResult> { + let os_path = path.as_ref().as_os_str(); + + match self.files.read().await.get(os_path) { + Some(InMemoryFile::File(content)) => Ok(content.clone()), + Some(InMemoryFile::Directory) => { + Err(FileSystemError::FileIsDirectory(os_path.to_owned())) + }, + None => Err(FileSystemError::FileNotFound(os_path.to_owned())), + } + } + + async fn read_to_string(&self, path: impl AsRef + Send) -> FileSystemResult { + let os_path = path.as_ref().as_os_str().to_owned(); + let content = self.read(path).await?; + + String::from_utf8(content).map_err(|_| FileSystemError::InvalidUtf8FileEncoding(os_path)) + } + + async fn write( + &self, + path: impl AsRef + Send, + contents: impl AsRef<[u8]> + Send, + ) -> FileSystemResult<()> { + let path = path.as_ref(); + let os_path = path.as_os_str(); + let mut files = self.files.write().await; + + let mut ancestors = path.ancestors().skip(1); + while let Some(path) = ancestors.next() { + match files.get(path.as_os_str()) { + Some(InMemoryFile::Directory) => continue, + Some(InMemoryFile::File(_)) => Err(FileSystemError::AncestorNotDirectory( + path.as_os_str().to_owned(), + ))?, + None => Err(FileSystemError::AncestorDoesntExists( + path.as_os_str().to_owned(), + ))?, + }; + } + + if let Some(InMemoryFile::Directory) = files.get(os_path) { + return Err(FileSystemError::FileIsDirectory(os_path.to_owned())); + } + + files.insert( + os_path.to_owned(), + InMemoryFile::File(contents.as_ref().to_vec()), + ); + + Ok(()) + } + + async fn copy( + &self, + from: impl AsRef + Send, + to: impl AsRef + Send, + ) -> FileSystemResult<()> { + let content = self.read(from).await?; + self.write(to, content).await + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[tokio::test] + async fn create_dir_should_creates_a_directory_at_root() { + let fs = InMemoryFileSystem::new(HashMap::from([( + OsString::from_str("/").unwrap(), + InMemoryFile::Directory, + )])); + + fs.create_dir("/dir").await.unwrap(); + + assert_eq!(fs.files.read().await.len(), 2); + assert!(matches!( + fs.files + .read() + .await + .get(&OsString::from_str("/dir").unwrap()) + .unwrap(), + InMemoryFile::Directory + )); + } + + #[tokio::test] + async fn create_dir_should_returns_an_error_if_directory_already_exists() { + let fs = InMemoryFileSystem::new(HashMap::from([ + (OsString::from_str("/").unwrap(), 
InMemoryFile::Directory), + (OsString::from_str("/dir").unwrap(), InMemoryFile::Directory), + ])); + + let err = fs.create_dir("/dir").await.unwrap_err(); + + assert_eq!(fs.files.read().await.len(), 2); + assert!(matches!( + err, + FileSystemError::DirectoryAlreadyExists(path) if path == "/dir" + )); + } + + #[tokio::test] + async fn create_dir_should_returns_an_error_if_file_already_exists() { + let fs = InMemoryFileSystem::new(HashMap::from([ + (OsString::from_str("/").unwrap(), InMemoryFile::Directory), + ( + OsString::from_str("/dir").unwrap(), + InMemoryFile::File(vec![]), + ), + ])); + + let err = fs.create_dir("/dir").await.unwrap_err(); + + assert_eq!(fs.files.read().await.len(), 2); + assert!(matches!( + err, + FileSystemError::FileAlreadyExists(path) if path == "/dir" + )); + } + + #[tokio::test] + async fn create_dir_should_creates_a_directory_if_all_ancestors_exist() { + let fs = InMemoryFileSystem::new(HashMap::from([ + (OsString::from_str("/").unwrap(), InMemoryFile::Directory), + ( + OsString::from_str("/path").unwrap(), + InMemoryFile::Directory, + ), + ( + OsString::from_str("/path/to").unwrap(), + InMemoryFile::Directory, + ), + ( + OsString::from_str("/path/to/my").unwrap(), + InMemoryFile::Directory, + ), + ])); + + fs.create_dir("/path/to/my/dir").await.unwrap(); + + assert_eq!(fs.files.read().await.len(), 5); + assert!(matches!( + fs.files + .read() + .await + .get(&OsString::from_str("/path/to/my/dir").unwrap()) + .unwrap(), + InMemoryFile::Directory + )); + } + + #[tokio::test] + async fn create_dir_should_returns_an_error_if_some_directory_ancestor_doesnt_exists() { + let fs = InMemoryFileSystem::new(HashMap::from([ + (OsString::from_str("/").unwrap(), InMemoryFile::Directory), + ( + OsString::from_str("/path").unwrap(), + InMemoryFile::Directory, + ), + ( + OsString::from_str("/path/to").unwrap(), + InMemoryFile::Directory, + ), + ])); + + let err = fs.create_dir("/path/to/my/dir").await.unwrap_err(); + + assert_eq!(fs.files.read().await.len(), 3); + assert!(matches!( + err, + FileSystemError::AncestorDoesntExists(path) if path == "/path/to/my" + )); + } + + #[tokio::test] + async fn create_dir_should_returns_an_error_if_some_ancestor_is_not_a_directory() { + let fs = InMemoryFileSystem::new(HashMap::from([ + (OsString::from_str("/").unwrap(), InMemoryFile::Directory), + ( + OsString::from_str("/path").unwrap(), + InMemoryFile::File(vec![]), + ), + ( + OsString::from_str("/path/to").unwrap(), + InMemoryFile::Directory, + ), + ( + OsString::from_str("/path/to/my").unwrap(), + InMemoryFile::Directory, + ), + ])); + + let err = fs.create_dir("/path/to/my/dir").await.unwrap_err(); + + assert_eq!(fs.files.read().await.len(), 4); + assert!(matches!( + err, + FileSystemError::AncestorNotDirectory(path) if path == "/path" + )); + } + + #[tokio::test] + async fn create_dir_all_should_creates_a_directory_and_all_its_ancestors_if_they_dont_exist() { + let fs = InMemoryFileSystem::new(HashMap::from([( + OsString::from_str("/").unwrap(), + InMemoryFile::Directory, + )])); + + fs.create_dir_all("/path/to/my/dir").await.unwrap(); + + assert_eq!(fs.files.read().await.len(), 5); + assert!(matches!( + fs.files + .read() + .await + .get(&OsString::from_str("/path").unwrap()) + .unwrap(), + InMemoryFile::Directory + )); + assert!(matches!( + fs.files + .read() + .await + .get(&OsString::from_str("/path/to").unwrap()) + .unwrap(), + InMemoryFile::Directory + )); + assert!(matches!( + fs.files + .read() + .await + .get(&OsString::from_str("/path/to/my").unwrap()) + .unwrap(), + 
InMemoryFile::Directory + )); + assert!(matches!( + fs.files + .read() + .await + .get(&OsString::from_str("/path/to/my/dir").unwrap()) + .unwrap(), + InMemoryFile::Directory + )); + } + + #[tokio::test] + async fn create_dir_all_should_creates_a_directory_and_some_of_its_ancestors_if_they_dont_exist( + ) { + let fs = InMemoryFileSystem::new(HashMap::from([ + (OsString::from_str("/").unwrap(), InMemoryFile::Directory), + ( + OsString::from_str("/path").unwrap(), + InMemoryFile::Directory, + ), + ( + OsString::from_str("/path/to").unwrap(), + InMemoryFile::Directory, + ), + ])); + + fs.create_dir_all("/path/to/my/dir").await.unwrap(); + + assert_eq!(fs.files.read().await.len(), 5); + assert!(matches!( + fs.files + .read() + .await + .get(&OsString::from_str("/path/to/my").unwrap()) + .unwrap(), + InMemoryFile::Directory + )); + assert!(matches!( + fs.files + .read() + .await + .get(&OsString::from_str("/path/to/my/dir").unwrap()) + .unwrap(), + InMemoryFile::Directory + )); + } + + #[tokio::test] + async fn create_dir_all_should_returns_an_error_if_some_ancestor_is_not_a_directory() { + let fs = InMemoryFileSystem::new(HashMap::from([ + (OsString::from_str("/").unwrap(), InMemoryFile::Directory), + ( + OsString::from_str("/path").unwrap(), + InMemoryFile::File(vec![]), + ), + ( + OsString::from_str("/path/to").unwrap(), + InMemoryFile::Directory, + ), + ])); + + let err = fs.create_dir_all("/path/to/my/dir").await.unwrap_err(); + + assert_eq!(fs.files.read().await.len(), 3); + assert!(matches!( + err, + FileSystemError::AncestorNotDirectory(path) if path == "/path" + )); + } + + #[tokio::test] + async fn read_should_returns_the_file_content() { + let fs = InMemoryFileSystem::new(HashMap::from([( + OsString::from_str("/myfile").unwrap(), + InMemoryFile::File("content".as_bytes().to_vec()), + )])); + + let content = fs.read("/myfile").await.unwrap(); + + assert_eq!(content, "content".as_bytes().to_vec()); + } + + #[tokio::test] + async fn read_should_returns_an_error_if_file_doesnt_exists() { + let fs = InMemoryFileSystem::new(HashMap::new()); + + let err = fs.read("/myfile").await.unwrap_err(); + + assert!(matches!( + err, + FileSystemError::FileNotFound(path) if path == "/myfile" + )); + } + + #[tokio::test] + async fn read_should_returns_an_error_if_file_is_a_directory() { + let fs = InMemoryFileSystem::new(HashMap::from([( + OsString::from_str("/myfile").unwrap(), + InMemoryFile::Directory, + )])); + + let err = fs.read("/myfile").await.unwrap_err(); + + assert!(matches!( + err, + FileSystemError::FileIsDirectory(path) if path == "/myfile" + )); + } + + #[tokio::test] + async fn read_to_string_should_returns_the_file_content_as_a_string() { + let fs = InMemoryFileSystem::new(HashMap::from([( + OsString::from_str("/myfile").unwrap(), + InMemoryFile::File("content".as_bytes().to_vec()), + )])); + + let content = fs.read_to_string("/myfile").await.unwrap(); + + assert_eq!(content, "content"); + } + + #[tokio::test] + async fn read_to_string_should_returns_an_error_if_file_doesnt_exists() { + let fs = InMemoryFileSystem::new(HashMap::new()); + + let err = fs.read_to_string("/myfile").await.unwrap_err(); + + assert!(matches!( + err, + FileSystemError::FileNotFound(path) if path == "/myfile" + )); + } + + #[tokio::test] + async fn read_to_string_should_returns_an_error_if_file_is_a_directory() { + let fs = InMemoryFileSystem::new(HashMap::from([( + OsString::from_str("/myfile").unwrap(), + InMemoryFile::Directory, + )])); + + let err = fs.read_to_string("/myfile").await.unwrap_err(); + + 
assert!(matches!( + err, + FileSystemError::FileIsDirectory(path) if path == "/myfile" + )); + } + + #[tokio::test] + async fn read_to_string_should_returns_an_error_if_file_isnt_utf8_encoded() { + let fs = InMemoryFileSystem::new(HashMap::from([( + OsString::from_str("/myfile").unwrap(), + InMemoryFile::File(vec![0xC3, 0x28]), + )])); + + let err = fs.read_to_string("/myfile").await.unwrap_err(); + + assert!(matches!( + err, + FileSystemError::InvalidUtf8FileEncoding(path) if path == "/myfile" + )); + } + + #[tokio::test] + async fn write_should_creates_file_with_content_if_file_doesnt_exists() { + let fs = InMemoryFileSystem::new(HashMap::from([( + OsString::from_str("/").unwrap(), + InMemoryFile::Directory, + )])); + + fs.write("/myfile", "my file content").await.unwrap(); + + assert_eq!(fs.files.read().await.len(), 2); + assert!(matches!( + fs.files + .read() + .await + .get(&OsString::from_str("/myfile").unwrap()), + Some(InMemoryFile::File(content)) if content == "my file content".as_bytes() + )) + } + + #[tokio::test] + async fn write_should_updates_file_content_if_file_exists() { + let fs = InMemoryFileSystem::new(HashMap::from([ + (OsString::from_str("/").unwrap(), InMemoryFile::Directory), + ( + OsString::from_str("/myfile").unwrap(), + InMemoryFile::File("my file content".as_bytes().to_vec()), + ), + ])); + + fs.write("/myfile", "my new file content").await.unwrap(); + + assert_eq!(fs.files.read().await.len(), 2); + assert!(matches!( + fs.files + .read() + .await + .get(&OsString::from_str("/myfile").unwrap()), + Some(InMemoryFile::File(content)) if content == "my new file content".as_bytes() + )) + } + + #[tokio::test] + async fn write_should_returns_an_error_if_file_is_a_directory() { + let fs = InMemoryFileSystem::new(HashMap::from([ + (OsString::from_str("/").unwrap(), InMemoryFile::Directory), + ( + OsString::from_str("/myfile").unwrap(), + InMemoryFile::Directory, + ), + ])); + + let err = fs.write("/myfile", "my file content").await.unwrap_err(); + + assert_eq!(fs.files.read().await.len(), 2); + assert!(matches!(err, FileSystemError::FileIsDirectory(path) if path == "/myfile")); + } + + #[tokio::test] + async fn write_should_returns_an_error_if_file_is_new_and_some_ancestor_doesnt_exists() { + let fs = InMemoryFileSystem::new(HashMap::from([ + (OsString::from_str("/").unwrap(), InMemoryFile::Directory), + ( + OsString::from_str("/path/to").unwrap(), + InMemoryFile::Directory, + ), + ])); + + let err = fs + .write("/path/to/myfile", "my file content") + .await + .unwrap_err(); + + assert_eq!(fs.files.read().await.len(), 2); + assert!(matches!(err, FileSystemError::AncestorDoesntExists(path) if path == "/path")); + } + + #[tokio::test] + async fn write_should_returns_an_error_if_file_is_new_and_some_ancestor_is_not_a_directory() { + let fs = InMemoryFileSystem::new(HashMap::from([ + (OsString::from_str("/").unwrap(), InMemoryFile::Directory), + ( + OsString::from_str("/path").unwrap(), + InMemoryFile::File(vec![]), + ), + ( + OsString::from_str("/path/to").unwrap(), + InMemoryFile::Directory, + ), + ])); + + let err = fs + .write("/path/to/myfile", "my file content") + .await + .unwrap_err(); + + assert_eq!(fs.files.read().await.len(), 3); + assert!(matches!(err, FileSystemError::AncestorNotDirectory(path) if path == "/path")); + } + + #[tokio::test] + async fn copy_should_creates_new_destination_file_if_it_doesnt_exists() { + let fs = InMemoryFileSystem::new(HashMap::from([ + (OsString::from_str("/").unwrap(), InMemoryFile::Directory), + ( + 
OsString::from_str("/myfile").unwrap(), + InMemoryFile::File("my file content".as_bytes().to_vec()), + ), + ])); + + fs.copy("/myfile", "/myfilecopy").await.unwrap(); + + assert_eq!(fs.files.read().await.len(), 3); + assert!( + matches!(fs.files.read().await.get(&OsString::from_str("/myfilecopy").unwrap()).unwrap(), InMemoryFile::File(content) if content == "my file content".as_bytes()) + ); + } + + #[tokio::test] + async fn copy_should_updates_the_file_content_of_the_destination_file_if_it_already_exists() { + let fs = InMemoryFileSystem::new(HashMap::from([ + (OsString::from_str("/").unwrap(), InMemoryFile::Directory), + ( + OsString::from_str("/myfile").unwrap(), + InMemoryFile::File("my new file content".as_bytes().to_vec()), + ), + ( + OsString::from_str("/myfilecopy").unwrap(), + InMemoryFile::File("my file content".as_bytes().to_vec()), + ), + ])); + + fs.copy("/myfile", "/myfilecopy").await.unwrap(); + + assert_eq!(fs.files.read().await.len(), 3); + assert!( + matches!(fs.files.read().await.get(&OsString::from_str("/myfilecopy").unwrap()).unwrap(), InMemoryFile::File(content) if content == "my new file content".as_bytes()) + ); + } + + #[tokio::test] + async fn copy_should_returns_an_error_if_source_file_doesnt_exists() { + let fs = InMemoryFileSystem::new(HashMap::from([( + OsString::from_str("/").unwrap(), + InMemoryFile::Directory, + )])); + + let err = fs.copy("/myfile", "/mfilecopy").await.unwrap_err(); + + assert!(matches!( + err, + FileSystemError::FileNotFound(path) if path == "/myfile" + )); + } + + #[tokio::test] + async fn copy_should_returns_an_error_if_source_file_is_a_directory() { + let fs = InMemoryFileSystem::new(HashMap::from([ + (OsString::from_str("/").unwrap(), InMemoryFile::Directory), + ( + OsString::from_str("/myfile").unwrap(), + InMemoryFile::Directory, + ), + ])); + + let err = fs.copy("/myfile", "/mfilecopy").await.unwrap_err(); + + assert!(matches!( + err, + FileSystemError::FileIsDirectory(path) if path == "/myfile" + )); + } + + #[tokio::test] + async fn copy_should_returns_an_error_if_destination_file_is_a_directory() { + let fs = InMemoryFileSystem::new(HashMap::from([ + (OsString::from_str("/").unwrap(), InMemoryFile::Directory), + ( + OsString::from_str("/myfile").unwrap(), + InMemoryFile::File("my file content".as_bytes().to_vec()), + ), + ( + OsString::from_str("/myfilecopy").unwrap(), + InMemoryFile::Directory, + ), + ])); + + let err = fs.copy("/myfile", "/myfilecopy").await.unwrap_err(); + + assert!(matches!( + err, + FileSystemError::FileIsDirectory(path) if path == "/myfilecopy" + )); + } + + #[tokio::test] + async fn copy_should_returns_an_error_if_destination_file_is_new_and_some_ancestor_doesnt_exists( + ) { + let fs = InMemoryFileSystem::new(HashMap::from([ + (OsString::from_str("/").unwrap(), InMemoryFile::Directory), + ( + OsString::from_str("/myfile").unwrap(), + InMemoryFile::File("my file content".as_bytes().to_vec()), + ), + ])); + + let err = fs.copy("/myfile", "/somedir/myfilecopy").await.unwrap_err(); + + assert_eq!(fs.files.read().await.len(), 2); + assert!(matches!(err, FileSystemError::AncestorDoesntExists(path) if path == "/somedir")); + } + + #[tokio::test] + async fn copy_should_returns_an_error_if_destination_file_is_new_and_some_ancestor_is_not_a_directory( + ) { + let fs = InMemoryFileSystem::new(HashMap::from([ + (OsString::from_str("/").unwrap(), InMemoryFile::Directory), + ( + OsString::from_str("/myfile").unwrap(), + InMemoryFile::File("my file content".as_bytes().to_vec()), + ), + ( + 
OsString::from_str("/mypath").unwrap(), + InMemoryFile::File(vec![]), + ), + ])); + + let err = fs.copy("/myfile", "/mypath/myfilecopy").await.unwrap_err(); + + assert_eq!(fs.files.read().await.len(), 3); + assert!(matches!(err, FileSystemError::AncestorNotDirectory(path) if path == "/mypath")); + } +} diff --git a/crates/support/src/fs/mock.rs b/crates/support/src/fs/mock.rs deleted file mode 100644 index 7b18b1522..000000000 --- a/crates/support/src/fs/mock.rs +++ /dev/null @@ -1,290 +0,0 @@ -use std::{collections::HashMap, path::Path, ffi::OsString}; - -use super::{FileSystem, FileSystemError, FileSystemResult}; -use async_trait::async_trait; -use tokio::sync::RwLock; - -enum InMemoryFileType { - File, - Directory, -} - -struct InMemoryFile { - r#type: InMemoryFileType, - content: Option>, -} - -impl InMemoryFile { - fn dir() -> Self { - Self { - r#type: InMemoryFileType::Directory, - content: None, - } - } - - fn file(content: Option>) -> Self { - Self { - r#type: InMemoryFileType::File, - content, - } - } -} - -struct InMemoryFileSystem { - files: RwLock>, -} - -#[async_trait] -impl FileSystem for InMemoryFileSystem { - async fn copy( - &self, - from: impl AsRef + Send, - to: impl AsRef + Send, - ) -> FileSystemResult<()> { - let from = from.as_ref().to_owned(); - let to = to.as_ref().to_owned(); - - from.as_os_str() - } - - async fn create_dir(&self, path: impl AsRef + Send) -> FileSystemResult<()> {} - - async fn create_dir_all(&self, path: impl AsRef + Send) -> FileSystemResult<()> { - let ancestors = path.as_ref().to_owned().ancestors(); - let files = self.files.write().await; - - while let Some(path) = ancestors.next() { - let path = path - .to_str() - .ok_or(FileSystemError::InvalidUtf8Path( - path.as_os_str().to_owned(), - ))? - .to_string(); - - if files.contains_key(&path) { - return Err(FileSystemError::FileAlreadyExists(path.clone())); - } - - files.insert(path, InMemoryFile::dir()); - } - - Ok(()) - } - - async fn read(&self, path: impl AsRef + Send) -> FileSystemResult>> { - let path = path.as_ref().to_owned(); - let path = path - .to_str() - .ok_or(FileSystemError::InvalidUtf8Path( - path.as_os_str().to_owned(), - ))? - .to_string(); - let file = self - .files - .read() - .await - .get(&path) - .ok_or(FileSystemError::FileNotFound(path))?; - - if let InMemoryFileType::Directory = file.r#type { - return Err(FileSystemError::FileIsDirectory(path)); - } - - Ok(file.content.clone()) - } - - async fn read_to_string(&self, path: impl AsRef + Send) -> FileSystemResult { - let content = self.read(path).await?; - let path = path.as_ref().to_str().unwrap().to_string(); - - Ok(match content { - Some(content) => { - String::from_utf8(content).map_err(|_| FileSystemError::InvalidUtf8File(path))? 
- }, - None => String::from(""), - }) - } - - async fn write( - &self, - path: impl AsRef + Send, - contents: impl AsRef<[u8]> + Send, - ) -> FileSystemResult<()> { - let files = self.files.write().await; - - if !files.contains_key(&path) { - return Err(FileSystemError::FileNotFound(path)); - } - } -} - -#[cfg(test)] -mod tests { - #[tokio::test] - async fn it_works() {} -} - -// #[derive(Debug, PartialEq)] -// pub enum Operation { -// Copy { from: PathBuf, to: PathBuf }, -// ReadFile { path: PathBuf }, -// CreateFile { path: PathBuf }, -// CreateDir { path: PathBuf }, -// OpenFile { path: PathBuf }, -// Write { path: PathBuf, content: String }, -// } - -// #[derive(Debug, thiserror::Error)] -// pub enum MockError { -// #[error("Operation error: {0}")] -// OpError(String), -// #[error(transparent)] -// Other(#[from] Box), -// } -// #[derive(Debug, Default)] -// pub struct MockFilesystem { -// copy_error: Option, -// create_dir_error: Option, -// create_file_error: Option, -// open_file_error: Option, -// read_file_error: Option, -// write_error: Option, -// pub operations: Vec, -// } - -// impl MockFilesystem { -// pub fn new() -> Self { -// Self::default() -// } - -// pub fn with_create_dir_error(error: MockError) -> Self { -// Self { -// create_dir_error: Some(error), -// ..Self::default() -// } -// } - -// // TODO: add test -// #[allow(dead_code)] -// fn with_create_file_error(error: MockError) -> Self { -// Self { -// create_file_error: Some(error), -// ..Self::default() -// } -// } - -// // TODO: add test -// #[allow(dead_code)] -// fn with_read_file_error(error: MockError) -> Self { -// Self { -// read_file_error: Some(error), -// ..Self::default() -// } -// } - -// // TODO: add test -// #[allow(dead_code)] -// fn with_copy_error(error: MockError) -> Self { -// Self { -// copy_error: Some(error), -// ..Self::default() -// } -// } - -// // TODO: add test -// #[allow(dead_code)] -// fn with_write_error(error: MockError) -> Self { -// Self { -// write_error: Some(error), -// ..Self::default() -// } -// } -// } - -// #[async_trait] -// impl FileSystem for MockFilesystem { -// type FSError = MockError; -// type File = LocalFile; - -// async fn create_dir + Send>(&mut self, path: P) -> Result<(), Self::FSError> { -// if let Some(err) = self.create_dir_error.take() { -// return Err(err); -// } - -// self.operations.push(Operation::CreateDir { -// path: path.as_ref().to_path_buf(), -// }); -// Ok(()) -// } - -// async fn write + Send>( -// &mut self, -// path: P, -// content: impl Into + Send, -// ) -> Result<(), Self::FSError> { -// if let Some(err) = self.write_error.take() { -// return Err(err); -// } - -// self.operations.push(Operation::Write { -// path: path.as_ref().to_path_buf(), -// content: content.into(), -// }); -// Ok(()) -// } - -// async fn create + Send>( -// &mut self, -// path: P, -// ) -> Result { -// if let Some(err) = self.create_file_error.take() { -// return Err(err); -// } - -// let p = path.as_ref().to_path_buf(); - -// self.operations -// .push(Operation::CreateFile { path: p.clone() }); - -// let file = File::create(p).expect("not created"); -// Ok(LocalFile::from(file)) -// } - -// async fn open_file + Send>(&mut self, path: P) -> Result<(), Self::FSError> { -// if let Some(err) = self.open_file_error.take() { -// return Err(err); -// } - -// self.operations.push(Operation::OpenFile { -// path: path.as_ref().to_path_buf(), -// }); -// Ok(()) -// } - -// async fn read_file + Send>(&mut self, path: P) -> Result { -// if let Some(err) = self.read_file_error.take() { 
-// return Err(err); -// } - -// self.operations.push(Operation::ReadFile { -// path: path.as_ref().to_path_buf(), -// }); -// Ok("This is a test".to_owned()) -// } - -// async fn copy + Send>( -// &mut self, -// from: P, -// to: P, -// ) -> std::result::Result<(), Self::FSError> { -// if let Some(err) = self.copy_error.take() { -// return Err(err); -// } - -// self.operations.push(Operation::Copy { -// from: from.as_ref().to_path_buf(), -// to: to.as_ref().to_path_buf(), -// }); -// Ok(()) -// } -// } From 6577903a7de9c94357384f60b46f41cf2e163826 Mon Sep 17 00:00:00 2001 From: l0r1s Date: Tue, 29 Aug 2023 20:52:24 +0300 Subject: [PATCH 07/69] feat: added new method append to FileSystem trait --- crates/support/src/fs.rs | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/crates/support/src/fs.rs b/crates/support/src/fs.rs index f54655277..9fc360402 100644 --- a/crates/support/src/fs.rs +++ b/crates/support/src/fs.rs @@ -2,7 +2,6 @@ use std::{ffi::OsString, path::Path}; use async_trait::async_trait; -#[cfg(test)] pub mod in_memory; #[derive(Debug, thiserror::Error)] @@ -27,12 +26,6 @@ pub type FileSystemResult = Result; #[async_trait] pub trait FileSystem { - async fn copy( - &self, - from: impl AsRef + Send, - to: impl AsRef + Send, - ) -> FileSystemResult<()>; - async fn create_dir(&self, path: impl AsRef + Send) -> FileSystemResult<()>; async fn create_dir_all(&self, path: impl AsRef + Send) -> FileSystemResult<()>; @@ -44,6 +37,18 @@ pub trait FileSystem { async fn write( &self, path: impl AsRef + Send, - content: impl AsRef<[u8]> + Send, + contents: impl AsRef<[u8]> + Send, + ) -> FileSystemResult<()>; + + async fn append( + &self, + path: impl AsRef + Send, + contents: impl AsRef<[u8]> + Send, + ) -> FileSystemResult<()>; + + async fn copy( + &self, + from: impl AsRef + Send, + to: impl AsRef + Send, ) -> FileSystemResult<()>; } From c50b69478493d2eca034376d4473be1ab59cc348 Mon Sep 17 00:00:00 2001 From: l0r1s Date: Tue, 29 Aug 2023 20:53:13 +0300 Subject: [PATCH 08/69] feat: implemented new append method on InMemoryFileSystem --- crates/support/src/fs/in_memory.rs | 180 ++++++++++++++++++++++++----- 1 file changed, 148 insertions(+), 32 deletions(-) diff --git a/crates/support/src/fs/in_memory.rs b/crates/support/src/fs/in_memory.rs index a5eb54450..d9e33b150 100644 --- a/crates/support/src/fs/in_memory.rs +++ b/crates/support/src/fs/in_memory.rs @@ -1,24 +1,24 @@ -use std::{collections::HashMap, ffi::OsString, path::Path, str::FromStr}; +use std::{collections::HashMap, ffi::OsString, path::Path, str::FromStr, sync::Arc}; use super::{FileSystem, FileSystemError, FileSystemResult}; use async_trait::async_trait; use tokio::sync::RwLock; -#[derive(Debug)] -enum InMemoryFile { +#[derive(Debug, Clone)] +pub enum InMemoryFile { File(Vec), Directory, } -#[derive(Default, Debug)] -struct InMemoryFileSystem { - files: RwLock>, +#[derive(Default, Debug, Clone)] +pub struct InMemoryFileSystem { + files: Arc>>, } impl InMemoryFileSystem { - fn new(files: HashMap) -> Self { + pub fn new(files: HashMap) -> Self { Self { - files: RwLock::new(files), + files: Arc::new(RwLock::new(files)), } } } @@ -136,6 +136,22 @@ impl FileSystem for InMemoryFileSystem { Ok(()) } + async fn append( + &self, + path: impl AsRef + Send, + contents: impl AsRef<[u8]> + Send, + ) -> FileSystemResult<()> { + let path = path.as_ref(); + let mut existing_contents = match self.read(path).await { + Ok(existing_contents) => existing_contents, + Err(FileSystemError::FileNotFound(_)) => 
vec![], + Err(err) => Err(err)?, + }; + existing_contents.append(&mut contents.as_ref().to_vec()); + + self.write(path, existing_contents).await + } + async fn copy( &self, from: impl AsRef + Send, @@ -151,7 +167,7 @@ mod tests { use super::*; #[tokio::test] - async fn create_dir_should_creates_a_directory_at_root() { + async fn create_dir_should_create_a_directory_at_root() { let fs = InMemoryFileSystem::new(HashMap::from([( OsString::from_str("/").unwrap(), InMemoryFile::Directory, @@ -171,7 +187,7 @@ mod tests { } #[tokio::test] - async fn create_dir_should_returns_an_error_if_directory_already_exists() { + async fn create_dir_should_return_an_error_if_directory_already_exists() { let fs = InMemoryFileSystem::new(HashMap::from([ (OsString::from_str("/").unwrap(), InMemoryFile::Directory), (OsString::from_str("/dir").unwrap(), InMemoryFile::Directory), @@ -187,7 +203,7 @@ mod tests { } #[tokio::test] - async fn create_dir_should_returns_an_error_if_file_already_exists() { + async fn create_dir_should_return_an_error_if_file_already_exists() { let fs = InMemoryFileSystem::new(HashMap::from([ (OsString::from_str("/").unwrap(), InMemoryFile::Directory), ( @@ -206,7 +222,7 @@ mod tests { } #[tokio::test] - async fn create_dir_should_creates_a_directory_if_all_ancestors_exist() { + async fn create_dir_should_create_a_directory_if_all_ancestors_exist() { let fs = InMemoryFileSystem::new(HashMap::from([ (OsString::from_str("/").unwrap(), InMemoryFile::Directory), ( @@ -237,7 +253,7 @@ mod tests { } #[tokio::test] - async fn create_dir_should_returns_an_error_if_some_directory_ancestor_doesnt_exists() { + async fn create_dir_should_return_an_error_if_some_directory_ancestor_doesnt_exists() { let fs = InMemoryFileSystem::new(HashMap::from([ (OsString::from_str("/").unwrap(), InMemoryFile::Directory), ( @@ -260,7 +276,7 @@ mod tests { } #[tokio::test] - async fn create_dir_should_returns_an_error_if_some_ancestor_is_not_a_directory() { + async fn create_dir_should_return_an_error_if_some_ancestor_is_not_a_directory() { let fs = InMemoryFileSystem::new(HashMap::from([ (OsString::from_str("/").unwrap(), InMemoryFile::Directory), ( @@ -287,7 +303,7 @@ mod tests { } #[tokio::test] - async fn create_dir_all_should_creates_a_directory_and_all_its_ancestors_if_they_dont_exist() { + async fn create_dir_all_should_create_a_directory_and_all_its_ancestors_if_they_dont_exist() { let fs = InMemoryFileSystem::new(HashMap::from([( OsString::from_str("/").unwrap(), InMemoryFile::Directory, @@ -331,8 +347,8 @@ mod tests { } #[tokio::test] - async fn create_dir_all_should_creates_a_directory_and_some_of_its_ancestors_if_they_dont_exist( - ) { + async fn create_dir_all_should_create_a_directory_and_some_of_its_ancestors_if_they_dont_exist() + { let fs = InMemoryFileSystem::new(HashMap::from([ (OsString::from_str("/").unwrap(), InMemoryFile::Directory), ( @@ -367,7 +383,7 @@ mod tests { } #[tokio::test] - async fn create_dir_all_should_returns_an_error_if_some_ancestor_is_not_a_directory() { + async fn create_dir_all_should_return_an_error_if_some_ancestor_is_not_a_directory() { let fs = InMemoryFileSystem::new(HashMap::from([ (OsString::from_str("/").unwrap(), InMemoryFile::Directory), ( @@ -390,7 +406,7 @@ mod tests { } #[tokio::test] - async fn read_should_returns_the_file_content() { + async fn read_should_return_the_file_content() { let fs = InMemoryFileSystem::new(HashMap::from([( OsString::from_str("/myfile").unwrap(), InMemoryFile::File("content".as_bytes().to_vec()), @@ -402,7 +418,7 @@ mod tests { } 
#[tokio::test] - async fn read_should_returns_an_error_if_file_doesnt_exists() { + async fn read_should_return_an_error_if_file_doesnt_exists() { let fs = InMemoryFileSystem::new(HashMap::new()); let err = fs.read("/myfile").await.unwrap_err(); @@ -414,7 +430,7 @@ mod tests { } #[tokio::test] - async fn read_should_returns_an_error_if_file_is_a_directory() { + async fn read_should_return_an_error_if_file_is_a_directory() { let fs = InMemoryFileSystem::new(HashMap::from([( OsString::from_str("/myfile").unwrap(), InMemoryFile::Directory, @@ -429,7 +445,7 @@ mod tests { } #[tokio::test] - async fn read_to_string_should_returns_the_file_content_as_a_string() { + async fn read_to_string_should_return_the_file_content_as_a_string() { let fs = InMemoryFileSystem::new(HashMap::from([( OsString::from_str("/myfile").unwrap(), InMemoryFile::File("content".as_bytes().to_vec()), @@ -441,7 +457,7 @@ mod tests { } #[tokio::test] - async fn read_to_string_should_returns_an_error_if_file_doesnt_exists() { + async fn read_to_string_should_return_an_error_if_file_doesnt_exists() { let fs = InMemoryFileSystem::new(HashMap::new()); let err = fs.read_to_string("/myfile").await.unwrap_err(); @@ -453,7 +469,7 @@ mod tests { } #[tokio::test] - async fn read_to_string_should_returns_an_error_if_file_is_a_directory() { + async fn read_to_string_should_return_an_error_if_file_is_a_directory() { let fs = InMemoryFileSystem::new(HashMap::from([( OsString::from_str("/myfile").unwrap(), InMemoryFile::Directory, @@ -468,7 +484,7 @@ mod tests { } #[tokio::test] - async fn read_to_string_should_returns_an_error_if_file_isnt_utf8_encoded() { + async fn read_to_string_should_return_an_error_if_file_isnt_utf8_encoded() { let fs = InMemoryFileSystem::new(HashMap::from([( OsString::from_str("/myfile").unwrap(), InMemoryFile::File(vec![0xC3, 0x28]), @@ -483,7 +499,7 @@ mod tests { } #[tokio::test] - async fn write_should_creates_file_with_content_if_file_doesnt_exists() { + async fn write_should_create_file_with_content_if_file_doesnt_exists() { let fs = InMemoryFileSystem::new(HashMap::from([( OsString::from_str("/").unwrap(), InMemoryFile::Directory, @@ -498,11 +514,11 @@ mod tests { .await .get(&OsString::from_str("/myfile").unwrap()), Some(InMemoryFile::File(content)) if content == "my file content".as_bytes() - )) + )); } #[tokio::test] - async fn write_should_updates_file_content_if_file_exists() { + async fn write_should_overwrite_file_content_if_file_exists() { let fs = InMemoryFileSystem::new(HashMap::from([ (OsString::from_str("/").unwrap(), InMemoryFile::Directory), ( @@ -520,11 +536,11 @@ mod tests { .await .get(&OsString::from_str("/myfile").unwrap()), Some(InMemoryFile::File(content)) if content == "my new file content".as_bytes() - )) + )); } #[tokio::test] - async fn write_should_returns_an_error_if_file_is_a_directory() { + async fn write_should_return_an_error_if_file_is_a_directory() { let fs = InMemoryFileSystem::new(HashMap::from([ (OsString::from_str("/").unwrap(), InMemoryFile::Directory), ( @@ -540,7 +556,7 @@ mod tests { } #[tokio::test] - async fn write_should_returns_an_error_if_file_is_new_and_some_ancestor_doesnt_exists() { + async fn write_should_return_an_error_if_file_is_new_and_some_ancestor_doesnt_exists() { let fs = InMemoryFileSystem::new(HashMap::from([ (OsString::from_str("/").unwrap(), InMemoryFile::Directory), ( @@ -559,7 +575,7 @@ mod tests { } #[tokio::test] - async fn write_should_returns_an_error_if_file_is_new_and_some_ancestor_is_not_a_directory() { + async fn 
write_should_return_an_error_if_file_is_new_and_some_ancestor_is_not_a_directory() { let fs = InMemoryFileSystem::new(HashMap::from([ (OsString::from_str("/").unwrap(), InMemoryFile::Directory), ( @@ -581,6 +597,106 @@ mod tests { assert!(matches!(err, FileSystemError::AncestorNotDirectory(path) if path == "/path")); } + #[tokio::test] + async fn append_should_update_file_content_if_file_exists() { + let fs = InMemoryFileSystem::new(HashMap::from([ + (OsString::from_str("/").unwrap(), InMemoryFile::Directory), + ( + OsString::from_str("/myfile").unwrap(), + InMemoryFile::File("my file content".as_bytes().to_vec()), + ), + ])); + + fs.append("/myfile", " has been updated with new things") + .await + .unwrap(); + + assert_eq!(fs.files.read().await.len(), 2); + assert!(matches!( + fs.files + .read() + .await + .get(&OsString::from_str("/myfile").unwrap()), + Some(InMemoryFile::File(content)) if content == "my file content has been updated with new things".as_bytes() + )); + } + + #[tokio::test] + async fn append_should_create_file_with_content_if_file_doesnt_exists() { + let fs = InMemoryFileSystem::new(HashMap::from([( + OsString::from_str("/").unwrap(), + InMemoryFile::Directory, + )])); + + fs.append("/myfile", "my file content").await.unwrap(); + + assert_eq!(fs.files.read().await.len(), 2); + assert!(matches!( + fs.files + .read() + .await + .get(&OsString::from_str("/myfile").unwrap()), + Some(InMemoryFile::File(content)) if content == "my file content".as_bytes() + )); + } + + #[tokio::test] + async fn append_should_return_an_error_if_file_is_a_directory() { + let fs = InMemoryFileSystem::new(HashMap::from([( + OsString::from_str("/myfile").unwrap(), + InMemoryFile::Directory, + )])); + + let err = fs.append("/myfile", "my file content").await.unwrap_err(); + + assert!(matches!( + err, + FileSystemError::FileIsDirectory(path) if path == "/myfile" + )); + } + + #[tokio::test] + async fn append_should_return_an_error_if_file_is_new_and_some_ancestor_doesnt_exists() { + let fs = InMemoryFileSystem::new(HashMap::from([ + (OsString::from_str("/").unwrap(), InMemoryFile::Directory), + ( + OsString::from_str("/path/to").unwrap(), + InMemoryFile::Directory, + ), + ])); + + let err = fs + .append("/path/to/myfile", "my file content") + .await + .unwrap_err(); + + assert_eq!(fs.files.read().await.len(), 2); + assert!(matches!(err, FileSystemError::AncestorDoesntExists(path) if path == "/path")); + } + + #[tokio::test] + async fn append_should_return_an_error_if_file_is_new_and_some_ancestor_is_not_a_directory() { + let fs = InMemoryFileSystem::new(HashMap::from([ + (OsString::from_str("/").unwrap(), InMemoryFile::Directory), + ( + OsString::from_str("/path").unwrap(), + InMemoryFile::File(vec![]), + ), + ( + OsString::from_str("/path/to").unwrap(), + InMemoryFile::Directory, + ), + ])); + + let err = fs + .append("/path/to/myfile", "my file content") + .await + .unwrap_err(); + + assert_eq!(fs.files.read().await.len(), 3); + assert!(matches!(err, FileSystemError::AncestorNotDirectory(path) if path == "/path")); + } + #[tokio::test] async fn copy_should_creates_new_destination_file_if_it_doesnt_exists() { let fs = InMemoryFileSystem::new(HashMap::from([ From 7121f122eacda7055e2fde124687098545986ec6 Mon Sep 17 00:00:00 2001 From: l0r1s Date: Fri, 1 Sep 2023 00:17:01 +0300 Subject: [PATCH 09/69] feat: added anyhow dependencies to support crate --- crates/support/Cargo.toml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/crates/support/Cargo.toml b/crates/support/Cargo.toml index 
ec6bf9fd7..58a3cfb6d 100644 --- a/crates/support/Cargo.toml +++ b/crates/support/Cargo.toml @@ -7,9 +7,8 @@ edition = "2021" [dependencies] thiserror = { workspace = true } +anyhow = { workspace = true } async-trait = { workspace = true } futures = { workspace = true } reqwest = { workspace = true } - -[dev-dependencies] tokio = { workspace = true, features = ["full"] } From 525b818e49230dafe616560f8e46a1b651a9d5b7 Mon Sep 17 00:00:00 2001 From: l0r1s Date: Fri, 1 Sep 2023 00:18:20 +0300 Subject: [PATCH 10/69] feat: refactored FileSystem trait to expose only a single wrapped error --- crates/support/src/fs.rs | 21 ++++----------------- 1 file changed, 4 insertions(+), 17 deletions(-) diff --git a/crates/support/src/fs.rs b/crates/support/src/fs.rs index 9fc360402..5d60ac7ea 100644 --- a/crates/support/src/fs.rs +++ b/crates/support/src/fs.rs @@ -1,26 +1,13 @@ -use std::{ffi::OsString, path::Path}; +use std::path::Path; use async_trait::async_trait; pub mod in_memory; +pub mod local; #[derive(Debug, thiserror::Error)] -pub enum FileSystemError { - #[error("File {0:?} already exists")] - FileAlreadyExists(OsString), - #[error("Directory {0:?} already exists")] - DirectoryAlreadyExists(OsString), - #[error("Ancestor {0:?} doesn't exists")] - AncestorDoesntExists(OsString), - #[error("Ancestor {0:?} is not a directory")] - AncestorNotDirectory(OsString), - #[error("File {0:?} not found")] - FileNotFound(OsString), - #[error("File {0:?} is a directory")] - FileIsDirectory(OsString), - #[error("Invalid UTF-8 encoding for file {0:?}")] - InvalidUtf8FileEncoding(OsString), -} +#[error(transparent)] +pub struct FileSystemError(#[from] anyhow::Error); pub type FileSystemResult = Result; From 7eb86029cc8995f748a74d2940e714dbe910ecbd Mon Sep 17 00:00:00 2001 From: l0r1s Date: Fri, 1 Sep 2023 00:19:45 +0300 Subject: [PATCH 11/69] feat: refacto InMemoryFileSystem following FileSystem trait refacto --- crates/support/src/fs/in_memory.rs | 131 +++++++++++------------------ 1 file changed, 48 insertions(+), 83 deletions(-) diff --git a/crates/support/src/fs/in_memory.rs b/crates/support/src/fs/in_memory.rs index d9e33b150..bb6edee06 100644 --- a/crates/support/src/fs/in_memory.rs +++ b/crates/support/src/fs/in_memory.rs @@ -1,6 +1,7 @@ -use std::{collections::HashMap, ffi::OsString, path::Path, str::FromStr, sync::Arc}; +use std::{collections::HashMap, ffi::OsString, path::Path, sync::Arc}; -use super::{FileSystem, FileSystemError, FileSystemResult}; +use super::{FileSystem, FileSystemResult}; +use anyhow::anyhow; use async_trait::async_trait; use tokio::sync::RwLock; @@ -31,10 +32,10 @@ impl FileSystem for InMemoryFileSystem { match self.files.read().await.get(os_path) { Some(InMemoryFile::File(_)) => { - Err(FileSystemError::FileAlreadyExists(os_path.to_owned()))? + Err(anyhow!("file {:?} already exists", os_path.to_owned(),))? }, Some(InMemoryFile::Directory) => { - Err(FileSystemError::DirectoryAlreadyExists(os_path.to_owned()))? + Err(anyhow!("directory {:?} already exists", os_path.to_owned(),))? 
}, None => {}, }; @@ -43,12 +44,11 @@ impl FileSystem for InMemoryFileSystem { while let Some(path) = ancestors.next() { match self.files.read().await.get(path.as_os_str()) { Some(InMemoryFile::Directory) => continue, - Some(InMemoryFile::File(_)) => Err(FileSystemError::AncestorNotDirectory( - path.as_os_str().to_owned(), - ))?, - None => Err(FileSystemError::AncestorDoesntExists( - path.as_os_str().to_owned(), + Some(InMemoryFile::File(_)) => Err(anyhow!( + "ancestor {:?} is not a directory", + path.as_os_str(), ))?, + None => Err(anyhow!("ancestor {:?} doesn't exists", path.as_os_str(),))?, }; } @@ -73,7 +73,8 @@ impl FileSystem for InMemoryFileSystem { while let Some(path) = ancestors.next() { match files.get(path.as_os_str()) { Some(InMemoryFile::Directory) => continue, - Some(InMemoryFile::File(_)) => Err(FileSystemError::AncestorNotDirectory( + Some(InMemoryFile::File(_)) => Err(anyhow!( + "ancestor {:?} is not a directory", path.as_os_str().to_owned(), ))?, None => files.insert(path.as_os_str().to_owned(), InMemoryFile::Directory), @@ -89,9 +90,9 @@ impl FileSystem for InMemoryFileSystem { match self.files.read().await.get(os_path) { Some(InMemoryFile::File(content)) => Ok(content.clone()), Some(InMemoryFile::Directory) => { - Err(FileSystemError::FileIsDirectory(os_path.to_owned())) + Err(anyhow!("file {:?} is a directory", os_path).into()) }, - None => Err(FileSystemError::FileNotFound(os_path.to_owned())), + None => Err(anyhow!("file {:?} not found", os_path).into()), } } @@ -99,7 +100,8 @@ impl FileSystem for InMemoryFileSystem { let os_path = path.as_ref().as_os_str().to_owned(); let content = self.read(path).await?; - String::from_utf8(content).map_err(|_| FileSystemError::InvalidUtf8FileEncoding(os_path)) + String::from_utf8(content) + .map_err(|_| anyhow!("invalid utf-8 encoding for file {:?}", os_path).into()) } async fn write( @@ -115,17 +117,16 @@ impl FileSystem for InMemoryFileSystem { while let Some(path) = ancestors.next() { match files.get(path.as_os_str()) { Some(InMemoryFile::Directory) => continue, - Some(InMemoryFile::File(_)) => Err(FileSystemError::AncestorNotDirectory( - path.as_os_str().to_owned(), - ))?, - None => Err(FileSystemError::AncestorDoesntExists( - path.as_os_str().to_owned(), + Some(InMemoryFile::File(_)) => Err(anyhow!( + "ancestor {:?} is not a directory", + path.as_os_str() ))?, + None => Err(anyhow!("ancestor {:?} doesn't exists", path.as_os_str()))?, }; } if let Some(InMemoryFile::Directory) = files.get(os_path) { - return Err(FileSystemError::FileIsDirectory(os_path.to_owned())); + return Err(anyhow!("file {:?} is a directory", os_path).into()); } files.insert( @@ -144,7 +145,9 @@ impl FileSystem for InMemoryFileSystem { let path = path.as_ref(); let mut existing_contents = match self.read(path).await { Ok(existing_contents) => existing_contents, - Err(FileSystemError::FileNotFound(_)) => vec![], + Err(err) if err.to_string() == format!("file {:?} not found", path.as_os_str()) => { + vec![] + }, Err(err) => Err(err)?, }; existing_contents.append(&mut contents.as_ref().to_vec()); @@ -165,6 +168,7 @@ impl FileSystem for InMemoryFileSystem { #[cfg(test)] mod tests { use super::*; + use std::str::FromStr; #[tokio::test] async fn create_dir_should_create_a_directory_at_root() { @@ -196,10 +200,7 @@ mod tests { let err = fs.create_dir("/dir").await.unwrap_err(); assert_eq!(fs.files.read().await.len(), 2); - assert!(matches!( - err, - FileSystemError::DirectoryAlreadyExists(path) if path == "/dir" - )); + assert_eq!(err.to_string(), "directory 
\"/dir\" already exists"); } #[tokio::test] @@ -215,10 +216,7 @@ mod tests { let err = fs.create_dir("/dir").await.unwrap_err(); assert_eq!(fs.files.read().await.len(), 2); - assert!(matches!( - err, - FileSystemError::FileAlreadyExists(path) if path == "/dir" - )); + assert_eq!(err.to_string(), "file \"/dir\" already exists"); } #[tokio::test] @@ -269,10 +267,7 @@ mod tests { let err = fs.create_dir("/path/to/my/dir").await.unwrap_err(); assert_eq!(fs.files.read().await.len(), 3); - assert!(matches!( - err, - FileSystemError::AncestorDoesntExists(path) if path == "/path/to/my" - )); + assert_eq!(err.to_string(), "ancestor \"/path/to/my\" doesn't exists"); } #[tokio::test] @@ -296,10 +291,7 @@ mod tests { let err = fs.create_dir("/path/to/my/dir").await.unwrap_err(); assert_eq!(fs.files.read().await.len(), 4); - assert!(matches!( - err, - FileSystemError::AncestorNotDirectory(path) if path == "/path" - )); + assert_eq!(err.to_string(), "ancestor \"/path\" is not a directory"); } #[tokio::test] @@ -399,10 +391,7 @@ mod tests { let err = fs.create_dir_all("/path/to/my/dir").await.unwrap_err(); assert_eq!(fs.files.read().await.len(), 3); - assert!(matches!( - err, - FileSystemError::AncestorNotDirectory(path) if path == "/path" - )); + assert_eq!(err.to_string(), "ancestor \"/path\" is not a directory"); } #[tokio::test] @@ -423,10 +412,7 @@ mod tests { let err = fs.read("/myfile").await.unwrap_err(); - assert!(matches!( - err, - FileSystemError::FileNotFound(path) if path == "/myfile" - )); + assert_eq!(err.to_string(), "file \"/myfile\" not found"); } #[tokio::test] @@ -438,10 +424,7 @@ mod tests { let err = fs.read("/myfile").await.unwrap_err(); - assert!(matches!( - err, - FileSystemError::FileIsDirectory(path) if path == "/myfile" - )); + assert_eq!(err.to_string(), "file \"/myfile\" is a directory"); } #[tokio::test] @@ -462,10 +445,7 @@ mod tests { let err = fs.read_to_string("/myfile").await.unwrap_err(); - assert!(matches!( - err, - FileSystemError::FileNotFound(path) if path == "/myfile" - )); + assert_eq!(err.to_string(), "file \"/myfile\" not found"); } #[tokio::test] @@ -477,10 +457,7 @@ mod tests { let err = fs.read_to_string("/myfile").await.unwrap_err(); - assert!(matches!( - err, - FileSystemError::FileIsDirectory(path) if path == "/myfile" - )); + assert_eq!(err.to_string(), "file \"/myfile\" is a directory"); } #[tokio::test] @@ -492,10 +469,10 @@ mod tests { let err = fs.read_to_string("/myfile").await.unwrap_err(); - assert!(matches!( - err, - FileSystemError::InvalidUtf8FileEncoding(path) if path == "/myfile" - )); + assert_eq!( + err.to_string(), + "invalid utf-8 encoding for file \"/myfile\"" + ); } #[tokio::test] @@ -552,7 +529,7 @@ mod tests { let err = fs.write("/myfile", "my file content").await.unwrap_err(); assert_eq!(fs.files.read().await.len(), 2); - assert!(matches!(err, FileSystemError::FileIsDirectory(path) if path == "/myfile")); + assert_eq!(err.to_string(), "file \"/myfile\" is a directory"); } #[tokio::test] @@ -571,7 +548,7 @@ mod tests { .unwrap_err(); assert_eq!(fs.files.read().await.len(), 2); - assert!(matches!(err, FileSystemError::AncestorDoesntExists(path) if path == "/path")); + assert_eq!(err.to_string(), "ancestor \"/path\" doesn't exists"); } #[tokio::test] @@ -594,7 +571,7 @@ mod tests { .unwrap_err(); assert_eq!(fs.files.read().await.len(), 3); - assert!(matches!(err, FileSystemError::AncestorNotDirectory(path) if path == "/path")); + assert_eq!(err.to_string(), "ancestor \"/path\" is not a directory"); } #[tokio::test] @@ -649,10 +626,7 @@ 
mod tests { let err = fs.append("/myfile", "my file content").await.unwrap_err(); - assert!(matches!( - err, - FileSystemError::FileIsDirectory(path) if path == "/myfile" - )); + assert_eq!(err.to_string(), "file \"/myfile\" is a directory"); } #[tokio::test] @@ -671,7 +645,7 @@ mod tests { .unwrap_err(); assert_eq!(fs.files.read().await.len(), 2); - assert!(matches!(err, FileSystemError::AncestorDoesntExists(path) if path == "/path")); + assert_eq!(err.to_string(), "ancestor \"/path\" doesn't exists"); } #[tokio::test] @@ -694,7 +668,7 @@ mod tests { .unwrap_err(); assert_eq!(fs.files.read().await.len(), 3); - assert!(matches!(err, FileSystemError::AncestorNotDirectory(path) if path == "/path")); + assert_eq!(err.to_string(), "ancestor \"/path\" is not a directory"); } #[tokio::test] @@ -746,10 +720,7 @@ mod tests { let err = fs.copy("/myfile", "/mfilecopy").await.unwrap_err(); - assert!(matches!( - err, - FileSystemError::FileNotFound(path) if path == "/myfile" - )); + assert_eq!(err.to_string(), "file \"/myfile\" not found"); } #[tokio::test] @@ -764,10 +735,7 @@ mod tests { let err = fs.copy("/myfile", "/mfilecopy").await.unwrap_err(); - assert!(matches!( - err, - FileSystemError::FileIsDirectory(path) if path == "/myfile" - )); + assert_eq!(err.to_string(), "file \"/myfile\" is a directory"); } #[tokio::test] @@ -786,10 +754,7 @@ mod tests { let err = fs.copy("/myfile", "/myfilecopy").await.unwrap_err(); - assert!(matches!( - err, - FileSystemError::FileIsDirectory(path) if path == "/myfilecopy" - )); + assert_eq!(err.to_string(), "file \"/myfilecopy\" is a directory"); } #[tokio::test] @@ -806,7 +771,7 @@ mod tests { let err = fs.copy("/myfile", "/somedir/myfilecopy").await.unwrap_err(); assert_eq!(fs.files.read().await.len(), 2); - assert!(matches!(err, FileSystemError::AncestorDoesntExists(path) if path == "/somedir")); + assert_eq!(err.to_string(), "ancestor \"/somedir\" doesn't exists"); } #[tokio::test] @@ -827,6 +792,6 @@ mod tests { let err = fs.copy("/myfile", "/mypath/myfilecopy").await.unwrap_err(); assert_eq!(fs.files.read().await.len(), 3); - assert!(matches!(err, FileSystemError::AncestorNotDirectory(path) if path == "/mypath")); + assert_eq!(err.to_string(), "ancestor \"/mypath\" is not a directory"); } } From 3491cbd48d94c7ca2b6dac97dbba77c8eb08c53d Mon Sep 17 00:00:00 2001 From: l0r1s Date: Fri, 1 Sep 2023 14:18:18 +0300 Subject: [PATCH 12/69] feat: added conversion between io::Error and FileSystemError --- crates/support/src/fs.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/crates/support/src/fs.rs b/crates/support/src/fs.rs index 5d60ac7ea..3c6061d23 100644 --- a/crates/support/src/fs.rs +++ b/crates/support/src/fs.rs @@ -9,6 +9,12 @@ pub mod local; #[error(transparent)] pub struct FileSystemError(#[from] anyhow::Error); +impl From for FileSystemError { + fn from(error: std::io::Error) -> Self { + Self(error.into()) + } +} + pub type FileSystemResult = Result; #[async_trait] From 2a6158b4d0a4c7fc26b5c14a3e430c891433082f Mon Sep 17 00:00:00 2001 From: l0r1s Date: Fri, 1 Sep 2023 14:18:55 +0300 Subject: [PATCH 13/69] feat: added implementation of LocalFileSystem using tokio::fs --- crates/support/src/fs/local.rs | 65 ++++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) create mode 100644 crates/support/src/fs/local.rs diff --git a/crates/support/src/fs/local.rs b/crates/support/src/fs/local.rs new file mode 100644 index 000000000..8a5d031dc --- /dev/null +++ b/crates/support/src/fs/local.rs @@ -0,0 +1,65 @@ +use std::path::Path; +use 
tokio::io::AsyncWriteExt; + +use async_trait::async_trait; + +use super::{FileSystem, FileSystemError, FileSystemResult}; + +pub struct LocalFileSystem; + +#[async_trait] +impl FileSystem for LocalFileSystem { + async fn create_dir(&self, path: impl AsRef + Send) -> FileSystemResult<()> { + tokio::fs::create_dir(path).await.map_err(Into::into) + } + + async fn create_dir_all(&self, path: impl AsRef + Send) -> FileSystemResult<()> { + tokio::fs::create_dir_all(path).await.map_err(Into::into) + } + + async fn read(&self, path: impl AsRef + Send) -> FileSystemResult> { + tokio::fs::read(path).await.map_err(Into::into) + } + + async fn read_to_string(&self, path: impl AsRef + Send) -> FileSystemResult { + tokio::fs::read_to_string(path).await.map_err(Into::into) + } + + async fn write( + &self, + path: impl AsRef + Send, + contents: impl AsRef<[u8]> + Send, + ) -> FileSystemResult<()> { + tokio::fs::write(path, contents).await.map_err(Into::into) + } + + async fn append( + &self, + path: impl AsRef + Send, + contents: impl AsRef<[u8]> + Send, + ) -> FileSystemResult<()> { + let contents = contents.as_ref(); + let mut file = tokio::fs::OpenOptions::new() + .append(true) + .create(true) + .open(path) + .await + .map_err(Into::::into)?; + + file.write_all(contents) + .await + .and(Ok(())) + .map_err(Into::into) + } + + async fn copy( + &self, + from: impl AsRef + Send, + to: impl AsRef + Send, + ) -> FileSystemResult<()> { + tokio::fs::copy(from, to) + .await + .and(Ok(())) + .map_err(Into::into) + } +} From 5d5b1a9dc1864dd86733ce2e561a58eff0d8d07c Mon Sep 17 00:00:00 2001 From: l0r1s Date: Sat, 2 Sep 2023 17:05:25 +0300 Subject: [PATCH 14/69] feat: added nix crate dependency to workspace and provider crate --- Cargo.toml | 3 ++- crates/provider/Cargo.toml | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 6bc2ea379..ee1a26b97 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -23,4 +23,5 @@ regex = "1.8" lazy_static = "1.4" multiaddr = "0.18" url = "2.3" -uuid = "1.4" \ No newline at end of file +uuid = "1.4" +nix = "0.27" \ No newline at end of file diff --git a/crates/provider/Cargo.toml b/crates/provider/Cargo.toml index f6d7d5b91..6737df24e 100644 --- a/crates/provider/Cargo.toml +++ b/crates/provider/Cargo.toml @@ -23,3 +23,4 @@ tokio = { workspace = true, features = [ ] } thiserror = { workspace = true } uuid = { workspace = true, features = ["v4"] } +nix = { workspace = true, features = ["signal"] } From b820462884f4cf6e9ccd5912755742b1244e8bbd Mon Sep 17 00:00:00 2001 From: l0r1s Date: Sat, 2 Sep 2023 20:50:25 +0300 Subject: [PATCH 15/69] feat: added uuid with v4 as dev-dependencies for testing in support crate --- crates/support/Cargo.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/crates/support/Cargo.toml b/crates/support/Cargo.toml index 58a3cfb6d..f660b7bf0 100644 --- a/crates/support/Cargo.toml +++ b/crates/support/Cargo.toml @@ -12,3 +12,6 @@ async-trait = { workspace = true } futures = { workspace = true } reqwest = { workspace = true } tokio = { workspace = true, features = ["full"] } + +[dev-dependencies] +uuid = { workspace = true, features = ["v4"] } From 5e8ad88415de064bab1ea2e4e72cd704491cb529 Mon Sep 17 00:00:00 2001 From: l0r1s Date: Sat, 2 Sep 2023 20:51:00 +0300 Subject: [PATCH 16/69] feat: added unit tests for LocalFileSystem --- crates/support/src/fs/local.rs | 252 ++++++++++++++++++++++++++++++++- 1 file changed, 249 insertions(+), 3 deletions(-) diff --git a/crates/support/src/fs/local.rs 
b/crates/support/src/fs/local.rs index 8a5d031dc..179b7c557 100644 --- a/crates/support/src/fs/local.rs +++ b/crates/support/src/fs/local.rs @@ -5,6 +5,7 @@ use async_trait::async_trait; use super::{FileSystem, FileSystemError, FileSystemResult}; +#[derive(Default, Debug, Clone)] pub struct LocalFileSystem; #[async_trait] @@ -40,16 +41,17 @@ impl FileSystem for LocalFileSystem { ) -> FileSystemResult<()> { let contents = contents.as_ref(); let mut file = tokio::fs::OpenOptions::new() - .append(true) .create(true) + .append(true) .open(path) .await .map_err(Into::::into)?; file.write_all(contents) .await - .and(Ok(())) - .map_err(Into::into) + .map_err(Into::::into)?; + + file.flush().await.and(Ok(())).map_err(Into::into) } async fn copy( @@ -63,3 +65,247 @@ impl FileSystem for LocalFileSystem { .map_err(Into::into) } } + +#[cfg(test)] +mod tests { + use super::*; + use uuid::Uuid; + + fn setup() -> String { + let test_dir = format!("/tmp/unit_test_{}", Uuid::new_v4()); + std::fs::create_dir(&test_dir).unwrap(); + test_dir + } + + fn teardown(test_dir: String) { + std::fs::remove_dir_all(&test_dir).unwrap(); + } + + #[tokio::test] + async fn create_dir_should_create_a_new_directory_at_path() { + let test_dir = setup(); + let fs = LocalFileSystem::default(); + + let new_dir = format!("{test_dir}/mynewdir"); + fs.create_dir(&new_dir).await.unwrap(); + + let new_dir_path = Path::new(&new_dir); + assert!(new_dir_path.exists() && new_dir_path.is_dir()); + teardown(test_dir); + } + + #[tokio::test] + async fn create_dir_should_bubble_up_error_if_some_happens() { + let test_dir = setup(); + let fs = LocalFileSystem::default(); + + let new_dir = format!("{test_dir}/mynewdir"); + // intentionally create new dir before calling function to force error + std::fs::create_dir(&new_dir).unwrap(); + let err = fs.create_dir(&new_dir).await.unwrap_err(); + + assert_eq!(err.to_string(), "File exists (os error 17)"); + teardown(test_dir); + } + + #[tokio::test] + async fn create_dir_all_should_create_a_new_directory_and_all_of_it_ancestors_at_path() { + let test_dir = setup(); + let fs = LocalFileSystem::default(); + + let new_dir = format!("{test_dir}/the/path/to/mynewdir"); + fs.create_dir_all(&new_dir).await.unwrap(); + + let new_dir_path = Path::new(&new_dir); + assert!(new_dir_path.exists() && new_dir_path.is_dir()); + teardown(test_dir); + } + + #[tokio::test] + async fn create_dir_all_should_bubble_up_error_if_some_happens() { + let test_dir = setup(); + let fs = LocalFileSystem::default(); + + let new_dir = format!("{test_dir}/the/path/to/mynewdir"); + // intentionally create new file as ancestor before calling function to force error + std::fs::write(format!("{test_dir}/the"), b"test").unwrap(); + let err = fs.create_dir_all(&new_dir).await.unwrap_err(); + + assert_eq!(err.to_string(), "Not a directory (os error 20)"); + teardown(test_dir); + } + + #[tokio::test] + async fn read_should_return_the_contents_of_the_file_at_path() { + let test_dir = setup(); + let fs = LocalFileSystem::default(); + + let file_path = format!("{test_dir}/myfile"); + std::fs::write(&file_path, b"Test").unwrap(); + let contents = fs.read(file_path).await.unwrap(); + + assert_eq!(contents, b"Test"); + teardown(test_dir); + } + + #[tokio::test] + async fn read_should_bubble_up_error_if_some_happens() { + let test_dir = setup(); + let fs = LocalFileSystem::default(); + + let file_path = format!("{test_dir}/myfile"); + // intentionally forget to create file to force error + let err = fs.read(file_path).await.unwrap_err(); + + 
assert_eq!(err.to_string(), "No such file or directory (os error 2)"); + teardown(test_dir); + } + + #[tokio::test] + async fn read_to_string_should_return_the_contents_of_the_file_at_path_as_string() { + let test_dir = setup(); + let fs = LocalFileSystem::default(); + + let file_path = format!("{test_dir}/myfile"); + std::fs::write(&file_path, b"Test").unwrap(); + let contents = fs.read_to_string(file_path).await.unwrap(); + + assert_eq!(contents, "Test"); + teardown(test_dir); + } + + #[tokio::test] + async fn read_to_string_should_bubble_up_error_if_some_happens() { + let test_dir = setup(); + let fs = LocalFileSystem::default(); + + let file_path = format!("{test_dir}/myfile"); + // intentionally forget to create file to force error + let err = fs.read_to_string(file_path).await.unwrap_err(); + + assert_eq!(err.to_string(), "No such file or directory (os error 2)"); + teardown(test_dir); + } + + #[tokio::test] + async fn write_should_create_a_new_file_at_path_with_contents() { + let test_dir = setup(); + let fs = LocalFileSystem::default(); + + let file_path = format!("{test_dir}/myfile"); + fs.write(&file_path, "Test").await.unwrap(); + + assert_eq!(std::fs::read_to_string(file_path).unwrap(), "Test"); + teardown(test_dir); + } + + #[tokio::test] + async fn write_should_overwrite_an_existing_file_with_contents() { + let test_dir = setup(); + let fs = LocalFileSystem::default(); + + let file_path = format!("{test_dir}/myfile"); + std::fs::write(&file_path, "Test").unwrap(); + assert_eq!(std::fs::read_to_string(&file_path).unwrap(), "Test"); + fs.write(&file_path, "Test updated").await.unwrap(); + + assert_eq!(std::fs::read_to_string(file_path).unwrap(), "Test updated"); + teardown(test_dir); + } + + #[tokio::test] + async fn write_should_bubble_up_error_if_some_happens() { + let test_dir = setup(); + let fs = LocalFileSystem::default(); + + let file_path = format!("{test_dir}/myfile"); + // intentionally create directory instead of file to force error + std::fs::create_dir(&file_path).unwrap(); + let err = fs.write(&file_path, "Test").await.unwrap_err(); + + assert_eq!(err.to_string(), "Is a directory (os error 21)"); + teardown(test_dir); + } + + #[tokio::test] + async fn append_should_create_a_new_file_at_path_with_contents() { + let test_dir = setup(); + let fs = LocalFileSystem::default(); + + let file_path = format!("{test_dir}/myfile"); + fs.append(&file_path, "Test").await.unwrap(); + + assert_eq!(std::fs::read_to_string(file_path).unwrap(), "Test"); + teardown(test_dir); + } + + #[tokio::test] + async fn append_should_updates_an_existing_file_by_appending_contents() { + let test_dir = setup(); + let fs = LocalFileSystem::default(); + + let file_path = format!("{test_dir}/myfile"); + std::fs::write(&file_path, "Test").unwrap(); + assert_eq!(std::fs::read_to_string(&file_path).unwrap(), "Test"); + fs.append(&file_path, " updated").await.unwrap(); + + assert_eq!(std::fs::read_to_string(file_path).unwrap(), "Test updated"); + teardown(test_dir); + } + + #[tokio::test] + async fn append_should_bubble_up_error_if_some_happens() { + let test_dir = setup(); + let fs = LocalFileSystem::default(); + + let file_path = format!("{test_dir}/myfile"); + // intentionally create directory instead of file to force error + std::fs::create_dir(&file_path).unwrap(); + let err = fs.append(&file_path, "Test").await.unwrap_err(); + + assert_eq!(err.to_string(), "Is a directory (os error 21)"); + teardown(test_dir); + } + + #[tokio::test] + async fn copy_should_create_a_duplicate_of_source() { + let 
test_dir = setup(); + let fs = LocalFileSystem::default(); + + let from_path = format!("{test_dir}/myfile"); + std::fs::write(&from_path, "Test").unwrap(); + let to_path = format!("{test_dir}/mycopy"); + fs.copy(&from_path, &to_path).await.unwrap(); + + assert_eq!(std::fs::read_to_string(to_path).unwrap(), "Test"); + teardown(test_dir); + } + + #[tokio::test] + async fn copy_should_ovewrite_destination_if_alread_exists() { + let test_dir = setup(); + let fs = LocalFileSystem::default(); + + let from_path = format!("{test_dir}/myfile"); + std::fs::write(&from_path, "Test").unwrap(); + let to_path = format!("{test_dir}/mycopy"); + std::fs::write(&from_path, "Some content").unwrap(); + fs.copy(&from_path, &to_path).await.unwrap(); + + assert_eq!(std::fs::read_to_string(to_path).unwrap(), "Some content"); + teardown(test_dir); + } + + #[tokio::test] + async fn copy_should_bubble_up_error_if_some_happens() { + let test_dir = setup(); + let fs = LocalFileSystem::default(); + + let from_path = format!("{test_dir}/nonexistentfile"); + let to_path = format!("{test_dir}/mycopy"); + let err = fs.copy(&from_path, &to_path).await.unwrap_err(); + + assert_eq!(err.to_string(), "No such file or directory (os error 2)"); + teardown(test_dir); + } +} From 90a3edf560658e0b7aeef76372ffbcbf87e71b94 Mon Sep 17 00:00:00 2001 From: l0r1s Date: Sun, 3 Sep 2023 21:27:58 +0300 Subject: [PATCH 17/69] feat: updated Provider, ProviderNamespace and ProviderNode traits and related DTO --- crates/provider/src/lib.rs | 58 +++++++++++--------------------------- 1 file changed, 16 insertions(+), 42 deletions(-) diff --git a/crates/provider/src/lib.rs b/crates/provider/src/lib.rs index 65c8e3929..4a17faf54 100644 --- a/crates/provider/src/lib.rs +++ b/crates/provider/src/lib.rs @@ -2,7 +2,7 @@ mod errors; mod native; mod shared; -use std::{net::IpAddr, path::PathBuf, process::ExitStatus, sync::Arc}; +use std::{net::IpAddr, path::PathBuf, process::ExitStatus, sync::Arc, time::Duration}; use async_trait::async_trait; use tokio::sync::RwLock; @@ -12,7 +12,7 @@ use crate::{ shared::types::{FileMap, Port}, }; -#[derive(Clone)] +#[derive(Debug, Clone)] pub struct ProviderCapabilities { pub requires_image: bool, } @@ -33,42 +33,20 @@ impl Default for CreateNamespaceOptions { } } -impl CreateNamespaceOptions { - pub fn new() -> Self { - Self::default() - } - - pub fn root_dir(mut self, root_dir: &str) -> Self { - self.root_dir = root_dir.to_string(); - self - } - - pub fn config_dir(mut self, config_dir: &str) -> Self { - self.config_dir = config_dir.to_string(); - self - } - - pub fn data_dir(mut self, data_dir: &str) -> Self { - self.data_dir = data_dir.to_string(); - self - } -} - #[async_trait] pub trait Provider { - fn capabilities(&self) -> &ProviderCapabilities; - async fn create_namespace( - &self, - options: Option, - ) -> Result; + async fn capabilities(&self) -> ProviderCapabilities; + async fn create_namespace(&self) -> Result; // TODO(team): Do we need at this point to handle cleanner/pod-monitor? } -pub type DynProvider = Arc>; +pub type DynProvider = Arc; pub struct SpawnNodeOptions { pub name: String, - pub node: (), + pub command: String, + pub args: Vec, + pub env: Vec<(String, String)>, // Files to inject, `before` we run the provider command. pub files_inject: Vec, // TODO: keystore logic should live in the orchestrator @@ -86,9 +64,9 @@ pub struct SpawnTempOptions { #[async_trait] pub trait ProviderNamespace { - fn id(&self) -> &str; + async fn id(&self) -> String; /// Spawn a long live node/process. 
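+    /// Returns a handle to the spawned node, which can later be used to read its logs,
+    /// pause/resume, restart or destroy it.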
- async fn spawn_node(&self, options: SpawnNodeOptions) -> Result<(), ProviderError>; + async fn spawn_node(&self, options: SpawnNodeOptions) -> Result; /// Spawn a temporary node, will be shutdown after `get` the desired files or output. async fn spawn_temp(&self, options: SpawnTempOptions) -> Result<(), ProviderError>; /// Destroy namespace (and inner resources). @@ -96,7 +74,7 @@ pub trait ProviderNamespace { async fn static_setup(&self) -> Result<(), ProviderError>; } -pub type DynNamespace = Arc>; +pub type DynNamespace = Arc; pub struct RunCommandOptions { pub args: Vec, @@ -113,7 +91,7 @@ type ExecutionResult = Result)>; #[async_trait] pub trait ProviderNode { - fn name(&self) -> &str; + async fn name(&self) -> String; async fn endpoint(&self) -> Result<(IpAddr, Port), ProviderError>; @@ -137,17 +115,13 @@ pub trait ProviderNode { local_dest: PathBuf, ) -> Result<(), ProviderError>; - async fn pause(&self, node_name: &str) -> Result<(), ProviderError>; + async fn pause(&self) -> Result<(), ProviderError>; - async fn resume(&self, node_name: &str) -> Result<(), ProviderError>; + async fn resume(&self) -> Result<(), ProviderError>; - async fn restart( - &mut self, - node_name: &str, - after_sec: Option, - ) -> Result; + async fn restart(&mut self, after: Option) -> Result<(), ProviderError>; async fn destroy(&self) -> Result<(), ProviderError>; } -pub type DynNode = Arc>; +pub type DynNode = Arc; From 8cf4051071106d37fa7368ac3f403533ce873b3e Mon Sep 17 00:00:00 2001 From: l0r1s Date: Sun, 3 Sep 2023 21:28:36 +0300 Subject: [PATCH 18/69] feat: added anyhow as dependency to provider crate --- crates/provider/Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/provider/Cargo.toml b/crates/provider/Cargo.toml index 6737df24e..895a93a00 100644 --- a/crates/provider/Cargo.toml +++ b/crates/provider/Cargo.toml @@ -22,5 +22,6 @@ tokio = { workspace = true, features = [ "rt", ] } thiserror = { workspace = true } +anyhow = { workspace = true } uuid = { workspace = true, features = ["v4"] } nix = { workspace = true, features = ["signal"] } From 26514ffef5feaaa804fb4bec75ee4aeed4bda2bd Mon Sep 17 00:00:00 2001 From: l0r1s Date: Sun, 3 Sep 2023 21:29:08 +0300 Subject: [PATCH 19/69] feat: updated ProviderError --- crates/provider/src/errors.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/crates/provider/src/errors.rs b/crates/provider/src/errors.rs index 51bb05cb7..f63a290d0 100644 --- a/crates/provider/src/errors.rs +++ b/crates/provider/src/errors.rs @@ -1,5 +1,7 @@ //! Zombienet Provider error definitions. +use support::fs::FileSystemError; + macro_rules! from_error { ($type:ty, $target:ident, $targetvar:expr) => { impl From<$type> for $target { @@ -13,8 +15,8 @@ macro_rules! 
from_error { #[derive(Debug, thiserror::Error)] #[allow(missing_docs)] pub enum ProviderError { - #[error("Namespace ID already exists: {0}")] - ConflictingNamespaceId(String), + #[error("Failed to spawn node '{0}': {1}")] + NodeSpawningFailed(String, anyhow::Error), #[error("Invalid network configuration field {0}")] InvalidConfig(String), @@ -30,7 +32,7 @@ pub enum ProviderError { NodeNotReady(String), // FSErrors are implemented in the associated type #[error(transparent)] - FSError(Box), + FSError(#[from] FileSystemError), // From serde errors #[error("Serialization error")] SerializationError(serde_json::Error), From 9e4b3f19de2ce09f3bec6d9e309024200df1ae5a Mon Sep 17 00:00:00 2001 From: l0r1s Date: Sun, 3 Sep 2023 21:30:24 +0300 Subject: [PATCH 20/69] feat: work on NativeProvider with individual Node, Namespace and Provider struct threadsafe using RwLock and Arc, added implementation logic of resume/restart/pause, destroy and helpers for logs --- crates/provider/src/native.rs | 478 +++++++++++++++++++++++----------- 1 file changed, 320 insertions(+), 158 deletions(-) diff --git a/crates/provider/src/native.rs b/crates/provider/src/native.rs index f9749f4ac..0473246be 100644 --- a/crates/provider/src/native.rs +++ b/crates/provider/src/native.rs @@ -2,30 +2,37 @@ use std::{ self, collections::HashMap, fmt::Debug, + io::Error, net::IpAddr, - path::{Path, PathBuf}, + path::PathBuf, + process::Stdio, sync::{Arc, Weak}, }; use async_trait::async_trait; use configuration::types::Port; +use nix::{ + sys::signal::{kill, Signal}, + unistd::Pid, +}; use support::fs::FileSystem; use tokio::{ + io::{AsyncRead, AsyncReadExt, BufReader}, process::{Child, Command}, - sync::RwLock, + sync::{ + mpsc::{self, Receiver, Sender}, + RwLock, + }, + task::JoinHandle, time::{sleep, Duration}, }; use uuid::Uuid; use crate::{ errors::ProviderError, - shared::{ - constants::{DEFAULT_DATA_DIR, DEFAULT_REMOTE_DIR, LOCALHOST, P2P_PORT}, - types::{FileMap, NativeRunCommandOptions, Process, RunCommandResponse}, - }, - CreateNamespaceOptions, DynNamespace, ExecutionResult, Provider, ProviderCapabilities, - ProviderNamespace, ProviderNode, RunCommandOptions, RunScriptOptions, SpawnNodeOptions, - SpawnTempOptions, + shared::constants::{DEFAULT_TMP_DIR, NODE_CONFIG_DIR, NODE_DATA_DIR}, + DynNamespace, DynNode, ExecutionResult, Provider, ProviderCapabilities, ProviderNamespace, + ProviderNode, RunCommandOptions, RunScriptOptions, SpawnNodeOptions, SpawnTempOptions, }; pub struct NativeProviderOptions @@ -33,220 +40,405 @@ where FS: FileSystem + Send + Sync, { filesystem: FS, + tmp_dir: Option, } -#[derive(Clone)] -pub struct NativeProvider -where - FS: FileSystem + Send + Sync, -{ +#[derive(Debug)] +struct NativeProviderInner { capabilities: ProviderCapabilities, - namespaces: Arc>>>>>, + tmp_dir: String, + namespaces: HashMap>, filesystem: FS, - weak: Weak, } -impl NativeProvider -where - FS: FileSystem + Send + Sync, -{ - pub fn new(options: NativeProviderOptions) -> Arc { - Arc::new_cyclic(|weak| Self { - capabilities: ProviderCapabilities { - requires_image: false, - }, - filesystem: options.filesystem, - namespaces: Default::default(), - weak: weak.clone(), - }) +#[derive(Debug, Clone)] +pub struct NativeProvider { + inner: Arc>>, +} + +#[derive(Debug, Clone)] +struct WeakNativeProvider { + inner: Weak>>, +} + +impl NativeProvider { + pub fn new(options: NativeProviderOptions) -> Self { + NativeProvider { + inner: Arc::new(RwLock::new(NativeProviderInner { + capabilities: ProviderCapabilities { + requires_image: 
false, + }, + tmp_dir: options.tmp_dir.unwrap_or(DEFAULT_TMP_DIR.to_string()), + namespaces: Default::default(), + filesystem: options.filesystem, + })), + } } } #[async_trait] -impl Provider for Arc> -where - FS: FileSystem + Send + Sync + Clone + 'static, -{ - fn capabilities(&self) -> &ProviderCapabilities { - &self.capabilities +impl Provider for NativeProvider { + async fn capabilities(&self) -> ProviderCapabilities { + self.inner.read().await.capabilities.clone() } - async fn create_namespace( - &self, - options: Option, - ) -> Result { - let options = options.unwrap_or(CreateNamespaceOptions::new()); + async fn create_namespace(&self) -> Result { let id = format!("zombie_{}", Uuid::new_v4()); - let mut namespaces = self.namespaces.write().await; + let mut inner = self.inner.write().await; - if namespaces.contains_key(&id) { - return Err(ProviderError::ConflictingNamespaceId(id)); - } + let base_dir = format!("{}/{}", inner.tmp_dir, &id); + inner.filesystem.create_dir(&base_dir).await.unwrap(); - let base_dir = format!("{}/{}", &options.root_dir, &id); - let config_dir = format!("{}/{}", &base_dir, &options.config_dir); - let data_dir = format!("{}/{}", &base_dir, &options.data_dir); - - // self.filesystem.create_dir(&config_dir).await.unwrap(); - // self.filesystem.create_dir(&data_dir).await.unwrap(); - - let namespace = Arc::new_cyclic(|weak| { - RwLock::new(NativeNamespace { + let namespace = NativeNamespace { + inner: Arc::new(RwLock::new(NativeNamespaceInner { id: id.clone(), - config_dir, - data_dir, + base_dir, nodes: Default::default(), - filesystem: self.filesystem.clone(), - provider: self.weak.clone(), - weak: weak.clone(), - }) - }); + filesystem: inner.filesystem.clone(), + provider: WeakNativeProvider { + inner: Arc::downgrade(&self.inner), + }, + })), + }; - namespaces.insert(id, namespace.clone()); + inner.namespaces.insert(id, namespace.clone()); - Ok(namespace) + Ok(Arc::new(namespace)) } } -#[derive(Debug, Clone)] -pub struct NativeNamespace -where - FS: FileSystem + Send + Sync, -{ +#[derive(Debug)] +struct NativeNamespaceInner { id: String, - config_dir: String, - data_dir: String, - nodes: HashMap>>>, + base_dir: String, + nodes: HashMap>, filesystem: FS, - provider: Weak>, - weak: Weak>, + provider: WeakNativeProvider, +} + +#[derive(Debug, Clone)] +pub struct NativeNamespace { + inner: Arc>>, +} + +#[derive(Debug, Clone)] +struct WeakNativeNamespace { + inner: Weak>>, } #[async_trait] -impl ProviderNamespace for NativeNamespace -where - FS: FileSystem + Send + Sync, -{ - fn id(&self) -> &str { - &self.id +impl ProviderNamespace for NativeNamespace { + async fn id(&self) -> String { + self.inner.read().await.id.clone() } - async fn spawn_node(&self, options: SpawnNodeOptions) -> Result<(), ProviderError> { - Err(ProviderError::DuplicatedNodeName("test".to_string())) + async fn spawn_node(&self, options: SpawnNodeOptions) -> Result { + let mut inner = self.inner.write().await; + + // create node directories and filepaths + let base_dir = format!("{}/{}", &inner.base_dir, &options.name); + let log_path = format!("{}/{}.log", &base_dir, &options.name); + let config_dir = format!("{}{}", &base_dir, NODE_CONFIG_DIR); + let data_dir = format!("{}{}", &base_dir, NODE_DATA_DIR); + inner.filesystem.create_dir(&base_dir).await.unwrap(); + inner.filesystem.create_dir(&config_dir).await.unwrap(); + inner.filesystem.create_dir(&data_dir).await.unwrap(); + + let (process, stdout_reading_handle, stderr_reading_handle, log_writing_handle) = + create_process_with_log_tasks( + 
&options.name, + &options.command, + &options.args, + &options.env, + &log_path, + inner.filesystem.clone(), + )?; + + // create node structure holding state + let node = NativeNode { + inner: Arc::new(RwLock::new(NativeNodeInner { + name: options.name.clone(), + command: options.command, + args: options.args, + env: options.env, + log_path, + process, + stdout_reading_handle, + stderr_reading_handle, + log_writing_handle, + filesystem: inner.filesystem.clone(), + namespace: WeakNativeNamespace { + inner: Arc::downgrade(&self.inner), + }, + })), + }; + + // store node inside namespace + inner.nodes.insert(options.name, node.clone()); + + Ok(Arc::new(node)) } - async fn spawn_temp(&self, options: SpawnTempOptions) -> Result<(), ProviderError> { - Err(ProviderError::DuplicatedNodeName("test".to_string())) + async fn spawn_temp(&self, _options: SpawnTempOptions) -> Result<(), ProviderError> { + todo!() } async fn static_setup(&self) -> Result<(), ProviderError> { - Err(ProviderError::DuplicatedNodeName("test".to_string())) + todo!() } async fn destroy(&self) -> Result<(), ProviderError> { + // we need to clone nodes (behind an Arc, so cheaply) to avoid deadlock between the inner.write lock and the node.destroy + // method acquiring a lock the namespace to remove the node from the nodes hashmap. let nodes = self + .inner + .write() + .await .nodes .iter() .map(|(_, node)| node.clone()) - .collect::>>>>(); + .collect::>>(); + + for node in nodes.iter() { + node.destroy().await?; + } - for node in nodes { - node.read().await.destroy(); + // remove namespace from provider + let inner = self.inner.write().await; + if let Some(provider) = inner.provider.inner.upgrade() { + provider.write().await.namespaces.remove(&inner.id); } - Err(ProviderError::DuplicatedNodeName("test".to_string())) + Ok(()) } } -#[derive(Debug, Clone)] -pub struct NativeNode -where - FS: FileSystem + Send + Sync, -{ +#[derive(Debug)] +struct NativeNodeInner { name: String, + command: String, + args: Vec, + env: Vec<(String, String)>, + log_path: String, + process: Child, + stdout_reading_handle: JoinHandle<()>, + stderr_reading_handle: JoinHandle<()>, + log_writing_handle: JoinHandle<()>, filesystem: FS, - namespace: Weak>>, + namespace: WeakNativeNamespace, +} + +impl NativeNodeInner {} + +#[derive(Debug, Clone)] +struct NativeNode { + inner: Arc>>, } #[async_trait] -impl ProviderNode for NativeNode -where - T: FileSystem + Send + Sync, -{ - fn name(&self) -> &str { - "" +impl ProviderNode for NativeNode { + async fn name(&self) -> String { + self.inner.read().await.name.clone() } async fn endpoint(&self) -> Result<(IpAddr, Port), ProviderError> { - Err(ProviderError::DuplicatedNodeName("test".to_string())) + todo!(); } - async fn mapped_port(&self, port: Port) -> Result { - Err(ProviderError::DuplicatedNodeName("test".to_string())) + async fn mapped_port(&self, _port: Port) -> Result { + todo!() } async fn logs(&self) -> Result { - Err(ProviderError::DuplicatedNodeName("test".to_string())) + let inner = self.inner.read().await; + Ok(inner.filesystem.read_to_string(&inner.log_path).await?) } async fn dump_logs(&self, dest: PathBuf) -> Result<(), ProviderError> { - Err(ProviderError::DuplicatedNodeName("test".to_string())) + let logs = self.logs().await?; + Ok(self + .inner + .write() + .await + .filesystem + .write(dest, logs.as_bytes()) + .await?) 
} async fn run_command( &self, - options: RunCommandOptions, + _options: RunCommandOptions, ) -> Result { - Err(ProviderError::DuplicatedNodeName("test".to_string())) + todo!() } async fn run_script( &self, - options: RunScriptOptions, + _options: RunScriptOptions, ) -> Result { - Err(ProviderError::DuplicatedNodeName("test".to_string())) + todo!() } async fn copy_file_from_node( &self, - remote_src: PathBuf, - local_dest: PathBuf, + _remote_src: PathBuf, + _local_dest: PathBuf, ) -> Result<(), ProviderError> { - Ok(()) + todo!() } - async fn pause(&self, node_name: &str) -> Result<(), ProviderError> { + async fn pause(&self) -> Result<(), ProviderError> { + let inner = self.inner.write().await; + let raw_pid = inner.process.id().unwrap(); + let pid = Pid::from_raw(raw_pid.try_into().unwrap()); + + kill(pid, Signal::SIGSTOP).unwrap(); + Ok(()) } - async fn resume(&self, node_name: &str) -> Result<(), ProviderError> { + async fn resume(&self) -> Result<(), ProviderError> { + let inner = self.inner.write().await; + let raw_pid = inner.process.id().unwrap(); + let pid = Pid::from_raw(raw_pid.try_into().unwrap()); + + kill(pid, Signal::SIGCONT).unwrap(); + Ok(()) } - async fn restart( - &mut self, - node_name: &str, - after_sec: Option, - ) -> Result { - Ok(false) + async fn restart(&mut self, after: Option) -> Result<(), ProviderError> { + if let Some(duration) = after { + sleep(duration).await; + } + + let mut inner = self.inner.write().await; + + // abort all task handlers and kill process + inner.log_writing_handle.abort(); + inner.stdout_reading_handle.abort(); + inner.stderr_reading_handle.abort(); + inner.process.kill().await.unwrap(); + + // re-spawn process with tasks for logs + let (process, stdout_reading_handle, stderr_reading_handle, log_writing_handle) = + create_process_with_log_tasks( + &inner.name, + &inner.command, + &inner.args, + &inner.env, + &inner.log_path, + inner.filesystem.clone(), + )?; + + // update node process and handlers + inner.process = process; + inner.stderr_reading_handle = stdout_reading_handle; + inner.stderr_reading_handle = stderr_reading_handle; + inner.log_writing_handle = log_writing_handle; + + Ok(()) } async fn destroy(&self) -> Result<(), ProviderError> { - self.namespace - .upgrade() - .expect("node should be destroyed if namespace is dropped") - .write() - .await - .nodes - .remove(&self.name); + let mut inner = self.inner.write().await; + + inner.log_writing_handle.abort(); + inner.stdout_reading_handle.abort(); + inner.stderr_reading_handle.abort(); + inner.process.kill().await.unwrap(); + + if let Some(namespace) = inner.namespace.inner.upgrade() { + namespace.write().await.nodes.remove(&inner.name); + } + Ok(()) } } +fn create_stream_polling_task( + stream: impl AsyncRead + Unpin + Send + 'static, + tx: Sender, Error>>, +) -> JoinHandle<()> { + tokio::spawn(async move { + let mut reader = BufReader::new(stream); + let mut buffer = vec![0u8; 1024]; + + loop { + match reader.read(&mut buffer).await { + Ok(0) => { + let _ = tx.send(Ok(Vec::new())).await; + break; + }, + Ok(n) => { + let _ = tx.send(Ok(buffer[..n].to_vec())).await; + }, + Err(e) => { + let _ = tx.send(Err(e)).await; + break; + }, + } + } + }) +} + +fn create_log_writing_task( + mut rx: Receiver, Error>>, + filesystem: impl FileSystem + Send + Sync + 'static, + log_path: String, +) -> JoinHandle<()> { + tokio::spawn(async move { + loop { + sleep(Duration::from_millis(250)).await; + while let Some(Ok(data)) = rx.recv().await { + filesystem.append(&log_path, data).await.unwrap(); + 
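+                // chunks are appended in the order they arrive from the stdout/stderr readers;
+                // a failed append (unwrap) only panics this background task, not the node process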
} + } + }) +} + +fn create_process_with_log_tasks( + name: &str, + command: &str, + args: &[String], + env: &[(String, String)], + log_path: &str, + filesystem: impl FileSystem + Send + Sync + 'static, +) -> Result<(Child, JoinHandle<()>, JoinHandle<()>, JoinHandle<()>), ProviderError> { + // create process + let mut process = Command::new(command) + .args(args) + .envs(env.to_owned()) + .stdin(Stdio::null()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .kill_on_drop(true) + .spawn() + .map_err(|err| ProviderError::NodeSpawningFailed(name.to_string(), err.into()))?; + let stdout = process.stdout.take().expect("infaillible, stdout is piped"); + let stderr = process.stderr.take().expect("Infaillible, stderr is piped"); + + // create additonnal long-running tasks for logs + let (stdout_tx, rx) = mpsc::channel(10); + let stderr_tx = stdout_tx.clone(); + let stdout_reading_handle = create_stream_polling_task(stdout, stdout_tx); + let stderr_reading_handle = create_stream_polling_task(stderr, stderr_tx); + let log_writing_handle = create_log_writing_task(rx, filesystem, log_path.to_owned()); + + Ok(( + process, + stdout_reading_handle, + stderr_reading_handle, + log_writing_handle, + )) +} + #[cfg(test)] mod tests { - #[tokio::test] - async fn it_should_works() {} + #[tokio::test(flavor = "multi_thread", worker_threads = 8)] + async fn it_should_works() { + todo!(); + } } // #[derive(Debug, Clone, PartialEq)] @@ -533,36 +725,6 @@ mod tests { // Ok(format!("tail -f {}/{}.log", self.tmp_dir, name)) // } -// // TODO: Add test -// async fn pause(&self, node_name: &str) -> Result<(), ProviderError> { -// let process = self.get_process_by_node_name(node_name)?; - -// let _ = self -// .run_command( -// vec![format!("kill -STOP {}", process.pid)], -// NativeRunCommandOptions { -// is_failure_allowed: true, -// }, -// ) -// .await?; -// Ok(()) -// } - -// // TODO: Add test -// async fn resume(&self, node_name: &str) -> Result<(), ProviderError> { -// let process = self.get_process_by_node_name(node_name)?; - -// let _ = self -// .run_command( -// vec![format!("kill -CONT {}", process.pid)], -// NativeRunCommandOptions { -// is_failure_allowed: true, -// }, -// ) -// .await?; -// Ok(()) -// } - // // TODO: Add test // async fn restart( // &mut self, From 284c7912989d595a1a8e7881533c12c336c727f8 Mon Sep 17 00:00:00 2001 From: l0r1s Date: Sun, 3 Sep 2023 21:30:57 +0300 Subject: [PATCH 21/69] feat: rename some provider constants --- crates/provider/src/shared/constants.rs | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/crates/provider/src/shared/constants.rs b/crates/provider/src/shared/constants.rs index 2fab0358f..2a6e3f23e 100644 --- a/crates/provider/src/shared/constants.rs +++ b/crates/provider/src/shared/constants.rs @@ -1,13 +1,15 @@ use std::net::{IpAddr, Ipv4Addr}; -/// Default dir for configuration inside pods -pub const DEFAULT_REMOTE_DIR: &str = "/cfg"; -/// Default dir for node /data -pub const DEFAULT_DATA_DIR: &str = "/data"; +/// Default temporary directory +pub const DEFAULT_TMP_DIR: &str = "/tmp"; +/// Directory for node configuration +pub const NODE_CONFIG_DIR: &str = "/cfg"; +/// Directory for node configuration +pub const NODE_DATA_DIR: &str = "/data"; /// Localhost ip -pub const LOCALHOST: IpAddr = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); +pub const _LOCALHOST: IpAddr = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); /// The port substrate listens for p2p connections on -pub const P2P_PORT: u16 = 30333; +pub const _P2P_PORT: u16 = 30333; /// The remote 
port prometheus can be accessed with pub const _PROMETHEUS_PORT: u16 = 9615; /// The remote port websocket to access the RPC From 9b27a2a6c3c16c4fe00d4f1cbb36a828dce51acc Mon Sep 17 00:00:00 2001 From: l0r1s Date: Wed, 6 Sep 2023 20:20:53 +0300 Subject: [PATCH 22/69] feat: added uuid with v4 features as normal dependency --- crates/support/Cargo.toml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/crates/support/Cargo.toml b/crates/support/Cargo.toml index f660b7bf0..3d671bd6a 100644 --- a/crates/support/Cargo.toml +++ b/crates/support/Cargo.toml @@ -12,6 +12,4 @@ async-trait = { workspace = true } futures = { workspace = true } reqwest = { workspace = true } tokio = { workspace = true, features = ["full"] } - -[dev-dependencies] -uuid = { workspace = true, features = ["v4"] } +uuid = { workspace = true, features = ["v4"] } \ No newline at end of file From 368db2a9d1b4074775cdafa7ffef21c6ab16b42f Mon Sep 17 00:00:00 2001 From: l0r1s Date: Wed, 6 Sep 2023 20:21:39 +0300 Subject: [PATCH 23/69] feat: added new set_mode method on FileSystem trait to modify permissions bits, added implementations and tests --- crates/support/src/fs.rs | 2 + crates/support/src/fs/in_memory.rs | 308 ++++++++++++++++------------- crates/support/src/fs/local.rs | 81 +++++++- 3 files changed, 255 insertions(+), 136 deletions(-) diff --git a/crates/support/src/fs.rs b/crates/support/src/fs.rs index 3c6061d23..6afb60c22 100644 --- a/crates/support/src/fs.rs +++ b/crates/support/src/fs.rs @@ -44,4 +44,6 @@ pub trait FileSystem { from: impl AsRef + Send, to: impl AsRef + Send, ) -> FileSystemResult<()>; + + async fn set_mode(&self, path: impl AsRef + Send, perm: u32) -> FileSystemResult<()>; } diff --git a/crates/support/src/fs/in_memory.rs b/crates/support/src/fs/in_memory.rs index bb6edee06..6a43e3d95 100644 --- a/crates/support/src/fs/in_memory.rs +++ b/crates/support/src/fs/in_memory.rs @@ -7,8 +7,21 @@ use tokio::sync::RwLock; #[derive(Debug, Clone)] pub enum InMemoryFile { - File(Vec), - Directory, + File { mode: u32, contents: Vec }, + Directory { mode: u32 }, +} + +impl InMemoryFile { + fn file(contents: Vec) -> Self { + Self::File { + mode: 0o664, + contents, + } + } + + fn dir() -> Self { + Self::Directory { mode: 0o775 } + } } #[derive(Default, Debug, Clone)] @@ -31,10 +44,10 @@ impl FileSystem for InMemoryFileSystem { let os_path = path.as_os_str(); match self.files.read().await.get(os_path) { - Some(InMemoryFile::File(_)) => { + Some(InMemoryFile::File { .. }) => { Err(anyhow!("file {:?} already exists", os_path.to_owned(),))? }, - Some(InMemoryFile::Directory) => { + Some(InMemoryFile::Directory { .. }) => { Err(anyhow!("directory {:?} already exists", os_path.to_owned(),))? }, None => {}, @@ -43,8 +56,8 @@ impl FileSystem for InMemoryFileSystem { let mut ancestors = path.ancestors().skip(1); while let Some(path) = ancestors.next() { match self.files.read().await.get(path.as_os_str()) { - Some(InMemoryFile::Directory) => continue, - Some(InMemoryFile::File(_)) => Err(anyhow!( + Some(InMemoryFile::Directory { .. }) => continue, + Some(InMemoryFile::File { .. 
}) => Err(anyhow!( "ancestor {:?} is not a directory", path.as_os_str(), ))?, @@ -55,7 +68,7 @@ impl FileSystem for InMemoryFileSystem { self.files .write() .await - .insert(os_path.to_owned(), InMemoryFile::Directory); + .insert(os_path.to_owned(), InMemoryFile::dir()); Ok(()) } @@ -72,12 +85,12 @@ impl FileSystem for InMemoryFileSystem { while let Some(path) = ancestors.next() { match files.get(path.as_os_str()) { - Some(InMemoryFile::Directory) => continue, - Some(InMemoryFile::File(_)) => Err(anyhow!( + Some(InMemoryFile::Directory { .. }) => continue, + Some(InMemoryFile::File { .. }) => Err(anyhow!( "ancestor {:?} is not a directory", path.as_os_str().to_owned(), ))?, - None => files.insert(path.as_os_str().to_owned(), InMemoryFile::Directory), + None => files.insert(path.as_os_str().to_owned(), InMemoryFile::dir()), }; } @@ -88,8 +101,8 @@ impl FileSystem for InMemoryFileSystem { let os_path = path.as_ref().as_os_str(); match self.files.read().await.get(os_path) { - Some(InMemoryFile::File(content)) => Ok(content.clone()), - Some(InMemoryFile::Directory) => { + Some(InMemoryFile::File { contents, .. }) => Ok(contents.clone()), + Some(InMemoryFile::Directory { .. }) => { Err(anyhow!("file {:?} is a directory", os_path).into()) }, None => Err(anyhow!("file {:?} not found", os_path).into()), @@ -116,8 +129,8 @@ impl FileSystem for InMemoryFileSystem { let mut ancestors = path.ancestors().skip(1); while let Some(path) = ancestors.next() { match files.get(path.as_os_str()) { - Some(InMemoryFile::Directory) => continue, - Some(InMemoryFile::File(_)) => Err(anyhow!( + Some(InMemoryFile::Directory { .. }) => continue, + Some(InMemoryFile::File { .. }) => Err(anyhow!( "ancestor {:?} is not a directory", path.as_os_str() ))?, @@ -125,13 +138,13 @@ impl FileSystem for InMemoryFileSystem { }; } - if let Some(InMemoryFile::Directory) = files.get(os_path) { + if let Some(InMemoryFile::Directory { .. }) = files.get(os_path) { return Err(anyhow!("file {:?} is a directory", os_path).into()); } files.insert( os_path.to_owned(), - InMemoryFile::File(contents.as_ref().to_vec()), + InMemoryFile::file(contents.as_ref().to_vec()), ); Ok(()) @@ -163,6 +176,23 @@ impl FileSystem for InMemoryFileSystem { let content = self.read(from).await?; self.write(to, content).await } + + async fn set_mode(&self, path: impl AsRef + Send, mode: u32) -> FileSystemResult<()> { + let os_path = path.as_ref().as_os_str(); + if let Some(file) = self.files.write().await.get_mut(os_path) { + match file { + InMemoryFile::File { mode: old_mode, .. } => { + *old_mode = mode; + }, + InMemoryFile::Directory { mode: old_mode, .. 
} => { + *old_mode = mode; + }, + }; + Ok(()) + } else { + Err(anyhow!("file {:?} not found", os_path).into()) + } + } } #[cfg(test)] @@ -174,7 +204,7 @@ mod tests { async fn create_dir_should_create_a_directory_at_root() { let fs = InMemoryFileSystem::new(HashMap::from([( OsString::from_str("/").unwrap(), - InMemoryFile::Directory, + InMemoryFile::dir(), )])); fs.create_dir("/dir").await.unwrap(); @@ -186,15 +216,15 @@ mod tests { .await .get(&OsString::from_str("/dir").unwrap()) .unwrap(), - InMemoryFile::Directory + InMemoryFile::Directory { mode } if *mode == 0o775 )); } #[tokio::test] async fn create_dir_should_return_an_error_if_directory_already_exists() { let fs = InMemoryFileSystem::new(HashMap::from([ - (OsString::from_str("/").unwrap(), InMemoryFile::Directory), - (OsString::from_str("/dir").unwrap(), InMemoryFile::Directory), + (OsString::from_str("/").unwrap(), InMemoryFile::dir()), + (OsString::from_str("/dir").unwrap(), InMemoryFile::dir()), ])); let err = fs.create_dir("/dir").await.unwrap_err(); @@ -206,10 +236,10 @@ mod tests { #[tokio::test] async fn create_dir_should_return_an_error_if_file_already_exists() { let fs = InMemoryFileSystem::new(HashMap::from([ - (OsString::from_str("/").unwrap(), InMemoryFile::Directory), + (OsString::from_str("/").unwrap(), InMemoryFile::dir()), ( OsString::from_str("/dir").unwrap(), - InMemoryFile::File(vec![]), + InMemoryFile::file(vec![]), ), ])); @@ -222,18 +252,12 @@ mod tests { #[tokio::test] async fn create_dir_should_create_a_directory_if_all_ancestors_exist() { let fs = InMemoryFileSystem::new(HashMap::from([ - (OsString::from_str("/").unwrap(), InMemoryFile::Directory), - ( - OsString::from_str("/path").unwrap(), - InMemoryFile::Directory, - ), - ( - OsString::from_str("/path/to").unwrap(), - InMemoryFile::Directory, - ), + (OsString::from_str("/").unwrap(), InMemoryFile::dir()), + (OsString::from_str("/path").unwrap(), InMemoryFile::dir()), + (OsString::from_str("/path/to").unwrap(), InMemoryFile::dir()), ( OsString::from_str("/path/to/my").unwrap(), - InMemoryFile::Directory, + InMemoryFile::dir(), ), ])); @@ -246,22 +270,16 @@ mod tests { .await .get(&OsString::from_str("/path/to/my/dir").unwrap()) .unwrap(), - InMemoryFile::Directory + InMemoryFile::Directory { mode} if *mode == 0o775 )); } #[tokio::test] async fn create_dir_should_return_an_error_if_some_directory_ancestor_doesnt_exists() { let fs = InMemoryFileSystem::new(HashMap::from([ - (OsString::from_str("/").unwrap(), InMemoryFile::Directory), - ( - OsString::from_str("/path").unwrap(), - InMemoryFile::Directory, - ), - ( - OsString::from_str("/path/to").unwrap(), - InMemoryFile::Directory, - ), + (OsString::from_str("/").unwrap(), InMemoryFile::dir()), + (OsString::from_str("/path").unwrap(), InMemoryFile::dir()), + (OsString::from_str("/path/to").unwrap(), InMemoryFile::dir()), ])); let err = fs.create_dir("/path/to/my/dir").await.unwrap_err(); @@ -273,18 +291,15 @@ mod tests { #[tokio::test] async fn create_dir_should_return_an_error_if_some_ancestor_is_not_a_directory() { let fs = InMemoryFileSystem::new(HashMap::from([ - (OsString::from_str("/").unwrap(), InMemoryFile::Directory), + (OsString::from_str("/").unwrap(), InMemoryFile::dir()), ( OsString::from_str("/path").unwrap(), - InMemoryFile::File(vec![]), - ), - ( - OsString::from_str("/path/to").unwrap(), - InMemoryFile::Directory, + InMemoryFile::file(vec![]), ), + (OsString::from_str("/path/to").unwrap(), InMemoryFile::dir()), ( OsString::from_str("/path/to/my").unwrap(), - InMemoryFile::Directory, + 
InMemoryFile::dir(), ), ])); @@ -298,7 +313,7 @@ mod tests { async fn create_dir_all_should_create_a_directory_and_all_its_ancestors_if_they_dont_exist() { let fs = InMemoryFileSystem::new(HashMap::from([( OsString::from_str("/").unwrap(), - InMemoryFile::Directory, + InMemoryFile::dir(), )])); fs.create_dir_all("/path/to/my/dir").await.unwrap(); @@ -310,7 +325,7 @@ mod tests { .await .get(&OsString::from_str("/path").unwrap()) .unwrap(), - InMemoryFile::Directory + InMemoryFile::Directory { mode } if *mode == 0o775 )); assert!(matches!( fs.files @@ -318,7 +333,7 @@ mod tests { .await .get(&OsString::from_str("/path/to").unwrap()) .unwrap(), - InMemoryFile::Directory + InMemoryFile::Directory { mode } if *mode == 0o775 )); assert!(matches!( fs.files @@ -326,7 +341,7 @@ mod tests { .await .get(&OsString::from_str("/path/to/my").unwrap()) .unwrap(), - InMemoryFile::Directory + InMemoryFile::Directory { mode } if *mode == 0o775 )); assert!(matches!( fs.files @@ -334,7 +349,7 @@ mod tests { .await .get(&OsString::from_str("/path/to/my/dir").unwrap()) .unwrap(), - InMemoryFile::Directory + InMemoryFile::Directory { mode } if *mode == 0o775 )); } @@ -342,15 +357,9 @@ mod tests { async fn create_dir_all_should_create_a_directory_and_some_of_its_ancestors_if_they_dont_exist() { let fs = InMemoryFileSystem::new(HashMap::from([ - (OsString::from_str("/").unwrap(), InMemoryFile::Directory), - ( - OsString::from_str("/path").unwrap(), - InMemoryFile::Directory, - ), - ( - OsString::from_str("/path/to").unwrap(), - InMemoryFile::Directory, - ), + (OsString::from_str("/").unwrap(), InMemoryFile::dir()), + (OsString::from_str("/path").unwrap(), InMemoryFile::dir()), + (OsString::from_str("/path/to").unwrap(), InMemoryFile::dir()), ])); fs.create_dir_all("/path/to/my/dir").await.unwrap(); @@ -362,7 +371,7 @@ mod tests { .await .get(&OsString::from_str("/path/to/my").unwrap()) .unwrap(), - InMemoryFile::Directory + InMemoryFile::Directory { mode } if *mode == 0o775 )); assert!(matches!( fs.files @@ -370,22 +379,19 @@ mod tests { .await .get(&OsString::from_str("/path/to/my/dir").unwrap()) .unwrap(), - InMemoryFile::Directory + InMemoryFile::Directory { mode } if *mode == 0o775 )); } #[tokio::test] async fn create_dir_all_should_return_an_error_if_some_ancestor_is_not_a_directory() { let fs = InMemoryFileSystem::new(HashMap::from([ - (OsString::from_str("/").unwrap(), InMemoryFile::Directory), + (OsString::from_str("/").unwrap(), InMemoryFile::dir()), ( OsString::from_str("/path").unwrap(), - InMemoryFile::File(vec![]), - ), - ( - OsString::from_str("/path/to").unwrap(), - InMemoryFile::Directory, + InMemoryFile::file(vec![]), ), + (OsString::from_str("/path/to").unwrap(), InMemoryFile::dir()), ])); let err = fs.create_dir_all("/path/to/my/dir").await.unwrap_err(); @@ -398,7 +404,7 @@ mod tests { async fn read_should_return_the_file_content() { let fs = InMemoryFileSystem::new(HashMap::from([( OsString::from_str("/myfile").unwrap(), - InMemoryFile::File("content".as_bytes().to_vec()), + InMemoryFile::file("content".as_bytes().to_vec()), )])); let content = fs.read("/myfile").await.unwrap(); @@ -419,7 +425,7 @@ mod tests { async fn read_should_return_an_error_if_file_is_a_directory() { let fs = InMemoryFileSystem::new(HashMap::from([( OsString::from_str("/myfile").unwrap(), - InMemoryFile::Directory, + InMemoryFile::dir(), )])); let err = fs.read("/myfile").await.unwrap_err(); @@ -431,7 +437,7 @@ mod tests { async fn read_to_string_should_return_the_file_content_as_a_string() { let fs = 
InMemoryFileSystem::new(HashMap::from([( OsString::from_str("/myfile").unwrap(), - InMemoryFile::File("content".as_bytes().to_vec()), + InMemoryFile::file("content".as_bytes().to_vec()), )])); let content = fs.read_to_string("/myfile").await.unwrap(); @@ -452,7 +458,7 @@ mod tests { async fn read_to_string_should_return_an_error_if_file_is_a_directory() { let fs = InMemoryFileSystem::new(HashMap::from([( OsString::from_str("/myfile").unwrap(), - InMemoryFile::Directory, + InMemoryFile::dir(), )])); let err = fs.read_to_string("/myfile").await.unwrap_err(); @@ -464,7 +470,7 @@ mod tests { async fn read_to_string_should_return_an_error_if_file_isnt_utf8_encoded() { let fs = InMemoryFileSystem::new(HashMap::from([( OsString::from_str("/myfile").unwrap(), - InMemoryFile::File(vec![0xC3, 0x28]), + InMemoryFile::file(vec![0xC3, 0x28]), )])); let err = fs.read_to_string("/myfile").await.unwrap_err(); @@ -479,7 +485,7 @@ mod tests { async fn write_should_create_file_with_content_if_file_doesnt_exists() { let fs = InMemoryFileSystem::new(HashMap::from([( OsString::from_str("/").unwrap(), - InMemoryFile::Directory, + InMemoryFile::dir(), )])); fs.write("/myfile", "my file content").await.unwrap(); @@ -490,17 +496,17 @@ mod tests { .read() .await .get(&OsString::from_str("/myfile").unwrap()), - Some(InMemoryFile::File(content)) if content == "my file content".as_bytes() + Some(InMemoryFile::File {mode, contents}) if *mode == 0o664 && contents == "my file content".as_bytes() )); } #[tokio::test] async fn write_should_overwrite_file_content_if_file_exists() { let fs = InMemoryFileSystem::new(HashMap::from([ - (OsString::from_str("/").unwrap(), InMemoryFile::Directory), + (OsString::from_str("/").unwrap(), InMemoryFile::dir()), ( OsString::from_str("/myfile").unwrap(), - InMemoryFile::File("my file content".as_bytes().to_vec()), + InMemoryFile::file("my file content".as_bytes().to_vec()), ), ])); @@ -512,18 +518,15 @@ mod tests { .read() .await .get(&OsString::from_str("/myfile").unwrap()), - Some(InMemoryFile::File(content)) if content == "my new file content".as_bytes() + Some(InMemoryFile::File { mode, contents }) if *mode == 0o664 && contents == "my new file content".as_bytes() )); } #[tokio::test] async fn write_should_return_an_error_if_file_is_a_directory() { let fs = InMemoryFileSystem::new(HashMap::from([ - (OsString::from_str("/").unwrap(), InMemoryFile::Directory), - ( - OsString::from_str("/myfile").unwrap(), - InMemoryFile::Directory, - ), + (OsString::from_str("/").unwrap(), InMemoryFile::dir()), + (OsString::from_str("/myfile").unwrap(), InMemoryFile::dir()), ])); let err = fs.write("/myfile", "my file content").await.unwrap_err(); @@ -535,11 +538,8 @@ mod tests { #[tokio::test] async fn write_should_return_an_error_if_file_is_new_and_some_ancestor_doesnt_exists() { let fs = InMemoryFileSystem::new(HashMap::from([ - (OsString::from_str("/").unwrap(), InMemoryFile::Directory), - ( - OsString::from_str("/path/to").unwrap(), - InMemoryFile::Directory, - ), + (OsString::from_str("/").unwrap(), InMemoryFile::dir()), + (OsString::from_str("/path/to").unwrap(), InMemoryFile::dir()), ])); let err = fs @@ -554,15 +554,12 @@ mod tests { #[tokio::test] async fn write_should_return_an_error_if_file_is_new_and_some_ancestor_is_not_a_directory() { let fs = InMemoryFileSystem::new(HashMap::from([ - (OsString::from_str("/").unwrap(), InMemoryFile::Directory), + (OsString::from_str("/").unwrap(), InMemoryFile::dir()), ( OsString::from_str("/path").unwrap(), - InMemoryFile::File(vec![]), - ), - ( - 
OsString::from_str("/path/to").unwrap(), - InMemoryFile::Directory, + InMemoryFile::file(vec![]), ), + (OsString::from_str("/path/to").unwrap(), InMemoryFile::dir()), ])); let err = fs @@ -577,10 +574,10 @@ mod tests { #[tokio::test] async fn append_should_update_file_content_if_file_exists() { let fs = InMemoryFileSystem::new(HashMap::from([ - (OsString::from_str("/").unwrap(), InMemoryFile::Directory), + (OsString::from_str("/").unwrap(), InMemoryFile::dir()), ( OsString::from_str("/myfile").unwrap(), - InMemoryFile::File("my file content".as_bytes().to_vec()), + InMemoryFile::file("my file content".as_bytes().to_vec()), ), ])); @@ -594,7 +591,7 @@ mod tests { .read() .await .get(&OsString::from_str("/myfile").unwrap()), - Some(InMemoryFile::File(content)) if content == "my file content has been updated with new things".as_bytes() + Some(InMemoryFile::File { mode, contents }) if *mode == 0o664 && contents == "my file content has been updated with new things".as_bytes() )); } @@ -602,7 +599,7 @@ mod tests { async fn append_should_create_file_with_content_if_file_doesnt_exists() { let fs = InMemoryFileSystem::new(HashMap::from([( OsString::from_str("/").unwrap(), - InMemoryFile::Directory, + InMemoryFile::dir(), )])); fs.append("/myfile", "my file content").await.unwrap(); @@ -613,7 +610,7 @@ mod tests { .read() .await .get(&OsString::from_str("/myfile").unwrap()), - Some(InMemoryFile::File(content)) if content == "my file content".as_bytes() + Some(InMemoryFile::File { mode,contents }) if *mode == 0o664 && contents == "my file content".as_bytes() )); } @@ -621,7 +618,7 @@ mod tests { async fn append_should_return_an_error_if_file_is_a_directory() { let fs = InMemoryFileSystem::new(HashMap::from([( OsString::from_str("/myfile").unwrap(), - InMemoryFile::Directory, + InMemoryFile::dir(), )])); let err = fs.append("/myfile", "my file content").await.unwrap_err(); @@ -632,11 +629,8 @@ mod tests { #[tokio::test] async fn append_should_return_an_error_if_file_is_new_and_some_ancestor_doesnt_exists() { let fs = InMemoryFileSystem::new(HashMap::from([ - (OsString::from_str("/").unwrap(), InMemoryFile::Directory), - ( - OsString::from_str("/path/to").unwrap(), - InMemoryFile::Directory, - ), + (OsString::from_str("/").unwrap(), InMemoryFile::dir()), + (OsString::from_str("/path/to").unwrap(), InMemoryFile::dir()), ])); let err = fs @@ -651,15 +645,12 @@ mod tests { #[tokio::test] async fn append_should_return_an_error_if_file_is_new_and_some_ancestor_is_not_a_directory() { let fs = InMemoryFileSystem::new(HashMap::from([ - (OsString::from_str("/").unwrap(), InMemoryFile::Directory), + (OsString::from_str("/").unwrap(), InMemoryFile::dir()), ( OsString::from_str("/path").unwrap(), - InMemoryFile::File(vec![]), - ), - ( - OsString::from_str("/path/to").unwrap(), - InMemoryFile::Directory, + InMemoryFile::file(vec![]), ), + (OsString::from_str("/path/to").unwrap(), InMemoryFile::dir()), ])); let err = fs @@ -674,10 +665,10 @@ mod tests { #[tokio::test] async fn copy_should_creates_new_destination_file_if_it_doesnt_exists() { let fs = InMemoryFileSystem::new(HashMap::from([ - (OsString::from_str("/").unwrap(), InMemoryFile::Directory), + (OsString::from_str("/").unwrap(), InMemoryFile::dir()), ( OsString::from_str("/myfile").unwrap(), - InMemoryFile::File("my file content".as_bytes().to_vec()), + InMemoryFile::file("my file content".as_bytes().to_vec()), ), ])); @@ -685,21 +676,21 @@ mod tests { assert_eq!(fs.files.read().await.len(), 3); assert!( - 
matches!(fs.files.read().await.get(&OsString::from_str("/myfilecopy").unwrap()).unwrap(), InMemoryFile::File(content) if content == "my file content".as_bytes()) + matches!(fs.files.read().await.get(&OsString::from_str("/myfilecopy").unwrap()).unwrap(), InMemoryFile::File { mode, contents } if *mode == 0o664 && contents == "my file content".as_bytes()) ); } #[tokio::test] async fn copy_should_updates_the_file_content_of_the_destination_file_if_it_already_exists() { let fs = InMemoryFileSystem::new(HashMap::from([ - (OsString::from_str("/").unwrap(), InMemoryFile::Directory), + (OsString::from_str("/").unwrap(), InMemoryFile::dir()), ( OsString::from_str("/myfile").unwrap(), - InMemoryFile::File("my new file content".as_bytes().to_vec()), + InMemoryFile::file("my new file content".as_bytes().to_vec()), ), ( OsString::from_str("/myfilecopy").unwrap(), - InMemoryFile::File("my file content".as_bytes().to_vec()), + InMemoryFile::file("my file content".as_bytes().to_vec()), ), ])); @@ -707,7 +698,7 @@ mod tests { assert_eq!(fs.files.read().await.len(), 3); assert!( - matches!(fs.files.read().await.get(&OsString::from_str("/myfilecopy").unwrap()).unwrap(), InMemoryFile::File(content) if content == "my new file content".as_bytes()) + matches!(fs.files.read().await.get(&OsString::from_str("/myfilecopy").unwrap()).unwrap(), InMemoryFile::File { mode, contents } if *mode == 0o664 && contents == "my new file content".as_bytes()) ); } @@ -715,7 +706,7 @@ mod tests { async fn copy_should_returns_an_error_if_source_file_doesnt_exists() { let fs = InMemoryFileSystem::new(HashMap::from([( OsString::from_str("/").unwrap(), - InMemoryFile::Directory, + InMemoryFile::dir(), )])); let err = fs.copy("/myfile", "/mfilecopy").await.unwrap_err(); @@ -726,11 +717,8 @@ mod tests { #[tokio::test] async fn copy_should_returns_an_error_if_source_file_is_a_directory() { let fs = InMemoryFileSystem::new(HashMap::from([ - (OsString::from_str("/").unwrap(), InMemoryFile::Directory), - ( - OsString::from_str("/myfile").unwrap(), - InMemoryFile::Directory, - ), + (OsString::from_str("/").unwrap(), InMemoryFile::dir()), + (OsString::from_str("/myfile").unwrap(), InMemoryFile::dir()), ])); let err = fs.copy("/myfile", "/mfilecopy").await.unwrap_err(); @@ -741,14 +729,14 @@ mod tests { #[tokio::test] async fn copy_should_returns_an_error_if_destination_file_is_a_directory() { let fs = InMemoryFileSystem::new(HashMap::from([ - (OsString::from_str("/").unwrap(), InMemoryFile::Directory), + (OsString::from_str("/").unwrap(), InMemoryFile::dir()), ( OsString::from_str("/myfile").unwrap(), - InMemoryFile::File("my file content".as_bytes().to_vec()), + InMemoryFile::file("my file content".as_bytes().to_vec()), ), ( OsString::from_str("/myfilecopy").unwrap(), - InMemoryFile::Directory, + InMemoryFile::dir(), ), ])); @@ -761,10 +749,10 @@ mod tests { async fn copy_should_returns_an_error_if_destination_file_is_new_and_some_ancestor_doesnt_exists( ) { let fs = InMemoryFileSystem::new(HashMap::from([ - (OsString::from_str("/").unwrap(), InMemoryFile::Directory), + (OsString::from_str("/").unwrap(), InMemoryFile::dir()), ( OsString::from_str("/myfile").unwrap(), - InMemoryFile::File("my file content".as_bytes().to_vec()), + InMemoryFile::file("my file content".as_bytes().to_vec()), ), ])); @@ -778,14 +766,14 @@ mod tests { async fn copy_should_returns_an_error_if_destination_file_is_new_and_some_ancestor_is_not_a_directory( ) { let fs = InMemoryFileSystem::new(HashMap::from([ - (OsString::from_str("/").unwrap(), InMemoryFile::Directory), 
+ (OsString::from_str("/").unwrap(), InMemoryFile::dir()), ( OsString::from_str("/myfile").unwrap(), - InMemoryFile::File("my file content".as_bytes().to_vec()), + InMemoryFile::file("my file content".as_bytes().to_vec()), ), ( OsString::from_str("/mypath").unwrap(), - InMemoryFile::File(vec![]), + InMemoryFile::file(vec![]), ), ])); @@ -794,4 +782,54 @@ mod tests { assert_eq!(fs.files.read().await.len(), 3); assert_eq!(err.to_string(), "ancestor \"/mypath\" is not a directory"); } + + #[tokio::test] + async fn set_mode_should_update_the_file_mode_at_path() { + let fs = InMemoryFileSystem::new(HashMap::from([ + (OsString::from_str("/").unwrap(), InMemoryFile::dir()), + ( + OsString::from_str("/myfile").unwrap(), + InMemoryFile::file("my file content".as_bytes().to_vec()), + ), + ])); + assert!( + matches!(fs.files.read().await.get(&OsString::from_str("/myfile").unwrap()).unwrap(), InMemoryFile::File { mode, .. } if *mode == 0o664) + ); + + fs.set_mode("/myfile", 0o400).await.unwrap(); + + assert!( + matches!(fs.files.read().await.get(&OsString::from_str("/myfile").unwrap()).unwrap(), InMemoryFile::File { mode, .. } if *mode == 0o400) + ); + } + + #[tokio::test] + async fn set_mode_should_update_the_directory_mode_at_path() { + let fs = InMemoryFileSystem::new(HashMap::from([ + (OsString::from_str("/").unwrap(), InMemoryFile::dir()), + (OsString::from_str("/mydir").unwrap(), InMemoryFile::dir()), + ])); + assert!( + matches!(fs.files.read().await.get(&OsString::from_str("/mydir").unwrap()).unwrap(), InMemoryFile::Directory { mode } if *mode == 0o775) + ); + + fs.set_mode("/mydir", 0o700).await.unwrap(); + + assert!( + matches!(fs.files.read().await.get(&OsString::from_str("/mydir").unwrap()).unwrap(), InMemoryFile::Directory { mode } if *mode == 0o700) + ); + } + + #[tokio::test] + async fn set_mode_should_returns_an_error_if_file_doesnt_exists() { + let fs = InMemoryFileSystem::new(HashMap::from([( + OsString::from_str("/").unwrap(), + InMemoryFile::dir(), + )])); + // intentionally forget to create file + + let err = fs.set_mode("/myfile", 0o400).await.unwrap_err(); + + assert_eq!(err.to_string(), "file \"/myfile\" not found"); + } } diff --git a/crates/support/src/fs/local.rs b/crates/support/src/fs/local.rs index 179b7c557..a8519cc66 100644 --- a/crates/support/src/fs/local.rs +++ b/crates/support/src/fs/local.rs @@ -1,7 +1,8 @@ -use std::path::Path; +use std::{os::unix::fs::PermissionsExt, path::Path}; use tokio::io::AsyncWriteExt; use async_trait::async_trait; +use uuid::Uuid; use super::{FileSystem, FileSystemError, FileSystemResult}; @@ -64,6 +65,28 @@ impl FileSystem for LocalFileSystem { .and(Ok(())) .map_err(Into::into) } + + async fn set_mode(&self, path: impl AsRef + Send, mode: u32) -> FileSystemResult<()> { + // because we can't create a Permissions struct directly, we create a temporary empty file and retrieve the + // Permissions from it, we then modify its mode and apply it to our file + let temp_file_path = format!( + "{}/{}", + std::env::temp_dir().to_string_lossy(), + Uuid::new_v4() + ); + let temp_file = + std::fs::File::create(temp_file_path).map_err(Into::::into)?; + + let mut permissions = temp_file + .metadata() + .map_err(Into::::into)? 
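// NOTE (editor's aside, not part of the patch): the temporary-file detour above is only
// needed to obtain a `Permissions` value to mutate. If the std unix extension trait is
// acceptable here, `PermissionsExt::from_mode` builds one directly; a minimal sketch of
// that alternative, reusing this function's own `path` and `mode` parameters (untested):
//
//     use std::{fs::Permissions, os::unix::fs::PermissionsExt};
//     let permissions = Permissions::from_mode(mode);
//     tokio::fs::set_permissions(path, permissions).await.map_err(Into::into)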
+ .permissions(); + permissions.set_mode(mode); + + tokio::fs::set_permissions(path, permissions) + .await + .map_err(Into::into) + } } #[cfg(test)] @@ -71,6 +94,9 @@ mod tests { use super::*; use uuid::Uuid; + const FILE_BITS: u32 = 0o100000; + const DIR_BITS: u32 = 0o40000; + fn setup() -> String { let test_dir = format!("/tmp/unit_test_{}", Uuid::new_v4()); std::fs::create_dir(&test_dir).unwrap(); @@ -308,4 +334,57 @@ mod tests { assert_eq!(err.to_string(), "No such file or directory (os error 2)"); teardown(test_dir); } + + #[tokio::test] + async fn set_mode_should_update_the_file_mode_at_path() { + let test_dir = setup(); + let fs = LocalFileSystem::default(); + let path = format!("{test_dir}/myfile"); + std::fs::write(&path, "Test").unwrap(); + assert_eq!( + std::fs::metadata(&path).unwrap().permissions().mode(), + FILE_BITS + 0o664 + ); + + fs.set_mode(&path, 0o400).await.unwrap(); + + assert_eq!( + std::fs::metadata(&path).unwrap().permissions().mode(), + FILE_BITS + 0o400 + ); + teardown(test_dir); + } + + #[tokio::test] + async fn set_mode_should_update_the_directory_mode_at_path() { + let test_dir = setup(); + let fs = LocalFileSystem::default(); + let path = format!("{test_dir}/mydir"); + std::fs::create_dir(&path).unwrap(); + assert_eq!( + std::fs::metadata(&path).unwrap().permissions().mode(), + DIR_BITS + 0o775 + ); + + fs.set_mode(&path, 0o700).await.unwrap(); + + assert_eq!( + std::fs::metadata(&path).unwrap().permissions().mode(), + DIR_BITS + 0o700 + ); + teardown(test_dir); + } + + #[tokio::test] + async fn set_mode_should_bubble_up_error_if_some_happens() { + let test_dir = setup(); + let fs = LocalFileSystem::default(); + let path = format!("{test_dir}/somemissingfile"); + // intentionnally don't create file + + let err = fs.set_mode(&path, 0o400).await.unwrap_err(); + + assert_eq!(err.to_string(), "No such file or directory (os error 2)"); + teardown(test_dir); + } } From 563dd2c347e0c5daf7a106fae72453a937a995e2 Mon Sep 17 00:00:00 2001 From: l0r1s Date: Wed, 6 Sep 2023 22:13:37 +0300 Subject: [PATCH 24/69] feat: added builder for options types used in provider traits methods, moved error next to provider traits --- crates/provider/src/errors.rs | 50 -------------- crates/provider/src/lib.rs | 120 ++++++++++++++++++++++++++-------- 2 files changed, 92 insertions(+), 78 deletions(-) delete mode 100644 crates/provider/src/errors.rs diff --git a/crates/provider/src/errors.rs b/crates/provider/src/errors.rs deleted file mode 100644 index f63a290d0..000000000 --- a/crates/provider/src/errors.rs +++ /dev/null @@ -1,50 +0,0 @@ -//! Zombienet Provider error definitions. - -use support::fs::FileSystemError; - -macro_rules! 
from_error { - ($type:ty, $target:ident, $targetvar:expr) => { - impl From<$type> for $target { - fn from(s: $type) -> Self { - $targetvar(s.into()) - } - } - }; -} - -#[derive(Debug, thiserror::Error)] -#[allow(missing_docs)] -pub enum ProviderError { - #[error("Failed to spawn node '{0}': {1}")] - NodeSpawningFailed(String, anyhow::Error), - - #[error("Invalid network configuration field {0}")] - InvalidConfig(String), - #[error("Can recover node: {0} info, field: {1}")] - MissingNodeInfo(String, String), - #[error("Duplicated node name: {0}")] - DuplicatedNodeName(String), - #[error("Error running cmd: {0}")] - RunCommandError(String), - #[error("Error spawning node: {0}")] - ErrorSpawningNode(String), - #[error("Node die/stale, logs: {0}")] - NodeNotReady(String), - // FSErrors are implemented in the associated type - #[error(transparent)] - FSError(#[from] FileSystemError), - // From serde errors - #[error("Serialization error")] - SerializationError(serde_json::Error), - #[error("IO error: {0}")] - IOError(std::io::Error), - #[error("Invalid script_path: {0}")] - InvalidScriptPath(String), -} - -from_error!( - serde_json::Error, - ProviderError, - ProviderError::SerializationError -); -from_error!(std::io::Error, ProviderError, ProviderError::IOError); diff --git a/crates/provider/src/lib.rs b/crates/provider/src/lib.rs index 4a17faf54..23880d31b 100644 --- a/crates/provider/src/lib.rs +++ b/crates/provider/src/lib.rs @@ -1,16 +1,38 @@ -mod errors; mod native; mod shared; use std::{net::IpAddr, path::PathBuf, process::ExitStatus, sync::Arc, time::Duration}; use async_trait::async_trait; -use tokio::sync::RwLock; -use crate::{ - errors::ProviderError, - shared::types::{FileMap, Port}, -}; +use crate::shared::types::{FileMap, Port}; + +use support::fs::FileSystemError; + +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +pub enum ProviderError { + #[error("Failed to spawn node '{0}': {1}")] + NodeSpawningFailed(String, anyhow::Error), + + #[error("Error running command: {0}")] + RunCommandError(anyhow::Error), + + #[error("Invalid network configuration field {0}")] + InvalidConfig(String), + + #[error("Can recover node: {0} info, field: {1}")] + MissingNodeInfo(String, String), + + #[error("Duplicated node name: {0}")] + DuplicatedNodeName(String), + + #[error(transparent)] + FSError(#[from] FileSystemError), + + #[error("Invalid script path for {0}")] + InvalidScriptPath(String), +} #[derive(Debug, Clone)] pub struct ProviderCapabilities { @@ -42,18 +64,38 @@ pub trait Provider { pub type DynProvider = Arc; +macro_rules! common_options { + () => { + fn args(mut self, args: Vec) -> Self { + self.args = args; + self + } + + fn env(mut self, env: Vec<(String, String)>) -> Self { + self.env = env; + self + } + }; +} + pub struct SpawnNodeOptions { - pub name: String, - pub command: String, - pub args: Vec, - pub env: Vec<(String, String)>, - // Files to inject, `before` we run the provider command. 
- pub files_inject: Vec, - // TODO: keystore logic should live in the orchestrator - pub keystore: String, - // chain_spec_id: String, - // TODO: abstract logic for download and uncompress - pub db_snapshot: String, + name: String, + command: String, + args: Vec, + env: Vec<(String, String)>, +} + +impl SpawnNodeOptions { + fn new(name: String, command: String) -> Self { + Self { + name, + command, + args: vec![], + env: vec![], + } + } + + common_options!(); } pub struct SpawnTempOptions { @@ -65,11 +107,8 @@ pub struct SpawnTempOptions { #[async_trait] pub trait ProviderNamespace { async fn id(&self) -> String; - /// Spawn a long live node/process. async fn spawn_node(&self, options: SpawnNodeOptions) -> Result; - /// Spawn a temporary node, will be shutdown after `get` the desired files or output. async fn spawn_temp(&self, options: SpawnTempOptions) -> Result<(), ProviderError>; - /// Destroy namespace (and inner resources). async fn destroy(&self) -> Result<(), ProviderError>; async fn static_setup(&self) -> Result<(), ProviderError>; } @@ -77,17 +116,42 @@ pub trait ProviderNamespace { pub type DynNamespace = Arc; pub struct RunCommandOptions { - pub args: Vec, - pub is_failure_allowed: bool, + pub(crate) command: String, + pub(crate) args: Vec, + pub(crate) env: Vec<(String, String)>, +} + +impl RunCommandOptions { + fn new(command: String) -> Self { + Self { + command, + args: vec![], + env: vec![], + } + } + + common_options!(); } pub struct RunScriptOptions { - pub identifier: String, - pub script_path: String, - pub args: Vec, + pub(crate) local_script_path: String, + pub(crate) args: Vec, + pub(crate) env: Vec<(String, String)>, +} + +impl RunScriptOptions { + fn new(local_script_path: String) -> Self { + Self { + local_script_path, + args: vec![], + env: vec![], + } + } + + common_options!(); } -type ExecutionResult = Result)>; +type ExecutionResult = Result; #[async_trait] pub trait ProviderNode { @@ -99,7 +163,7 @@ pub trait ProviderNode { async fn logs(&self) -> Result; - async fn dump_logs(&self, dest: PathBuf) -> Result<(), ProviderError>; + async fn dump_logs(&self, local_dest: PathBuf) -> Result<(), ProviderError>; async fn run_command( &self, @@ -119,7 +183,7 @@ pub trait ProviderNode { async fn resume(&self) -> Result<(), ProviderError>; - async fn restart(&mut self, after: Option) -> Result<(), ProviderError>; + async fn restart(&self, after: Option) -> Result<(), ProviderError>; async fn destroy(&self) -> Result<(), ProviderError>; } From 30feb4b193f8137ad5a3d4ee4948dac9c0e42045 Mon Sep 17 00:00:00 2001 From: l0r1s Date: Wed, 6 Sep 2023 22:14:53 +0300 Subject: [PATCH 25/69] feat: added modified implementation of run_script/run_command/copy_from_node in NativeProvider, removed unused comments --- crates/provider/src/native.rs | 461 +++++------------------- crates/provider/src/shared/constants.rs | 2 + 2 files changed, 87 insertions(+), 376 deletions(-) diff --git a/crates/provider/src/native.rs b/crates/provider/src/native.rs index 0473246be..6903ad888 100644 --- a/crates/provider/src/native.rs +++ b/crates/provider/src/native.rs @@ -9,6 +9,7 @@ use std::{ sync::{Arc, Weak}, }; +use anyhow::anyhow; use async_trait::async_trait; use configuration::types::Port; use nix::{ @@ -29,10 +30,10 @@ use tokio::{ use uuid::Uuid; use crate::{ - errors::ProviderError, - shared::constants::{DEFAULT_TMP_DIR, NODE_CONFIG_DIR, NODE_DATA_DIR}, - DynNamespace, DynNode, ExecutionResult, Provider, ProviderCapabilities, ProviderNamespace, - ProviderNode, RunCommandOptions, 
RunScriptOptions, SpawnNodeOptions, SpawnTempOptions, + shared::constants::{DEFAULT_TMP_DIR, NODE_CONFIG_DIR, NODE_DATA_DIR, NODE_SCRIPTS_DIR}, + DynNamespace, DynNode, ExecutionResult, Provider, ProviderCapabilities, ProviderError, + ProviderNamespace, ProviderNode, RunCommandOptions, RunScriptOptions, SpawnNodeOptions, + SpawnTempOptions, }; pub struct NativeProviderOptions @@ -140,6 +141,7 @@ impl ProviderNamespace for Nativ let log_path = format!("{}/{}.log", &base_dir, &options.name); let config_dir = format!("{}{}", &base_dir, NODE_CONFIG_DIR); let data_dir = format!("{}{}", &base_dir, NODE_DATA_DIR); + let scripts_dir = format!("{}{}", &base_dir, NODE_SCRIPTS_DIR); inner.filesystem.create_dir(&base_dir).await.unwrap(); inner.filesystem.create_dir(&config_dir).await.unwrap(); inner.filesystem.create_dir(&data_dir).await.unwrap(); @@ -161,6 +163,8 @@ impl ProviderNamespace for Nativ command: options.command, args: options.args, env: options.env, + base_dir, + scripts_dir, log_path, process, stdout_reading_handle, @@ -219,6 +223,8 @@ struct NativeNodeInner { command: String, args: Vec, env: Vec<(String, String)>, + base_dir: String, + scripts_dir: String, log_path: String, process: Child, stdout_reading_handle: JoinHandle<()>, @@ -228,8 +234,6 @@ struct NativeNodeInner { namespace: WeakNativeNamespace, } -impl NativeNodeInner {} - #[derive(Debug, Clone)] struct NativeNode { inner: Arc>>, @@ -254,37 +258,81 @@ impl ProviderNode for NativeNode Ok(inner.filesystem.read_to_string(&inner.log_path).await?) } - async fn dump_logs(&self, dest: PathBuf) -> Result<(), ProviderError> { + async fn dump_logs(&self, local_dest: PathBuf) -> Result<(), ProviderError> { let logs = self.logs().await?; Ok(self .inner .write() .await .filesystem - .write(dest, logs.as_bytes()) + .write(local_dest, logs.as_bytes()) .await?) 
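// NOTE (editor's aside, not part of the patch): `logs` and `dump_logs` just read the file
// at `log_path`; that file is kept current by the stdout/stderr polling tasks and the
// log-writing task wired up in `create_process_with_log_tasks` earlier in this series.
// A hypothetical caller could look like this (variable names assumed, not from the patch):
//
//     let node = namespace.spawn_node(options).await?;    // DynNode
//     println!("{}", node.logs().await?);                 // whole log as a String
//     node.dump_logs(PathBuf::from("/tmp/alice.log")).await?;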
} async fn run_command( &self, - _options: RunCommandOptions, + options: RunCommandOptions, ) -> Result { - todo!() + let result = Command::new(options.command) + .args(options.args) + .output() + .await + .map_err(|err| ProviderError::RunCommandError(err.into()))?; + + if result.status.success() { + Ok(Ok(String::from_utf8_lossy(&result.stdout).to_string())) + } else { + Ok(Err(( + result.status, + String::from_utf8_lossy(&result.stderr).to_string(), + ))) + } } async fn run_script( &self, - _options: RunScriptOptions, + options: RunScriptOptions, ) -> Result { - todo!() + let inner = self.inner.read().await; + let local_script_path = PathBuf::from(&options.local_script_path); + + if !local_script_path.try_exists().unwrap() { + return Err(ProviderError::RunCommandError(anyhow!("Test"))); + } + + // extract file name and build remote file path + let script_file_name = local_script_path + .file_name() + .map(|file_name| file_name.to_string_lossy().to_string()) + .ok_or(ProviderError::InvalidScriptPath(options.local_script_path))?; + let remote_script_path = format!("{}/{}", inner.scripts_dir, script_file_name); + + // copy and set script's execute permission + inner + .filesystem + .copy(local_script_path, &remote_script_path) + .await?; + inner + .filesystem + .set_mode(&remote_script_path, 0o744) + .await?; + + // execute script + self.run_command(RunCommandOptions::new(remote_script_path).args(options.args)) + .await } async fn copy_file_from_node( &self, - _remote_src: PathBuf, - _local_dest: PathBuf, + remote_src: PathBuf, + local_dest: PathBuf, ) -> Result<(), ProviderError> { - todo!() + let inner = self.inner.read().await; + + let remote_file_path = format!("{}{}", inner.base_dir, remote_src.to_str().unwrap()); + inner.filesystem.copy(remote_file_path, local_dest).await?; + + Ok(()) } async fn pause(&self) -> Result<(), ProviderError> { @@ -307,7 +355,7 @@ impl ProviderNode for NativeNode Ok(()) } - async fn restart(&mut self, after: Option) -> Result<(), ProviderError> { + async fn restart(&self, after: Option) -> Result<(), ProviderError> { if let Some(duration) = after { sleep(duration).await; } @@ -418,7 +466,7 @@ fn create_process_with_log_tasks( let stdout = process.stdout.take().expect("infaillible, stdout is piped"); let stderr = process.stderr.take().expect("Infaillible, stderr is piped"); - // create additonnal long-running tasks for logs + // create additionnal long-running tasks for logs let (stdout_tx, rx) = mpsc::channel(10); let stderr_tx = stdout_tx.clone(); let stdout_reading_handle = create_stream_polling_task(stdout, stdout_tx); @@ -435,372 +483,33 @@ fn create_process_with_log_tasks( #[cfg(test)] mod tests { - #[tokio::test(flavor = "multi_thread", worker_threads = 8)] - async fn it_should_works() { - todo!(); - } -} - -// #[derive(Debug, Clone, PartialEq)] -// pub struct NativeProvider { -// // Namespace of the client (isolation directory) -// namespace: String, -// // TODO: re-iterate, since we are creating the config with the sdk -// // Path where configuration relies, all the `files` are accessed relative to this. 
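// NOTE (editor's aside, not part of the patch): `run_script` above copies the local script
// into the node's scripts directory, marks it 0o744 and re-uses `run_command`; the outer
// `Result` reports provider failures, while the inner `ExecutionResult` carries the
// command's own exit status and stderr. A hypothetical call, using the builder form from
// PATCH 24 (the script name and args are made up):
//
//     match node.run_script(RunScriptOptions::new("./check_state.sh".to_string())
//         .args(vec!["--verbose".to_string()]))
//         .await?
//     {
//         Ok(stdout) => println!("{stdout}"),
//         Err((status, stderr)) => eprintln!("script failed ({status}): {stderr}"),
//     }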
-// // config_path: String, -// // Command to use, e.g "bash" -// command: String, -// // Temporary directory, root directory for the network -// tmp_dir: String, -// remote_dir: String, -// data_dir: String, -// process_map: HashMap, -// filesystem: T, -// } - -// impl NativeProvider { -// /// Zombienet `native` provider allows to run the nodes as a local process in the local environment -// /// params: -// /// namespace: Namespace of the client -// /// config_path: Path where configuration relies -// /// tmp_dir: Temporary directory where files will be placed -// /// filesystem: Filesystem to use (std::fs::FileSystem, mock etc.) -// pub fn new( -// namespace: impl Into, -// // config_path: impl Into, -// tmp_dir: impl Into, -// filesystem: T, -// ) -> Self { -// let tmp_dir = tmp_dir.into(); -// let process_map: HashMap = HashMap::new(); - -// Self { -// namespace: namespace.into(), -// // config_path: config_path.into(), -// remote_dir: format!("{}{}", &tmp_dir, DEFAULT_REMOTE_DIR), -// data_dir: format!("{}{}", &tmp_dir, DEFAULT_DATA_DIR), -// command: "bash".into(), -// tmp_dir, -// process_map, -// filesystem, -// } -// } - -// fn get_process_by_node_name(&self, node_name: &str) -> Result<&Process, ProviderError> { -// self.process_map -// .get(node_name) -// .ok_or(ProviderError::MissingNodeInfo( -// node_name.to_owned(), -// "process".into(), -// )) -// } -// } - -// pub struct Node {} - -// #[async_trait] -// impl Provider for NativeProvider -// where -// T: FileSystem + Send + Sync, -// { -// type Node = Node; - -// fn require_image() -> bool { -// false -// } - -// async fn create_namespace(&mut self) -> Result<(), ProviderError> { -// // Native provider don't have the `namespace` isolation. -// // but we create the `remoteDir` to place files -// self.filesystem -// .create_dir(&self.remote_dir) -// .await -// .map_err(|e| ProviderError::FSError(Box::new(e)))?; -// Ok(()) -// } - -// async fn destroy_namespace(&self) -> Result<(), ProviderError> { -// // get pids to kill all related process -// let pids: Vec = self -// .process_map -// .iter() -// .filter(|(_, process)| process.pid != 0) -// .map(|(_, process)| process.pid.to_string()) -// .collect(); - -// // TODO: use a crate (or even std) to get this info instead of relying on bash -// let result = self -// .run_command( -// [format!( -// "ps ax| awk '{{print $1}}'| grep -E '{}'", -// pids.join("|") -// )] -// .to_vec(), -// NativeRunCommandOptions { -// is_failure_allowed: true, -// }, -// ) -// .await -// .unwrap(); - -// if result.exit_code.code().unwrap() == 0 { -// let pids_to_kill: Vec = result -// .std_out -// .split(|c| c == '\n') -// .map(|s| s.into()) -// .collect(); - -// let _ = self -// .run_command( -// [format!("kill -9 {}", pids_to_kill.join(" "))].to_vec(), -// NativeRunCommandOptions { -// is_failure_allowed: true, -// }, -// ) -// .await?; -// } -// Ok(()) -// } - -// async fn static_setup(&mut self) -> Result<(), ProviderError> { -// Ok(()) -// } - -// async fn spawn_node( -// &self, -// _node: Node, -// _files_inject: Vec, -// _keystore: &str, -// _db_snapshot: &str, -// ) -> Result<(), ProviderError> { -// // TODO: We should implement the logic to go from the `Node` (nodeSpec) -// // to the running node, since we will no expose anymore the underline `Def`. -// // We can follow the logic of the spawn_from_def later. 
- -// Ok(()) -// } - -// async fn spawn_temp( -// &self, -// _node: Node, -// _files_inject: Vec, -// _files_get: Vec, -// ) -> Result<(), ProviderError> { -// // TODO: We should implement the logic to go from the `Node` (nodeSpec) -// // to the running node, since we will no expose anymore the underline `Def`. -// // We can follow the logic of the spawn_from_def later. - -// Ok(()) -// } - -// async fn copy_file_from_node( -// &mut self, -// pod_file_path: PathBuf, -// local_file_path: PathBuf, -// ) -> Result<(), ProviderError> { -// // log::debug!("cp {} {}", pod_file_path.to_string_lossy(), local_file_path.to_string_lossy()); + use std::os::unix::prelude::PermissionsExt; -// self.filesystem -// .copy(&pod_file_path, &local_file_path) -// .await -// .map_err(|e| ProviderError::FSError(Box::new(e)))?; -// Ok(()) -// } - -// async fn run_command( -// &self, -// mut args: Vec, -// opts: NativeRunCommandOptions, -// ) -> Result { -// if let Some(arg) = args.get(0) { -// if arg == "bash" { -// args.remove(0); -// } -// } + use super::*; -// // -c is already used in the process::Command to execute the command thus -// // needs to be removed in case provided -// if let Some(arg) = args.get(0) { -// if arg == "-c" { -// args.remove(0); -// } -// } - -// let result = Command::new(&self.command) -// .arg("-c") -// .arg(args.join(" ")) -// .output() -// .await?; - -// if !result.status.success() && !opts.is_failure_allowed { -// return Err(ProviderError::RunCommandError(args.join(" "))); -// } else { -// // cmd success or we allow to fail -// // in either case we return Ok -// Ok(RunCommandResponse { -// exit_code: result.status, -// std_out: String::from_utf8_lossy(&result.stdout).into(), -// std_err: if result.stderr.is_empty() { -// None -// } else { -// Some(String::from_utf8_lossy(&result.stderr).into()) -// }, -// }) -// } -// } - -// // TODO: Add test -// async fn run_script( -// &mut self, -// identifier: String, -// script_path: String, -// args: Vec, -// ) -> Result { -// let script_filename = Path::new(&script_path) -// .file_name() -// .ok_or(ProviderError::InvalidScriptPath(script_path.clone()))? 
-// .to_str() -// .ok_or(ProviderError::InvalidScriptPath(script_path.clone()))?; -// let script_path_in_pod = format!("{}/{}/{}", self.tmp_dir, identifier, script_filename); - -// // upload the script -// self.filesystem -// .copy(&script_path, &script_path_in_pod) -// .await -// .map_err(|e| ProviderError::FSError(Box::new(e)))?; - -// // set as executable -// self.run_command( -// vec![ -// "chmod".to_owned(), -// "+x".to_owned(), -// script_path_in_pod.clone(), -// ], -// NativeRunCommandOptions::default(), -// ) -// .await?; - -// let command = format!( -// "cd {}/{} && {} {}", -// self.tmp_dir, -// identifier, -// script_path_in_pod, -// args.join(" ") -// ); -// let result = self -// .run_command(vec![command], NativeRunCommandOptions::default()) -// .await?; - -// Ok(RunCommandResponse { -// exit_code: result.exit_code, -// std_out: result.std_out, -// std_err: result.std_err, -// }) -// } - -// // TODO: Add test -// async fn get_node_logs(&mut self, name: &str) -> Result { -// // For now in native let's just return all the logs -// let result = self -// .filesystem -// .read_file(&format!("{}/{}.log", self.tmp_dir, name)) -// .await -// .map_err(|e| ProviderError::FSError(Box::new(e)))?; -// return Ok(result); -// } - -// async fn dump_logs(&mut self, path: String, pod_name: String) -> Result<(), ProviderError> { -// let dst_file_name: String = format!("{}/logs/{}.log", path, pod_name); -// let _ = self -// .filesystem -// .copy( -// &format!("{}/{}.log", self.tmp_dir, pod_name), -// &dst_file_name, -// ) -// .await; -// Ok(()) -// } + #[tokio::test(flavor = "multi_thread", worker_threads = 8)] + async fn it_should_works() { + let file = std::fs::File::create(format!( + "{}/{}", + std::env::temp_dir().to_string_lossy(), + Uuid::new_v4() + )) + .unwrap(); -// async fn get_logs_command(&self, name: &str) -> Result { -// Ok(format!("tail -f {}/{}.log", self.tmp_dir, name)) -// } + let metadata = file.metadata().unwrap(); -// // TODO: Add test -// async fn restart( -// &mut self, -// node_name: &str, -// after_secs: Option, -// ) -> Result { -// let process = self.get_process_by_node_name(node_name)?; + let mut permissions = metadata.permissions(); + permissions.set_mode(0o744); -// let _resp = self -// .run_command( -// vec![format!("kill -9 {:?}", process.pid)], -// NativeRunCommandOptions { -// is_failure_allowed: true, -// }, -// ) -// .await?; - -// // log::debug!("{:?}", &resp); - -// if let Some(secs) = after_secs { -// sleep(Duration::from_secs(secs.into())).await; -// } - -// let process: &mut Process = -// self.process_map -// .get_mut(node_name) -// .ok_or(ProviderError::MissingNodeInfo( -// node_name.to_owned(), -// "process".into(), -// ))?; - -// let mapped_env: HashMap<&str, &str> = process -// .env -// .iter() -// .map(|env_var| (env_var.name.as_str(), env_var.value.as_str())) -// .collect(); - -// let child_process: Child = Command::new(self.command.clone()) -// .arg("-c") -// .arg(process.command.clone()) -// .envs(&mapped_env) -// .spawn() -// .map_err(|e| ProviderError::ErrorSpawningNode(e.to_string()))?; - -// process.pid = child_process.id().ok_or(ProviderError::ErrorSpawningNode( -// "Failed to get pid".to_string(), -// ))?; - -// Ok(true) -// } - -// async fn get_node_info(&self, node_name: &str) -> Result<(IpAddr, Port), ProviderError> { -// let host_port = self.get_port_mapping(P2P_PORT, node_name).await?; -// Ok((LOCALHOST, host_port)) -// } + tokio::fs::set_permissions("/tmp/myscript.sh", permissions) + .await + .unwrap(); -// async fn get_node_ip(&self, 
_node_name: &str) -> Result { -// Ok(LOCALHOST) -// } + // let result = Command::new("/tmp/myscript.sh").output().await.unwrap(); -// async fn get_port_mapping(&self, port: Port, node_name: &str) -> Result { -// match self.process_map.get(node_name) { -// Some(process) => match process.port_mapping.get(&port) { -// Some(port) => Ok(*port), -// None => Err(ProviderError::MissingNodeInfo( -// node_name.to_owned(), -// "port".into(), -// )), -// }, -// None => Err(ProviderError::MissingNodeInfo( -// node_name.to_owned(), -// "process".into(), -// )), -// } -// } -// } + // println!("{:?}", result); + } +} // #[cfg(test)] // mod tests { diff --git a/crates/provider/src/shared/constants.rs b/crates/provider/src/shared/constants.rs index 2a6e3f23e..8b619f68b 100644 --- a/crates/provider/src/shared/constants.rs +++ b/crates/provider/src/shared/constants.rs @@ -6,6 +6,8 @@ pub const DEFAULT_TMP_DIR: &str = "/tmp"; pub const NODE_CONFIG_DIR: &str = "/cfg"; /// Directory for node configuration pub const NODE_DATA_DIR: &str = "/data"; +/// Directory for node scripts +pub const NODE_SCRIPTS_DIR: &str = "/scripts"; /// Localhost ip pub const _LOCALHOST: IpAddr = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); /// The port substrate listens for p2p connections on From d143ed42dce47612136f2df758df48c415e48384 Mon Sep 17 00:00:00 2001 From: l0r1s Date: Sat, 9 Sep 2023 17:40:15 +0300 Subject: [PATCH 26/69] feat: moved filesystem, capabilities and tmp_dir out of NativeProviderInner --- crates/provider/src/lib.rs | 2 +- crates/provider/src/native.rs | 33 ++++++++++++++----------- crates/provider/src/shared/constants.rs | 2 -- 3 files changed, 19 insertions(+), 18 deletions(-) diff --git a/crates/provider/src/lib.rs b/crates/provider/src/lib.rs index 23880d31b..0c3b7c1b9 100644 --- a/crates/provider/src/lib.rs +++ b/crates/provider/src/lib.rs @@ -57,7 +57,7 @@ impl Default for CreateNamespaceOptions { #[async_trait] pub trait Provider { - async fn capabilities(&self) -> ProviderCapabilities; + fn capabilities(&self) -> ProviderCapabilities; async fn create_namespace(&self) -> Result; // TODO(team): Do we need at this point to handle cleanner/pod-monitor? 
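// NOTE (editor's aside, not part of the patch): with `capabilities` made synchronous here,
// a caller can check the provider before reaching any await point; a hypothetical sketch
// against the NativeProvider changes in the next hunk (variable names assumed):
//
//     let provider = NativeProvider::new(NativeProviderOptions { filesystem, tmp_dir: None });
//     if !provider.capabilities().requires_image {
//         let namespace = provider.create_namespace().await?;
//     }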
} diff --git a/crates/provider/src/native.rs b/crates/provider/src/native.rs index 6903ad888..5cec46f9a 100644 --- a/crates/provider/src/native.rs +++ b/crates/provider/src/native.rs @@ -30,7 +30,7 @@ use tokio::{ use uuid::Uuid; use crate::{ - shared::constants::{DEFAULT_TMP_DIR, NODE_CONFIG_DIR, NODE_DATA_DIR, NODE_SCRIPTS_DIR}, + shared::constants::{NODE_CONFIG_DIR, NODE_DATA_DIR, NODE_SCRIPTS_DIR}, DynNamespace, DynNode, ExecutionResult, Provider, ProviderCapabilities, ProviderError, ProviderNamespace, ProviderNode, RunCommandOptions, RunScriptOptions, SpawnNodeOptions, SpawnTempOptions, @@ -41,19 +41,19 @@ where FS: FileSystem + Send + Sync, { filesystem: FS, - tmp_dir: Option, + tmp_dir: Option, } #[derive(Debug)] struct NativeProviderInner { - capabilities: ProviderCapabilities, - tmp_dir: String, namespaces: HashMap>, - filesystem: FS, } #[derive(Debug, Clone)] pub struct NativeProvider { + capabilities: ProviderCapabilities, + tmp_dir: PathBuf, + filesystem: FS, inner: Arc>>, } @@ -65,13 +65,13 @@ struct WeakNativeProvider { impl NativeProvider { pub fn new(options: NativeProviderOptions) -> Self { NativeProvider { + capabilities: ProviderCapabilities { + requires_image: false, + }, + tmp_dir: options.tmp_dir.unwrap_or(std::env::temp_dir()), + filesystem: options.filesystem, inner: Arc::new(RwLock::new(NativeProviderInner { - capabilities: ProviderCapabilities { - requires_image: false, - }, - tmp_dir: options.tmp_dir.unwrap_or(DEFAULT_TMP_DIR.to_string()), namespaces: Default::default(), - filesystem: options.filesystem, })), } } @@ -79,23 +79,23 @@ impl NativeProvider { #[async_trait] impl Provider for NativeProvider { - async fn capabilities(&self) -> ProviderCapabilities { - self.inner.read().await.capabilities.clone() + fn capabilities(&self) -> ProviderCapabilities { + self.capabilities.clone() } async fn create_namespace(&self) -> Result { let id = format!("zombie_{}", Uuid::new_v4()); let mut inner = self.inner.write().await; - let base_dir = format!("{}/{}", inner.tmp_dir, &id); - inner.filesystem.create_dir(&base_dir).await.unwrap(); + let base_dir = format!("{}/{}", self.tmp_dir.to_string_lossy(), &id); + self.filesystem.create_dir(&base_dir).await.unwrap(); let namespace = NativeNamespace { inner: Arc::new(RwLock::new(NativeNamespaceInner { id: id.clone(), base_dir, nodes: Default::default(), - filesystem: inner.filesystem.clone(), + filesystem: self.filesystem.clone(), provider: WeakNativeProvider { inner: Arc::downgrade(&self.inner), }, @@ -487,6 +487,9 @@ mod tests { use super::*; + // #[tokio::test(flavor = "multi_thread", worker_threads = 8)] + // async fn + #[tokio::test(flavor = "multi_thread", worker_threads = 8)] async fn it_should_works() { let file = std::fs::File::create(format!( diff --git a/crates/provider/src/shared/constants.rs b/crates/provider/src/shared/constants.rs index 8b619f68b..e76f5353d 100644 --- a/crates/provider/src/shared/constants.rs +++ b/crates/provider/src/shared/constants.rs @@ -1,7 +1,5 @@ use std::net::{IpAddr, Ipv4Addr}; -/// Default temporary directory -pub const DEFAULT_TMP_DIR: &str = "/tmp"; /// Directory for node configuration pub const NODE_CONFIG_DIR: &str = "/cfg"; /// Directory for node configuration From 89c188282d2c0ced068027d50c2e3a891f18a1f3 Mon Sep 17 00:00:00 2001 From: l0r1s Date: Tue, 12 Sep 2023 19:02:29 +0300 Subject: [PATCH 27/69] feat: make constructors and fields public for testing on InMemoryFileSystem --- crates/support/src/fs/in_memory.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git 
a/crates/support/src/fs/in_memory.rs b/crates/support/src/fs/in_memory.rs index 6a43e3d95..a0fd21915 100644 --- a/crates/support/src/fs/in_memory.rs +++ b/crates/support/src/fs/in_memory.rs @@ -12,21 +12,21 @@ pub enum InMemoryFile { } impl InMemoryFile { - fn file(contents: Vec) -> Self { + pub fn file(contents: Vec) -> Self { Self::File { mode: 0o664, contents, } } - fn dir() -> Self { + pub fn dir() -> Self { Self::Directory { mode: 0o775 } } } #[derive(Default, Debug, Clone)] pub struct InMemoryFileSystem { - files: Arc>>, + pub files: Arc>>, } impl InMemoryFileSystem { From 8634df70df67baf0eb45968c38485ba316ac22b8 Mon Sep 17 00:00:00 2001 From: l0r1s Date: Tue, 12 Sep 2023 21:35:47 +0300 Subject: [PATCH 28/69] feat: removed unused types atm and added TransferedFile --- crates/provider/src/shared/types.rs | 185 +--------------------------- 1 file changed, 3 insertions(+), 182 deletions(-) diff --git a/crates/provider/src/shared/types.rs b/crates/provider/src/shared/types.rs index 6817678b8..440b24017 100644 --- a/crates/provider/src/shared/types.rs +++ b/crates/provider/src/shared/types.rs @@ -1,185 +1,6 @@ -use std::{ - collections::HashMap, os::unix::process::ExitStatusExt, path::PathBuf, process::ExitStatus, -}; - -use serde::{Deserialize, Serialize}; - pub type Port = u16; -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub enum ZombieRole { - Temp, - Node, - BootNode, - Collator, - CumulusCollator, - Authority, - FullNode, -} - -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub enum PortName { - Prometheus, - Rpc, - RpcWs, - P2P, -} - -// TODO: remove when we implement k8s/podman -#[allow(dead_code)] -#[derive(Debug, Clone, PartialEq)] -enum ImagePullPolicy { - IfNotPresent, - Never, - Always, -} - -#[derive(Debug, Clone, PartialEq)] -pub struct FileMap { - pub local_file_path: PathBuf, - pub remote_file_path: PathBuf, - pub is_unique: bool, -} - -#[derive(Debug, Clone, PartialEq)] -pub struct RunCommandResponse { - pub exit_code: ExitStatus, - pub std_out: String, - pub std_err: Option, -} - -impl RunCommandResponse { - pub fn default() -> Self { - Self { - exit_code: ExitStatus::from_raw(0), - std_out: String::default(), - std_err: None, - } - } -} - -#[derive(Debug, Default, Clone, PartialEq)] -pub struct NativeRunCommandOptions { - pub is_failure_allowed: bool, -} - -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct NamespaceLabels { - job_id: String, - project_name: String, -} - -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct NamespaceMetadata { - pub name: String, - pub labels: Option, -} - -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct NamespaceDef { - pub api_version: String, - pub kind: String, - pub metadata: NamespaceMetadata, -} - -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct PodLabels { - pub zombie_role: ZombieRole, - pub app: String, - pub zombie_ns: String, - pub name: String, - pub instance: String, -} - -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct PodMetadata { - pub name: String, - pub namespace: String, - pub labels: PodLabels, -} - -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct PodSpec { - pub cfg_path: String, - pub data_path: String, - pub ports: Vec, - pub command: Vec, - pub env: ProcessEnvironment, -} - -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct PodDef { - pub metadata: PodMetadata, - pub spec: PodSpec, -} - -#[derive(Debug, Clone, PartialEq, Serialize, 
Deserialize)] -pub struct EnvVar { - pub(crate) name: String, - pub(crate) value: String, -} - -impl From<(&str, &str)> for EnvVar { - fn from(value: (&str, &str)) -> Self { - Self { - name: value.0.into(), - value: value.1.into(), - } - } -} - -type ProcessEnvironment = Vec; - -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct PortInfo { - pub container_port: Port, - pub name: PortName, - pub flag: String, - pub host_port: Port, -} - -#[derive(Debug, Clone, PartialEq)] -struct Volume { - name: String, - fs_type: String, - mount_path: String, -} - -#[derive(Debug, Clone, PartialEq)] -pub struct Settings { - volumes: Option>, - bootnode: Option, - bootnode_domain: Option, - timeout: u16, - node_spawn_timeout: u16, - grafana: Option, - telemetry: Option, - prometheus: Option, - /// agent or collator - jaeger_agent: Option, - /// collator query url - tracing_collator_url: Option, - /// only used by k8s provider and if not set the `url` - tracing_collator_service_name: Option, - /// only used by k8s provider and if not set the `url` - tracing_collator_service_namespace: Option, - /// only used by k8s provider and if not set the `url` - tracing_collator_service_port: Option, - enable_tracing: Option, - provider: String, - polkadot_introspector: Option, - /// only used in k8s at the moment, spawn a backchannel instance - backchannel: Option, - image_pull_policy: ImagePullPolicy, - /// ip used for expose local services (rpc/metrics/monitors) - local_ip: Option, -} - -#[derive(Debug, Clone, Serialize, PartialEq)] -pub struct Process { - pub pid: u32, - pub logs: String, - pub port_mapping: HashMap, - pub command: String, - pub env: ProcessEnvironment, +pub struct TransferedFile { + pub local_path: String, + pub remote_path: String, } From c8f4ce894aed51c7ea1381cb379c1213bf59752f Mon Sep 17 00:00:00 2001 From: l0r1s Date: Tue, 12 Sep 2023 21:36:36 +0300 Subject: [PATCH 29/69] feat: updated types for Provider methods output, updated non needed async methods --- crates/provider/src/lib.rs | 98 ++++++++++++-------------------------- 1 file changed, 31 insertions(+), 67 deletions(-) diff --git a/crates/provider/src/lib.rs b/crates/provider/src/lib.rs index 0c3b7c1b9..9238a8eb1 100644 --- a/crates/provider/src/lib.rs +++ b/crates/provider/src/lib.rs @@ -4,8 +4,9 @@ mod shared; use std::{net::IpAddr, path::PathBuf, process::ExitStatus, sync::Arc, time::Duration}; use async_trait::async_trait; +use shared::types::TransferedFile; -use crate::shared::types::{FileMap, Port}; +use crate::shared::types::Port; use support::fs::FileSystemError; @@ -32,6 +33,9 @@ pub enum ProviderError { #[error("Invalid script path for {0}")] InvalidScriptPath(String), + + #[error("File generation failed: {0}")] + FileGenerationFailed(anyhow::Error), } #[derive(Debug, Clone)] @@ -64,98 +68,58 @@ pub trait Provider { pub type DynProvider = Arc; -macro_rules! 
common_options { - () => { - fn args(mut self, args: Vec) -> Self { - self.args = args; - self - } - - fn env(mut self, env: Vec<(String, String)>) -> Self { - self.env = env; - self - } - }; -} - pub struct SpawnNodeOptions { - name: String, - command: String, - args: Vec, - env: Vec<(String, String)>, + pub name: String, + pub command: String, + pub args: Vec, + pub env: Vec<(String, String)>, + pub injected_files: Vec, } -impl SpawnNodeOptions { - fn new(name: String, command: String) -> Self { - Self { - name, - command, - args: vec![], - env: vec![], - } - } - - common_options!(); +pub struct GenerateFileCommand { + pub command: String, + pub args: Vec, + pub env: Vec<(String, String)>, + pub local_output_path: String, } -pub struct SpawnTempOptions { - pub node: (), - pub injected_files: Vec, - pub files_to_retrieve: Vec, +pub struct GenerateFilesOptions { + pub commands: Vec, + pub injected_files: Vec, } #[async_trait] pub trait ProviderNamespace { - async fn id(&self) -> String; + fn id(&self) -> String; + async fn spawn_node(&self, options: SpawnNodeOptions) -> Result; - async fn spawn_temp(&self, options: SpawnTempOptions) -> Result<(), ProviderError>; + + async fn generate_files(&self, options: GenerateFilesOptions) -> Result<(), ProviderError>; + async fn destroy(&self) -> Result<(), ProviderError>; + async fn static_setup(&self) -> Result<(), ProviderError>; } pub type DynNamespace = Arc; pub struct RunCommandOptions { - pub(crate) command: String, - pub(crate) args: Vec, - pub(crate) env: Vec<(String, String)>, -} - -impl RunCommandOptions { - fn new(command: String) -> Self { - Self { - command, - args: vec![], - env: vec![], - } - } - - common_options!(); + pub command: String, + pub args: Vec, + pub env: Vec<(String, String)>, } pub struct RunScriptOptions { - pub(crate) local_script_path: String, - pub(crate) args: Vec, - pub(crate) env: Vec<(String, String)>, -} - -impl RunScriptOptions { - fn new(local_script_path: String) -> Self { - Self { - local_script_path, - args: vec![], - env: vec![], - } - } - - common_options!(); + pub local_script_path: String, + pub args: Vec, + pub env: Vec<(String, String)>, } type ExecutionResult = Result; #[async_trait] pub trait ProviderNode { - async fn name(&self) -> String; + fn name(&self) -> String; async fn endpoint(&self) -> Result<(IpAddr, Port), ProviderError>; From 31869b98ae3149fd7a15445269988bc5ef9dd0c0 Mon Sep 17 00:00:00 2001 From: l0r1s Date: Tue, 12 Sep 2023 21:39:20 +0300 Subject: [PATCH 30/69] feat: added generate_files implementation on namespace using temporary nodes, moved some non mutable fields out of inners --- crates/provider/src/native.rs | 306 +++++++++++++++++++++------------- 1 file changed, 189 insertions(+), 117 deletions(-) diff --git a/crates/provider/src/native.rs b/crates/provider/src/native.rs index 5cec46f9a..2e5b82203 100644 --- a/crates/provider/src/native.rs +++ b/crates/provider/src/native.rs @@ -31,24 +31,11 @@ use uuid::Uuid; use crate::{ shared::constants::{NODE_CONFIG_DIR, NODE_DATA_DIR, NODE_SCRIPTS_DIR}, - DynNamespace, DynNode, ExecutionResult, Provider, ProviderCapabilities, ProviderError, - ProviderNamespace, ProviderNode, RunCommandOptions, RunScriptOptions, SpawnNodeOptions, - SpawnTempOptions, + DynNamespace, DynNode, ExecutionResult, GenerateFileCommand, GenerateFilesOptions, Provider, + ProviderCapabilities, ProviderError, ProviderNamespace, ProviderNode, RunCommandOptions, + RunScriptOptions, SpawnNodeOptions, }; -pub struct NativeProviderOptions -where - FS: FileSystem + Send + 
Sync, -{ - filesystem: FS, - tmp_dir: Option, -} - -#[derive(Debug)] -struct NativeProviderInner { - namespaces: HashMap>, -} - #[derive(Debug, Clone)] pub struct NativeProvider { capabilities: ProviderCapabilities, @@ -57,24 +44,34 @@ pub struct NativeProvider { inner: Arc>>, } +#[derive(Debug)] +struct NativeProviderInner { + namespaces: HashMap>, +} + #[derive(Debug, Clone)] struct WeakNativeProvider { inner: Weak>>, } impl NativeProvider { - pub fn new(options: NativeProviderOptions) -> Self { + pub fn new(filesystem: FS) -> Self { NativeProvider { capabilities: ProviderCapabilities { requires_image: false, }, - tmp_dir: options.tmp_dir.unwrap_or(std::env::temp_dir()), - filesystem: options.filesystem, + tmp_dir: std::env::temp_dir(), + filesystem, inner: Arc::new(RwLock::new(NativeProviderInner { namespaces: Default::default(), })), } } + + pub fn tmp_dir(mut self, tmp_dir: impl Into) -> Self { + self.tmp_dir = tmp_dir.into(); + self + } } #[async_trait] @@ -91,14 +88,14 @@ impl Provider for NativeProvider self.filesystem.create_dir(&base_dir).await.unwrap(); let namespace = NativeNamespace { + id: id.clone(), + base_dir, + filesystem: self.filesystem.clone(), + provider: WeakNativeProvider { + inner: Arc::downgrade(&self.inner), + }, inner: Arc::new(RwLock::new(NativeNamespaceInner { - id: id.clone(), - base_dir, nodes: Default::default(), - filesystem: self.filesystem.clone(), - provider: WeakNativeProvider { - inner: Arc::downgrade(&self.inner), - }, })), }; @@ -108,18 +105,18 @@ impl Provider for NativeProvider } } -#[derive(Debug)] -struct NativeNamespaceInner { +#[derive(Debug, Clone)] +pub struct NativeNamespace { id: String, base_dir: String, - nodes: HashMap>, + inner: Arc>>, filesystem: FS, provider: WeakNativeProvider, } -#[derive(Debug, Clone)] -pub struct NativeNamespace { - inner: Arc>>, +#[derive(Debug)] +struct NativeNamespaceInner { + nodes: HashMap>, } #[derive(Debug, Clone)] @@ -129,22 +126,22 @@ struct WeakNativeNamespace { #[async_trait] impl ProviderNamespace for NativeNamespace { - async fn id(&self) -> String { - self.inner.read().await.id.clone() + fn id(&self) -> String { + self.id.clone() } async fn spawn_node(&self, options: SpawnNodeOptions) -> Result { let mut inner = self.inner.write().await; // create node directories and filepaths - let base_dir = format!("{}/{}", &inner.base_dir, &options.name); + let base_dir = format!("{}/{}", &self.base_dir, &options.name); let log_path = format!("{}/{}.log", &base_dir, &options.name); let config_dir = format!("{}{}", &base_dir, NODE_CONFIG_DIR); let data_dir = format!("{}{}", &base_dir, NODE_DATA_DIR); let scripts_dir = format!("{}{}", &base_dir, NODE_SCRIPTS_DIR); - inner.filesystem.create_dir(&base_dir).await.unwrap(); - inner.filesystem.create_dir(&config_dir).await.unwrap(); - inner.filesystem.create_dir(&data_dir).await.unwrap(); + self.filesystem.create_dir(&base_dir).await.unwrap(); + self.filesystem.create_dir(&config_dir).await.unwrap(); + self.filesystem.create_dir(&data_dir).await.unwrap(); let (process, stdout_reading_handle, stderr_reading_handle, log_writing_handle) = create_process_with_log_tasks( @@ -153,27 +150,27 @@ impl ProviderNamespace for Nativ &options.args, &options.env, &log_path, - inner.filesystem.clone(), + self.filesystem.clone(), )?; // create node structure holding state let node = NativeNode { + name: options.name.clone(), + command: options.command, + args: options.args, + env: options.env, + base_dir, + scripts_dir, + log_path, + filesystem: self.filesystem.clone(), + namespace: 
WeakNativeNamespace { + inner: Arc::downgrade(&self.inner), + }, inner: Arc::new(RwLock::new(NativeNodeInner { - name: options.name.clone(), - command: options.command, - args: options.args, - env: options.env, - base_dir, - scripts_dir, - log_path, process, stdout_reading_handle, stderr_reading_handle, log_writing_handle, - filesystem: inner.filesystem.clone(), - namespace: WeakNativeNamespace { - inner: Arc::downgrade(&self.inner), - }, })), }; @@ -183,8 +180,43 @@ impl ProviderNamespace for Nativ Ok(Arc::new(node)) } - async fn spawn_temp(&self, _options: SpawnTempOptions) -> Result<(), ProviderError> { - todo!() + async fn generate_files(&self, options: GenerateFilesOptions) -> Result<(), ProviderError> { + // we spawn a node doing nothing but looping so we can execute our commands + let temp_node = self + .spawn_node(SpawnNodeOptions { + name: format!("temp_{}", Uuid::new_v4()), + command: "bash".to_string(), + args: vec!["-c".to_string(), "while :; do sleep 1; done".to_string()], + env: vec![], + injected_files: options.injected_files, + }) + .await?; + + for GenerateFileCommand { + command, + args, + env, + local_output_path, + } in options.commands + { + match temp_node + .run_command(RunCommandOptions { command, args, env }) + .await + .map_err(|err| ProviderError::FileGenerationFailed(err.into()))? + { + Ok(contents) => self + .filesystem + .write( + format!("{}/{}", self.base_dir, local_output_path), + contents, + ) + .await + .map_err(|err| ProviderError::FileGenerationFailed(err.into()))?, + Err((_, msg)) => Err(ProviderError::FileGenerationFailed(anyhow!("{msg}")))?, + }; + } + + temp_node.destroy().await } async fn static_setup(&self) -> Result<(), ProviderError> { @@ -208,17 +240,16 @@ impl ProviderNamespace for Nativ } // remove namespace from provider - let inner = self.inner.write().await; - if let Some(provider) = inner.provider.inner.upgrade() { - provider.write().await.namespaces.remove(&inner.id); + if let Some(provider) = self.provider.inner.upgrade() { + provider.write().await.namespaces.remove(&self.id); } Ok(()) } } -#[derive(Debug)] -struct NativeNodeInner { +#[derive(Debug, Clone)] +struct NativeNode { name: String, command: String, args: Vec, @@ -226,23 +257,23 @@ struct NativeNodeInner { base_dir: String, scripts_dir: String, log_path: String, - process: Child, - stdout_reading_handle: JoinHandle<()>, - stderr_reading_handle: JoinHandle<()>, - log_writing_handle: JoinHandle<()>, + inner: Arc>, filesystem: FS, namespace: WeakNativeNamespace, } -#[derive(Debug, Clone)] -struct NativeNode { - inner: Arc>>, +#[derive(Debug)] +struct NativeNodeInner { + process: Child, + stdout_reading_handle: JoinHandle<()>, + stderr_reading_handle: JoinHandle<()>, + log_writing_handle: JoinHandle<()>, } #[async_trait] impl ProviderNode for NativeNode { - async fn name(&self) -> String { - self.inner.read().await.name.clone() + fn name(&self) -> String { + self.name.clone() } async fn endpoint(&self) -> Result<(IpAddr, Port), ProviderError> { @@ -254,19 +285,12 @@ impl ProviderNode for NativeNode } async fn logs(&self) -> Result { - let inner = self.inner.read().await; - Ok(inner.filesystem.read_to_string(&inner.log_path).await?) + Ok(self.filesystem.read_to_string(&self.log_path).await?) } async fn dump_logs(&self, local_dest: PathBuf) -> Result<(), ProviderError> { let logs = self.logs().await?; - Ok(self - .inner - .write() - .await - .filesystem - .write(local_dest, logs.as_bytes()) - .await?) + Ok(self.filesystem.write(local_dest, logs.as_bytes()).await?) 
} async fn run_command( @@ -293,7 +317,6 @@ impl ProviderNode for NativeNode &self, options: RunScriptOptions, ) -> Result { - let inner = self.inner.read().await; let local_script_path = PathBuf::from(&options.local_script_path); if !local_script_path.try_exists().unwrap() { @@ -305,21 +328,21 @@ impl ProviderNode for NativeNode .file_name() .map(|file_name| file_name.to_string_lossy().to_string()) .ok_or(ProviderError::InvalidScriptPath(options.local_script_path))?; - let remote_script_path = format!("{}/{}", inner.scripts_dir, script_file_name); + let remote_script_path = format!("{}/{}", self.scripts_dir, script_file_name); // copy and set script's execute permission - inner - .filesystem + self.filesystem .copy(local_script_path, &remote_script_path) .await?; - inner - .filesystem - .set_mode(&remote_script_path, 0o744) - .await?; + self.filesystem.set_mode(&remote_script_path, 0o744).await?; // execute script - self.run_command(RunCommandOptions::new(remote_script_path).args(options.args)) - .await + self.run_command(RunCommandOptions { + command: remote_script_path, + args: options.args, + env: options.env, + }) + .await } async fn copy_file_from_node( @@ -327,10 +350,8 @@ impl ProviderNode for NativeNode remote_src: PathBuf, local_dest: PathBuf, ) -> Result<(), ProviderError> { - let inner = self.inner.read().await; - - let remote_file_path = format!("{}{}", inner.base_dir, remote_src.to_str().unwrap()); - inner.filesystem.copy(remote_file_path, local_dest).await?; + let remote_file_path = format!("{}{}", self.base_dir, remote_src.to_str().unwrap()); + self.filesystem.copy(remote_file_path, local_dest).await?; Ok(()) } @@ -371,12 +392,12 @@ impl ProviderNode for NativeNode // re-spawn process with tasks for logs let (process, stdout_reading_handle, stderr_reading_handle, log_writing_handle) = create_process_with_log_tasks( - &inner.name, - &inner.command, - &inner.args, - &inner.env, - &inner.log_path, - inner.filesystem.clone(), + &self.name, + &self.command, + &self.args, + &self.env, + &self.log_path, + self.filesystem.clone(), )?; // update node process and handlers @@ -396,8 +417,8 @@ impl ProviderNode for NativeNode inner.stderr_reading_handle.abort(); inner.process.kill().await.unwrap(); - if let Some(namespace) = inner.namespace.inner.upgrade() { - namespace.write().await.nodes.remove(&inner.name); + if let Some(namespace) = self.namespace.inner.upgrade() { + namespace.write().await.nodes.remove(&self.name); } Ok(()) @@ -448,15 +469,16 @@ fn create_log_writing_task( fn create_process_with_log_tasks( name: &str, command: &str, - args: &[String], - env: &[(String, String)], + args: &Vec, + env: &Vec<(String, String)>, log_path: &str, filesystem: impl FileSystem + Send + Sync + 'static, ) -> Result<(Child, JoinHandle<()>, JoinHandle<()>, JoinHandle<()>), ProviderError> { // create process + println!("{:?} {:?}", args, env); let mut process = Command::new(command) - .args(args) - .envs(env.to_owned()) + // .args(args) + // .envs(env.to_owned()) .stdin(Stdio::null()) .stdout(Stdio::piped()) .stderr(Stdio::piped()) @@ -483,34 +505,84 @@ fn create_process_with_log_tasks( #[cfg(test)] mod tests { - use std::os::unix::prelude::PermissionsExt; + use std::{ffi::OsString, str::FromStr}; + + use support::fs::{ + in_memory::{InMemoryFile, InMemoryFileSystem}, + local::LocalFileSystem, + }; use super::*; - // #[tokio::test(flavor = "multi_thread", worker_threads = 8)] - // async fn + #[test] + fn it_should_possible_to_retrieve_capabilities() { + let fs = InMemoryFileSystem::default(); + 
let provider = NativeProvider::new(fs); - #[tokio::test(flavor = "multi_thread", worker_threads = 8)] - async fn it_should_works() { - let file = std::fs::File::create(format!( - "{}/{}", - std::env::temp_dir().to_string_lossy(), - Uuid::new_v4() - )) - .unwrap(); + let capabilities = provider.capabilities(); - let metadata = file.metadata().unwrap(); + assert_eq!(capabilities.requires_image, false); + } - let mut permissions = metadata.permissions(); - permissions.set_mode(0o744); + #[tokio::test] + async fn it_should_be_possible_to_create_a_new_namespace() { + let fs = InMemoryFileSystem::new(HashMap::from([ + (OsString::from_str("/").unwrap(), InMemoryFile::dir()), + (OsString::from_str("/tmp").unwrap(), InMemoryFile::dir()), + ])); + let provider = NativeProvider::new(fs.clone()); - tokio::fs::set_permissions("/tmp/myscript.sh", permissions) + let namespace = provider.create_namespace().await.unwrap(); + + println!("{:?}", fs.files.read().await); + } + + #[tokio::test] + async fn it_works() { + let fs = LocalFileSystem::default(); + let provider = NativeProvider::new(fs); + + let namespace = provider.create_namespace().await.unwrap(); + + namespace + .generate_files(GenerateFilesOptions { + commands: vec![GenerateFileCommand { + command: "/home/user/.bin/polkadot".to_string(), + args: vec![ + "build-spec".to_string(), + "--chain=rococo-local".to_string(), + "--disable-default-bootnode".to_string(), + ], + env: vec![], + local_output_path: "rococo-local-plain.json".into(), + }], + injected_files: vec![], + }) .await .unwrap(); - // let result = Command::new("/tmp/myscript.sh").output().await.unwrap(); + // let node = namespace + // .spawn_node(SpawnNodeOptions { + // name: "node1".to_string(), + // command: "/home/user/.bin/polkadot".to_string(), + // args: vec![], + // env: vec![], + // injected_files: vec![], + // }) + // .await + // .unwrap(); + + // sleep(Duration::from_secs(10)).await; + + // node.pause().await.unwrap(); + + // sleep(Duration::from_secs(10)).await; + + // node.resume().await.unwrap(); + + // node.restart(Some(Duration::from_secs(10))).await.unwrap(); - // println!("{:?}", result); + // sleep(Duration::from_secs(10)).await; } } From f47539c151004a3e1fbc6143c12d430841232ac4 Mon Sep 17 00:00:00 2001 From: l0r1s Date: Tue, 12 Sep 2023 21:49:55 +0300 Subject: [PATCH 31/69] feat: removed comment --- crates/provider/src/native.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/crates/provider/src/native.rs b/crates/provider/src/native.rs index 2e5b82203..3a3064ed2 100644 --- a/crates/provider/src/native.rs +++ b/crates/provider/src/native.rs @@ -475,10 +475,9 @@ fn create_process_with_log_tasks( filesystem: impl FileSystem + Send + Sync + 'static, ) -> Result<(Child, JoinHandle<()>, JoinHandle<()>, JoinHandle<()>), ProviderError> { // create process - println!("{:?} {:?}", args, env); let mut process = Command::new(command) - // .args(args) - // .envs(env.to_owned()) + .args(args) + .envs(env.to_owned()) .stdin(Stdio::null()) .stdout(Stdio::piped()) .stderr(Stdio::piped()) From ce6f0b2d67ecbad7112829d5b0a5943fbb47d812 Mon Sep 17 00:00:00 2001 From: l0r1s Date: Tue, 12 Sep 2023 22:53:30 +0300 Subject: [PATCH 32/69] feat: refactored provider types and added builders --- crates/provider/src/lib.rs | 60 +------- crates/provider/src/shared/types.rs | 226 +++++++++++++++++++++++++++- 2 files changed, 229 insertions(+), 57 deletions(-) diff --git a/crates/provider/src/lib.rs b/crates/provider/src/lib.rs index 9238a8eb1..98fac939c 100644 --- 
a/crates/provider/src/lib.rs +++ b/crates/provider/src/lib.rs @@ -4,7 +4,10 @@ mod shared; use std::{net::IpAddr, path::PathBuf, process::ExitStatus, sync::Arc, time::Duration}; use async_trait::async_trait; -use shared::types::TransferedFile; +use shared::types::{ + GenerateFileCommand, GenerateFilesOptions, ProviderCapabilities, RunCommandOptions, + RunScriptOptions, SpawnNodeOptions, +}; use crate::shared::types::Port; @@ -38,56 +41,15 @@ pub enum ProviderError { FileGenerationFailed(anyhow::Error), } -#[derive(Debug, Clone)] -pub struct ProviderCapabilities { - pub requires_image: bool, -} - -pub struct CreateNamespaceOptions { - pub root_dir: String, - pub config_dir: String, - pub data_dir: String, -} - -impl Default for CreateNamespaceOptions { - fn default() -> Self { - Self { - root_dir: "/tmp".to_string(), - config_dir: "/cfg".to_string(), - data_dir: "/data".to_string(), - } - } -} - #[async_trait] pub trait Provider { fn capabilities(&self) -> ProviderCapabilities; + async fn create_namespace(&self) -> Result; - // TODO(team): Do we need at this point to handle cleanner/pod-monitor? } pub type DynProvider = Arc; -pub struct SpawnNodeOptions { - pub name: String, - pub command: String, - pub args: Vec, - pub env: Vec<(String, String)>, - pub injected_files: Vec, -} - -pub struct GenerateFileCommand { - pub command: String, - pub args: Vec, - pub env: Vec<(String, String)>, - pub local_output_path: String, -} - -pub struct GenerateFilesOptions { - pub commands: Vec, - pub injected_files: Vec, -} - #[async_trait] pub trait ProviderNamespace { fn id(&self) -> String; @@ -103,18 +65,6 @@ pub trait ProviderNamespace { pub type DynNamespace = Arc; -pub struct RunCommandOptions { - pub command: String, - pub args: Vec, - pub env: Vec<(String, String)>, -} - -pub struct RunScriptOptions { - pub local_script_path: String, - pub args: Vec, - pub env: Vec<(String, String)>, -} - type ExecutionResult = Result; #[async_trait] diff --git a/crates/provider/src/shared/types.rs b/crates/provider/src/shared/types.rs index 440b24017..ba2487c5e 100644 --- a/crates/provider/src/shared/types.rs +++ b/crates/provider/src/shared/types.rs @@ -1,6 +1,228 @@ +use std::path::{Path, PathBuf}; + pub type Port = u16; +#[derive(Debug, Default, Clone)] +pub struct ProviderCapabilities { + pub requires_image: bool, +} + +impl ProviderCapabilities { + fn new() -> Self { + Self::default() + } + + fn requires_image(mut self) -> Self { + self.requires_image = true; + self + } +} + +pub struct SpawnNodeOptions { + pub name: String, + pub command: String, + pub args: Vec, + pub env: Vec<(String, String)>, + pub injected_files: Vec, +} + +impl SpawnNodeOptions { + fn new(name: S, command: S) -> Self + where + S: AsRef, + { + Self { + name: name.as_ref().to_string(), + command: command.as_ref().to_string(), + args: vec![], + env: vec![], + injected_files: vec![], + } + } + + fn args(mut self, args: I) -> Self + where + S: AsRef, + I: IntoIterator, + { + self.args = args.into_iter().map(|s| s.as_ref().to_string()).collect(); + self + } + + fn env(mut self, env: I) -> Self + where + S: AsRef, + I: IntoIterator, + { + self.env = env + .into_iter() + .map(|(name, value)| (name.as_ref().to_string(), value.as_ref().to_string())) + .collect(); + self + } +} + +pub struct GenerateFileCommand { + pub command: String, + pub args: Vec, + pub env: Vec<(String, String)>, + pub local_output_path: PathBuf, +} + +impl GenerateFileCommand { + fn new(command: S, local_output_path: P) -> Self + where + S: AsRef, + P: AsRef, + { + Self { + 
command: command.as_ref().to_string(), + args: vec![], + env: vec![], + local_output_path: local_output_path.as_ref().into(), + } + } + + fn args(mut self, args: I) -> Self + where + S: AsRef, + I: IntoIterator, + { + self.args = args.into_iter().map(|s| s.as_ref().to_string()).collect(); + self + } + + fn env(mut self, env: I) -> Self + where + S: AsRef, + I: IntoIterator, + { + self.env = env + .into_iter() + .map(|(name, value)| (name.as_ref().to_string(), value.as_ref().to_string())) + .collect(); + self + } +} + +pub struct GenerateFilesOptions { + pub commands: Vec, + pub injected_files: Vec, +} + +impl GenerateFilesOptions { + fn new(commands: I) -> Self + where + I: IntoIterator, + { + Self { + commands: vec![], + injected_files: vec![], + } + } + + fn injected_files(mut self, injected_files: I) -> Self + where + I: IntoIterator, + { + self.injected_files = injected_files.into_iter().collect(); + self + } +} + +pub struct RunCommandOptions { + pub command: String, + pub args: Vec, + pub env: Vec<(String, String)>, +} + +impl RunCommandOptions { + fn new(command: S) -> Self + where + S: AsRef, + { + Self { + command: command.as_ref().to_string(), + args: vec![], + env: vec![], + } + } + + fn args(mut self, args: I) -> Self + where + S: AsRef, + I: IntoIterator, + { + self.args = args.into_iter().map(|s| s.as_ref().to_string()).collect(); + self + } + + fn env(mut self, env: I) -> Self + where + S: AsRef, + I: IntoIterator, + { + self.env = env + .into_iter() + .map(|(name, value)| (name.as_ref().to_string(), value.as_ref().to_string())) + .collect(); + self + } +} + +pub struct RunScriptOptions { + pub local_script_path: PathBuf, + pub args: Vec, + pub env: Vec<(String, String)>, +} + +impl RunScriptOptions { + fn new
<P>
(local_script_path: P) -> Self + where + P: AsRef, + { + Self { + local_script_path: local_script_path.as_ref().into(), + args: vec![], + env: vec![], + } + } + + fn args(mut self, args: I) -> Self + where + S: AsRef, + I: IntoIterator, + { + self.args = args.into_iter().map(|s| s.as_ref().to_string()).collect(); + self + } + + fn env(mut self, env: I) -> Self + where + S: AsRef, + I: IntoIterator, + { + self.env = env + .into_iter() + .map(|(name, value)| (name.as_ref().to_string(), value.as_ref().to_string())) + .collect(); + self + } +} + pub struct TransferedFile { - pub local_path: String, - pub remote_path: String, + pub local_path: PathBuf, + pub remote_path: PathBuf, +} + +impl TransferedFile { + fn new
<P>
(local_path: P, remote_path: P) -> Self + where + P: AsRef, + { + Self { + local_path: local_path.as_ref().into(), + remote_path: remote_path.as_ref().into(), + } + } } From 86568b9631606be415a15a6d50c81a6fa486b913 Mon Sep 17 00:00:00 2001 From: l0r1s Date: Wed, 13 Sep 2023 19:51:06 +0300 Subject: [PATCH 33/69] feat: replaced String path to PathBuf --- crates/provider/src/lib.rs | 2 +- crates/provider/src/native.rs | 43 +++++++++++++++++++++++------------ 2 files changed, 29 insertions(+), 16 deletions(-) diff --git a/crates/provider/src/lib.rs b/crates/provider/src/lib.rs index 98fac939c..73d592bd9 100644 --- a/crates/provider/src/lib.rs +++ b/crates/provider/src/lib.rs @@ -35,7 +35,7 @@ pub enum ProviderError { FSError(#[from] FileSystemError), #[error("Invalid script path for {0}")] - InvalidScriptPath(String), + InvalidScriptPath(PathBuf), #[error("File generation failed: {0}")] FileGenerationFailed(anyhow::Error), diff --git a/crates/provider/src/native.rs b/crates/provider/src/native.rs index 3a3064ed2..2f76c8d0f 100644 --- a/crates/provider/src/native.rs +++ b/crates/provider/src/native.rs @@ -84,7 +84,7 @@ impl Provider for NativeProvider let id = format!("zombie_{}", Uuid::new_v4()); let mut inner = self.inner.write().await; - let base_dir = format!("{}/{}", self.tmp_dir.to_string_lossy(), &id); + let base_dir = PathBuf::from(format!("{}/{}", self.tmp_dir.to_string_lossy(), &id)); self.filesystem.create_dir(&base_dir).await.unwrap(); let namespace = NativeNamespace { @@ -108,7 +108,7 @@ impl Provider for NativeProvider #[derive(Debug, Clone)] pub struct NativeNamespace { id: String, - base_dir: String, + base_dir: PathBuf, inner: Arc>>, filesystem: FS, provider: WeakNativeProvider, @@ -134,11 +134,12 @@ impl ProviderNamespace for Nativ let mut inner = self.inner.write().await; // create node directories and filepaths - let base_dir = format!("{}/{}", &self.base_dir, &options.name); - let log_path = format!("{}/{}.log", &base_dir, &options.name); - let config_dir = format!("{}{}", &base_dir, NODE_CONFIG_DIR); - let data_dir = format!("{}{}", &base_dir, NODE_DATA_DIR); - let scripts_dir = format!("{}{}", &base_dir, NODE_SCRIPTS_DIR); + let base_dir_raw = format!("{}/{}", &self.base_dir.to_string_lossy(), &options.name); + let base_dir = PathBuf::from(&base_dir_raw); + let log_path = PathBuf::from(format!("{}/{}.log", base_dir_raw, &options.name)); + let config_dir = PathBuf::from(format!("{}/{}", base_dir_raw, NODE_CONFIG_DIR)); + let data_dir = PathBuf::from(format!("{}/{}", base_dir_raw, NODE_DATA_DIR)); + let scripts_dir = PathBuf::from(format!("{}/{}", base_dir_raw, NODE_SCRIPTS_DIR)); self.filesystem.create_dir(&base_dir).await.unwrap(); self.filesystem.create_dir(&config_dir).await.unwrap(); self.filesystem.create_dir(&data_dir).await.unwrap(); @@ -207,7 +208,11 @@ impl ProviderNamespace for Nativ Ok(contents) => self .filesystem .write( - format!("{}/{}", self.base_dir, local_output_path), + format!( + "{}/{}", + self.base_dir.to_string_lossy(), + local_output_path.to_string_lossy() + ), contents, ) .await @@ -254,9 +259,9 @@ struct NativeNode { command: String, args: Vec, env: Vec<(String, String)>, - base_dir: String, - scripts_dir: String, - log_path: String, + base_dir: PathBuf, + scripts_dir: PathBuf, + log_path: PathBuf, inner: Arc>, filesystem: FS, namespace: WeakNativeNamespace, @@ -328,7 +333,11 @@ impl ProviderNode for NativeNode .file_name() .map(|file_name| file_name.to_string_lossy().to_string()) 
.ok_or(ProviderError::InvalidScriptPath(options.local_script_path))?; - let remote_script_path = format!("{}/{}", self.scripts_dir, script_file_name); + let remote_script_path = format!( + "{}/{}", + self.scripts_dir.to_string_lossy(), + script_file_name + ); // copy and set script's execute permission self.filesystem @@ -350,7 +359,11 @@ impl ProviderNode for NativeNode remote_src: PathBuf, local_dest: PathBuf, ) -> Result<(), ProviderError> { - let remote_file_path = format!("{}{}", self.base_dir, remote_src.to_str().unwrap()); + let remote_file_path = format!( + "{}/{}", + self.base_dir.to_string_lossy(), + remote_src.to_string_lossy() + ); self.filesystem.copy(remote_file_path, local_dest).await?; Ok(()) @@ -454,7 +467,7 @@ fn create_stream_polling_task( fn create_log_writing_task( mut rx: Receiver, Error>>, filesystem: impl FileSystem + Send + Sync + 'static, - log_path: String, + log_path: PathBuf, ) -> JoinHandle<()> { tokio::spawn(async move { loop { @@ -471,7 +484,7 @@ fn create_process_with_log_tasks( command: &str, args: &Vec, env: &Vec<(String, String)>, - log_path: &str, + log_path: &PathBuf, filesystem: impl FileSystem + Send + Sync + 'static, ) -> Result<(Child, JoinHandle<()>, JoinHandle<()>, JoinHandle<()>), ProviderError> { // create process From 4aee8b0ce600c26c5a2179a0acfdc3146c6c9f9c Mon Sep 17 00:00:00 2001 From: l0r1s Date: Wed, 13 Sep 2023 21:10:20 +0300 Subject: [PATCH 34/69] feat: added correct error handling --- crates/provider/src/lib.rs | 25 +++++++++----- crates/provider/src/native.rs | 64 ++++++++++++++++++++++++++--------- 2 files changed, 65 insertions(+), 24 deletions(-) diff --git a/crates/provider/src/lib.rs b/crates/provider/src/lib.rs index 73d592bd9..d8adf2fd0 100644 --- a/crates/provider/src/lib.rs +++ b/crates/provider/src/lib.rs @@ -22,23 +22,32 @@ pub enum ProviderError { #[error("Error running command: {0}")] RunCommandError(anyhow::Error), - #[error("Invalid network configuration field {0}")] - InvalidConfig(String), - - #[error("Can recover node: {0} info, field: {1}")] - MissingNodeInfo(String, String), - #[error("Duplicated node name: {0}")] DuplicatedNodeName(String), #[error(transparent)] - FSError(#[from] FileSystemError), + FileSystemError(#[from] FileSystemError), #[error("Invalid script path for {0}")] - InvalidScriptPath(PathBuf), + InvalidScriptPath(anyhow::Error), + + #[error("Script with path {0} not found")] + ScriptNotFound(PathBuf), #[error("File generation failed: {0}")] FileGenerationFailed(anyhow::Error), + + #[error("Failed to retrieve process ID for node '{0}'")] + ProcessIdRetrievalFailed(String), + + #[error("Failed to pause node '{0}'")] + PauseNodeFailed(String), + + #[error("Failed to resume node '{0}'")] + ResumeNodeFaied(String), + + #[error("Failed to kill node '{0}'")] + KillNodeFailed(String), } #[async_trait] diff --git a/crates/provider/src/native.rs b/crates/provider/src/native.rs index 2f76c8d0f..e6851b4c6 100644 --- a/crates/provider/src/native.rs +++ b/crates/provider/src/native.rs @@ -13,6 +13,7 @@ use anyhow::anyhow; use async_trait::async_trait; use configuration::types::Port; use nix::{ + libc::pid_t, sys::signal::{kill, Signal}, unistd::Pid, }; @@ -85,7 +86,7 @@ impl Provider for NativeProvider let mut inner = self.inner.write().await; let base_dir = PathBuf::from(format!("{}/{}", self.tmp_dir.to_string_lossy(), &id)); - self.filesystem.create_dir(&base_dir).await.unwrap(); + self.filesystem.create_dir(&base_dir).await?; let namespace = NativeNamespace { id: id.clone(), @@ -133,6 +134,10 @@ impl 
ProviderNamespace for Nativ async fn spawn_node(&self, options: SpawnNodeOptions) -> Result { let mut inner = self.inner.write().await; + if inner.nodes.contains_key(&options.name) { + return Err(ProviderError::DuplicatedNodeName(options.name)); + } + // create node directories and filepaths let base_dir_raw = format!("{}/{}", &self.base_dir.to_string_lossy(), &options.name); let base_dir = PathBuf::from(&base_dir_raw); @@ -140,9 +145,9 @@ impl ProviderNamespace for Nativ let config_dir = PathBuf::from(format!("{}/{}", base_dir_raw, NODE_CONFIG_DIR)); let data_dir = PathBuf::from(format!("{}/{}", base_dir_raw, NODE_DATA_DIR)); let scripts_dir = PathBuf::from(format!("{}/{}", base_dir_raw, NODE_SCRIPTS_DIR)); - self.filesystem.create_dir(&base_dir).await.unwrap(); - self.filesystem.create_dir(&config_dir).await.unwrap(); - self.filesystem.create_dir(&data_dir).await.unwrap(); + self.filesystem.create_dir(&base_dir).await?; + self.filesystem.create_dir(&config_dir).await?; + self.filesystem.create_dir(&data_dir).await?; let (process, stdout_reading_handle, stderr_reading_handle, log_writing_handle) = create_process_with_log_tasks( @@ -324,15 +329,21 @@ impl ProviderNode for NativeNode ) -> Result { let local_script_path = PathBuf::from(&options.local_script_path); - if !local_script_path.try_exists().unwrap() { - return Err(ProviderError::RunCommandError(anyhow!("Test"))); + if !local_script_path + .try_exists() + .map_err(|err| ProviderError::InvalidScriptPath(err.into()))? + { + return Err(ProviderError::ScriptNotFound(local_script_path)); } // extract file name and build remote file path let script_file_name = local_script_path .file_name() .map(|file_name| file_name.to_string_lossy().to_string()) - .ok_or(ProviderError::InvalidScriptPath(options.local_script_path))?; + .ok_or(ProviderError::InvalidScriptPath(anyhow!( + "Can't retrieve filename from script with path: {:?}", + options.local_script_path + )))?; let remote_script_path = format!( "{}/{}", self.scripts_dir.to_string_lossy(), @@ -371,20 +382,20 @@ impl ProviderNode for NativeNode async fn pause(&self) -> Result<(), ProviderError> { let inner = self.inner.write().await; - let raw_pid = inner.process.id().unwrap(); - let pid = Pid::from_raw(raw_pid.try_into().unwrap()); + let pid = retrieve_pid_from_process(&inner.process, &self.name)?; - kill(pid, Signal::SIGSTOP).unwrap(); + kill(pid, Signal::SIGSTOP) + .map_err(|_| ProviderError::PauseNodeFailed(self.name.clone()))?; Ok(()) } async fn resume(&self) -> Result<(), ProviderError> { let inner = self.inner.write().await; - let raw_pid = inner.process.id().unwrap(); - let pid = Pid::from_raw(raw_pid.try_into().unwrap()); + let pid = retrieve_pid_from_process(&inner.process, &self.name)?; - kill(pid, Signal::SIGCONT).unwrap(); + kill(pid, Signal::SIGCONT) + .map_err(|_| ProviderError::ResumeNodeFaied(self.name.clone()))?; Ok(()) } @@ -400,7 +411,11 @@ impl ProviderNode for NativeNode inner.log_writing_handle.abort(); inner.stdout_reading_handle.abort(); inner.stderr_reading_handle.abort(); - inner.process.kill().await.unwrap(); + inner + .process + .kill() + .await + .map_err(|_| ProviderError::KillNodeFailed(self.name.clone()))?; // re-spawn process with tasks for logs let (process, stdout_reading_handle, stderr_reading_handle, log_writing_handle) = @@ -428,7 +443,11 @@ impl ProviderNode for NativeNode inner.log_writing_handle.abort(); inner.stdout_reading_handle.abort(); inner.stderr_reading_handle.abort(); - inner.process.kill().await.unwrap(); + inner + .process + .kill() + 
.await + .map_err(|_| ProviderError::KillNodeFailed(self.name.clone()))?; if let Some(namespace) = self.namespace.inner.upgrade() { namespace.write().await.nodes.remove(&self.name); @@ -438,6 +457,18 @@ impl ProviderNode for NativeNode } } +fn retrieve_pid_from_process(process: &Child, node_name: &str) -> Result { + Ok(Pid::from_raw( + process + .id() + .ok_or(ProviderError::ProcessIdRetrievalFailed( + node_name.to_string(), + ))? + .try_into() + .map_err(|_| ProviderError::ProcessIdRetrievalFailed(node_name.to_string()))?, + )) +} + fn create_stream_polling_task( stream: impl AsyncRead + Unpin + Send + 'static, tx: Sender, Error>>, @@ -473,7 +504,8 @@ fn create_log_writing_task( loop { sleep(Duration::from_millis(250)).await; while let Some(Ok(data)) = rx.recv().await { - filesystem.append(&log_path, data).await.unwrap(); + // TODO: find a better way instead of ignoring error ? + let _ = filesystem.append(&log_path, data).await; } } }) From b14bfa3acbdf3a06a255cff96703a4a44a60f7ef Mon Sep 17 00:00:00 2001 From: l0r1s Date: Wed, 13 Sep 2023 21:13:49 +0300 Subject: [PATCH 35/69] feat: added logic for injected files in spawn node method --- crates/provider/src/native.rs | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/crates/provider/src/native.rs b/crates/provider/src/native.rs index e6851b4c6..10460df5a 100644 --- a/crates/provider/src/native.rs +++ b/crates/provider/src/native.rs @@ -31,7 +31,10 @@ use tokio::{ use uuid::Uuid; use crate::{ - shared::constants::{NODE_CONFIG_DIR, NODE_DATA_DIR, NODE_SCRIPTS_DIR}, + shared::{ + constants::{NODE_CONFIG_DIR, NODE_DATA_DIR, NODE_SCRIPTS_DIR}, + types::TransferedFile, + }, DynNamespace, DynNode, ExecutionResult, GenerateFileCommand, GenerateFilesOptions, Provider, ProviderCapabilities, ProviderError, ProviderNamespace, ProviderNode, RunCommandOptions, RunScriptOptions, SpawnNodeOptions, @@ -149,6 +152,20 @@ impl ProviderNamespace for Nativ self.filesystem.create_dir(&config_dir).await?; self.filesystem.create_dir(&data_dir).await?; + // copy injected files + for file in options.injected_files { + self.filesystem + .copy( + file.local_path, + format!( + "{}/{}", + self.base_dir.to_string_lossy(), + file.remote_path.to_string_lossy() + ), + ) + .await?; + } + let (process, stdout_reading_handle, stderr_reading_handle, log_writing_handle) = create_process_with_log_tasks( &options.name, From 469c3000e5a1927baacbe5b54ba3ae42e4e43d7c Mon Sep 17 00:00:00 2001 From: l0r1s Date: Wed, 13 Sep 2023 21:16:56 +0300 Subject: [PATCH 36/69] chore: cargo fmt --- crates/provider/src/lib.rs | 3 +-- crates/support/src/fs/in_memory.rs | 6 ++++-- crates/support/src/fs/local.rs | 5 +++-- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/crates/provider/src/lib.rs b/crates/provider/src/lib.rs index d8adf2fd0..822c2c766 100644 --- a/crates/provider/src/lib.rs +++ b/crates/provider/src/lib.rs @@ -8,11 +8,10 @@ use shared::types::{ GenerateFileCommand, GenerateFilesOptions, ProviderCapabilities, RunCommandOptions, RunScriptOptions, SpawnNodeOptions, }; +use support::fs::FileSystemError; use crate::shared::types::Port; -use support::fs::FileSystemError; - #[derive(Debug, thiserror::Error)] #[allow(missing_docs)] pub enum ProviderError { diff --git a/crates/support/src/fs/in_memory.rs b/crates/support/src/fs/in_memory.rs index a0fd21915..efb042223 100644 --- a/crates/support/src/fs/in_memory.rs +++ b/crates/support/src/fs/in_memory.rs @@ -1,10 +1,11 @@ use std::{collections::HashMap, ffi::OsString, path::Path, sync::Arc}; -use 
super::{FileSystem, FileSystemResult}; use anyhow::anyhow; use async_trait::async_trait; use tokio::sync::RwLock; +use super::{FileSystem, FileSystemResult}; + #[derive(Debug, Clone)] pub enum InMemoryFile { File { mode: u32, contents: Vec }, @@ -197,9 +198,10 @@ impl FileSystem for InMemoryFileSystem { #[cfg(test)] mod tests { - use super::*; use std::str::FromStr; + use super::*; + #[tokio::test] async fn create_dir_should_create_a_directory_at_root() { let fs = InMemoryFileSystem::new(HashMap::from([( diff --git a/crates/support/src/fs/local.rs b/crates/support/src/fs/local.rs index a8519cc66..932933fb0 100644 --- a/crates/support/src/fs/local.rs +++ b/crates/support/src/fs/local.rs @@ -1,7 +1,7 @@ use std::{os::unix::fs::PermissionsExt, path::Path}; -use tokio::io::AsyncWriteExt; use async_trait::async_trait; +use tokio::io::AsyncWriteExt; use uuid::Uuid; use super::{FileSystem, FileSystemError, FileSystemResult}; @@ -91,9 +91,10 @@ impl FileSystem for LocalFileSystem { #[cfg(test)] mod tests { - use super::*; use uuid::Uuid; + use super::*; + const FILE_BITS: u32 = 0o100000; const DIR_BITS: u32 = 0o40000; From c8cadd715f74251bffaff722dd19379399f3404f Mon Sep 17 00:00:00 2001 From: l0r1s Date: Wed, 13 Sep 2023 21:24:29 +0300 Subject: [PATCH 37/69] chore: fixed clippy warnings --- crates/provider/src/native.rs | 20 +++++--------------- crates/provider/src/shared/types.rs | 2 +- crates/support/src/fs/in_memory.rs | 10 ++++------ 3 files changed, 10 insertions(+), 22 deletions(-) diff --git a/crates/provider/src/native.rs b/crates/provider/src/native.rs index 10460df5a..54bd588a7 100644 --- a/crates/provider/src/native.rs +++ b/crates/provider/src/native.rs @@ -13,7 +13,6 @@ use anyhow::anyhow; use async_trait::async_trait; use configuration::types::Port; use nix::{ - libc::pid_t, sys::signal::{kill, Signal}, unistd::Pid, }; @@ -31,10 +30,7 @@ use tokio::{ use uuid::Uuid; use crate::{ - shared::{ - constants::{NODE_CONFIG_DIR, NODE_DATA_DIR, NODE_SCRIPTS_DIR}, - types::TransferedFile, - }, + shared::constants::{NODE_CONFIG_DIR, NODE_DATA_DIR, NODE_SCRIPTS_DIR}, DynNamespace, DynNode, ExecutionResult, GenerateFileCommand, GenerateFilesOptions, Provider, ProviderCapabilities, ProviderError, ProviderNamespace, ProviderNode, RunCommandOptions, RunScriptOptions, SpawnNodeOptions, @@ -253,15 +249,7 @@ impl ProviderNamespace for Nativ async fn destroy(&self) -> Result<(), ProviderError> { // we need to clone nodes (behind an Arc, so cheaply) to avoid deadlock between the inner.write lock and the node.destroy // method acquiring a lock the namespace to remove the node from the nodes hashmap. 
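// Editorial aside, not part of the patch: the clippy fix shown just below replaces a
// `(_, node)` key/value iteration with `values().cloned()`. A minimal, self-contained
// sketch of that pattern (names here are illustrative, not from the codebase), assuming
// the nodes live in a `HashMap<String, Arc<T>>`:
use std::{collections::HashMap, sync::Arc};

fn snapshot_nodes<T>(nodes: &HashMap<String, Arc<T>>) -> Vec<Arc<T>> {
    // Cloning the Arc handles up front lets the caller release any lock guarding the
    // map before awaiting work on each node, which is the deadlock-avoidance noted above.
    nodes.values().cloned().collect()
}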
- let nodes = self - .inner - .write() - .await - .nodes - .iter() - .map(|(_, node)| node.clone()) - .collect::>>(); - + let nodes: Vec> = self.inner.write().await.nodes.values().cloned().collect(); for node in nodes.iter() { node.destroy().await?; } @@ -528,6 +516,8 @@ fn create_log_writing_task( }) } +type CreateProcessOutput = (Child, JoinHandle<()>, JoinHandle<()>, JoinHandle<()>); + fn create_process_with_log_tasks( name: &str, command: &str, @@ -535,7 +525,7 @@ fn create_process_with_log_tasks( env: &Vec<(String, String)>, log_path: &PathBuf, filesystem: impl FileSystem + Send + Sync + 'static, -) -> Result<(Child, JoinHandle<()>, JoinHandle<()>, JoinHandle<()>), ProviderError> { +) -> Result { // create process let mut process = Command::new(command) .args(args) diff --git a/crates/provider/src/shared/types.rs b/crates/provider/src/shared/types.rs index ba2487c5e..921d678c0 100644 --- a/crates/provider/src/shared/types.rs +++ b/crates/provider/src/shared/types.rs @@ -116,7 +116,7 @@ impl GenerateFilesOptions { I: IntoIterator, { Self { - commands: vec![], + commands: commands.into_iter().collect(), injected_files: vec![], } } diff --git a/crates/support/src/fs/in_memory.rs b/crates/support/src/fs/in_memory.rs index efb042223..6c6397e01 100644 --- a/crates/support/src/fs/in_memory.rs +++ b/crates/support/src/fs/in_memory.rs @@ -54,8 +54,7 @@ impl FileSystem for InMemoryFileSystem { None => {}, }; - let mut ancestors = path.ancestors().skip(1); - while let Some(path) = ancestors.next() { + for path in path.ancestors().skip(1) { match self.files.read().await.get(path.as_os_str()) { Some(InMemoryFile::Directory { .. }) => continue, Some(InMemoryFile::File { .. }) => Err(anyhow!( @@ -77,14 +76,14 @@ impl FileSystem for InMemoryFileSystem { async fn create_dir_all(&self, path: impl AsRef + Send) -> FileSystemResult<()> { let path = path.as_ref(); let mut files = self.files.write().await; - let mut ancestors = path + let ancestors = path .ancestors() .collect::>() .into_iter() .rev() .skip(1); - while let Some(path) = ancestors.next() { + for path in ancestors { match files.get(path.as_os_str()) { Some(InMemoryFile::Directory { .. }) => continue, Some(InMemoryFile::File { .. }) => Err(anyhow!( @@ -127,8 +126,7 @@ impl FileSystem for InMemoryFileSystem { let os_path = path.as_os_str(); let mut files = self.files.write().await; - let mut ancestors = path.ancestors().skip(1); - while let Some(path) = ancestors.next() { + for path in path.ancestors().skip(1) { match files.get(path.as_os_str()) { Some(InMemoryFile::Directory { .. }) => continue, Some(InMemoryFile::File { .. 
}) => Err(anyhow!( From 8cadb38e82b7ef3d27506ad99d6a0b434305a212 Mon Sep 17 00:00:00 2001 From: l0r1s Date: Mon, 18 Sep 2023 15:51:52 +0300 Subject: [PATCH 38/69] feat: added new helpers to create InMemoryFile for InMemoryFileSystem --- crates/support/src/fs/in_memory.rs | 59 ++++++++++++++++++------------ 1 file changed, 35 insertions(+), 24 deletions(-) diff --git a/crates/support/src/fs/in_memory.rs b/crates/support/src/fs/in_memory.rs index 6c6397e01..37bad21f5 100644 --- a/crates/support/src/fs/in_memory.rs +++ b/crates/support/src/fs/in_memory.rs @@ -13,13 +13,27 @@ pub enum InMemoryFile { } impl InMemoryFile { - pub fn file(contents: Vec) -> Self { + pub fn file(contents: C) -> Self + where + C: AsRef, + { + Self::file_raw(contents.as_ref()) + } + + pub fn file_raw(contents: C) -> Self + where + C: AsRef<[u8]>, + { Self::File { mode: 0o664, - contents, + contents: contents.as_ref().to_vec(), } } + pub fn empty() -> Self { + Self::file_raw(vec![]) + } + pub fn dir() -> Self { Self::Directory { mode: 0o775 } } @@ -141,10 +155,7 @@ impl FileSystem for InMemoryFileSystem { return Err(anyhow!("file {:?} is a directory", os_path).into()); } - files.insert( - os_path.to_owned(), - InMemoryFile::file(contents.as_ref().to_vec()), - ); + files.insert(os_path.to_owned(), InMemoryFile::file_raw(contents)); Ok(()) } @@ -239,7 +250,7 @@ mod tests { (OsString::from_str("/").unwrap(), InMemoryFile::dir()), ( OsString::from_str("/dir").unwrap(), - InMemoryFile::file(vec![]), + InMemoryFile::empty(), ), ])); @@ -294,7 +305,7 @@ mod tests { (OsString::from_str("/").unwrap(), InMemoryFile::dir()), ( OsString::from_str("/path").unwrap(), - InMemoryFile::file(vec![]), + InMemoryFile::empty(), ), (OsString::from_str("/path/to").unwrap(), InMemoryFile::dir()), ( @@ -389,7 +400,7 @@ mod tests { (OsString::from_str("/").unwrap(), InMemoryFile::dir()), ( OsString::from_str("/path").unwrap(), - InMemoryFile::file(vec![]), + InMemoryFile::empty(), ), (OsString::from_str("/path/to").unwrap(), InMemoryFile::dir()), ])); @@ -404,7 +415,7 @@ mod tests { async fn read_should_return_the_file_content() { let fs = InMemoryFileSystem::new(HashMap::from([( OsString::from_str("/myfile").unwrap(), - InMemoryFile::file("content".as_bytes().to_vec()), + InMemoryFile::file("content"), )])); let content = fs.read("/myfile").await.unwrap(); @@ -437,7 +448,7 @@ mod tests { async fn read_to_string_should_return_the_file_content_as_a_string() { let fs = InMemoryFileSystem::new(HashMap::from([( OsString::from_str("/myfile").unwrap(), - InMemoryFile::file("content".as_bytes().to_vec()), + InMemoryFile::file("content"), )])); let content = fs.read_to_string("/myfile").await.unwrap(); @@ -470,7 +481,7 @@ mod tests { async fn read_to_string_should_return_an_error_if_file_isnt_utf8_encoded() { let fs = InMemoryFileSystem::new(HashMap::from([( OsString::from_str("/myfile").unwrap(), - InMemoryFile::file(vec![0xC3, 0x28]), + InMemoryFile::file_raw(vec![0xC3, 0x28]), )])); let err = fs.read_to_string("/myfile").await.unwrap_err(); @@ -506,7 +517,7 @@ mod tests { (OsString::from_str("/").unwrap(), InMemoryFile::dir()), ( OsString::from_str("/myfile").unwrap(), - InMemoryFile::file("my file content".as_bytes().to_vec()), + InMemoryFile::file("my file content"), ), ])); @@ -557,7 +568,7 @@ mod tests { (OsString::from_str("/").unwrap(), InMemoryFile::dir()), ( OsString::from_str("/path").unwrap(), - InMemoryFile::file(vec![]), + InMemoryFile::empty(), ), (OsString::from_str("/path/to").unwrap(), InMemoryFile::dir()), ])); @@ -577,7 +588,7 @@ 
mod tests { (OsString::from_str("/").unwrap(), InMemoryFile::dir()), ( OsString::from_str("/myfile").unwrap(), - InMemoryFile::file("my file content".as_bytes().to_vec()), + InMemoryFile::file("my file content"), ), ])); @@ -648,7 +659,7 @@ mod tests { (OsString::from_str("/").unwrap(), InMemoryFile::dir()), ( OsString::from_str("/path").unwrap(), - InMemoryFile::file(vec![]), + InMemoryFile::empty(), ), (OsString::from_str("/path/to").unwrap(), InMemoryFile::dir()), ])); @@ -668,7 +679,7 @@ mod tests { (OsString::from_str("/").unwrap(), InMemoryFile::dir()), ( OsString::from_str("/myfile").unwrap(), - InMemoryFile::file("my file content".as_bytes().to_vec()), + InMemoryFile::file("my file content"), ), ])); @@ -686,11 +697,11 @@ mod tests { (OsString::from_str("/").unwrap(), InMemoryFile::dir()), ( OsString::from_str("/myfile").unwrap(), - InMemoryFile::file("my new file content".as_bytes().to_vec()), + InMemoryFile::file("my new file content"), ), ( OsString::from_str("/myfilecopy").unwrap(), - InMemoryFile::file("my file content".as_bytes().to_vec()), + InMemoryFile::file("my file content"), ), ])); @@ -732,7 +743,7 @@ mod tests { (OsString::from_str("/").unwrap(), InMemoryFile::dir()), ( OsString::from_str("/myfile").unwrap(), - InMemoryFile::file("my file content".as_bytes().to_vec()), + InMemoryFile::file("my file content"), ), ( OsString::from_str("/myfilecopy").unwrap(), @@ -752,7 +763,7 @@ mod tests { (OsString::from_str("/").unwrap(), InMemoryFile::dir()), ( OsString::from_str("/myfile").unwrap(), - InMemoryFile::file("my file content".as_bytes().to_vec()), + InMemoryFile::file("my file content"), ), ])); @@ -769,11 +780,11 @@ mod tests { (OsString::from_str("/").unwrap(), InMemoryFile::dir()), ( OsString::from_str("/myfile").unwrap(), - InMemoryFile::file("my file content".as_bytes().to_vec()), + InMemoryFile::file("my file content"), ), ( OsString::from_str("/mypath").unwrap(), - InMemoryFile::file(vec![]), + InMemoryFile::empty(), ), ])); @@ -789,7 +800,7 @@ mod tests { (OsString::from_str("/").unwrap(), InMemoryFile::dir()), ( OsString::from_str("/myfile").unwrap(), - InMemoryFile::file("my file content".as_bytes().to_vec()), + InMemoryFile::file("my file content"), ), ])); assert!( From f9a15b83016dc3637a1097c7caa20edf747318e3 Mon Sep 17 00:00:00 2001 From: l0r1s Date: Mon, 18 Sep 2023 15:53:14 +0300 Subject: [PATCH 39/69] feat: updated helpers types visibility, derived traits and methods to add injected files --- crates/provider/src/shared/types.rs | 44 +++++++++++++++++------------ 1 file changed, 26 insertions(+), 18 deletions(-) diff --git a/crates/provider/src/shared/types.rs b/crates/provider/src/shared/types.rs index 921d678c0..1219b20ec 100644 --- a/crates/provider/src/shared/types.rs +++ b/crates/provider/src/shared/types.rs @@ -2,17 +2,17 @@ use std::path::{Path, PathBuf}; pub type Port = u16; -#[derive(Debug, Default, Clone)] +#[derive(Debug, Default, Clone, PartialEq)] pub struct ProviderCapabilities { pub requires_image: bool, } impl ProviderCapabilities { - fn new() -> Self { + pub fn new() -> Self { Self::default() } - fn requires_image(mut self) -> Self { + pub fn requires_image(mut self) -> Self { self.requires_image = true; self } @@ -27,7 +27,7 @@ pub struct SpawnNodeOptions { } impl SpawnNodeOptions { - fn new(name: S, command: S) -> Self + pub fn new(name: S, command: S) -> Self where S: AsRef, { @@ -40,7 +40,7 @@ impl SpawnNodeOptions { } } - fn args(mut self, args: I) -> Self + pub fn args(mut self, args: I) -> Self where S: AsRef, I: IntoIterator, @@ 
-49,7 +49,7 @@ impl SpawnNodeOptions { self } - fn env(mut self, env: I) -> Self + pub fn env(mut self, env: I) -> Self where S: AsRef, I: IntoIterator, @@ -60,6 +60,14 @@ impl SpawnNodeOptions { .collect(); self } + + pub fn injected_files(mut self, injected_files: I) -> Self + where + I: IntoIterator, + { + self.injected_files = injected_files.into_iter().collect(); + self + } } pub struct GenerateFileCommand { @@ -70,7 +78,7 @@ pub struct GenerateFileCommand { } impl GenerateFileCommand { - fn new(command: S, local_output_path: P) -> Self + pub fn new(command: S, local_output_path: P) -> Self where S: AsRef, P: AsRef, @@ -83,7 +91,7 @@ impl GenerateFileCommand { } } - fn args(mut self, args: I) -> Self + pub fn args(mut self, args: I) -> Self where S: AsRef, I: IntoIterator, @@ -92,7 +100,7 @@ impl GenerateFileCommand { self } - fn env(mut self, env: I) -> Self + pub fn env(mut self, env: I) -> Self where S: AsRef, I: IntoIterator, @@ -111,7 +119,7 @@ pub struct GenerateFilesOptions { } impl GenerateFilesOptions { - fn new(commands: I) -> Self + pub fn new(commands: I) -> Self where I: IntoIterator, { @@ -121,7 +129,7 @@ impl GenerateFilesOptions { } } - fn injected_files(mut self, injected_files: I) -> Self + pub fn injected_files(mut self, injected_files: I) -> Self where I: IntoIterator, { @@ -137,7 +145,7 @@ pub struct RunCommandOptions { } impl RunCommandOptions { - fn new(command: S) -> Self + pub fn new(command: S) -> Self where S: AsRef, { @@ -148,7 +156,7 @@ impl RunCommandOptions { } } - fn args(mut self, args: I) -> Self + pub fn args(mut self, args: I) -> Self where S: AsRef, I: IntoIterator, @@ -157,7 +165,7 @@ impl RunCommandOptions { self } - fn env(mut self, env: I) -> Self + pub fn env(mut self, env: I) -> Self where S: AsRef, I: IntoIterator, @@ -177,7 +185,7 @@ pub struct RunScriptOptions { } impl RunScriptOptions { - fn new
<P>
(local_script_path: P) -> Self + pub fn new
<P>
(local_script_path: P) -> Self where P: AsRef, { @@ -188,7 +196,7 @@ impl RunScriptOptions { } } - fn args(mut self, args: I) -> Self + pub fn args(mut self, args: I) -> Self where S: AsRef, I: IntoIterator, @@ -197,7 +205,7 @@ impl RunScriptOptions { self } - fn env(mut self, env: I) -> Self + pub fn env(mut self, env: I) -> Self where S: AsRef, I: IntoIterator, @@ -216,7 +224,7 @@ pub struct TransferedFile { } impl TransferedFile { - fn new
<P>
(local_path: P, remote_path: P) -> Self + pub fn new
<P>
(local_path: P, remote_path: P) -> Self where P: AsRef, { From 2872e4f431abecf5e690f3b213237c1a363ae0a4 Mon Sep 17 00:00:00 2001 From: l0r1s Date: Mon, 18 Sep 2023 19:42:37 +0300 Subject: [PATCH 40/69] feat: added new methods on Provider traits to retrieve node/namespace dirs/log paths, and added methods to retrieve all nodes/namespaces --- crates/provider/src/lib.rs | 28 ++++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/crates/provider/src/lib.rs b/crates/provider/src/lib.rs index 822c2c766..3646ef9ce 100644 --- a/crates/provider/src/lib.rs +++ b/crates/provider/src/lib.rs @@ -1,7 +1,7 @@ -mod native; -mod shared; +pub mod native; +pub mod shared; -use std::{net::IpAddr, path::PathBuf, process::ExitStatus, sync::Arc, time::Duration}; +use std::{net::IpAddr, path::PathBuf, process::ExitStatus, sync::Arc, time::Duration, collections::HashMap}; use async_trait::async_trait; use shared::types::{ @@ -51,7 +51,9 @@ pub enum ProviderError { #[async_trait] pub trait Provider { - fn capabilities(&self) -> ProviderCapabilities; + fn capabilities(&self) -> &ProviderCapabilities; + + async fn namespaces(&self) -> HashMap; async fn create_namespace(&self) -> Result; } @@ -60,7 +62,11 @@ pub type DynProvider = Arc; #[async_trait] pub trait ProviderNamespace { - fn id(&self) -> String; + fn id(&self) -> &str; + + fn base_dir(&self) -> &PathBuf; + + async fn nodes(&self) -> HashMap; async fn spawn_node(&self, options: SpawnNodeOptions) -> Result; @@ -77,7 +83,17 @@ type ExecutionResult = Result; #[async_trait] pub trait ProviderNode { - fn name(&self) -> String; + fn name(&self) -> &str; + + fn base_dir(&self) -> &PathBuf; + + fn config_dir(&self) -> &PathBuf; + + fn data_dir(&self) -> &PathBuf; + + fn scripts_dir(&self) -> &PathBuf; + + fn log_path(&self) -> &PathBuf; async fn endpoint(&self) -> Result<(IpAddr, Port), ProviderError>; From 3b1159e34cb889bf18de7b09174ccdf6642b76d8 Mon Sep 17 00:00:00 2001 From: l0r1s Date: Mon, 18 Sep 2023 19:43:20 +0300 Subject: [PATCH 41/69] feat: added new dummy_node executable script writing echo to stdout for testing --- crates/provider/testing/dummy_node | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100755 crates/provider/testing/dummy_node diff --git a/crates/provider/testing/dummy_node b/crates/provider/testing/dummy_node new file mode 100755 index 000000000..b8492b87a --- /dev/null +++ b/crates/provider/testing/dummy_node @@ -0,0 +1,7 @@ +#!/bin/sh + +echo "Line 1" +sleep 1 +echo "Line 2" +sleep 1 +echo "Line 3" \ No newline at end of file From f40b26766f51e0338697b59f75050375d3a234ec Mon Sep 17 00:00:00 2001 From: l0r1s Date: Mon, 18 Sep 2023 19:44:20 +0300 Subject: [PATCH 42/69] feat: derived PartialEq on InMemoryFile --- crates/support/src/fs/in_memory.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/support/src/fs/in_memory.rs b/crates/support/src/fs/in_memory.rs index 37bad21f5..aa5c6a0e8 100644 --- a/crates/support/src/fs/in_memory.rs +++ b/crates/support/src/fs/in_memory.rs @@ -6,7 +6,7 @@ use tokio::sync::RwLock; use super::{FileSystem, FileSystemResult}; -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub enum InMemoryFile { File { mode: u32, contents: Vec }, Directory { mode: u32 }, From 1f15890dd9bd1c2c5ede08baa7a54cb6308be750 Mon Sep 17 00:00:00 2001 From: l0r1s Date: Mon, 18 Sep 2023 19:44:58 +0300 Subject: [PATCH 43/69] feat: added procfs dependency as dev dependency to provider to facilitate assertion on processes --- Cargo.toml | 3 ++- 
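// Editorial aside, not part of the patch: a minimal sketch of the kind of assertion the
// new `procfs` dev-dependency enables in tests — reading a spawned node's state from
// /proc. The helper name and pid argument are illustrative; state 'T' is stopped
// (e.g. after SIGSTOP), 'S' is sleeping.
fn assert_process_state(pid: i32, expected: char) {
    let state = procfs::process::Process::new(pid)
        .expect("process should exist")
        .stat()
        .expect("process stat should be readable")
        .state;
    assert_eq!(state, expected);
}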
crates/provider/Cargo.toml | 3 +++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index ee1a26b97..5d53e6cd9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -24,4 +24,5 @@ lazy_static = "1.4" multiaddr = "0.18" url = "2.3" uuid = "1.4" -nix = "0.27" \ No newline at end of file +nix = "0.27" +procfs = "0.15" diff --git a/crates/provider/Cargo.toml b/crates/provider/Cargo.toml index 895a93a00..7001258ed 100644 --- a/crates/provider/Cargo.toml +++ b/crates/provider/Cargo.toml @@ -25,3 +25,6 @@ thiserror = { workspace = true } anyhow = { workspace = true } uuid = { workspace = true, features = ["v4"] } nix = { workspace = true, features = ["signal"] } + +[dev-dependencies] +procfs = { workspace = true } From 3d316e7cb5c0547f1ed8cf021525843daa3da26f Mon Sep 17 00:00:00 2001 From: l0r1s Date: Mon, 18 Sep 2023 19:47:02 +0300 Subject: [PATCH 44/69] feat: added cfg_dir and data_dir to NativeNode struct, added paths getters and all nodes/namespaces methods --- crates/provider/src/native.rs | 79 +++++++++++++++++++++++++++-------- 1 file changed, 62 insertions(+), 17 deletions(-) diff --git a/crates/provider/src/native.rs b/crates/provider/src/native.rs index 54bd588a7..f2afaa4d0 100644 --- a/crates/provider/src/native.rs +++ b/crates/provider/src/native.rs @@ -57,9 +57,7 @@ struct WeakNativeProvider { impl NativeProvider { pub fn new(filesystem: FS) -> Self { NativeProvider { - capabilities: ProviderCapabilities { - requires_image: false, - }, + capabilities: ProviderCapabilities::new(), tmp_dir: std::env::temp_dir(), filesystem, inner: Arc::new(RwLock::new(NativeProviderInner { @@ -76,8 +74,19 @@ impl NativeProvider { #[async_trait] impl Provider for NativeProvider { - fn capabilities(&self) -> ProviderCapabilities { - self.capabilities.clone() + fn capabilities(&self) -> &ProviderCapabilities { + &self.capabilities + } + + async fn namespaces(&self) -> HashMap { + self.inner + .read() + .await + .namespaces + .clone() + .into_iter() + .map(|(id, namespace)| (id, Arc::new(namespace) as DynNamespace)) + .collect() } async fn create_namespace(&self) -> Result { @@ -126,8 +135,23 @@ struct WeakNativeNamespace { #[async_trait] impl ProviderNamespace for NativeNamespace { - fn id(&self) -> String { - self.id.clone() + fn id(&self) -> &str { + &self.id + } + + fn base_dir(&self) -> &PathBuf { + &self.base_dir + } + + async fn nodes(&self) -> HashMap { + self.inner + .read() + .await + .nodes + .clone() + .into_iter() + .map(|(id, node)| (id, Arc::new(node) as DynNode)) + .collect() } async fn spawn_node(&self, options: SpawnNodeOptions) -> Result { @@ -141,23 +165,20 @@ impl ProviderNamespace for Nativ let base_dir_raw = format!("{}/{}", &self.base_dir.to_string_lossy(), &options.name); let base_dir = PathBuf::from(&base_dir_raw); let log_path = PathBuf::from(format!("{}/{}.log", base_dir_raw, &options.name)); - let config_dir = PathBuf::from(format!("{}/{}", base_dir_raw, NODE_CONFIG_DIR)); - let data_dir = PathBuf::from(format!("{}/{}", base_dir_raw, NODE_DATA_DIR)); - let scripts_dir = PathBuf::from(format!("{}/{}", base_dir_raw, NODE_SCRIPTS_DIR)); + let config_dir = PathBuf::from(format!("{}{}", base_dir_raw, NODE_CONFIG_DIR)); + let data_dir = PathBuf::from(format!("{}{}", base_dir_raw, NODE_DATA_DIR)); + let scripts_dir = PathBuf::from(format!("{}{}", base_dir_raw, NODE_SCRIPTS_DIR)); self.filesystem.create_dir(&base_dir).await?; self.filesystem.create_dir(&config_dir).await?; self.filesystem.create_dir(&data_dir).await?; + 
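// Illustrative sketch (not from these patches): the injected-file copy a few
// lines below assumes each `remote_path` is absolute, so concatenating it onto
// the node base dir gives, for example:
//
//     TransferedFile::new("/file1", "/cfg/file1")
//     // copied to "<namespace_base_dir>/mynode/cfg/file1"
//
// The node name and paths here are placeholders only.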
self.filesystem.create_dir(&scripts_dir).await?; // copy injected files for file in options.injected_files { self.filesystem .copy( file.local_path, - format!( - "{}/{}", - self.base_dir.to_string_lossy(), - file.remote_path.to_string_lossy() - ), + format!("{}{}", base_dir_raw, file.remote_path.to_string_lossy()), ) .await?; } @@ -179,6 +200,8 @@ impl ProviderNamespace for Nativ args: options.args, env: options.env, base_dir, + config_dir, + data_dir, scripts_dir, log_path, filesystem: self.filesystem.clone(), @@ -270,6 +293,8 @@ struct NativeNode { args: Vec, env: Vec<(String, String)>, base_dir: PathBuf, + config_dir: PathBuf, + data_dir: PathBuf, scripts_dir: PathBuf, log_path: PathBuf, inner: Arc>, @@ -287,8 +312,28 @@ struct NativeNodeInner { #[async_trait] impl ProviderNode for NativeNode { - fn name(&self) -> String { - self.name.clone() + fn name(&self) -> &str { + &self.name + } + + fn base_dir(&self) -> &PathBuf { + &self.base_dir + } + + fn config_dir(&self) -> &PathBuf { + &self.config_dir + } + + fn data_dir(&self) -> &PathBuf { + &self.data_dir + } + + fn scripts_dir(&self) -> &PathBuf { + &self.scripts_dir + } + + fn log_path(&self) -> &PathBuf { + &self.log_path } async fn endpoint(&self) -> Result<(IpAddr, Port), ProviderError> { From 40a53a1d010b4bc16fbfa6dc0dec4d6af139f155 Mon Sep 17 00:00:00 2001 From: l0r1s Date: Mon, 18 Sep 2023 19:47:55 +0300 Subject: [PATCH 45/69] feat: added side effects tests for native spawn_node happy path --- crates/provider/src/native.rs | 394 ++++++++++++++++++++-------------- 1 file changed, 233 insertions(+), 161 deletions(-) diff --git a/crates/provider/src/native.rs b/crates/provider/src/native.rs index f2afaa4d0..9da7500b3 100644 --- a/crates/provider/src/native.rs +++ b/crates/provider/src/native.rs @@ -603,25 +603,30 @@ fn create_process_with_log_tasks( mod tests { use std::{ffi::OsString, str::FromStr}; - use support::fs::{ - in_memory::{InMemoryFile, InMemoryFileSystem}, - local::LocalFileSystem, - }; + use procfs::process::Process; + use support::fs::in_memory::{InMemoryFile, InMemoryFileSystem}; + + use crate::shared::types::TransferedFile; use super::*; #[test] - fn it_should_possible_to_retrieve_capabilities() { + fn provider_capabilities_method_should_return_provider_capabilities() { let fs = InMemoryFileSystem::default(); let provider = NativeProvider::new(fs); let capabilities = provider.capabilities(); - assert_eq!(capabilities.requires_image, false); + assert_eq!( + capabilities, + &ProviderCapabilities { + requires_image: false + } + ); } #[tokio::test] - async fn it_should_be_possible_to_create_a_new_namespace() { + async fn provider_create_namespace_method_should_create_a_new_namespace_and_returns_it() { let fs = InMemoryFileSystem::new(HashMap::from([ (OsString::from_str("/").unwrap(), InMemoryFile::dir()), (OsString::from_str("/tmp").unwrap(), InMemoryFile::dir()), @@ -630,172 +635,239 @@ mod tests { let namespace = provider.create_namespace().await.unwrap(); - println!("{:?}", fs.files.read().await); + assert!(fs + .files + .read() + .await + .contains_key(namespace.base_dir().as_os_str())); + assert_eq!(provider.namespaces().await.len(), 1); + assert!(provider.namespaces().await.get(namespace.id()).is_some()); } #[tokio::test] - async fn it_works() { - let fs = LocalFileSystem::default(); - let provider = NativeProvider::new(fs); + async fn provider_namespaces_method_should_return_empty_namespaces_map_if_the_provider_has_no_namespaces( + ) { + let fs = InMemoryFileSystem::new(HashMap::from([ + 
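// Illustrative sketch (not from these patches): assuming the NODE_*_DIR
// constants carry a leading slash ("/cfg", "/data", "/scripts"), the new
// getters resolve roughly as:
//
//     node.base_dir()    // <ns_base_dir>/<node_name>
//     node.config_dir()  // <ns_base_dir>/<node_name>/cfg
//     node.data_dir()    // <ns_base_dir>/<node_name>/data
//     node.scripts_dir() // <ns_base_dir>/<node_name>/scripts
//     node.log_path()    // <ns_base_dir>/<node_name>/<node_name>.log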
(OsString::from_str("/").unwrap(), InMemoryFile::dir()), + (OsString::from_str("/tmp").unwrap(), InMemoryFile::dir()), + ])); + let provider = NativeProvider::new(fs.clone()); + + assert_eq!(provider.namespaces().await.len(), 0); + } + + #[tokio::test] + async fn provider_namespaces_method_should_return_filled_namespaces_map_if_the_provider_has_one_namespace( + ) { + let fs = InMemoryFileSystem::new(HashMap::from([ + (OsString::from_str("/").unwrap(), InMemoryFile::dir()), + (OsString::from_str("/tmp").unwrap(), InMemoryFile::dir()), + ])); + let provider = NativeProvider::new(fs.clone()); let namespace = provider.create_namespace().await.unwrap(); - namespace - .generate_files(GenerateFilesOptions { - commands: vec![GenerateFileCommand { - command: "/home/user/.bin/polkadot".to_string(), - args: vec![ - "build-spec".to_string(), - "--chain=rococo-local".to_string(), - "--disable-default-bootnode".to_string(), - ], - env: vec![], - local_output_path: "rococo-local-plain.json".into(), - }], - injected_files: vec![], - }) - .await - .unwrap(); + assert_eq!(provider.namespaces().await.len(), 1); + assert!(provider.namespaces().await.get(namespace.id()).is_some()); + } - // let node = namespace - // .spawn_node(SpawnNodeOptions { - // name: "node1".to_string(), - // command: "/home/user/.bin/polkadot".to_string(), - // args: vec![], - // env: vec![], - // injected_files: vec![], - // }) - // .await - // .unwrap(); + #[tokio::test] + async fn provider_namespaces_method_should_return_filled_namespaces_map_if_the_provider_has_two_namespaces( + ) { + let fs = InMemoryFileSystem::new(HashMap::from([ + (OsString::from_str("/").unwrap(), InMemoryFile::dir()), + (OsString::from_str("/tmp").unwrap(), InMemoryFile::dir()), + ])); + let provider = NativeProvider::new(fs.clone()); - // sleep(Duration::from_secs(10)).await; + let namespace1 = provider.create_namespace().await.unwrap(); + let namespace2 = provider.create_namespace().await.unwrap(); - // node.pause().await.unwrap(); + assert_eq!(provider.namespaces().await.len(), 2); + assert!(provider.namespaces().await.get(namespace1.id()).is_some()); + assert!(provider.namespaces().await.get(namespace2.id()).is_some()); + } - // sleep(Duration::from_secs(10)).await; + #[tokio::test] + async fn namespace_spawn_node_method_should_creates_a_new_node_correctly() { + let fs = InMemoryFileSystem::new(HashMap::from([ + (OsString::from_str("/").unwrap(), InMemoryFile::dir()), + (OsString::from_str("/tmp").unwrap(), InMemoryFile::dir()), + ( + OsString::from_str("/file1").unwrap(), + InMemoryFile::file("My file 1"), + ), + ( + OsString::from_str("/file2").unwrap(), + InMemoryFile::file("My file 2"), + ), + ])); + let provider = NativeProvider::new(fs.clone()); + let namespace = provider.create_namespace().await.unwrap(); - // node.resume().await.unwrap(); + let node = namespace + .spawn_node( + SpawnNodeOptions::new( + "mynode", + "/home/user/Work/parity/zombienet-sdk/crates/provider/testing/dummy_node", + ) + .args(vec![ + "-flag1", + "--flag2", + "--option1=value1", + "-option2=value2", + "--option3 value3", + "-option4 value4", + ]) + .env(vec![ + ("MY_VAR_1", "MY_VALUE_1"), + ("MY_VAR_2", "MY_VALUE_2"), + ("MY_VAR_3", "MY_VALUE_3"), + ]) + .injected_files(vec![ + TransferedFile::new("/file1", "/cfg/file1"), + TransferedFile::new("/file2", "/data/file2"), + ]), + ) + .await + .unwrap(); - // node.restart(Some(Duration::from_secs(10))).await.unwrap(); + // ensure node directories are created + assert!(fs + .files + .read() + .await + 
.contains_key(node.base_dir().as_os_str())); + assert!(fs + .files + .read() + .await + .contains_key(node.config_dir().as_os_str())); + assert!(fs + .files + .read() + .await + .contains_key(node.data_dir().as_os_str())); + assert!(fs + .files + .read() + .await + .contains_key(node.scripts_dir().as_os_str())); - // sleep(Duration::from_secs(10)).await; + // ensure injected files are presents + assert!(matches!( + fs.files + .read() + .await + .get( + &OsString::from_str(&format!("{}/file1", node.config_dir().to_string_lossy())) + .unwrap() + ) + .unwrap(), + InMemoryFile::File { contents, .. } if contents == "My file 1".as_bytes() + )); + assert!(matches!( + fs.files + .read() + .await + .get( + &OsString::from_str(&format!("{}/file2", node.data_dir().to_string_lossy())) + .unwrap() + ) + .unwrap(), + InMemoryFile::File { contents, .. } if contents == "My file 2".as_bytes() + )); + + // retrieve running process + let processes = procfs::process::all_processes() + .unwrap() + .filter_map(|process| { + if let Ok(process) = process { + process + .cmdline() + .iter() + .any(|args| args.iter().any(|arg| arg.contains("dummy_node"))) + .then(|| process) + } else { + None + } + }) + .collect::>(); + assert_eq!(processes.len(), 1); + let node_process = processes.first().unwrap(); + + // ensure process has correct state + assert!(matches!( + node_process.stat().unwrap().state().unwrap(), + // pocess can be running or sleeping because we sleep between echo calls + procfs::process::ProcState::Running | procfs::process::ProcState::Sleeping + )); + + // ensure process is passed correct args + let node_args = node_process.cmdline().unwrap(); + assert!(node_args.contains(&"-flag1".to_string())); + assert!(node_args.contains(&"--flag2".to_string())); + assert!(node_args.contains(&"--option1=value1".to_string())); + assert!(node_args.contains(&"-option2=value2".to_string())); + assert!(node_args.contains(&"--option3 value3".to_string())); + assert!(node_args.contains(&"-option4 value4".to_string())); + + // ensure process has correct environment + let node_env = node_process.environ().unwrap(); + assert_eq!( + node_env + .get(&OsString::from_str("MY_VAR_1").unwrap()) + .unwrap(), + "MY_VALUE_1" + ); + assert_eq!( + node_env + .get(&OsString::from_str("MY_VAR_2").unwrap()) + .unwrap(), + "MY_VALUE_2" + ); + assert_eq!( + node_env + .get(&OsString::from_str("MY_VAR_3").unwrap()) + .unwrap(), + "MY_VALUE_3" + ); + + // ensure log file is created and logs are written 0.5s after process start + sleep(Duration::from_millis(500)).await; + assert!(matches!( + fs.files + .read() + .await + .get(node.log_path().as_os_str()) + .unwrap(), + InMemoryFile::File { contents, .. } if contents == "Line 1\n".as_bytes() + )); + + // ensure logs are updated when process continue running 1.5 sec after process start + sleep(Duration::from_millis(1000)).await; + assert!(matches!( + fs.files + .read() + .await + .get(node.log_path().as_os_str()) + .unwrap(), + InMemoryFile::File { contents, .. } if contents == "Line 1\nLine 2\n".as_bytes() + )); + + // ensure logs are updated when process continue running 2.5 sec after process start + sleep(Duration::from_millis(1000)).await; + assert!(matches!( + fs.files + .read() + .await + .get(node.log_path().as_os_str()) + .unwrap(), + InMemoryFile::File { contents, .. 
} if contents == "Line 1\nLine 2\nLine 3\n".as_bytes() + )); + + // ensure node is present in namespace + assert_eq!(namespace.nodes().await.len(), 1); + assert!(namespace.nodes().await.get(node.name()).is_some()); } } - -// #[cfg(test)] -// mod tests { -// use std::{os::unix::process::ExitStatusExt, process::ExitStatus}; - -// use support::fs::mock::{MockError, MockFilesystem, Operation}; - -// use super::*; - -// #[test] -// fn new_native_provider() { -// let native_provider: NativeProvider = -// NativeProvider::new("something", "/tmp", MockFilesystem::new()); - -// assert_eq!(native_provider.namespace, "something"); -// assert_eq!(native_provider.tmp_dir, "/tmp"); -// assert_eq!(native_provider.command, "bash"); -// assert_eq!(native_provider.remote_dir, "/tmp/cfg"); -// assert_eq!(native_provider.data_dir, "/tmp/data"); -// } - -// #[tokio::test] -// async fn test_fielsystem_usage() { -// let mut native_provider: NativeProvider = -// NativeProvider::new("something", "/tmp", MockFilesystem::new()); - -// native_provider.create_namespace().await.unwrap(); - -// assert!(native_provider.filesystem.operations.len() == 1); - -// assert_eq!( -// native_provider.filesystem.operations[0], -// Operation::CreateDir { -// path: "/tmp/cfg".into(), -// } -// ); -// } - -// #[tokio::test] -// #[should_panic(expected = "FSError(OpError(\"create\"))")] -// async fn test_fielsystem_usage_fails() { -// let mut native_provider: NativeProvider = NativeProvider::new( -// "something", -// "/tmp", -// MockFilesystem::with_create_dir_error(MockError::OpError("create".into())), -// ); - -// native_provider.create_namespace().await.unwrap(); -// } - -// #[tokio::test] -// async fn test_get_node_ip() { -// let native_provider: NativeProvider = -// NativeProvider::new("something", "/tmp", MockFilesystem::new()); - -// assert_eq!( -// native_provider.get_node_ip("some").await.unwrap(), -// LOCALHOST -// ); -// } - -// #[tokio::test] -// async fn test_run_command_when_bash_is_removed() { -// let native_provider: NativeProvider = -// NativeProvider::new("something", "/tmp", MockFilesystem::new()); - -// let result: RunCommandResponse = native_provider -// .run_command( -// vec!["bash".into(), "ls".into()], -// NativeRunCommandOptions::default(), -// ) -// .await -// .unwrap(); - -// assert_eq!( -// result, -// RunCommandResponse { -// exit_code: ExitStatus::from_raw(0), -// std_out: "Cargo.toml\nsrc\n".into(), -// std_err: None, -// } -// ); -// } - -// #[tokio::test] -// async fn test_run_command_when_dash_c_is_provided() { -// let native_provider = NativeProvider::new("something", "/tmp", MockFilesystem::new()); - -// let result = native_provider.run_command( -// vec!["-c".into(), "ls".into()], -// NativeRunCommandOptions::default(), -// ); - -// let a = result.await; -// assert!(a.is_ok()); -// } - -// #[tokio::test] -// async fn test_run_command_when_error_return_error() { -// let native_provider = NativeProvider::new("something", "/tmp", MockFilesystem::new()); - -// let mut some = native_provider.run_command( -// vec!["ls".into(), "ls".into()], -// NativeRunCommandOptions::default(), -// ); - -// assert!(some.await.is_err()); - -// some = native_provider.run_command( -// vec!["ls".into(), "ls".into()], -// NativeRunCommandOptions { -// is_failure_allowed: true, -// }, -// ); - -// assert!(some.await.is_ok()); -// } -// } From 127c1c0013c3adf91afbd124c01fe349bfffc265 Mon Sep 17 00:00:00 2001 From: l0r1s Date: Tue, 19 Sep 2023 19:38:50 +0300 Subject: [PATCH 46/69] feat: updated provider/testing/dummy_node with 
infinite loop and incremental line number ouput with random sleeping --- crates/provider/testing/dummy_node | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/crates/provider/testing/dummy_node b/crates/provider/testing/dummy_node index b8492b87a..4a6e0cdf1 100755 --- a/crates/provider/testing/dummy_node +++ b/crates/provider/testing/dummy_node @@ -1,7 +1,11 @@ -#!/bin/sh +#!/bin/bash -echo "Line 1" -sleep 1 -echo "Line 2" -sleep 1 -echo "Line 3" \ No newline at end of file +i=0 + +# infinite loop to simulate long-running process with fake output +while :; do + echo "Line $i" + i=$((i+1)) + # sleep randomly between 1 and 3 (included) seconds + sleep $((RANDOM % 3 + 1)) +done \ No newline at end of file From bbd9819b7b8e988923922f6e445e2d2cbe893dc9 Mon Sep 17 00:00:00 2001 From: l0r1s Date: Tue, 19 Sep 2023 19:39:58 +0300 Subject: [PATCH 47/69] feat: added helpers to InMemoryFileSystem to avoid having to use matches! to check contents/mode --- crates/support/src/fs/in_memory.rs | 46 +++++++++++++++++------------- 1 file changed, 26 insertions(+), 20 deletions(-) diff --git a/crates/support/src/fs/in_memory.rs b/crates/support/src/fs/in_memory.rs index aa5c6a0e8..9b111894d 100644 --- a/crates/support/src/fs/in_memory.rs +++ b/crates/support/src/fs/in_memory.rs @@ -37,6 +37,27 @@ impl InMemoryFile { pub fn dir() -> Self { Self::Directory { mode: 0o775 } } + + pub fn mode(&self) -> u32 { + match self { + &Self::File { mode, .. } => mode, + &Self::Directory { mode, .. } => mode, + } + } + + pub fn contents_raw(&self) -> Option> { + match self { + Self::File { contents, .. } => Some(contents.to_vec()), + Self::Directory { .. } => None, + } + } + + pub fn contents(&self) -> Option { + match self { + Self::File { contents, .. } => Some(String::from_utf8_lossy(contents).to_string()), + Self::Directory { .. 
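// Illustrative sketch (not from these patches): with these helpers the tests
// can replace matches!-based checks on InMemoryFile with plain equality,
// roughly:
//
//     let files = fs.files.read().await;
//     assert_eq!(files.get(path).unwrap().contents().unwrap(), "My file 1");
//     assert_eq!(files.get(path).unwrap().mode(), 0o664);
//
// where `path` and the expected values are placeholders only.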
} => None, + } + } } #[derive(Default, Debug, Clone)] @@ -248,10 +269,7 @@ mod tests { async fn create_dir_should_return_an_error_if_file_already_exists() { let fs = InMemoryFileSystem::new(HashMap::from([ (OsString::from_str("/").unwrap(), InMemoryFile::dir()), - ( - OsString::from_str("/dir").unwrap(), - InMemoryFile::empty(), - ), + (OsString::from_str("/dir").unwrap(), InMemoryFile::empty()), ])); let err = fs.create_dir("/dir").await.unwrap_err(); @@ -303,10 +321,7 @@ mod tests { async fn create_dir_should_return_an_error_if_some_ancestor_is_not_a_directory() { let fs = InMemoryFileSystem::new(HashMap::from([ (OsString::from_str("/").unwrap(), InMemoryFile::dir()), - ( - OsString::from_str("/path").unwrap(), - InMemoryFile::empty(), - ), + (OsString::from_str("/path").unwrap(), InMemoryFile::empty()), (OsString::from_str("/path/to").unwrap(), InMemoryFile::dir()), ( OsString::from_str("/path/to/my").unwrap(), @@ -398,10 +413,7 @@ mod tests { async fn create_dir_all_should_return_an_error_if_some_ancestor_is_not_a_directory() { let fs = InMemoryFileSystem::new(HashMap::from([ (OsString::from_str("/").unwrap(), InMemoryFile::dir()), - ( - OsString::from_str("/path").unwrap(), - InMemoryFile::empty(), - ), + (OsString::from_str("/path").unwrap(), InMemoryFile::empty()), (OsString::from_str("/path/to").unwrap(), InMemoryFile::dir()), ])); @@ -566,10 +578,7 @@ mod tests { async fn write_should_return_an_error_if_file_is_new_and_some_ancestor_is_not_a_directory() { let fs = InMemoryFileSystem::new(HashMap::from([ (OsString::from_str("/").unwrap(), InMemoryFile::dir()), - ( - OsString::from_str("/path").unwrap(), - InMemoryFile::empty(), - ), + (OsString::from_str("/path").unwrap(), InMemoryFile::empty()), (OsString::from_str("/path/to").unwrap(), InMemoryFile::dir()), ])); @@ -657,10 +666,7 @@ mod tests { async fn append_should_return_an_error_if_file_is_new_and_some_ancestor_is_not_a_directory() { let fs = InMemoryFileSystem::new(HashMap::from([ (OsString::from_str("/").unwrap(), InMemoryFile::dir()), - ( - OsString::from_str("/path").unwrap(), - InMemoryFile::empty(), - ), + (OsString::from_str("/path").unwrap(), InMemoryFile::empty()), (OsString::from_str("/path/to").unwrap(), InMemoryFile::dir()), ])); From 0b10aa9beeacb1b0c9dd47a9baa144122ba9ceed Mon Sep 17 00:00:00 2001 From: l0r1s Date: Tue, 19 Sep 2023 19:42:48 +0300 Subject: [PATCH 48/69] feat: fixed output path for generate file commands and fixed missing .envs with options.envs in run_command for node --- crates/provider/src/native.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crates/provider/src/native.rs b/crates/provider/src/native.rs index 9da7500b3..1d7729a24 100644 --- a/crates/provider/src/native.rs +++ b/crates/provider/src/native.rs @@ -250,7 +250,7 @@ impl ProviderNamespace for Nativ .filesystem .write( format!( - "{}/{}", + "{}{}", self.base_dir.to_string_lossy(), local_output_path.to_string_lossy() ), @@ -359,6 +359,7 @@ impl ProviderNode for NativeNode ) -> Result { let result = Command::new(options.command) .args(options.args) + .envs(options.env) .output() .await .map_err(|err| ProviderError::RunCommandError(err.into()))?; From 69258dd379b648f76accb58fb4df0815e5c3dff8 Mon Sep 17 00:00:00 2001 From: l0r1s Date: Tue, 19 Sep 2023 19:46:51 +0300 Subject: [PATCH 49/69] feat: added namespace generate_files and destroy happy path test, fixed tests --- crates/provider/src/native.rs | 171 ++++++++++++++++++++++++++++++---- 1 file changed, 152 insertions(+), 19 deletions(-) diff --git 
a/crates/provider/src/native.rs b/crates/provider/src/native.rs index 1d7729a24..7434cfbe0 100644 --- a/crates/provider/src/native.rs +++ b/crates/provider/src/native.rs @@ -636,12 +636,17 @@ mod tests { let namespace = provider.create_namespace().await.unwrap(); + // ensure namespace directory is created assert!(fs .files .read() .await .contains_key(namespace.base_dir().as_os_str())); + + // ensure namespace is added to provider namespaces assert_eq!(provider.namespaces().await.len(), 1); + + // ensure the only provider namespace is the same one as the one we just created assert!(provider.namespaces().await.get(namespace.id()).is_some()); } @@ -794,13 +799,15 @@ mod tests { } }) .collect::>(); + + // ensure only one dummy process exists assert_eq!(processes.len(), 1); let node_process = processes.first().unwrap(); // ensure process has correct state assert!(matches!( node_process.stat().unwrap().state().unwrap(), - // pocess can be running or sleeping because we sleep between echo calls + // process can be running or sleeping because we sleep between echo calls procfs::process::ProcState::Running | procfs::process::ProcState::Sleeping )); @@ -834,41 +841,167 @@ mod tests { "MY_VALUE_3" ); - // ensure log file is created and logs are written 0.5s after process start - sleep(Duration::from_millis(500)).await; - assert!(matches!( + // ensure log file is created and logs are written 5s after process start + sleep(Duration::from_secs(5)).await; + assert!( fs.files .read() .await .get(node.log_path().as_os_str()) - .unwrap(), - InMemoryFile::File { contents, .. } if contents == "Line 1\n".as_bytes() - )); + .unwrap() + .contents() + .unwrap() + .lines() + .count() + >= 2 + ); - // ensure logs are updated when process continue running 1.5 sec after process start - sleep(Duration::from_millis(1000)).await; - assert!(matches!( + // ensure logs are updated when process continue running 10s after process start + sleep(Duration::from_secs(5)).await; + assert!( fs.files .read() .await .get(node.log_path().as_os_str()) - .unwrap(), - InMemoryFile::File { contents, .. } if contents == "Line 1\nLine 2\n".as_bytes() - )); + .unwrap() + .contents() + .unwrap() + .lines() + .count() + >= 4 + ); - // ensure logs are updated when process continue running 2.5 sec after process start - sleep(Duration::from_millis(1000)).await; - assert!(matches!( + // ensure logs are updated when process continue running 15s after process start + sleep(Duration::from_secs(5)).await; + assert!( fs.files .read() .await .get(node.log_path().as_os_str()) - .unwrap(), - InMemoryFile::File { contents, .. 
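// Note (not from these patches): dummy_node now loops forever and sleeps a
// random 1-3s between lines, so the assertions below switch from exact log
// contents to a minimum line count, along the lines of:
//
//     let log = fs.files.read().await
//         .get(node.log_path().as_os_str()).unwrap()
//         .contents().unwrap();
//     assert!(log.lines().count() >= 2);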
} if contents == "Line 1\nLine 2\nLine 3\n".as_bytes() - )); + .unwrap() + .contents() + .unwrap() + .lines() + .count() + >= 6 + ); // ensure node is present in namespace assert_eq!(namespace.nodes().await.len(), 1); assert!(namespace.nodes().await.get(node.name()).is_some()); } + + #[tokio::test] + async fn namespace_generate_files_method_should_create_files_at_the_correct_locations_using_given_commands( + ) { + let fs = InMemoryFileSystem::new(HashMap::from([ + (OsString::from_str("/").unwrap(), InMemoryFile::dir()), + (OsString::from_str("/tmp").unwrap(), InMemoryFile::dir()), + ])); + let provider = NativeProvider::new(fs.clone()); + let namespace = provider.create_namespace().await.unwrap(); + + namespace + .generate_files(GenerateFilesOptions::new(vec![ + GenerateFileCommand::new("echo", "/myfile1").args(vec!["My file 1"]), + GenerateFileCommand::new("sh", "/myfile2") + .args(vec!["-c", "echo -n $MY_CONTENT"]) + .env(vec![("MY_CONTENT", "My file 2")]), + ])) + .await + .unwrap(); + + // ensure files have been generated correctly to right location + assert_eq!( + fs.files + .read() + .await + .get( + &OsString::from_str(&format!( + "{}/myfile1", + namespace.base_dir().to_string_lossy() + )) + .unwrap() + ) + .unwrap() + .contents() + .unwrap(), + "My file 1\n" + ); + assert_eq!( + fs.files + .read() + .await + .get( + &OsString::from_str(&format!( + "{}/myfile2", + namespace.base_dir().to_string_lossy() + )) + .unwrap() + ) + .unwrap() + .contents() + .unwrap(), + "My file 2" + ); + + // ensure temporary node has been destroyed + assert_eq!(namespace.nodes().await.len(), 0); + } + + #[tokio::test] + async fn namespace_destroy_should_destroy_all_namespace_nodes_and_namespace_itself() { + let fs = InMemoryFileSystem::new(HashMap::from([ + (OsString::from_str("/").unwrap(), InMemoryFile::dir()), + (OsString::from_str("/tmp").unwrap(), InMemoryFile::dir()), + ])); + let provider = NativeProvider::new(fs.clone()); + let namespace = provider.create_namespace().await.unwrap(); + + // spawn 2 dummy nodes to populate namespace + namespace + .spawn_node(SpawnNodeOptions::new( + "mynode1", + "/home/user/Work/parity/zombienet-sdk/crates/provider/testing/dummy_node", + )) + .await + .unwrap(); + namespace + .spawn_node(SpawnNodeOptions::new( + "mynode2", + "/home/user/Work/parity/zombienet-sdk/crates/provider/testing/dummy_node", + )) + .await + .unwrap(); + + // ensure nodes are presents + assert_eq!(namespace.nodes().await.len(), 2); + + namespace.destroy().await.unwrap(); + + // ensure nodes are destroyed + assert_eq!(namespace.nodes().await.len(), 0); + + // retrieve running process + let processes = procfs::process::all_processes() + .unwrap() + .filter_map(|process| { + if let Ok(process) = process { + process + .cmdline() + .iter() + .any(|args| args.iter().any(|arg| arg.contains("dummy_node"))) + .then(|| process) + } else { + None + } + }) + .collect::>(); + + // ensure no running process exists + assert_eq!(processes.len(), 0); + + // ensure namespace is destroyed + assert_eq!(provider.namespaces().await.len(), 0); + } } From 171050a0170c394c4be29d71b0878d449e01265b Mon Sep 17 00:00:00 2001 From: l0r1s Date: Tue, 19 Sep 2023 20:43:57 +0300 Subject: [PATCH 50/69] feat: updated ProviderError::RunCommandError to add command in message --- crates/provider/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/provider/src/lib.rs b/crates/provider/src/lib.rs index 3646ef9ce..6047a117b 100644 --- a/crates/provider/src/lib.rs +++ b/crates/provider/src/lib.rs @@ 
-18,8 +18,8 @@ pub enum ProviderError { #[error("Failed to spawn node '{0}': {1}")] NodeSpawningFailed(String, anyhow::Error), - #[error("Error running command: {0}")] - RunCommandError(anyhow::Error), + #[error("Error running command '{0}': {1}")] + RunCommandError(String, anyhow::Error), #[error("Duplicated node name: {0}")] DuplicatedNodeName(String), From a2a44376eba0aecb53d97b6eb3cc10debfd0801c Mon Sep 17 00:00:00 2001 From: l0r1s Date: Tue, 19 Sep 2023 20:44:43 +0300 Subject: [PATCH 51/69] feat: added tests for NativeNode logs method, dump_logs and run_command --- crates/provider/src/native.rs | 239 ++++++++++++++++++++++++++-------- 1 file changed, 188 insertions(+), 51 deletions(-) diff --git a/crates/provider/src/native.rs b/crates/provider/src/native.rs index 7434cfbe0..dd3f091c4 100644 --- a/crates/provider/src/native.rs +++ b/crates/provider/src/native.rs @@ -349,20 +349,19 @@ impl ProviderNode for NativeNode } async fn dump_logs(&self, local_dest: PathBuf) -> Result<(), ProviderError> { - let logs = self.logs().await?; - Ok(self.filesystem.write(local_dest, logs.as_bytes()).await?) + Ok(self.filesystem.copy(&self.log_path, local_dest).await?) } async fn run_command( &self, options: RunCommandOptions, ) -> Result { - let result = Command::new(options.command) + let result = Command::new(options.command.clone()) .args(options.args) .envs(options.env) .output() .await - .map_err(|err| ProviderError::RunCommandError(err.into()))?; + .map_err(|err| ProviderError::RunCommandError(options.command, err.into()))?; if result.status.success() { Ok(Ok(String::from_utf8_lossy(&result.stdout).to_string())) @@ -738,51 +737,37 @@ mod tests { .await .unwrap(); + let files = fs.files.read().await; + // ensure node directories are created - assert!(fs - .files - .read() - .await - .contains_key(node.base_dir().as_os_str())); - assert!(fs - .files - .read() - .await - .contains_key(node.config_dir().as_os_str())); - assert!(fs - .files - .read() - .await - .contains_key(node.data_dir().as_os_str())); - assert!(fs - .files - .read() - .await - .contains_key(node.scripts_dir().as_os_str())); + assert!(files.contains_key(node.base_dir().as_os_str())); + assert!(files.contains_key(node.config_dir().as_os_str())); + assert!(files.contains_key(node.data_dir().as_os_str())); + assert!(files.contains_key(node.scripts_dir().as_os_str())); // ensure injected files are presents - assert!(matches!( - fs.files - .read() - .await + assert_eq!( + files .get( &OsString::from_str(&format!("{}/file1", node.config_dir().to_string_lossy())) .unwrap() ) + .unwrap() + .contents() .unwrap(), - InMemoryFile::File { contents, .. } if contents == "My file 1".as_bytes() - )); - assert!(matches!( - fs.files - .read() - .await + "My file 1" + ); + assert_eq!( + files .get( &OsString::from_str(&format!("{}/file2", node.data_dir().to_string_lossy())) .unwrap() ) + .unwrap() + .contents() .unwrap(), - InMemoryFile::File { contents, .. 
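// Note (not from these patches): with the command name carried in
// RunCommandError, a failed spawn surfaces as, for example:
//
//     Error running command 'myrandomprogram': No such file or directory (os error 2)
//
// which is the exact string the run_command error-path test further below
// asserts against.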
} if contents == "My file 2".as_bytes() - )); + "My file 2" + ); // retrieve running process let processes = procfs::process::all_processes() @@ -844,9 +829,7 @@ mod tests { // ensure log file is created and logs are written 5s after process start sleep(Duration::from_secs(5)).await; assert!( - fs.files - .read() - .await + files .get(node.log_path().as_os_str()) .unwrap() .contents() @@ -859,9 +842,7 @@ mod tests { // ensure logs are updated when process continue running 10s after process start sleep(Duration::from_secs(5)).await; assert!( - fs.files - .read() - .await + files .get(node.log_path().as_os_str()) .unwrap() .contents() @@ -874,9 +855,7 @@ mod tests { // ensure logs are updated when process continue running 15s after process start sleep(Duration::from_secs(5)).await; assert!( - fs.files - .read() - .await + files .get(node.log_path().as_os_str()) .unwrap() .contents() @@ -911,11 +890,11 @@ mod tests { .await .unwrap(); + let files = fs.files.read().await; + // ensure files have been generated correctly to right location assert_eq!( - fs.files - .read() - .await + files .get( &OsString::from_str(&format!( "{}/myfile1", @@ -929,9 +908,7 @@ mod tests { "My file 1\n" ); assert_eq!( - fs.files - .read() - .await + files .get( &OsString::from_str(&format!( "{}/myfile2", @@ -1004,4 +981,164 @@ mod tests { // ensure namespace is destroyed assert_eq!(provider.namespaces().await.len(), 0); } + + #[tokio::test] + async fn node_logs_method_should_return_its_logs_as_a_string() { + let fs = InMemoryFileSystem::new(HashMap::from([ + (OsString::from_str("/").unwrap(), InMemoryFile::dir()), + (OsString::from_str("/tmp").unwrap(), InMemoryFile::dir()), + ])); + let provider = NativeProvider::new(fs.clone()); + let namespace = provider.create_namespace().await.unwrap(); + + // spawn dummy node + let node = namespace + .spawn_node(SpawnNodeOptions::new( + "mynode", + "/home/user/Work/parity/zombienet-sdk/crates/provider/testing/dummy_node", + )) + .await + .unwrap(); + + // wait some time for node to write logs + sleep(Duration::from_secs(5)).await; + + assert_eq!( + fs.files + .read() + .await + .get(node.log_path().as_os_str()) + .unwrap() + .contents() + .unwrap(), + node.logs().await.unwrap() + ); + } + + #[tokio::test] + async fn node_dump_logs_method_should_writes_its_logs_to_a_given_destination() { + let fs = InMemoryFileSystem::new(HashMap::from([ + (OsString::from_str("/").unwrap(), InMemoryFile::dir()), + (OsString::from_str("/tmp").unwrap(), InMemoryFile::dir()), + ])); + let provider = NativeProvider::new(fs.clone()); + let namespace = provider.create_namespace().await.unwrap(); + + // spawn dummy node + let node = namespace + .spawn_node(SpawnNodeOptions::new( + "mynode", + "/home/user/Work/parity/zombienet-sdk/crates/provider/testing/dummy_node", + )) + .await + .unwrap(); + + // wait some time for node to write logs + sleep(Duration::from_secs(5)).await; + + node.dump_logs(PathBuf::from("/tmp/my_log_file")) + .await + .unwrap(); + + let files = fs.files.read().await; + + assert_eq!( + files + .get(node.log_path().as_os_str()) + .unwrap() + .contents() + .unwrap(), + files + .get(&OsString::from_str("/tmp/my_log_file").unwrap()) + .unwrap() + .contents() + .unwrap(), + ); + } + + #[tokio::test] + async fn node_run_command_method_should_execute_the_command_successfully_and_returns_stdout() { + let fs = InMemoryFileSystem::new(HashMap::from([ + (OsString::from_str("/").unwrap(), InMemoryFile::dir()), + (OsString::from_str("/tmp").unwrap(), InMemoryFile::dir()), + ])); + let provider = 
NativeProvider::new(fs.clone()); + let namespace = provider.create_namespace().await.unwrap(); + + // spawn dummy node + let node = namespace + .spawn_node(SpawnNodeOptions::new( + "mynode", + "/home/user/Work/parity/zombienet-sdk/crates/provider/testing/dummy_node", + )) + .await + .unwrap(); + + let result = node + .run_command( + RunCommandOptions::new("sh") + .args(vec!["-c", "echo $MY_ENV_VAR"]) + .env(vec![("MY_ENV_VAR", "Here is my content")]), + ) + .await; + + assert!(matches!(result, Ok(Ok(stdout)) if stdout == "Here is my content\n")); + } + + #[tokio::test] + async fn node_run_command_method_should_execute_the_command_successfully_and_returns_error_code_and_stderr_if_an_error_happened( + ) { + let fs = InMemoryFileSystem::new(HashMap::from([ + (OsString::from_str("/").unwrap(), InMemoryFile::dir()), + (OsString::from_str("/tmp").unwrap(), InMemoryFile::dir()), + ])); + let provider = NativeProvider::new(fs.clone()); + let namespace = provider.create_namespace().await.unwrap(); + + // spawn dummy node + let node = namespace + .spawn_node(SpawnNodeOptions::new( + "mynode", + "/home/user/Work/parity/zombienet-sdk/crates/provider/testing/dummy_node", + )) + .await + .unwrap(); + + let result = node + .run_command(RunCommandOptions::new("sh").args(vec!["-fakeargs"])) + .await; + + assert!( + matches!(result, Ok(Err((exit_code, stderr))) if !exit_code.success() && stderr == "sh: 0: Illegal option -k\n") + ); + } + + #[tokio::test] + async fn node_run_command_method_should_fail_to_execute_the_command_if_command_doesnt_exists() { + let fs = InMemoryFileSystem::new(HashMap::from([ + (OsString::from_str("/").unwrap(), InMemoryFile::dir()), + (OsString::from_str("/tmp").unwrap(), InMemoryFile::dir()), + ])); + let provider = NativeProvider::new(fs.clone()); + let namespace = provider.create_namespace().await.unwrap(); + + // spawn dummy node + let node = namespace + .spawn_node(SpawnNodeOptions::new( + "mynode", + "/home/user/Work/parity/zombienet-sdk/crates/provider/testing/dummy_node", + )) + .await + .unwrap(); + + let err = node + .run_command(RunCommandOptions::new("myrandomprogram")) + .await + .unwrap_err(); + + assert_eq!( + err.to_string(), + "Error running command 'myrandomprogram': No such file or directory (os error 2)" + ); + } } From bbb24f2aba37c99a21b203d1a735c676ebac8e5f Mon Sep 17 00:00:00 2001 From: l0r1s Date: Thu, 21 Sep 2023 02:40:26 +0300 Subject: [PATCH 52/69] feat: fixed datarace in tests by keeping a lock on fs.files, added better log writting testing with timeout loop tasks --- crates/provider/src/native.rs | 175 ++++++++++++++++++++++++---------- 1 file changed, 125 insertions(+), 50 deletions(-) diff --git a/crates/provider/src/native.rs b/crates/provider/src/native.rs index dd3f091c4..6c2a98f13 100644 --- a/crates/provider/src/native.rs +++ b/crates/provider/src/native.rs @@ -605,6 +605,7 @@ mod tests { use procfs::process::Process; use support::fs::in_memory::{InMemoryFile, InMemoryFileSystem}; + use tokio::time::timeout; use crate::shared::types::TransferedFile; @@ -737,17 +738,33 @@ mod tests { .await .unwrap(); - let files = fs.files.read().await; - // ensure node directories are created - assert!(files.contains_key(node.base_dir().as_os_str())); - assert!(files.contains_key(node.config_dir().as_os_str())); - assert!(files.contains_key(node.data_dir().as_os_str())); - assert!(files.contains_key(node.scripts_dir().as_os_str())); + assert!(fs + .files + .read() + .await + .contains_key(node.base_dir().as_os_str())); + assert!(fs + .files + .read() + .await 
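// Illustrative sketch (not from these patches): the fixed sleeps around log
// checks are replaced by a poll-until-timeout pattern, roughly:
//
//     timeout(Duration::from_secs(10), async {
//         loop {
//             sleep(Duration::from_millis(200)).await;
//             if log_line_count(&fs, &node).await >= 2 { return; }
//         }
//     }).await.unwrap();
//
// `log_line_count` is a hypothetical helper standing in for the inline
// fs.files.read() chain used in the actual tests.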
+ .contains_key(node.config_dir().as_os_str())); + assert!(fs + .files + .read() + .await + .contains_key(node.data_dir().as_os_str())); + assert!(fs + .files + .read() + .await + .contains_key(node.scripts_dir().as_os_str())); // ensure injected files are presents assert_eq!( - files + fs.files + .read() + .await .get( &OsString::from_str(&format!("{}/file1", node.config_dir().to_string_lossy())) .unwrap() @@ -758,7 +775,9 @@ mod tests { "My file 1" ); assert_eq!( - files + fs.files + .read() + .await .get( &OsString::from_str(&format!("{}/file2", node.data_dir().to_string_lossy())) .unwrap() @@ -826,44 +845,56 @@ mod tests { "MY_VALUE_3" ); - // ensure log file is created and logs are written 5s after process start - sleep(Duration::from_secs(5)).await; - assert!( - files - .get(node.log_path().as_os_str()) - .unwrap() - .contents() - .unwrap() - .lines() - .count() - >= 2 - ); - - // ensure logs are updated when process continue running 10s after process start - sleep(Duration::from_secs(5)).await; - assert!( - files - .get(node.log_path().as_os_str()) - .unwrap() - .contents() - .unwrap() - .lines() - .count() - >= 4 - ); - - // ensure logs are updated when process continue running 15s after process start - sleep(Duration::from_secs(5)).await; - assert!( - files - .get(node.log_path().as_os_str()) - .unwrap() - .contents() - .unwrap() - .lines() - .count() - >= 6 - ); + // ensure log file is created and logs are written and at least 2 lines + timeout(Duration::from_secs(10), async { + loop { + sleep(Duration::from_millis(200)).await; + + if let Some(file) = fs.files.read().await.get(node.log_path().as_os_str()) { + if let Some(contents) = file.contents() { + if contents.lines().count() >= 2 { + return; + } + } + } + } + }) + .await + .unwrap(); + + // ensure logs contains at least 4 lines when node continue running + timeout(Duration::from_secs(10), async { + loop { + sleep(Duration::from_millis(200)).await; + + if let Some(file) = fs.files.read().await.get(node.log_path().as_os_str()) { + if let Some(contents) = file.contents() { + if contents.lines().count() >= 4 { + return; + } + } + } + } + }) + .await + .unwrap(); + + // ensure logs contains at least 6 lines when node continue running + timeout(Duration::from_secs(10), async { + loop { + sleep(Duration::from_millis(200)).await; + + if let Some(file) = fs.files.read().await.get(node.log_path().as_os_str()) { + if let Some(contents) = file.contents() { + if contents.lines().count() >= 6 { + return; + } + } + } + } + }) + .await + .unwrap(); // ensure node is present in namespace assert_eq!(namespace.nodes().await.len(), 1); @@ -890,11 +921,11 @@ mod tests { .await .unwrap(); - let files = fs.files.read().await; - // ensure files have been generated correctly to right location assert_eq!( - files + fs.files + .read() + .await .get( &OsString::from_str(&format!( "{}/myfile1", @@ -908,7 +939,9 @@ mod tests { "My file 1\n" ); assert_eq!( - files + fs.files + .read() + .await .get( &OsString::from_str(&format!( "{}/myfile2", @@ -1141,4 +1174,46 @@ mod tests { "Error running command 'myrandomprogram': No such file or directory (os error 2)" ); } + + // #[tokio::test] + // async fn node_run_script_method_should_execute_the_script_successfully_and_returns_stdout() { + // // we need to mirror the script between local fs and in memory fs else + // // the tokio::process::Command won't be able to execute it + // fs::copy( + // "/home/user/Work/parity/zombienet-sdk/crates/provider/testing/dummy_script", + // "/tmp/dummy_script", + // ) + // 
.unwrap(); + + // let fs = InMemoryFileSystem::new(HashMap::from([ + // (OsString::from_str("/").unwrap(), InMemoryFile::dir()), + // (OsString::from_str("/tmp").unwrap(), InMemoryFile::dir()), + // ( + // OsString::from_str("/tmp/dummy_script").unwrap(), + // InMemoryFile::file(fs::read_to_string("/tmp/dummy_script").unwrap()), + // ), + // ])); + // let provider = NativeProvider::new(fs.clone()); + // let namespace = provider.create_namespace().await.unwrap(); + + // // spawn dummy node + // let node = namespace + // .spawn_node(SpawnNodeOptions::new( + // "mynode", + // "/home/user/Work/parity/zombienet-sdk/crates/provider/testing/dummy_node", + // )) + // .await + // .unwrap(); + + // let result = node + // .run_script( + // RunScriptOptions::new("/tmp/dummy_script") + // .args(vec!["-c"]) + // .env(vec![("MY_ENV_VAR", "Here is my content")]), + // ) + // .await; + + // println!("{:?}", result); + // // assert!(matches!(result, Ok(Ok(stdout)) if stdout == "Here is my content\n")); + // } } From cd8bdfb064c609322aa29f1d91fa77ac5a7e7221 Mon Sep 17 00:00:00 2001 From: l0r1s Date: Thu, 21 Sep 2023 03:20:03 +0300 Subject: [PATCH 53/69] feat: added happy path testing for node copy_file_from_node, pause and resume methods --- crates/provider/src/native.rs | 244 ++++++++++++++++++++++++++++------ 1 file changed, 206 insertions(+), 38 deletions(-) diff --git a/crates/provider/src/native.rs b/crates/provider/src/native.rs index 6c2a98f13..9ff10e641 100644 --- a/crates/provider/src/native.rs +++ b/crates/provider/src/native.rs @@ -421,7 +421,7 @@ impl ProviderNode for NativeNode local_dest: PathBuf, ) -> Result<(), ProviderError> { let remote_file_path = format!( - "{}/{}", + "{}{}", self.base_dir.to_string_lossy(), remote_src.to_string_lossy() ); @@ -845,49 +845,21 @@ mod tests { "MY_VALUE_3" ); - // ensure log file is created and logs are written and at least 2 lines - timeout(Duration::from_secs(10), async { - loop { - sleep(Duration::from_millis(200)).await; - - if let Some(file) = fs.files.read().await.get(node.log_path().as_os_str()) { - if let Some(contents) = file.contents() { - if contents.lines().count() >= 2 { - return; - } - } - } - } - }) - .await - .unwrap(); - - // ensure logs contains at least 4 lines when node continue running - timeout(Duration::from_secs(10), async { - loop { - sleep(Duration::from_millis(200)).await; - - if let Some(file) = fs.files.read().await.get(node.log_path().as_os_str()) { - if let Some(contents) = file.contents() { - if contents.lines().count() >= 4 { - return; - } - } - } - } - }) - .await - .unwrap(); + // ensure log file is created and logs are written and keep being written for some time + timeout(Duration::from_secs(30), async { + let mut expected_logs_line_count = 2; - // ensure logs contains at least 6 lines when node continue running - timeout(Duration::from_secs(10), async { loop { sleep(Duration::from_millis(200)).await; if let Some(file) = fs.files.read().await.get(node.log_path().as_os_str()) { if let Some(contents) = file.contents() { - if contents.lines().count() >= 6 { - return; + if contents.lines().count() >= expected_logs_line_count { + if expected_logs_line_count >= 6 { + return; + } else { + expected_logs_line_count += 2; + } } } } @@ -1175,6 +1147,202 @@ mod tests { ); } + #[tokio::test] + async fn node_copy_file_from_node_method_should_copy_node_remote_file_to_local_path() { + let fs = InMemoryFileSystem::new(HashMap::from([ + (OsString::from_str("/").unwrap(), InMemoryFile::dir()), + (OsString::from_str("/tmp").unwrap(), 
InMemoryFile::dir()), + ])); + let provider = NativeProvider::new(fs.clone()); + let namespace = provider.create_namespace().await.unwrap(); + + // spawn dummy node + let node = namespace + .spawn_node(SpawnNodeOptions::new( + "mynode", + "/home/user/Work/parity/zombienet-sdk/crates/provider/testing/dummy_node", + )) + .await + .unwrap(); + + // wait 3s for node to start writing logs + sleep(Duration::from_secs(3)).await; + + node.copy_file_from_node( + PathBuf::from("/mynode.log"), + PathBuf::from("/nodelog.backup"), + ) + .await + .unwrap(); + + assert_eq!( + fs.files.read().await.get(node.log_path().as_os_str()), + fs.files + .read() + .await + .get(&OsString::from_str("/nodelog.backup").unwrap()) + ); + } + + #[tokio::test] + async fn node_pause_method_should_pause_the_node_process() { + let fs = InMemoryFileSystem::new(HashMap::from([ + (OsString::from_str("/").unwrap(), InMemoryFile::dir()), + (OsString::from_str("/tmp").unwrap(), InMemoryFile::dir()), + ])); + let provider = NativeProvider::new(fs.clone()); + let namespace = provider.create_namespace().await.unwrap(); + + // spawn dummy node + let node = namespace + .spawn_node(SpawnNodeOptions::new( + "mynode", + "/home/user/Work/parity/zombienet-sdk/crates/provider/testing/dummy_node", + )) + .await + .unwrap(); + + // wait 2s for node to spawn + sleep(Duration::from_secs(2)).await; + + // retrieve running process + let processes = procfs::process::all_processes() + .unwrap() + .filter_map(|process| { + if let Ok(process) = process { + process + .cmdline() + .iter() + .any(|args| args.iter().any(|arg| arg.contains("dummy_node"))) + .then(|| process) + } else { + None + } + }) + .collect::>(); + let node_process = processes.first().unwrap(); + + // ensure process has correct state pre-pause + assert!(matches!( + node_process.stat().unwrap().state().unwrap(), + // process can be running or sleeping because we sleep between echo calls + procfs::process::ProcState::Running | procfs::process::ProcState::Sleeping + )); + + node.pause().await.unwrap(); + + // wait node 1s to stop writing logs + sleep(Duration::from_secs(1)).await; + let logs = node.logs().await.unwrap(); + + // ensure process has been paused for 10sec and logs stopped writing + let _ = timeout(Duration::from_secs(10), async { + loop { + sleep(Duration::from_millis(200)).await; + + assert!(matches!( + node_process.stat().unwrap().state().unwrap(), + procfs::process::ProcState::Stopped + )); + assert_eq!(logs, node.logs().await.unwrap()); + } + }) + .await; + } + + #[tokio::test] + async fn node_resume_method_should_resume_the_paused_node_process() { + let fs = InMemoryFileSystem::new(HashMap::from([ + (OsString::from_str("/").unwrap(), InMemoryFile::dir()), + (OsString::from_str("/tmp").unwrap(), InMemoryFile::dir()), + ])); + let provider = NativeProvider::new(fs.clone()); + let namespace = provider.create_namespace().await.unwrap(); + + // spawn dummy node + let node = namespace + .spawn_node(SpawnNodeOptions::new( + "mynode", + "/home/user/Work/parity/zombienet-sdk/crates/provider/testing/dummy_node", + )) + .await + .unwrap(); + + // wait 2s for node to spawn + sleep(Duration::from_secs(2)).await; + + // retrieve running process + let processes = procfs::process::all_processes() + .unwrap() + .filter_map(|process| { + if let Ok(process) = process { + process + .cmdline() + .iter() + .any(|args| args.iter().any(|arg| arg.contains("dummy_node"))) + .then(|| process) + } else { + None + } + }) + .collect::>(); + let node_process = processes.first().unwrap(); + + 
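// Assumption (not stated in these patches): pause/resume on the native
// provider appear to rely on POSIX job-control signals via the nix crate
// ("signal" feature), roughly:
//
//     nix::sys::signal::kill(Pid::from_raw(pid), Signal::SIGSTOP)?; // pause
//     nix::sys::signal::kill(Pid::from_raw(pid), Signal::SIGCONT)?; // resume
//
// which is why these tests assert on the /proc state (Stopped vs
// Running/Sleeping) rather than on process output alone.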
node.pause().await.unwrap(); + + // ensure process has been paused for 5sec + let _ = timeout(Duration::from_secs(5), async { + loop { + sleep(Duration::from_millis(200)).await; + + assert!(matches!( + node_process.stat().unwrap().state().unwrap(), + procfs::process::ProcState::Stopped + )); + } + }) + .await; + + node.resume().await.unwrap(); + + // ensure process has been resumed for 10sec + let _ = timeout(Duration::from_secs(10), async { + loop { + sleep(Duration::from_millis(200)).await; + + assert!(matches!( + node_process.stat().unwrap().state().unwrap(), + // process can be running or sleeping because we sleep between echo calls + procfs::process::ProcState::Running | procfs::process::ProcState::Sleeping + )); + } + }) + .await; + + // ensure logs continue being written for some time + timeout(Duration::from_secs(30), async { + let mut expected_logs_line_count = 2; + + loop { + sleep(Duration::from_millis(200)).await; + + if let Some(file) = fs.files.read().await.get(node.log_path().as_os_str()) { + if let Some(contents) = file.contents() { + if contents.lines().count() >= expected_logs_line_count { + if expected_logs_line_count >= 6 { + return; + } else { + expected_logs_line_count += 2; + } + } + } + } + } + }) + .await + .unwrap(); + } + // #[tokio::test] // async fn node_run_script_method_should_execute_the_script_successfully_and_returns_stdout() { // // we need to mirror the script between local fs and in memory fs else From a186c8b4ad808af06fc4c6477bc34eecad7bb518 Mon Sep 17 00:00:00 2001 From: l0r1s Date: Thu, 21 Sep 2023 22:12:51 +0300 Subject: [PATCH 54/69] feat: added happy path test for node restart and destroy --- crates/provider/src/native.rs | 253 ++++++++++++++++++++++++++-------- 1 file changed, 197 insertions(+), 56 deletions(-) diff --git a/crates/provider/src/native.rs b/crates/provider/src/native.rs index 9ff10e641..9ee2ce5ec 100644 --- a/crates/provider/src/native.rs +++ b/crates/provider/src/native.rs @@ -789,20 +789,7 @@ mod tests { ); // retrieve running process - let processes = procfs::process::all_processes() - .unwrap() - .filter_map(|process| { - if let Ok(process) = process { - process - .cmdline() - .iter() - .any(|args| args.iter().any(|arg| arg.contains("dummy_node"))) - .then(|| process) - } else { - None - } - }) - .collect::>(); + let processes = get_processes_by_name("dummy_node").await; // ensure only one dummy process exists assert_eq!(processes.len(), 1); @@ -965,20 +952,7 @@ mod tests { assert_eq!(namespace.nodes().await.len(), 0); // retrieve running process - let processes = procfs::process::all_processes() - .unwrap() - .filter_map(|process| { - if let Ok(process) = process { - process - .cmdline() - .iter() - .any(|args| args.iter().any(|arg| arg.contains("dummy_node"))) - .then(|| process) - } else { - None - } - }) - .collect::>(); + let processes = get_processes_by_name("dummy_node").await; // ensure no running process exists assert_eq!(processes.len(), 0); @@ -1206,20 +1180,7 @@ mod tests { sleep(Duration::from_secs(2)).await; // retrieve running process - let processes = procfs::process::all_processes() - .unwrap() - .filter_map(|process| { - if let Ok(process) = process { - process - .cmdline() - .iter() - .any(|args| args.iter().any(|arg| arg.contains("dummy_node"))) - .then(|| process) - } else { - None - } - }) - .collect::>(); + let processes = get_processes_by_name("dummy_node").await; let node_process = processes.first().unwrap(); // ensure process has correct state pre-pause @@ -1272,20 +1233,8 @@ mod tests { 
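// Note (not from these patches): restart() is exercised below as "kill then
// respawn with the same command, args and env"; the test checks that the PID
// changes while cmdline/environ stay identical and that log writing resumes.
// Both call forms appearing in this series are:
//
//     node.restart(None).await?;
//     node.restart(Some(Duration::from_secs(10))).await?;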
sleep(Duration::from_secs(2)).await; // retrieve running process - let processes = procfs::process::all_processes() - .unwrap() - .filter_map(|process| { - if let Ok(process) = process { - process - .cmdline() - .iter() - .any(|args| args.iter().any(|arg| arg.contains("dummy_node"))) - .then(|| process) - } else { - None - } - }) - .collect::>(); + let processes = get_processes_by_name("dummy_node").await; + assert_eq!(processes.len(), 1); // needed to avoid test run in parallel and false results let node_process = processes.first().unwrap(); node.pause().await.unwrap(); @@ -1343,6 +1292,181 @@ mod tests { .unwrap(); } + #[tokio::test] + async fn node_restart_should_kill_the_node_and_respawn_it_successfully() { + let fs = InMemoryFileSystem::new(HashMap::from([ + (OsString::from_str("/").unwrap(), InMemoryFile::dir()), + (OsString::from_str("/tmp").unwrap(), InMemoryFile::dir()), + ( + OsString::from_str("/file1").unwrap(), + InMemoryFile::file("My file 1"), + ), + ( + OsString::from_str("/file2").unwrap(), + InMemoryFile::file("My file 2"), + ), + ])); + let provider = NativeProvider::new(fs.clone()); + let namespace = provider.create_namespace().await.unwrap(); + + let node = namespace + .spawn_node( + SpawnNodeOptions::new( + "mynode", + "/home/user/Work/parity/zombienet-sdk/crates/provider/testing/dummy_node", + ) + .args(vec![ + "-flag1", + "--flag2", + "--option1=value1", + "-option2=value2", + "--option3 value3", + "-option4 value4", + ]) + .env(vec![ + ("MY_VAR_1", "MY_VALUE_1"), + ("MY_VAR_2", "MY_VALUE_2"), + ("MY_VAR_3", "MY_VALUE_3"), + ]) + .injected_files(vec![ + TransferedFile::new("/file1", "/cfg/file1"), + TransferedFile::new("/file2", "/data/file2"), + ]), + ) + .await + .unwrap(); + + // wait 3s for node to spawn and start writing logs + sleep(Duration::from_secs(3)).await; + + let processes = get_processes_by_name("dummy_node").await; + assert_eq!(processes.len(), 1); // needed to avoid test run in parallel and false results + let old_process_id = processes.first().unwrap().pid(); + let old_logs_count = node.logs().await.unwrap().lines().count(); + + node.restart(None).await.unwrap(); + + // wait 3s for node to restart and restart writing logs + sleep(Duration::from_secs(3)).await; + + let processes = get_processes_by_name("dummy_node").await; + assert_eq!(processes.len(), 1); // needed to avoid test run in parallel and false results + let node_process = processes.first().unwrap(); + + // ensure process has correct state + assert!(matches!( + node_process.stat().unwrap().state().unwrap(), + // process can be running or sleeping because we sleep between echo calls + procfs::process::ProcState::Running | procfs::process::ProcState::Sleeping + )); + + // ensure PID changed + assert_ne!(old_process_id, node_process.pid()); + + // ensure process restarted with correct args + let node_args = node_process.cmdline().unwrap(); + assert!(node_args.contains(&"-flag1".to_string())); + assert!(node_args.contains(&"--flag2".to_string())); + assert!(node_args.contains(&"--option1=value1".to_string())); + assert!(node_args.contains(&"-option2=value2".to_string())); + assert!(node_args.contains(&"--option3 value3".to_string())); + assert!(node_args.contains(&"-option4 value4".to_string())); + + // ensure process restarted with correct environment + let node_env = node_process.environ().unwrap(); + assert_eq!( + node_env + .get(&OsString::from_str("MY_VAR_1").unwrap()) + .unwrap(), + "MY_VALUE_1" + ); + assert_eq!( + node_env + .get(&OsString::from_str("MY_VAR_2").unwrap()) + .unwrap(), 
+ "MY_VALUE_2" + ); + assert_eq!( + node_env + .get(&OsString::from_str("MY_VAR_3").unwrap()) + .unwrap(), + "MY_VALUE_3" + ); + + // ensure log writing restarted and they keep being written for some time + timeout(Duration::from_secs(30), async { + let mut expected_logs_line_count = old_logs_count; + + loop { + sleep(Duration::from_millis(200)).await; + + if let Some(file) = fs.files.read().await.get(node.log_path().as_os_str()) { + if let Some(contents) = file.contents() { + if contents.lines().count() >= expected_logs_line_count { + if expected_logs_line_count >= old_logs_count + 6 { + return; + } else { + expected_logs_line_count += 2; + } + } + } + } + } + }) + .await + .unwrap(); + + // ensure node is present in namespace + assert_eq!(namespace.nodes().await.len(), 1); + assert!(namespace.nodes().await.get(node.name()).is_some()); + } + + #[tokio::test] + async fn node_destroy_method_should_destroy_the_node_itfself_and_remove_process_and_stop_logs_writing( + ) { + let fs = InMemoryFileSystem::new(HashMap::from([ + (OsString::from_str("/").unwrap(), InMemoryFile::dir()), + (OsString::from_str("/tmp").unwrap(), InMemoryFile::dir()), + ])); + let provider = NativeProvider::new(fs.clone()); + let namespace = provider.create_namespace().await.unwrap(); + + // spawn dummy node + let node = namespace + .spawn_node(SpawnNodeOptions::new( + "mynode", + "/home/user/Work/parity/zombienet-sdk/crates/provider/testing/dummy_node", + )) + .await + .unwrap(); + + // wait 3s for node to start and begin writing logs + sleep(Duration::from_secs(3)).await; + + node.destroy().await.unwrap(); + + // wait node 1s to be killed and stop writing logs + sleep(Duration::from_secs(1)).await; + let logs = node.logs().await.unwrap(); + + // ensure process is not running anymore + let processes = get_processes_by_name("dummy_node").await; + assert_eq!(processes.len(), 0); + + // ensure logs are not being written anymore + let _ = timeout(Duration::from_secs(10), async { + loop { + sleep(Duration::from_millis(200)).await; + + assert_eq!(logs, node.logs().await.unwrap()); + } + }) + .await; + + // ensure node doesn't exists anymore in namespace + assert_eq!(namespace.nodes().await.len(), 0); + } + // #[tokio::test] // async fn node_run_script_method_should_execute_the_script_successfully_and_returns_stdout() { // // we need to mirror the script between local fs and in memory fs else @@ -1384,4 +1508,21 @@ mod tests { // println!("{:?}", result); // // assert!(matches!(result, Ok(Ok(stdout)) if stdout == "Here is my content\n")); // } + + async fn get_processes_by_name(name: &str) -> Vec { + procfs::process::all_processes() + .unwrap() + .filter_map(|process| { + if let Ok(process) = process { + process + .cmdline() + .iter() + .any(|args| args.iter().any(|arg| arg.contains(name))) + .then(|| process) + } else { + None + } + }) + .collect::>() + } } From 176184678022e0d6f5977b54827dcf7e82e0a684 Mon Sep 17 00:00:00 2001 From: l0r1s Date: Mon, 25 Sep 2023 16:39:14 +0300 Subject: [PATCH 55/69] feat: refacto set_mode method on LocalFilesystem and fixed tests to be platform agnostic --- crates/support/src/fs/local.rs | 31 ++++--------------------------- 1 file changed, 4 insertions(+), 27 deletions(-) diff --git a/crates/support/src/fs/local.rs b/crates/support/src/fs/local.rs index 932933fb0..1906f5588 100644 --- a/crates/support/src/fs/local.rs +++ b/crates/support/src/fs/local.rs @@ -1,8 +1,7 @@ -use std::{os::unix::fs::PermissionsExt, path::Path}; +use std::{fs::Permissions, os::unix::fs::PermissionsExt, path::Path}; 
use async_trait::async_trait; use tokio::io::AsyncWriteExt; -use uuid::Uuid; use super::{FileSystem, FileSystemError, FileSystemResult}; @@ -67,23 +66,7 @@ impl FileSystem for LocalFileSystem { } async fn set_mode(&self, path: impl AsRef + Send, mode: u32) -> FileSystemResult<()> { - // because we can't create a Permissions struct directly, we create a temporary empty file and retrieve the - // Permissions from it, we then modify its mode and apply it to our file - let temp_file_path = format!( - "{}/{}", - std::env::temp_dir().to_string_lossy(), - Uuid::new_v4() - ); - let temp_file = - std::fs::File::create(temp_file_path).map_err(Into::::into)?; - - let mut permissions = temp_file - .metadata() - .map_err(Into::::into)? - .permissions(); - permissions.set_mode(mode); - - tokio::fs::set_permissions(path, permissions) + tokio::fs::set_permissions(path, Permissions::from_mode(mode)) .await .map_err(Into::into) } @@ -342,10 +325,7 @@ mod tests { let fs = LocalFileSystem::default(); let path = format!("{test_dir}/myfile"); std::fs::write(&path, "Test").unwrap(); - assert_eq!( - std::fs::metadata(&path).unwrap().permissions().mode(), - FILE_BITS + 0o664 - ); + assert!(std::fs::metadata(&path).unwrap().permissions().mode() != (FILE_BITS + 0400)); fs.set_mode(&path, 0o400).await.unwrap(); @@ -362,10 +342,7 @@ mod tests { let fs = LocalFileSystem::default(); let path = format!("{test_dir}/mydir"); std::fs::create_dir(&path).unwrap(); - assert_eq!( - std::fs::metadata(&path).unwrap().permissions().mode(), - DIR_BITS + 0o775 - ); + assert!(std::fs::metadata(&path).unwrap().permissions().mode() != (DIR_BITS + 0o700)); fs.set_mode(&path, 0o700).await.unwrap(); From e14c5781745e3e7fc5941e06258028a59ef39ed8 Mon Sep 17 00:00:00 2001 From: l0r1s Date: Mon, 25 Sep 2023 16:42:30 +0300 Subject: [PATCH 56/69] feat: added test for tmp_dir method on NativeProvider --- crates/provider/src/native.rs | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/crates/provider/src/native.rs b/crates/provider/src/native.rs index 9ee2ce5ec..353bcac04 100644 --- a/crates/provider/src/native.rs +++ b/crates/provider/src/native.rs @@ -601,7 +601,7 @@ fn create_process_with_log_tasks( #[cfg(test)] mod tests { - use std::{ffi::OsString, str::FromStr}; + use std::{ffi::OsString, str::FromStr, fs}; use procfs::process::Process; use support::fs::in_memory::{InMemoryFile, InMemoryFileSystem}; @@ -626,6 +626,23 @@ mod tests { ); } + #[tokio::test] + async fn provider_tmp_dir_method_should_set_the_temporary_for_provider() { + let fs = InMemoryFileSystem::new(HashMap::from([ + (OsString::from_str("/").unwrap(), InMemoryFile::dir()), + ( + OsString::from_str("/someotherdir").unwrap(), + InMemoryFile::dir(), + ), + ])); + let provider = NativeProvider::new(fs.clone()).tmp_dir("/someotherdir"); + + // we create a namespace to ensure tmp dir will be used to store namespace + let namespace = provider.create_namespace().await.unwrap(); + + assert!(namespace.base_dir().starts_with("/someotherdir")) + } + #[tokio::test] async fn provider_create_namespace_method_should_create_a_new_namespace_and_returns_it() { let fs = InMemoryFileSystem::new(HashMap::from([ From f62034f90a0832741d9d33c476734ff7d99c2213 Mon Sep 17 00:00:00 2001 From: l0r1s Date: Mon, 25 Sep 2023 16:43:55 +0300 Subject: [PATCH 57/69] feat: added error test for namespace spawn_node method on NativerProvider, fixed node run_command test to be platform agnostic --- crates/provider/src/native.rs | 34 +++++++++++++++++++++++++++++++++- 1 file 
changed, 33 insertions(+), 1 deletion(-) diff --git a/crates/provider/src/native.rs b/crates/provider/src/native.rs index 353bcac04..7b7a7176a 100644 --- a/crates/provider/src/native.rs +++ b/crates/provider/src/native.rs @@ -877,6 +877,38 @@ mod tests { assert!(namespace.nodes().await.get(node.name()).is_some()); } + #[tokio::test] + async fn namespace_spawn_node_method_should_returns_an_error_if_a_node_already_exists_with_this_name( + ) { + let fs = InMemoryFileSystem::new(HashMap::from([ + (OsString::from_str("/").unwrap(), InMemoryFile::dir()), + (OsString::from_str("/tmp").unwrap(), InMemoryFile::dir()), + ])); + let provider = NativeProvider::new(fs.clone()); + let namespace = provider.create_namespace().await.unwrap(); + + namespace + .spawn_node(SpawnNodeOptions::new( + "mynode", + "/home/user/Work/parity/zombienet-sdk/crates/provider/testing/dummy_node", + )) + .await + .unwrap(); + + let result = namespace + .spawn_node(SpawnNodeOptions::new( + "mynode", + "/home/user/Work/parity/zombienet-sdk/crates/provider/testing/dummy_node", + )) + .await; + + // we must match here because Arc doesn't implements Debug, so unwrap_err is not an option + match result { + Ok(_) => panic!("expected result to be an error"), + Err(err) => assert_eq!(err.to_string(), "Duplicated node name: mynode"), + }; + } + #[tokio::test] async fn namespace_generate_files_method_should_create_files_at_the_correct_locations_using_given_commands( ) { @@ -1105,7 +1137,7 @@ mod tests { .await; assert!( - matches!(result, Ok(Err((exit_code, stderr))) if !exit_code.success() && stderr == "sh: 0: Illegal option -k\n") + matches!(result, Ok(Err((exit_code, stderr))) if !exit_code.success() && stderr.len() > 0) ); } From 8d0d5980b84bfd40fdde7d21ab923cc2e1dc05b4 Mon Sep 17 00:00:00 2001 From: l0r1s Date: Mon, 25 Sep 2023 19:01:48 +0300 Subject: [PATCH 58/69] feat: added dummy script for native provider testing --- crates/provider/testing/dummy_script | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 crates/provider/testing/dummy_script diff --git a/crates/provider/testing/dummy_script b/crates/provider/testing/dummy_script new file mode 100644 index 000000000..89c039a48 --- /dev/null +++ b/crates/provider/testing/dummy_script @@ -0,0 +1,9 @@ +#!/bin/bash + +echo "My script" + +echo "$MY_ENV_VAR" + +if [ "$1" == "-c" ]; then + echo "With args" +fi \ No newline at end of file From 2d8222d6b3829e808499e9f8a41f05264fc98dba Mon Sep 17 00:00:00 2001 From: l0r1s Date: Mon, 25 Sep 2023 19:02:41 +0300 Subject: [PATCH 59/69] feat: updated InMemoryFileSystem to handle mirrored file locally --- crates/support/src/fs/in_memory.rs | 98 ++++++++++++++++++++++++++---- 1 file changed, 86 insertions(+), 12 deletions(-) diff --git a/crates/support/src/fs/in_memory.rs b/crates/support/src/fs/in_memory.rs index 9b111894d..7f3fa04f7 100644 --- a/crates/support/src/fs/in_memory.rs +++ b/crates/support/src/fs/in_memory.rs @@ -1,4 +1,11 @@ -use std::{collections::HashMap, ffi::OsString, path::Path, sync::Arc}; +use std::{ + collections::HashMap, + ffi::OsString, + fs::{self, Permissions}, + os::unix::prelude::PermissionsExt, + path::Path, + sync::Arc, +}; use anyhow::anyhow; use async_trait::async_trait; @@ -8,8 +15,14 @@ use super::{FileSystem, FileSystemResult}; #[derive(Debug, Clone, PartialEq)] pub enum InMemoryFile { - File { mode: u32, contents: Vec }, - Directory { mode: u32 }, + File { + mode: u32, + contents: Vec, + mirror: bool, + }, + Directory { + mode: u32, + }, } impl InMemoryFile { @@ -27,6 +40,31 @@ impl InMemoryFile 
{ Self::File { mode: 0o664, contents: contents.as_ref().to_vec(), + mirror: false, + } + } + + pub fn mirror(path: P, contents: C) -> Self + where + P: AsRef, + C: AsRef, + { + Self::mirror_raw(path, contents.as_ref()) + } + + pub fn mirror_raw(path: P, contents: C) -> Self + where + P: AsRef, + C: AsRef<[u8]>, + { + // mirror file to local filesystem + fs::create_dir_all(path.as_ref().parent().unwrap()).unwrap(); + fs::write(path, contents.as_ref()).unwrap(); + + Self::File { + mode: 0o664, + contents: contents.as_ref().to_vec(), + mirror: true, } } @@ -58,6 +96,13 @@ impl InMemoryFile { Self::Directory { .. } => None, } } + + pub fn set_mirror(&mut self) { + match self { + Self::File { mirror, .. } => *mirror = true, + _ => {}, + }; + } } #[derive(Default, Debug, Clone)] @@ -204,16 +249,45 @@ impl FileSystem for InMemoryFileSystem { from: impl AsRef + Send, to: impl AsRef + Send, ) -> FileSystemResult<()> { - let content = self.read(from).await?; - self.write(to, content).await + let from_ref = from.as_ref(); + let to_ref = to.as_ref(); + let content = self.read(from_ref).await?; + self.write(to_ref, content).await?; + + // handle mirror file + let mut files = self.files.write().await; + let file = files.get(from_ref.as_os_str()).unwrap(); + if let InMemoryFile::File { + mode, + contents, + mirror, + } = file + { + if *mirror { + fs::create_dir_all(to_ref.parent().unwrap()).unwrap(); + fs::write(to_ref, contents).unwrap(); + fs::set_permissions(to_ref, Permissions::from_mode(*mode)).unwrap(); + files.get_mut(to_ref.as_os_str()).unwrap().set_mirror(); + } + } + + Ok(()) } async fn set_mode(&self, path: impl AsRef + Send, mode: u32) -> FileSystemResult<()> { let os_path = path.as_ref().as_os_str(); if let Some(file) = self.files.write().await.get_mut(os_path) { match file { - InMemoryFile::File { mode: old_mode, .. } => { + InMemoryFile::File { + mode: old_mode, + mirror, + .. + } => { *old_mode = mode; + + if *mirror { + fs::set_permissions(os_path, Permissions::from_mode(mode)).unwrap(); + } }, InMemoryFile::Directory { mode: old_mode, .. } => { *old_mode = mode; @@ -519,7 +593,7 @@ mod tests { .read() .await .get(&OsString::from_str("/myfile").unwrap()), - Some(InMemoryFile::File {mode, contents}) if *mode == 0o664 && contents == "my file content".as_bytes() + Some(InMemoryFile::File {mode, contents, .. }) if *mode == 0o664 && contents == "my file content".as_bytes() )); } @@ -541,7 +615,7 @@ mod tests { .read() .await .get(&OsString::from_str("/myfile").unwrap()), - Some(InMemoryFile::File { mode, contents }) if *mode == 0o664 && contents == "my new file content".as_bytes() + Some(InMemoryFile::File { mode, contents, .. }) if *mode == 0o664 && contents == "my new file content".as_bytes() )); } @@ -611,7 +685,7 @@ mod tests { .read() .await .get(&OsString::from_str("/myfile").unwrap()), - Some(InMemoryFile::File { mode, contents }) if *mode == 0o664 && contents == "my file content has been updated with new things".as_bytes() + Some(InMemoryFile::File { mode, contents, .. }) if *mode == 0o664 && contents == "my file content has been updated with new things".as_bytes() )); } @@ -630,7 +704,7 @@ mod tests { .read() .await .get(&OsString::from_str("/myfile").unwrap()), - Some(InMemoryFile::File { mode,contents }) if *mode == 0o664 && contents == "my file content".as_bytes() + Some(InMemoryFile::File { mode,contents, .. 
}) if *mode == 0o664 && contents == "my file content".as_bytes() )); } @@ -693,7 +767,7 @@ mod tests { assert_eq!(fs.files.read().await.len(), 3); assert!( - matches!(fs.files.read().await.get(&OsString::from_str("/myfilecopy").unwrap()).unwrap(), InMemoryFile::File { mode, contents } if *mode == 0o664 && contents == "my file content".as_bytes()) + matches!(fs.files.read().await.get(&OsString::from_str("/myfilecopy").unwrap()).unwrap(), InMemoryFile::File { mode, contents, .. } if *mode == 0o664 && contents == "my file content".as_bytes()) ); } @@ -715,7 +789,7 @@ mod tests { assert_eq!(fs.files.read().await.len(), 3); assert!( - matches!(fs.files.read().await.get(&OsString::from_str("/myfilecopy").unwrap()).unwrap(), InMemoryFile::File { mode, contents } if *mode == 0o664 && contents == "my new file content".as_bytes()) + matches!(fs.files.read().await.get(&OsString::from_str("/myfilecopy").unwrap()).unwrap(), InMemoryFile::File { mode, contents, .. } if *mode == 0o664 && contents == "my new file content".as_bytes()) ); } From 34a2c5a1ecc54c7434d69c9340a1a535144d5954 Mon Sep 17 00:00:00 2001 From: l0r1s Date: Mon, 25 Sep 2023 19:03:17 +0300 Subject: [PATCH 60/69] feat: added happy path testing for native node run_script method --- crates/provider/src/native.rs | 80 ++++++++++++++++------------------- 1 file changed, 37 insertions(+), 43 deletions(-) diff --git a/crates/provider/src/native.rs b/crates/provider/src/native.rs index 7b7a7176a..af525744e 100644 --- a/crates/provider/src/native.rs +++ b/crates/provider/src/native.rs @@ -601,7 +601,7 @@ fn create_process_with_log_tasks( #[cfg(test)] mod tests { - use std::{ffi::OsString, str::FromStr, fs}; + use std::{ffi::OsString, fs, str::FromStr}; use procfs::process::Process; use support::fs::in_memory::{InMemoryFile, InMemoryFileSystem}; @@ -1170,6 +1170,42 @@ mod tests { ); } + #[tokio::test] + async fn node_run_script_method_should_execute_the_script_successfully_and_returns_stdout() { + let fs = InMemoryFileSystem::new(HashMap::from([ + (OsString::from_str("/").unwrap(), InMemoryFile::dir()), + (OsString::from_str("/tmp").unwrap(), InMemoryFile::dir()), + ( + OsString::from_str("/tmp/dummy_script").unwrap(), + InMemoryFile::mirror( + "/tmp/dummy_script", + fs::read_to_string("/home/user/Work/parity/zombienet-sdk/crates/provider/testing/dummy_script").unwrap(), + ), + ), + ])); + let provider = NativeProvider::new(fs.clone()); + let namespace = provider.create_namespace().await.unwrap(); + + // spawn dummy node + let node = namespace + .spawn_node(SpawnNodeOptions::new( + "mynode", + "/home/user/Work/parity/zombienet-sdk/crates/provider/testing/dummy_node", + )) + .await + .unwrap(); + + let result = node + .run_script( + RunScriptOptions::new("/tmp/dummy_script") + .args(vec!["-c"]) + .env(vec![("MY_ENV_VAR", "With env")]), + ) + .await; + + assert!(matches!(result, Ok(Ok(stdout)) if stdout == "My script\nWith env\nWith args\n")); + } + #[tokio::test] async fn node_copy_file_from_node_method_should_copy_node_remote_file_to_local_path() { let fs = InMemoryFileSystem::new(HashMap::from([ @@ -1516,48 +1552,6 @@ mod tests { assert_eq!(namespace.nodes().await.len(), 0); } - // #[tokio::test] - // async fn node_run_script_method_should_execute_the_script_successfully_and_returns_stdout() { - // // we need to mirror the script between local fs and in memory fs else - // // the tokio::process::Command won't be able to execute it - // fs::copy( - // "/home/user/Work/parity/zombienet-sdk/crates/provider/testing/dummy_script", - // 
"/tmp/dummy_script", - // ) - // .unwrap(); - - // let fs = InMemoryFileSystem::new(HashMap::from([ - // (OsString::from_str("/").unwrap(), InMemoryFile::dir()), - // (OsString::from_str("/tmp").unwrap(), InMemoryFile::dir()), - // ( - // OsString::from_str("/tmp/dummy_script").unwrap(), - // InMemoryFile::file(fs::read_to_string("/tmp/dummy_script").unwrap()), - // ), - // ])); - // let provider = NativeProvider::new(fs.clone()); - // let namespace = provider.create_namespace().await.unwrap(); - - // // spawn dummy node - // let node = namespace - // .spawn_node(SpawnNodeOptions::new( - // "mynode", - // "/home/user/Work/parity/zombienet-sdk/crates/provider/testing/dummy_node", - // )) - // .await - // .unwrap(); - - // let result = node - // .run_script( - // RunScriptOptions::new("/tmp/dummy_script") - // .args(vec!["-c"]) - // .env(vec![("MY_ENV_VAR", "Here is my content")]), - // ) - // .await; - - // println!("{:?}", result); - // // assert!(matches!(result, Ok(Ok(stdout)) if stdout == "Here is my content\n")); - // } - async fn get_processes_by_name(name: &str) -> Vec { procfs::process::all_processes() .unwrap() From 737d6b6c09df813d42e16342b58622dde89f0ff9 Mon Sep 17 00:00:00 2001 From: l0r1s Date: Mon, 25 Sep 2023 19:14:45 +0300 Subject: [PATCH 61/69] chore: fixed local paths --- crates/provider/src/native.rs | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/crates/provider/src/native.rs b/crates/provider/src/native.rs index af525744e..5c9ba23cc 100644 --- a/crates/provider/src/native.rs +++ b/crates/provider/src/native.rs @@ -732,7 +732,7 @@ mod tests { .spawn_node( SpawnNodeOptions::new( "mynode", - "/home/user/Work/parity/zombienet-sdk/crates/provider/testing/dummy_node", + "./testing/dummy_node", ) .args(vec![ "-flag1", @@ -890,7 +890,7 @@ mod tests { namespace .spawn_node(SpawnNodeOptions::new( "mynode", - "/home/user/Work/parity/zombienet-sdk/crates/provider/testing/dummy_node", + "./testing/dummy_node", )) .await .unwrap(); @@ -898,7 +898,7 @@ mod tests { let result = namespace .spawn_node(SpawnNodeOptions::new( "mynode", - "/home/user/Work/parity/zombienet-sdk/crates/provider/testing/dummy_node", + "./testing/dummy_node", )) .await; @@ -980,14 +980,14 @@ mod tests { namespace .spawn_node(SpawnNodeOptions::new( "mynode1", - "/home/user/Work/parity/zombienet-sdk/crates/provider/testing/dummy_node", + "./testing/dummy_node", )) .await .unwrap(); namespace .spawn_node(SpawnNodeOptions::new( "mynode2", - "/home/user/Work/parity/zombienet-sdk/crates/provider/testing/dummy_node", + "./testing/dummy_node", )) .await .unwrap(); @@ -1023,7 +1023,7 @@ mod tests { let node = namespace .spawn_node(SpawnNodeOptions::new( "mynode", - "/home/user/Work/parity/zombienet-sdk/crates/provider/testing/dummy_node", + "./testing/dummy_node", )) .await .unwrap(); @@ -1056,7 +1056,7 @@ mod tests { let node = namespace .spawn_node(SpawnNodeOptions::new( "mynode", - "/home/user/Work/parity/zombienet-sdk/crates/provider/testing/dummy_node", + "./testing/dummy_node", )) .await .unwrap(); @@ -1097,7 +1097,7 @@ mod tests { let node = namespace .spawn_node(SpawnNodeOptions::new( "mynode", - "/home/user/Work/parity/zombienet-sdk/crates/provider/testing/dummy_node", + "./testing/dummy_node", )) .await .unwrap(); @@ -1127,7 +1127,7 @@ mod tests { let node = namespace .spawn_node(SpawnNodeOptions::new( "mynode", - "/home/user/Work/parity/zombienet-sdk/crates/provider/testing/dummy_node", + "./testing/dummy_node", )) .await .unwrap(); @@ -1154,7 +1154,7 @@ 
mod tests { let node = namespace .spawn_node(SpawnNodeOptions::new( "mynode", - "/home/user/Work/parity/zombienet-sdk/crates/provider/testing/dummy_node", + "./testing/dummy_node", )) .await .unwrap(); @@ -1179,7 +1179,7 @@ mod tests { OsString::from_str("/tmp/dummy_script").unwrap(), InMemoryFile::mirror( "/tmp/dummy_script", - fs::read_to_string("/home/user/Work/parity/zombienet-sdk/crates/provider/testing/dummy_script").unwrap(), + fs::read_to_string("./testing/dummy_script").unwrap(), ), ), ])); @@ -1190,7 +1190,7 @@ mod tests { let node = namespace .spawn_node(SpawnNodeOptions::new( "mynode", - "/home/user/Work/parity/zombienet-sdk/crates/provider/testing/dummy_node", + "./testing/dummy_node", )) .await .unwrap(); @@ -1219,7 +1219,7 @@ mod tests { let node = namespace .spawn_node(SpawnNodeOptions::new( "mynode", - "/home/user/Work/parity/zombienet-sdk/crates/provider/testing/dummy_node", + "./testing/dummy_node", )) .await .unwrap(); @@ -1256,7 +1256,7 @@ mod tests { let node = namespace .spawn_node(SpawnNodeOptions::new( "mynode", - "/home/user/Work/parity/zombienet-sdk/crates/provider/testing/dummy_node", + "./testing/dummy_node", )) .await .unwrap(); @@ -1309,7 +1309,7 @@ mod tests { let node = namespace .spawn_node(SpawnNodeOptions::new( "mynode", - "/home/user/Work/parity/zombienet-sdk/crates/provider/testing/dummy_node", + "./testing/dummy_node", )) .await .unwrap(); @@ -1398,7 +1398,7 @@ mod tests { .spawn_node( SpawnNodeOptions::new( "mynode", - "/home/user/Work/parity/zombienet-sdk/crates/provider/testing/dummy_node", + "./testing/dummy_node", ) .args(vec![ "-flag1", @@ -1520,7 +1520,7 @@ mod tests { let node = namespace .spawn_node(SpawnNodeOptions::new( "mynode", - "/home/user/Work/parity/zombienet-sdk/crates/provider/testing/dummy_node", + "./testing/dummy_node", )) .await .unwrap(); From d8e43cfbc69ae766d226bc2b1c0035f473938518 Mon Sep 17 00:00:00 2001 From: l0r1s Date: Mon, 25 Sep 2023 19:15:11 +0300 Subject: [PATCH 62/69] ci: set test threads to 1 for native provider tests --- .github/workflows/ci.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8feb59fdf..e61455221 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -38,7 +38,8 @@ jobs: run: cargo build - name: Tests - run: cargo test + # there should be a unique test thread for native provider tests (asserting spawned processes count) + run: cargo test -- --test-threads 1 coverage: name: Zombienet SDK - coverage @@ -60,7 +61,8 @@ jobs: uses: taiki-e/install-action@cargo-llvm-cov - name: Collect coverage data - run: cargo llvm-cov nextest --lcov --output-path lcov.info + # there should be a unique test thread for native provider tests (asserting spawned processes count) + run: cargo llvm-cov nextest -j 1 --lcov --output-path lcov.info - name: Report code coverage uses: Nef10/lcov-reporter-action@v0.4.0 From 3830c873e493c6d15080b5044f1c8062d3a363b2 Mon Sep 17 00:00:00 2001 From: l0r1s Date: Mon, 25 Sep 2023 19:19:07 +0300 Subject: [PATCH 63/69] feat: fix clippy warnings --- crates/provider/src/native.rs | 152 ++++++++++------------------- crates/support/src/fs/in_memory.rs | 11 +-- crates/support/src/fs/local.rs | 44 ++++----- 3 files changed, 79 insertions(+), 128 deletions(-) diff --git a/crates/provider/src/native.rs b/crates/provider/src/native.rs index 5c9ba23cc..b7d90c05c 100644 --- a/crates/provider/src/native.rs +++ b/crates/provider/src/native.rs @@ -730,27 +730,24 @@ mod tests { let node = namespace 
.spawn_node( - SpawnNodeOptions::new( - "mynode", - "./testing/dummy_node", - ) - .args(vec![ - "-flag1", - "--flag2", - "--option1=value1", - "-option2=value2", - "--option3 value3", - "-option4 value4", - ]) - .env(vec![ - ("MY_VAR_1", "MY_VALUE_1"), - ("MY_VAR_2", "MY_VALUE_2"), - ("MY_VAR_3", "MY_VALUE_3"), - ]) - .injected_files(vec![ - TransferedFile::new("/file1", "/cfg/file1"), - TransferedFile::new("/file2", "/data/file2"), - ]), + SpawnNodeOptions::new("mynode", "./testing/dummy_node") + .args(vec![ + "-flag1", + "--flag2", + "--option1=value1", + "-option2=value2", + "--option3 value3", + "-option4 value4", + ]) + .env(vec![ + ("MY_VAR_1", "MY_VALUE_1"), + ("MY_VAR_2", "MY_VALUE_2"), + ("MY_VAR_3", "MY_VALUE_3"), + ]) + .injected_files(vec![ + TransferedFile::new("/file1", "/cfg/file1"), + TransferedFile::new("/file2", "/data/file2"), + ]), ) .await .unwrap(); @@ -888,18 +885,12 @@ mod tests { let namespace = provider.create_namespace().await.unwrap(); namespace - .spawn_node(SpawnNodeOptions::new( - "mynode", - "./testing/dummy_node", - )) + .spawn_node(SpawnNodeOptions::new("mynode", "./testing/dummy_node")) .await .unwrap(); let result = namespace - .spawn_node(SpawnNodeOptions::new( - "mynode", - "./testing/dummy_node", - )) + .spawn_node(SpawnNodeOptions::new("mynode", "./testing/dummy_node")) .await; // we must match here because Arc doesn't implements Debug, so unwrap_err is not an option @@ -978,17 +969,11 @@ mod tests { // spawn 2 dummy nodes to populate namespace namespace - .spawn_node(SpawnNodeOptions::new( - "mynode1", - "./testing/dummy_node", - )) + .spawn_node(SpawnNodeOptions::new("mynode1", "./testing/dummy_node")) .await .unwrap(); namespace - .spawn_node(SpawnNodeOptions::new( - "mynode2", - "./testing/dummy_node", - )) + .spawn_node(SpawnNodeOptions::new("mynode2", "./testing/dummy_node")) .await .unwrap(); @@ -1021,10 +1006,7 @@ mod tests { // spawn dummy node let node = namespace - .spawn_node(SpawnNodeOptions::new( - "mynode", - "./testing/dummy_node", - )) + .spawn_node(SpawnNodeOptions::new("mynode", "./testing/dummy_node")) .await .unwrap(); @@ -1054,10 +1036,7 @@ mod tests { // spawn dummy node let node = namespace - .spawn_node(SpawnNodeOptions::new( - "mynode", - "./testing/dummy_node", - )) + .spawn_node(SpawnNodeOptions::new("mynode", "./testing/dummy_node")) .await .unwrap(); @@ -1095,10 +1074,7 @@ mod tests { // spawn dummy node let node = namespace - .spawn_node(SpawnNodeOptions::new( - "mynode", - "./testing/dummy_node", - )) + .spawn_node(SpawnNodeOptions::new("mynode", "./testing/dummy_node")) .await .unwrap(); @@ -1125,10 +1101,7 @@ mod tests { // spawn dummy node let node = namespace - .spawn_node(SpawnNodeOptions::new( - "mynode", - "./testing/dummy_node", - )) + .spawn_node(SpawnNodeOptions::new("mynode", "./testing/dummy_node")) .await .unwrap(); @@ -1137,7 +1110,7 @@ mod tests { .await; assert!( - matches!(result, Ok(Err((exit_code, stderr))) if !exit_code.success() && stderr.len() > 0) + matches!(result, Ok(Err((exit_code, stderr))) if !exit_code.success() && !stderr.is_empty()) ); } @@ -1152,10 +1125,7 @@ mod tests { // spawn dummy node let node = namespace - .spawn_node(SpawnNodeOptions::new( - "mynode", - "./testing/dummy_node", - )) + .spawn_node(SpawnNodeOptions::new("mynode", "./testing/dummy_node")) .await .unwrap(); @@ -1188,10 +1158,7 @@ mod tests { // spawn dummy node let node = namespace - .spawn_node(SpawnNodeOptions::new( - "mynode", - "./testing/dummy_node", - )) + .spawn_node(SpawnNodeOptions::new("mynode", 
"./testing/dummy_node")) .await .unwrap(); @@ -1217,10 +1184,7 @@ mod tests { // spawn dummy node let node = namespace - .spawn_node(SpawnNodeOptions::new( - "mynode", - "./testing/dummy_node", - )) + .spawn_node(SpawnNodeOptions::new("mynode", "./testing/dummy_node")) .await .unwrap(); @@ -1254,10 +1218,7 @@ mod tests { // spawn dummy node let node = namespace - .spawn_node(SpawnNodeOptions::new( - "mynode", - "./testing/dummy_node", - )) + .spawn_node(SpawnNodeOptions::new("mynode", "./testing/dummy_node")) .await .unwrap(); @@ -1307,10 +1268,7 @@ mod tests { // spawn dummy node let node = namespace - .spawn_node(SpawnNodeOptions::new( - "mynode", - "./testing/dummy_node", - )) + .spawn_node(SpawnNodeOptions::new("mynode", "./testing/dummy_node")) .await .unwrap(); @@ -1396,27 +1354,24 @@ mod tests { let node = namespace .spawn_node( - SpawnNodeOptions::new( - "mynode", - "./testing/dummy_node", - ) - .args(vec![ - "-flag1", - "--flag2", - "--option1=value1", - "-option2=value2", - "--option3 value3", - "-option4 value4", - ]) - .env(vec![ - ("MY_VAR_1", "MY_VALUE_1"), - ("MY_VAR_2", "MY_VALUE_2"), - ("MY_VAR_3", "MY_VALUE_3"), - ]) - .injected_files(vec![ - TransferedFile::new("/file1", "/cfg/file1"), - TransferedFile::new("/file2", "/data/file2"), - ]), + SpawnNodeOptions::new("mynode", "./testing/dummy_node") + .args(vec![ + "-flag1", + "--flag2", + "--option1=value1", + "-option2=value2", + "--option3 value3", + "-option4 value4", + ]) + .env(vec![ + ("MY_VAR_1", "MY_VALUE_1"), + ("MY_VAR_2", "MY_VALUE_2"), + ("MY_VAR_3", "MY_VALUE_3"), + ]) + .injected_files(vec![ + TransferedFile::new("/file1", "/cfg/file1"), + TransferedFile::new("/file2", "/data/file2"), + ]), ) .await .unwrap(); @@ -1518,10 +1473,7 @@ mod tests { // spawn dummy node let node = namespace - .spawn_node(SpawnNodeOptions::new( - "mynode", - "./testing/dummy_node", - )) + .spawn_node(SpawnNodeOptions::new("mynode", "./testing/dummy_node")) .await .unwrap(); @@ -1561,7 +1513,7 @@ mod tests { .cmdline() .iter() .any(|args| args.iter().any(|arg| arg.contains(name))) - .then(|| process) + .then_some(process) } else { None } diff --git a/crates/support/src/fs/in_memory.rs b/crates/support/src/fs/in_memory.rs index 7f3fa04f7..05c9b5389 100644 --- a/crates/support/src/fs/in_memory.rs +++ b/crates/support/src/fs/in_memory.rs @@ -77,9 +77,9 @@ impl InMemoryFile { } pub fn mode(&self) -> u32 { - match self { - &Self::File { mode, .. } => mode, - &Self::Directory { mode, .. } => mode, + match *self { + Self::File { mode, .. } => mode, + Self::Directory { mode, .. } => mode, } } @@ -98,9 +98,8 @@ impl InMemoryFile { } pub fn set_mirror(&mut self) { - match self { - Self::File { mirror, .. } => *mirror = true, - _ => {}, + if let Self::File { mirror, .. 
} = self { + *mirror = true; }; } } diff --git a/crates/support/src/fs/local.rs b/crates/support/src/fs/local.rs index 1906f5588..ecf34334c 100644 --- a/crates/support/src/fs/local.rs +++ b/crates/support/src/fs/local.rs @@ -88,13 +88,13 @@ mod tests { } fn teardown(test_dir: String) { - std::fs::remove_dir_all(&test_dir).unwrap(); + std::fs::remove_dir_all(test_dir).unwrap(); } #[tokio::test] async fn create_dir_should_create_a_new_directory_at_path() { let test_dir = setup(); - let fs = LocalFileSystem::default(); + let fs = LocalFileSystem; let new_dir = format!("{test_dir}/mynewdir"); fs.create_dir(&new_dir).await.unwrap(); @@ -107,7 +107,7 @@ mod tests { #[tokio::test] async fn create_dir_should_bubble_up_error_if_some_happens() { let test_dir = setup(); - let fs = LocalFileSystem::default(); + let fs = LocalFileSystem; let new_dir = format!("{test_dir}/mynewdir"); // intentionally create new dir before calling function to force error @@ -121,7 +121,7 @@ mod tests { #[tokio::test] async fn create_dir_all_should_create_a_new_directory_and_all_of_it_ancestors_at_path() { let test_dir = setup(); - let fs = LocalFileSystem::default(); + let fs = LocalFileSystem; let new_dir = format!("{test_dir}/the/path/to/mynewdir"); fs.create_dir_all(&new_dir).await.unwrap(); @@ -134,7 +134,7 @@ mod tests { #[tokio::test] async fn create_dir_all_should_bubble_up_error_if_some_happens() { let test_dir = setup(); - let fs = LocalFileSystem::default(); + let fs = LocalFileSystem; let new_dir = format!("{test_dir}/the/path/to/mynewdir"); // intentionally create new file as ancestor before calling function to force error @@ -148,7 +148,7 @@ mod tests { #[tokio::test] async fn read_should_return_the_contents_of_the_file_at_path() { let test_dir = setup(); - let fs = LocalFileSystem::default(); + let fs = LocalFileSystem; let file_path = format!("{test_dir}/myfile"); std::fs::write(&file_path, b"Test").unwrap(); @@ -161,7 +161,7 @@ mod tests { #[tokio::test] async fn read_should_bubble_up_error_if_some_happens() { let test_dir = setup(); - let fs = LocalFileSystem::default(); + let fs = LocalFileSystem; let file_path = format!("{test_dir}/myfile"); // intentionally forget to create file to force error @@ -174,7 +174,7 @@ mod tests { #[tokio::test] async fn read_to_string_should_return_the_contents_of_the_file_at_path_as_string() { let test_dir = setup(); - let fs = LocalFileSystem::default(); + let fs = LocalFileSystem; let file_path = format!("{test_dir}/myfile"); std::fs::write(&file_path, b"Test").unwrap(); @@ -187,7 +187,7 @@ mod tests { #[tokio::test] async fn read_to_string_should_bubble_up_error_if_some_happens() { let test_dir = setup(); - let fs = LocalFileSystem::default(); + let fs = LocalFileSystem; let file_path = format!("{test_dir}/myfile"); // intentionally forget to create file to force error @@ -200,7 +200,7 @@ mod tests { #[tokio::test] async fn write_should_create_a_new_file_at_path_with_contents() { let test_dir = setup(); - let fs = LocalFileSystem::default(); + let fs = LocalFileSystem; let file_path = format!("{test_dir}/myfile"); fs.write(&file_path, "Test").await.unwrap(); @@ -212,7 +212,7 @@ mod tests { #[tokio::test] async fn write_should_overwrite_an_existing_file_with_contents() { let test_dir = setup(); - let fs = LocalFileSystem::default(); + let fs = LocalFileSystem; let file_path = format!("{test_dir}/myfile"); std::fs::write(&file_path, "Test").unwrap(); @@ -226,7 +226,7 @@ mod tests { #[tokio::test] async fn write_should_bubble_up_error_if_some_happens() { let test_dir = 
setup(); - let fs = LocalFileSystem::default(); + let fs = LocalFileSystem; let file_path = format!("{test_dir}/myfile"); // intentionally create directory instead of file to force error @@ -240,7 +240,7 @@ mod tests { #[tokio::test] async fn append_should_create_a_new_file_at_path_with_contents() { let test_dir = setup(); - let fs = LocalFileSystem::default(); + let fs = LocalFileSystem; let file_path = format!("{test_dir}/myfile"); fs.append(&file_path, "Test").await.unwrap(); @@ -252,7 +252,7 @@ mod tests { #[tokio::test] async fn append_should_updates_an_existing_file_by_appending_contents() { let test_dir = setup(); - let fs = LocalFileSystem::default(); + let fs = LocalFileSystem; let file_path = format!("{test_dir}/myfile"); std::fs::write(&file_path, "Test").unwrap(); @@ -266,7 +266,7 @@ mod tests { #[tokio::test] async fn append_should_bubble_up_error_if_some_happens() { let test_dir = setup(); - let fs = LocalFileSystem::default(); + let fs = LocalFileSystem; let file_path = format!("{test_dir}/myfile"); // intentionally create directory instead of file to force error @@ -280,7 +280,7 @@ mod tests { #[tokio::test] async fn copy_should_create_a_duplicate_of_source() { let test_dir = setup(); - let fs = LocalFileSystem::default(); + let fs = LocalFileSystem; let from_path = format!("{test_dir}/myfile"); std::fs::write(&from_path, "Test").unwrap(); @@ -294,7 +294,7 @@ mod tests { #[tokio::test] async fn copy_should_ovewrite_destination_if_alread_exists() { let test_dir = setup(); - let fs = LocalFileSystem::default(); + let fs = LocalFileSystem; let from_path = format!("{test_dir}/myfile"); std::fs::write(&from_path, "Test").unwrap(); @@ -309,7 +309,7 @@ mod tests { #[tokio::test] async fn copy_should_bubble_up_error_if_some_happens() { let test_dir = setup(); - let fs = LocalFileSystem::default(); + let fs = LocalFileSystem; let from_path = format!("{test_dir}/nonexistentfile"); let to_path = format!("{test_dir}/mycopy"); @@ -322,10 +322,10 @@ mod tests { #[tokio::test] async fn set_mode_should_update_the_file_mode_at_path() { let test_dir = setup(); - let fs = LocalFileSystem::default(); + let fs = LocalFileSystem; let path = format!("{test_dir}/myfile"); std::fs::write(&path, "Test").unwrap(); - assert!(std::fs::metadata(&path).unwrap().permissions().mode() != (FILE_BITS + 0400)); + assert!(std::fs::metadata(&path).unwrap().permissions().mode() != (FILE_BITS + 0o400)); fs.set_mode(&path, 0o400).await.unwrap(); @@ -339,7 +339,7 @@ mod tests { #[tokio::test] async fn set_mode_should_update_the_directory_mode_at_path() { let test_dir = setup(); - let fs = LocalFileSystem::default(); + let fs = LocalFileSystem; let path = format!("{test_dir}/mydir"); std::fs::create_dir(&path).unwrap(); assert!(std::fs::metadata(&path).unwrap().permissions().mode() != (DIR_BITS + 0o700)); @@ -356,7 +356,7 @@ mod tests { #[tokio::test] async fn set_mode_should_bubble_up_error_if_some_happens() { let test_dir = setup(); - let fs = LocalFileSystem::default(); + let fs = LocalFileSystem; let path = format!("{test_dir}/somemissingfile"); // intentionnally don't create file From d6b0fd3c15ca450380e7eb0283d17f7d4305bab1 Mon Sep 17 00:00:00 2001 From: l0r1s Date: Mon, 25 Sep 2023 19:23:16 +0300 Subject: [PATCH 64/69] chore: cargo fmt --- crates/provider/src/lib.rs | 5 ++++- crates/provider/src/native.rs | 3 +-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/crates/provider/src/lib.rs b/crates/provider/src/lib.rs index 6047a117b..b6f3e2098 100644 --- a/crates/provider/src/lib.rs +++ 
b/crates/provider/src/lib.rs @@ -1,7 +1,10 @@ pub mod native; pub mod shared; -use std::{net::IpAddr, path::PathBuf, process::ExitStatus, sync::Arc, time::Duration, collections::HashMap}; +use std::{ + collections::HashMap, net::IpAddr, path::PathBuf, process::ExitStatus, sync::Arc, + time::Duration, +}; use async_trait::async_trait; use shared::types::{ diff --git a/crates/provider/src/native.rs b/crates/provider/src/native.rs index b7d90c05c..719f83f99 100644 --- a/crates/provider/src/native.rs +++ b/crates/provider/src/native.rs @@ -607,9 +607,8 @@ mod tests { use support::fs::in_memory::{InMemoryFile, InMemoryFileSystem}; use tokio::time::timeout; - use crate::shared::types::TransferedFile; - use super::*; + use crate::shared::types::TransferedFile; #[test] fn provider_capabilities_method_should_return_provider_capabilities() { From 8963c0df602cef46250f4d1c0e184afd21456451 Mon Sep 17 00:00:00 2001 From: l0r1s Date: Tue, 26 Sep 2023 14:12:17 +0300 Subject: [PATCH 65/69] feat: joined futures when possible --- crates/provider/src/native.rs | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/crates/provider/src/native.rs b/crates/provider/src/native.rs index 719f83f99..3c5af7b19 100644 --- a/crates/provider/src/native.rs +++ b/crates/provider/src/native.rs @@ -12,6 +12,7 @@ use std::{ use anyhow::anyhow; use async_trait::async_trait; use configuration::types::Port; +use futures::{future::try_join_all, try_join}; use nix::{ sys::signal::{kill, Signal}, unistd::Pid, @@ -169,19 +170,21 @@ impl ProviderNamespace for Nativ let data_dir = PathBuf::from(format!("{}{}", base_dir_raw, NODE_DATA_DIR)); let scripts_dir = PathBuf::from(format!("{}{}", base_dir_raw, NODE_SCRIPTS_DIR)); self.filesystem.create_dir(&base_dir).await?; - self.filesystem.create_dir(&config_dir).await?; - self.filesystem.create_dir(&data_dir).await?; - self.filesystem.create_dir(&scripts_dir).await?; + try_join!( + self.filesystem.create_dir(&config_dir), + self.filesystem.create_dir(&data_dir), + self.filesystem.create_dir(&scripts_dir), + )?; // copy injected files + let mut futures = vec![]; for file in options.injected_files { - self.filesystem - .copy( - file.local_path, - format!("{}{}", base_dir_raw, file.remote_path.to_string_lossy()), - ) - .await?; + futures.push(self.filesystem.copy( + file.local_path, + format!("{}{}", base_dir_raw, file.remote_path.to_string_lossy()), + )); } + try_join_all(futures).await?; let (process, stdout_reading_handle, stderr_reading_handle, log_writing_handle) = create_process_with_log_tasks( From 0a26f057a876fca31386e5c939475d544aef35ab Mon Sep 17 00:00:00 2001 From: l0r1s Date: Tue, 26 Sep 2023 14:21:55 +0300 Subject: [PATCH 66/69] feat: updated CI to run provider tests on one thread and rest multi-threads --- .github/workflows/ci.yml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e61455221..7d48ffb9c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -37,9 +37,12 @@ jobs: - name: Build run: cargo build - - name: Tests + - name: Tests (except provider crate) + run: cargo test --workspace --exclude provider + + - name: Tests (provider crate) # there should be a unique test thread for native provider tests (asserting spawned processes count) - run: cargo test -- --test-threads 1 + cargo test -p provider -- --test-threads 1 coverage: name: Zombienet SDK - coverage From 44be685119124467eeab4bb636693eb6f809af8b Mon Sep 17 00:00:00 2001 From: l0r1s 
Date: Tue, 26 Sep 2023 16:25:28 +0300 Subject: [PATCH 67/69] chore: fix pipeline docs --- .github/workflows/documentation.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index a40eee4ad..d22fb55ce 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -35,13 +35,15 @@ jobs: cargo doc --no-deps echo "" > target/doc/index.html + + - name: Move docs run: | mkdir -p ./doc mv ./target/doc/* ./doc git config user.email "github-action@users.noreply.github.com" git config user.name "GitHub Action" - git config user.password ${{ secrets.GH_PAGES_TOKEN }} + git config user.password "${{ secrets.GH_PAGES_TOKEN }}" git checkout --orphan gh-pages mkdir to_delete shopt -s extglob From b5388bff4ce66d9bd6898028684f36ec9c857857 Mon Sep 17 00:00:00 2001 From: l0r1s Date: Tue, 26 Sep 2023 16:37:51 +0300 Subject: [PATCH 68/69] feat: fix pipeline missing run --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 7d48ffb9c..b13312c4b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -42,7 +42,7 @@ jobs: - name: Tests (provider crate) # there should be a unique test thread for native provider tests (asserting spawned processes count) - cargo test -p provider -- --test-threads 1 + run: cargo test -p provider -- --test-threads 1 coverage: name: Zombienet SDK - coverage From 7f7296463e252702cc1ec810593cbae7b04640d7 Mon Sep 17 00:00:00 2001 From: l0r1s Date: Tue, 26 Sep 2023 16:39:29 +0300 Subject: [PATCH 69/69] chore: add comment for static setup --- crates/provider/src/native.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/provider/src/native.rs b/crates/provider/src/native.rs index 3c5af7b19..b73c4605b 100644 --- a/crates/provider/src/native.rs +++ b/crates/provider/src/native.rs @@ -269,6 +269,7 @@ impl ProviderNamespace for Nativ } async fn static_setup(&self) -> Result<(), ProviderError> { + // no static setup exists for native provider todo!() }