diff --git a/integration_tests/src/base_node_process.rs b/integration_tests/src/base_node_process.rs index eac503e6e1..ad3a3be42e 100644 --- a/integration_tests/src/base_node_process.rs +++ b/integration_tests/src/base_node_process.rs @@ -31,11 +31,12 @@ use std::{ }; use rand::rngs::OsRng; +use tari_app_utilities::identity_management::save_as_json; use tari_base_node::{run_base_node, BaseNodeConfig, MetricsConfig}; use tari_base_node_grpc_client::BaseNodeGrpcClient; use tari_common::configuration::{CommonConfig, MultiaddrList}; use tari_comms::{multiaddr::Multiaddr, peer_manager::PeerFeatures, NodeIdentity}; -use tari_comms_dht::DhtConfig; +use tari_comms_dht::{DbConnectionUrl, DhtConfig}; use tari_p2p::{auto_update::AutoUpdateConfig, Network, PeerSeedsConfig, TransportType}; use tari_shutdown::Shutdown; use tokio::task; @@ -96,7 +97,6 @@ pub async fn spawn_base_node_with_config( let grpc_port: u64; let temp_dir_path: PathBuf; let base_node_identity: NodeIdentity; - let base_node_address: Multiaddr; if let Some(node_ps) = world.base_nodes.get(&bn_name) { port = node_ps.port; @@ -116,8 +116,9 @@ pub async fn spawn_base_node_with_config( .join(format!("grpc_port_{}", grpc_port)) .join(bn_name.clone()); - base_node_address = Multiaddr::from_str(&format!("/ip4/127.0.0.1/tcp/{}", port)).unwrap(); + let base_node_address = Multiaddr::from_str(&format!("/ip4/127.0.0.1/tcp/{}", port)).unwrap(); base_node_identity = NodeIdentity::random(&mut OsRng, base_node_address, PeerFeatures::COMMUNICATION_NODE); + save_as_json(temp_dir_path.join("base_node.json"), &base_node_identity).unwrap(); }; println!("Base node identity: {}", base_node_identity); @@ -163,8 +164,8 @@ pub async fn spawn_base_node_with_config( base_node_config.base_node.metadata_auto_ping_interval = Duration::from_secs(15); base_node_config.base_node.data_dir = temp_dir_path.to_path_buf(); - base_node_config.base_node.identity_file = temp_dir_path.clone().join("base_node_id.json"); - base_node_config.base_node.tor_identity_file = temp_dir_path.clone().join("base_node_tor_id.json"); + base_node_config.base_node.identity_file = PathBuf::from("base_node_id.json"); + base_node_config.base_node.tor_identity_file = PathBuf::from("base_node_tor_id.json"); base_node_config.base_node.max_randomx_vms = 1; base_node_config.base_node.lmdb_path = temp_dir_path.to_path_buf(); @@ -179,6 +180,7 @@ pub async fn spawn_base_node_with_config( .listener_address .clone()]); base_node_config.base_node.p2p.dht = DhtConfig::default_local_test(); + base_node_config.base_node.p2p.dht.database_url = DbConnectionUrl::file(format!("{}-dht.sqlite", port)); base_node_config.base_node.p2p.dht.network_discovery.enabled = true; base_node_config.base_node.p2p.allow_test_addresses = true; base_node_config.base_node.storage.orphan_storage_capacity = 10; @@ -193,6 +195,7 @@ pub async fn spawn_base_node_with_config( "Initializing base node: name={}; port={}; grpc_port={}; is_seed_node={}", name_cloned, port, grpc_port, is_seed_node ); + let result = run_base_node(shutdown, Arc::new(base_node_identity), Arc::new(base_node_config)).await; if let Err(e) = result { panic!("{:?}", e); diff --git a/integration_tests/src/lib.rs b/integration_tests/src/lib.rs index fb04905d94..298d9ac968 100644 --- a/integration_tests/src/lib.rs +++ b/integration_tests/src/lib.rs @@ -53,7 +53,7 @@ pub fn get_port(range: Range) -> Option { pub async fn wait_for_service(port: u64) { // The idea is that if the port is taken it means the service is running. 
- // If it's not taken the port hasn't come up yet + // If the port is not taken the service hasn't come up yet let max_tries = 40; let mut attempts = 0; @@ -71,16 +71,12 @@ pub async fn wait_for_service(port: u64) { } } -pub async fn get_peer_addresses(world: &TariWorld, peers: &Vec) -> Vec { - let mut peer_addresses = vec![]; - for peer in peers { - let peer = world.base_nodes.get(peer.as_str()).unwrap(); - peer_addresses.push(format!( - "{}::{}", - peer.identity.public_key(), - peer.identity.first_public_address().expect("No public addresses") - )); - } - - peer_addresses +pub async fn get_peer_addresses(world: &TariWorld, peers: &[String]) -> Vec { + peers + .iter() + .map(|peer_string| { + let peer = world.base_nodes.get(peer_string.as_str()).unwrap().identity.to_peer(); + peer.to_short_string() + }) + .collect() } diff --git a/integration_tests/src/miner.rs b/integration_tests/src/miner.rs index e85764d227..455e2ac303 100644 --- a/integration_tests/src/miner.rs +++ b/integration_tests/src/miner.rs @@ -112,7 +112,7 @@ impl MinerProcess { ("miner.num_mining_threads".to_string(), "1".to_string()), ("miner.mine_on_tip_only".to_string(), "false".to_string()), ], - network: None, + network: Some(Network::LocalNet), }, mine_until_height: None, miner_max_blocks: blocks, diff --git a/integration_tests/src/wallet_process.rs b/integration_tests/src/wallet_process.rs index 16d77615aa..04843c0576 100644 --- a/integration_tests/src/wallet_process.rs +++ b/integration_tests/src/wallet_process.rs @@ -20,19 +20,18 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::{path::PathBuf, str::FromStr, thread, time::Duration}; +use std::{path::PathBuf, process, str::FromStr, thread, time::Duration}; use tari_app_grpc::tari_rpc::SetBaseNodeRequest; use tari_app_utilities::common_cli_args::CommonCliArgs; use tari_common::configuration::{CommonConfig, MultiaddrList}; use tari_comms::multiaddr::Multiaddr; -use tari_comms_dht::DhtConfig; +use tari_comms_dht::{DbConnectionUrl, DhtConfig}; use tari_console_wallet::{run_wallet_with_cli, Cli}; use tari_p2p::{auto_update::AutoUpdateConfig, Network, PeerSeedsConfig, TransportType}; use tari_shutdown::Shutdown; use tari_wallet::{transaction_service::config::TransactionRoutingMechanism, WalletConfig}; use tari_wallet_grpc_client::WalletGrpcClient; -use tempfile::tempdir; use tokio::runtime; use tonic::transport::Channel; @@ -40,11 +39,12 @@ use crate::{get_peer_addresses, get_port, wait_for_service, TariWorld}; #[derive(Clone, Debug)] pub struct WalletProcess { + pub config: WalletConfig, + pub grpc_port: u64, + pub kill_signal: Shutdown, pub name: String, pub port: u64, - pub grpc_port: u64, pub temp_dir_path: PathBuf, - pub kill_signal: Shutdown, } impl Drop for WalletProcess { @@ -65,17 +65,24 @@ pub async fn spawn_wallet( let port: u64; let grpc_port: u64; let temp_dir_path: PathBuf; + let wallet_config: WalletConfig; if let Some(wallet_ps) = world.wallets.get(&wallet_name) { port = wallet_ps.port; grpc_port = wallet_ps.grpc_port; temp_dir_path = wallet_ps.temp_dir_path.clone(); + wallet_config = wallet_ps.config.clone(); } else { // each spawned wallet will use different ports port = get_port(18000..18499).unwrap(); grpc_port = get_port(18500..18999).unwrap(); - // create a new temporary directory - temp_dir_path = tempdir().unwrap().path().to_path_buf(); + + temp_dir_path = get_base_dir() + .join("wallets") + 
.join(format!("grpc_port_{}", grpc_port)) + .join(wallet_name.clone()); + + wallet_config = WalletConfig::default(); }; let base_node = base_node_name.map(|name| { @@ -97,11 +104,14 @@ pub async fn spawn_wallet( let temp_dir = temp_dir_path.clone(); + let mut common_config = CommonConfig::default(); + common_config.base_path = temp_dir_path.clone(); + let wallet_cfg = wallet_config.clone(); thread::spawn(move || { - let mut wallet_config = tari_console_wallet::ApplicationConfig { - common: CommonConfig::default(), + let mut wallet_app_config = tari_console_wallet::ApplicationConfig { + common: common_config, auto_update: AutoUpdateConfig::default(), - wallet: WalletConfig::default(), + wallet: wallet_cfg, peer_seeds: PeerSeedsConfig { peer_seeds: peer_addresses.into(), ..Default::default() @@ -110,38 +120,44 @@ pub async fn spawn_wallet( eprintln!("Using wallet temp_dir: {}", temp_dir_path.clone().display()); - wallet_config.wallet.identity_file = Some(temp_dir_path.clone().join("wallet_id.json")); - wallet_config.wallet.network = Network::LocalNet; - wallet_config.wallet.password = Some("test".into()); - wallet_config.wallet.grpc_enabled = true; - wallet_config.wallet.grpc_address = + wallet_app_config.wallet.identity_file = Some(temp_dir_path.clone().join("wallet_id.json")); + wallet_app_config.wallet.network = Network::LocalNet; + wallet_app_config.wallet.password = Some("test".into()); + wallet_app_config.wallet.grpc_enabled = true; + wallet_app_config.wallet.grpc_address = Some(Multiaddr::from_str(&format!("/ip4/127.0.0.1/tcp/{}", grpc_port)).unwrap()); - wallet_config.wallet.data_dir = temp_dir_path.clone().join("../../data").join("wallet"); - wallet_config.wallet.db_file = temp_dir_path.clone().join("db").join("console_wallet.db"); - wallet_config.wallet.contacts_auto_ping_interval = Duration::from_secs(2); - wallet_config + wallet_app_config.wallet.db_file = PathBuf::from("console_wallet.db"); + wallet_app_config.wallet.contacts_auto_ping_interval = Duration::from_secs(2); + wallet_app_config .wallet .base_node_service_config .base_node_monitor_refresh_interval = Duration::from_secs(15); - wallet_config.wallet.p2p.transport.transport_type = TransportType::Tcp; - wallet_config.wallet.p2p.transport.tcp.listener_address = + wallet_app_config.wallet.p2p.transport.transport_type = TransportType::Tcp; + wallet_app_config.wallet.p2p.transport.tcp.listener_address = Multiaddr::from_str(&format!("/ip4/127.0.0.1/tcp/{}", port)).unwrap(); - wallet_config.wallet.p2p.public_addresses = - MultiaddrList::from(vec![wallet_config.wallet.p2p.transport.tcp.listener_address.clone()]); - wallet_config.wallet.p2p.datastore_path = temp_dir_path.clone().join("peer_db").join("wallet"); - wallet_config.wallet.p2p.dht = DhtConfig::default_local_test(); - wallet_config.wallet.p2p.allow_test_addresses = true; + wallet_app_config.wallet.p2p.public_addresses = MultiaddrList::from(vec![wallet_app_config + .wallet + .p2p + .transport + .tcp + .listener_address + .clone()]); + wallet_app_config.wallet.p2p.dht = DhtConfig::default_local_test(); + wallet_app_config.wallet.p2p.dht.database_url = DbConnectionUrl::file(format!("{}-dht.sqlite", port)); + wallet_app_config.wallet.p2p.allow_test_addresses = true; if let Some(mech) = routing_mechanism { - wallet_config + wallet_app_config .wallet .transaction_service_config .transaction_routing_mechanism = mech; } // FIXME: wallet doesn't pick up the custom base node for some reason atm - wallet_config.wallet.custom_base_node = + wallet_app_config.wallet.custom_base_node = 
base_node_cloned.map(|(pubkey, port, _)| format!("{}::/ip4/127.0.0.1/tcp/{}", pubkey, port)); + wallet_app_config.wallet.set_base_path(temp_dir_path.clone()); + let rt = runtime::Builder::new_multi_thread().enable_all().build().unwrap(); let mut cli = cli.unwrap_or_else(get_default_cli); @@ -150,13 +166,14 @@ pub async fn spawn_wallet( cli.seed_words_file_name = Some(temp_dir_path.join(file_name)); } - if let Err(e) = run_wallet_with_cli(&mut send_to_thread_shutdown, rt, &mut wallet_config, cli) { + if let Err(e) = run_wallet_with_cli(&mut send_to_thread_shutdown, rt, &mut wallet_app_config, cli) { panic!("{:?}", e); } }); // make the new wallet able to be referenced by other processes world.wallets.insert(wallet_name.clone(), WalletProcess { + config: wallet_config, name: wallet_name.clone(), port, grpc_port, @@ -228,3 +245,8 @@ impl WalletProcess { self.kill_signal.trigger(); } } + +pub fn get_base_dir() -> PathBuf { + let crate_root = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + crate_root.join(format!("tests/temp/cucumber_{}", process::id())) +} diff --git a/integration_tests/src/world.rs b/integration_tests/src/world.rs index f53eeeb0c0..83b747bdb6 100644 --- a/integration_tests/src/world.rs +++ b/integration_tests/src/world.rs @@ -223,12 +223,5 @@ impl TariWorld { self.seed_nodes.as_slice() } - pub async fn after(&mut self, _scenario: &Scenario) { - self.base_nodes.clear(); - self.chat_clients.clear(); - self.ffi_wallets.clear(); - self.miners.clear(); - self.seed_nodes.clear(); - self.wallets.clear(); - } + pub async fn after(&mut self, _scenario: &Scenario) {} } diff --git a/integration_tests/tests/cucumber.rs b/integration_tests/tests/cucumber.rs index 77864aa6bc..1153b4ff9c 100644 --- a/integration_tests/tests/cucumber.rs +++ b/integration_tests/tests/cucumber.rs @@ -25,6 +25,7 @@ use std::{ fs, io, path::PathBuf, + process, str::{self}, sync::{Arc, Mutex}, }; @@ -115,4 +116,16 @@ fn main() { // If by any chance we have anything in the stdout buffer just log it. 
flush_stdout(&stdout_buffer); + + // Move the logs to the temp dir + let crate_root = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + let log_dir = crate_root.join("log"); + let test_run_dir = crate_root.join(format!("tests/temp/cucumber_{}/logs", process::id())); + fs::create_dir_all(&test_run_dir).unwrap(); + + for entry in fs::read_dir(log_dir).unwrap() { + let file = entry.unwrap(); + fs::copy(file.path(), test_run_dir.join(file.file_name())).unwrap(); + fs::remove_file(file.path()).unwrap(); + } } diff --git a/integration_tests/tests/features/BlockExplorerGRPC.feature b/integration_tests/tests/features/BlockExplorerGRPC.feature index 9a20d3f699..b9efb4215b 100644 --- a/integration_tests/tests/features/BlockExplorerGRPC.feature +++ b/integration_tests/tests/features/BlockExplorerGRPC.feature @@ -11,4 +11,4 @@ Feature: Block Explorer GRPC When I merge mine 2 blocks via PROXY Then all nodes are at height 2 When I request the difficulties of a node NODE -# Then difficulties are available + # Then difficulties are available diff --git a/integration_tests/tests/features/Propagation.feature b/integration_tests/tests/features/Propagation.feature index d60eb14a14..8ffddc9aa6 100644 --- a/integration_tests/tests/features/Propagation.feature +++ b/integration_tests/tests/features/Propagation.feature @@ -67,25 +67,26 @@ Feature: Block Propagation When mining node MINER mines 15 blocks Then all nodes are at height 20 - # Waiting for "When I stop node" step - @missing-step + @non-sync-propagation Scenario: Node should lag for while before syncing Given I have 1 seed nodes When I have a SHA3 miner MINER connected to all seed nodes When I have a lagging delayed node LAG1 connected to node MINER with blocks_behind_before_considered_lagging 6 + # Must ensure time for nodes to communicate or propagation will get missed + When I wait 10 seconds When mining node MINER mines 1 blocks - # Then all nodes are at height 1 - # When I stop node LAG1 - # When mining node MINER mines 5 blocks - # Then node MINER is at height 6 - # When I start base node LAG1 + Then all nodes are at height 1 + When I stop node LAG1 + When mining node MINER mines 5 blocks + Then node MINER is at height 6 + When I start base node LAG1 # Wait for node to so start and get into listening mode - # Then node LAG1 has reached initial sync - # #node was shutdown, so it never received the propagation messages - # Then node LAG1 is at height 1 - # Given mining node MINER mines 1 blocks - # Then node MINER is at height 7 - # Then all nodes are at height 7 + Then node LAG1 has reached initial sync + # node was shutdown, so it never received the propagation messages + Then node LAG1 is at height 1 + Given mining node MINER mines 1 blocks + Then node MINER is at height 7 + Then all nodes are at height 7 @critical @pruned Scenario: Pruned node should prune outputs diff --git a/integration_tests/tests/features/Reorgs.feature b/integration_tests/tests/features/Reorgs.feature index 264123f6bf..8d256362ae 100644 --- a/integration_tests/tests/features/Reorgs.feature +++ b/integration_tests/tests/features/Reorgs.feature @@ -121,10 +121,10 @@ Feature: Reorgs When I start base node NODE2 # Here is where it all goes wrong. 
the restarted node never syncs Then all nodes are at height 20 - # # Because TX1 should have been re_orged out we should be able to spend CB1 again - When I create a transaction TX2 spending CB1 to UTX2 - When I submit transaction TX2 to PNODE1 - Then PNODE1 has TX2 in MEMPOOL state + # Because TX1 should have been re_orged out we should be able to spend CB1 again + When I create a transaction TX2 spending CB1 to UTX2 + When I submit transaction TX2 to PNODE1 + Then PNODE1 has TX2 in MEMPOOL state @reorg @broken Scenario: Zero-conf reorg with spending @@ -134,40 +134,40 @@ Feature: Reorgs When I mine a block on NODE1 with coinbase CB1 When I mine 4 blocks on NODE1 When I create a custom fee transaction TX1 spending CB1 to UTX1 with fee 20 - When I create a custom fee transaction TX11 spending UTX1 to UTX11 with fee 20 - When I submit transaction TX1 to NODE1 - When I submit transaction TX11 to NODE1 + When I create a custom fee transaction TX11 spending UTX1 to UTX11 with fee 20 + When I submit transaction TX1 to NODE1 + When I submit transaction TX11 to NODE1 When I mine 1 blocks on NODE1 - Then NODE1 has TX1 in MINED state - And NODE1 has TX11 in MINED state - And all nodes are at height 20 - And I stop node NODE1 - And node NODE2 is at height 20 - When I mine a block on NODE2 with coinbase CB2 - When I mine 3 blocks on NODE2 - When I create a custom fee transaction TX2 spending CB2 to UTX2 with fee 20 - When I create a custom fee transaction TX21 spending UTX2 to UTX21 with fee 20 - When I submit transaction TX2 to NODE2 - When I submit transaction TX21 to NODE2 - When I mine 1 blocks on NODE2 - Then node NODE2 is at height 25 - And NODE2 has TX2 in MINED state - And NODE2 has TX21 in MINED state - And I stop node NODE2 - When I start base node NODE1 - And node NODE1 is at height 20 - When I mine a block on NODE1 with coinbase CB3 + Then NODE1 has TX1 in MINED state + And NODE1 has TX11 in MINED state + And all nodes are at height 20 + And I stop node NODE1 + And node NODE2 is at height 20 + When I mine a block on NODE2 with coinbase CB2 + When I mine 3 blocks on NODE2 + When I create a custom fee transaction TX2 spending CB2 to UTX2 with fee 20 + When I create a custom fee transaction TX21 spending UTX2 to UTX21 with fee 20 + When I submit transaction TX2 to NODE2 + When I submit transaction TX21 to NODE2 + When I mine 1 blocks on NODE2 + Then node NODE2 is at height 25 + And NODE2 has TX2 in MINED state + And NODE2 has TX21 in MINED state + And I stop node NODE2 + When I start base node NODE1 + And node NODE1 is at height 20 + When I mine a block on NODE1 with coinbase CB3 When I mine 3 blocks on NODE1 - When I create a custom fee transaction TX3 spending CB3 to UTX3 with fee 20 - When I create a custom fee transaction TX31 spending UTX3 to UTX31 with fee 20 - When I submit transaction TX3 to NODE1 - When I submit transaction TX31 to NODE1 + When I create a custom fee transaction TX3 spending CB3 to UTX3 with fee 20 + When I create a custom fee transaction TX31 spending UTX3 to UTX31 with fee 20 + When I submit transaction TX3 to NODE1 + When I submit transaction TX31 to NODE1 When I mine 1 blocks on NODE1 - Then NODE1 has TX3 in MINED state - And NODE1 has TX31 in MINED state - And node NODE1 is at height 25 - When I start base node NODE2 - Then all nodes are on the same chain at height 25 + Then NODE1 has TX3 in MINED state + And NODE1 has TX31 in MINED state + And node NODE1 is at height 25 + When I start base node NODE2 + Then all nodes are on the same chain at height 25 Scenario 
Outline: Massive multiple reorg # diff --git a/integration_tests/tests/features/WalletTransactions.feature b/integration_tests/tests/features/WalletTransactions.feature index 447355df52..27931889dc 100644 --- a/integration_tests/tests/features/WalletTransactions.feature +++ b/integration_tests/tests/features/WalletTransactions.feature @@ -98,36 +98,35 @@ Feature: Wallet Transactions Then all nodes are at height 10 Then I wait for wallet WALLET_A to have at least 20000000000 uT - # @flaky @missing-steps - # Scenario: Wallet imports spent outputs that become invalidated - # Given I have a seed node NODE - # When I have 1 base nodes connected to all seed nodes - # When I have wallet WALLET_A connected to all seed nodes - # When I have wallet WALLET_B connected to all seed nodes - # When I have wallet WALLET_C connected to all seed nodes - # When I have mining node MINER connected to base node NODE and wallet WALLET_A - # When mining node MINER mines 5 blocks - # Then all nodes are at height 5 - # Then I wait for wallet WALLET_A to have at least 10000000000 uT - # When I send 1000000 uT from wallet WALLET_A to wallet WALLET_B at fee 100 - # When mining node MINER mines 5 blocks - # Then all nodes are at height 10 - # Then I wait for wallet WALLET_B to have at least 1000000 uT - # When I send 900000 uT from wallet WALLET_B to wallet WALLET_A at fee 100 - # When mining node MINER mines 5 blocks - # Then all nodes are at height 15 - # When I wait for wallet WALLET_B to have at least 50000 uT - # Then I stop wallet WALLET_B - # When I wait 30 seconds - # Then I import WALLET_B spent outputs to WALLET_C - # Then I wait for wallet WALLET_C to have at least 1000000 uT - # Then I restart wallet WALLET_C - # Then I wait for wallet WALLET_C to have less than 1 uT - # Then I check if last imported transactions are invalid in wallet WALLET_C + Scenario: Wallet imports spent outputs that become invalidated + Given I have a seed node NODE + When I have 1 base nodes connected to all seed nodes + When I have wallet WALLET_A connected to all seed nodes + When I have wallet WALLET_B connected to all seed nodes + When I have wallet WALLET_C connected to all seed nodes + When I have mining node MINER connected to base node NODE and wallet WALLET_A + When mining node MINER mines 5 blocks + Then all nodes are at height 5 + Then I wait for wallet WALLET_A to have at least 10000000000 uT + When I send 1000000 uT from wallet WALLET_A to wallet WALLET_B at fee 100 + When mining node MINER mines 5 blocks + Then all nodes are at height 10 + Then I wait for wallet WALLET_B to have at least 1000000 uT + When I send 900000 uT from wallet WALLET_B to wallet WALLET_A at fee 100 + When mining node MINER mines 5 blocks + Then all nodes are at height 15 + When I wait for wallet WALLET_B to have at least 50000 uT + Then I stop wallet WALLET_B + When I wait 30 seconds + Then I import WALLET_B spent outputs to WALLET_C + #Then I wait for wallet WALLET_C to have at least 1000000 uT + Then I restart wallet WALLET_C + Then I wait for wallet WALLET_C to have less than 1 uT + Then I check if last imported transactions are invalid in wallet WALLET_C @flaky Scenario: Wallet imports reorged outputs that become invalidated - # # Chain 1 + # Chain 1 Given I have a seed node SEED_B When I have a base node B connected to seed SEED_B When I have wallet WB connected to base node B @@ -144,15 +143,15 @@ Feature: Wallet Transactions When I wait 30 seconds Then I import WALLET_RECEIVE_TX unspent outputs to WALLET_IMPORTED Then I wait for wallet 
WALLET_IMPORTED to have at least 1000000 uT - # # This triggers a validation of the imported outputs + # This triggers a validation of the imported outputs Then I restart wallet WALLET_IMPORTED - # # Chain 2 + # Chain 2 Given I have a seed node SEED_C When I have a base node C connected to seed SEED_C When I have wallet WC connected to base node C When I have mining node CM connected to base node C and wallet WC When mining node CM mines 10 blocks with min difficulty 1000 and max difficulty 9999999999 - # # Connect chain 1 and 2 + # Connect chain 1 and 2 Then node B is at height 8 When node C is at height 10 When I have a base node SA connected to nodes B,C diff --git a/integration_tests/tests/steps/node_steps.rs b/integration_tests/tests/steps/node_steps.rs index f9ea3896e9..6a4f12271b 100644 --- a/integration_tests/tests/steps/node_steps.rs +++ b/integration_tests/tests/steps/node_steps.rs @@ -693,7 +693,7 @@ async fn lagging_delayed_node(world: &mut TariWorld, delayed_node: String, node: let mut base_node_config = BaseNodeConfig::default(); base_node_config.state_machine.blocks_behind_before_considered_lagging = delay; - spawn_base_node_with_config(world, true, delayed_node, vec![node], base_node_config).await; + spawn_base_node_with_config(world, false, delayed_node, vec![node], base_node_config).await; } #[then(expr = "node {word} has reached initial sync")]
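Illustrative companion sketch (not part of the patch above): the diff keeps each run's artifacts under integration_tests/tests/temp/cucumber_<pid> instead of clearing state in TariWorld::after, and copies logs there, so old run directories accumulate. The helper below shows one possible way to prune stale runs while keeping the current one; the function name prune_old_test_runs and the keep-only-current-pid retention policy are assumptions, not something the PR adds. It relies only on the same std items the patch already uses (fs, PathBuf, process::id, CARGO_MANIFEST_DIR).

use std::{fs, path::PathBuf, process};

/// Remove per-run cucumber temp directories from previous test runs,
/// keeping the directory that belongs to the currently running process.
fn prune_old_test_runs() -> std::io::Result<()> {
    // Same root as get_base_dir() in wallet_process.rs: <crate>/tests/temp
    let temp_root = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("tests/temp");
    if !temp_root.exists() {
        return Ok(());
    }
    let current = format!("cucumber_{}", process::id());
    for entry in fs::read_dir(&temp_root)? {
        let entry = entry?;
        let name = entry.file_name();
        let name = name.to_string_lossy();
        // Only touch cucumber_* directories, and never the active run's directory.
        if name.starts_with("cucumber_") && name != current {
            fs::remove_dir_all(entry.path())?;
        }
    }
    Ok(())
}

If something like this were wanted, it would most naturally be called once at the start of main() in cucumber.rs, before any scenarios spawn processes that write into the current run's directory.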