diff --git a/Cargo.toml b/Cargo.toml
index 123eb69702976..cbbc345cf45d3 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -279,7 +279,7 @@ homepage = "https://aptoslabs.com"
 license = "Apache-2.0"
 publish = false
 repository = "https://github.com/aptos-labs/aptos-core"
-rust-version = "1.75.0"
+rust-version = "1.78.0"
 
 [workspace.dependencies]
 # Internal crate dependencies.
@@ -539,7 +539,12 @@ derive_more = "0.99.11"
 diesel = "2.1"
 # Use the crate version once this feature gets released on crates.io:
 # https://github.com/weiznich/diesel_async/commit/e165e8c96a6c540ebde2d6d7c52df5c5620a4bf1
-diesel-async = { git = "https://github.com/weiznich/diesel_async.git", rev = "d02798c67065d763154d7272dd0c09b39757d0f2", features = ["async-connection-wrapper", "postgres", "bb8", "tokio"] }
+diesel-async = { git = "https://github.com/weiznich/diesel_async.git", rev = "d02798c67065d763154d7272dd0c09b39757d0f2", features = [
+    "async-connection-wrapper",
+    "postgres",
+    "bb8",
+    "tokio",
+] }
 diesel_migrations = { version = "2.1.0", features = ["postgres"] }
 difference = "2.0.0"
 digest = "0.9.0"
diff --git a/api/src/context.rs b/api/src/context.rs
index d8f17ab06d887..dad62cfc124f3 100644
--- a/api/src/context.rs
+++ b/api/src/context.rs
@@ -1177,13 +1177,18 @@ impl Context {
                 E::internal_with_code(e, AptosErrorCode::InternalError, ledger_info)
             })?;
 
-        let gas_schedule_params =
-            match GasScheduleV2::fetch_config(&state_view).and_then(|gas_schedule| {
-                let feature_version = gas_schedule.feature_version;
-                let gas_schedule = gas_schedule.into_btree_map();
-                AptosGasParameters::from_on_chain_gas_schedule(&gas_schedule, feature_version)
+        let gas_schedule_params = {
+            let may_be_params =
+                GasScheduleV2::fetch_config(&state_view).and_then(|gas_schedule| {
+                    let feature_version = gas_schedule.feature_version;
+                    let gas_schedule = gas_schedule.into_btree_map();
+                    AptosGasParameters::from_on_chain_gas_schedule(
+                        &gas_schedule,
+                        feature_version,
+                    )
                     .ok()
-            }) {
+                });
+            match may_be_params {
                 Some(gas_schedule) => Ok(gas_schedule),
                 None => GasSchedule::fetch_config(&state_view)
                     .and_then(|gas_schedule| {
@@ -1197,7 +1202,8 @@ impl Context {
                             ledger_info,
                         )
                     }),
-            }?;
+            }?
+        };
 
         // Update the cache
         cache.gas_schedule_params = Some(gas_schedule_params.clone());
diff --git a/api/src/response.rs b/api/src/response.rs
index e376ca42e9182..ab94491b083bc 100644
--- a/api/src/response.rs
+++ b/api/src/response.rs
@@ -77,6 +77,7 @@ macro_rules! generate_error_traits {
         $(
         pub trait [<$trait_name Error>]: AptosErrorResponse {
             // With ledger info and an error code
+            #[allow(unused)]
            fn [<$trait_name:snake _with_code>]<Err: std::fmt::Display>(
                 err: Err,
                 error_code: aptos_api_types::AptosErrorCode,
@@ -84,11 +85,13 @@ macro_rules! generate_error_traits {
             ) -> Self where Self: Sized;
 
             // With an error code and no ledger info headers (special case)
+            #[allow(unused)]
             fn [<$trait_name:snake _with_code_no_info>]<Err: std::fmt::Display>(
                 err: Err,
                 error_code: aptos_api_types::AptosErrorCode,
             ) -> Self where Self: Sized;
 
+            #[allow(unused)]
             fn [<$trait_name:snake _with_vm_status>]<Err: std::fmt::Display>(
                 err: Err,
                 error_code: aptos_api_types::AptosErrorCode,
@@ -96,6 +99,7 @@ macro_rules! generate_error_traits {
                 ledger_info: &aptos_api_types::LedgerInfo
             ) -> Self where Self: Sized;
 
+            #[allow(unused)]
             fn [<$trait_name:snake _from_aptos_error>](
                 aptos_error: aptos_api_types::AptosError,
                 ledger_info: &aptos_api_types::LedgerInfo
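Note on the `#[allow(unused)]` additions above (and in several crates below): these are presumably there to quiet the newer toolchain's stricter unused-code detection, which can flag macro-generated or test-only trait methods that no caller exercises. A minimal sketch of the pattern, with hypothetical names:

```rust
trait ReportError {
    fn report(&self) -> String;

    // Newer toolchains may warn when a trait method is generated but never
    // called; the attribute documents that keeping it is intentional.
    #[allow(unused)]
    fn report_with_context(&self, context: &str) -> String {
        format!("{}: {}", context, self.report())
    }
}

struct Timeout;

impl ReportError for Timeout {
    fn report(&self) -> String {
        "operation timed out".to_string()
    }
}

fn main() {
    println!("{}", Timeout.report());
}
```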
diff --git a/aptos-move/aptos-e2e-comparison-testing/src/lib.rs b/aptos-move/aptos-e2e-comparison-testing/src/lib.rs
index 9f672e517acc8..83dbfb1af3613 100644
--- a/aptos-move/aptos-e2e-comparison-testing/src/lib.rs
+++ b/aptos-move/aptos-e2e-comparison-testing/src/lib.rs
@@ -72,11 +72,7 @@ impl IndexWriter {
             let file = if !path.exists() {
                 File::create(path).expect("Error encountered while creating file!")
             } else {
-                OpenOptions::new()
-                    .write(true)
-                    .append(true)
-                    .open(path)
-                    .unwrap()
+                OpenOptions::new().append(true).open(path).unwrap()
             };
             file
         };
diff --git a/aptos-move/aptos-release-builder/src/components/mod.rs b/aptos-move/aptos-release-builder/src/components/mod.rs
index f5169c3b89e36..f814f10bda8f9 100644
--- a/aptos-move/aptos-release-builder/src/components/mod.rs
+++ b/aptos-move/aptos-release-builder/src/components/mod.rs
@@ -757,7 +757,7 @@ impl Default for ReleaseConfig {
     }
 }
 
-pub fn get_execution_hash(result: &Vec<(String, String)>) -> Vec<u8> {
+pub fn get_execution_hash(result: &[(String, String)]) -> Vec<u8> {
     if result.is_empty() {
         "vector::empty()".to_owned().into_bytes()
     } else {
@@ -821,7 +821,7 @@ impl Default for ProposalMetadata {
     }
 }
 
-fn get_signer_arg(is_testnet: bool, next_execution_hash: &Vec<u8>) -> &str {
+fn get_signer_arg(is_testnet: bool, next_execution_hash: &[u8]) -> &str {
     if is_testnet && next_execution_hash.is_empty() {
         "framework_signer"
     } else {
diff --git a/aptos-move/aptos-transactional-test-harness/src/aptos_test_harness.rs b/aptos-move/aptos-transactional-test-harness/src/aptos_test_harness.rs
index 4fd4e5d5fe035..2e701e1ed0f3b 100644
--- a/aptos-move/aptos-transactional-test-harness/src/aptos_test_harness.rs
+++ b/aptos-move/aptos-transactional-test-harness/src/aptos_test_harness.rs
@@ -805,10 +805,7 @@ impl<'a> MoveTestAdapter<'a> for AptosTestAdapter<'a> {
         // TODO: Do we still need this?
         let _private_key = match (extra_args.private_key, named_addr_opt) {
             (Some(private_key), _) => self.resolve_private_key(&private_key),
-            (None, Some(named_addr)) => match self
-                .private_key_mapping
-                .get(&named_addr.as_str().to_string())
-            {
+            (None, Some(named_addr)) => match self.private_key_mapping.get(named_addr.as_str()) {
                 Some(private_key) => private_key.clone(),
                 None => panic_missing_private_key_named("publish", named_addr.as_str()),
             },
diff --git a/aptos-move/aptos-vm/src/aptos_vm.rs b/aptos-move/aptos-vm/src/aptos_vm.rs
index e5f0206c23709..ee2b68e4286dd 100644
--- a/aptos-move/aptos-vm/src/aptos_vm.rs
+++ b/aptos-move/aptos-vm/src/aptos_vm.rs
@@ -1732,7 +1732,8 @@ impl AptosVM {
         // Revalidate the transaction.
         let mut prologue_session =
             unwrap_or_discard!(PrologueSession::new(self, &txn_data, resolver));
-        unwrap_or_discard!(prologue_session.execute(|session| {
+
+        let exec_result = prologue_session.execute(|session| {
             let required_deposit = self.get_required_deposit(
                 session,
                 resolver,
@@ -1750,7 +1751,8 @@ impl AptosVM {
                 is_approved_gov_script,
                 &mut traversal_context,
             )
-        }));
+        });
+        unwrap_or_discard!(exec_result);
         let storage_gas_params = unwrap_or_discard!(get_or_vm_startup_failure(
             &self.storage_gas_params,
             log_context
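The `&Vec<T>` → `&[T]` signature changes above (`get_execution_hash`, `get_signer_arg`, and many more below) follow clippy's long-standing `ptr_arg` lint. A slice parameter is strictly more general: it accepts a `Vec`, an array, or a sub-slice without forcing callers to own a heap allocation. A minimal sketch with a hypothetical function:

```rust
// A slice parameter accepts Vec, arrays, and sub-slices alike.
fn sum_costs(costs: &[u64]) -> u64 {
    costs.iter().sum()
}

fn main() {
    let from_vec = vec![1, 2, 3];
    let from_array = [4, 5, 6];
    assert_eq!(sum_costs(&from_vec), 6);
    assert_eq!(sum_costs(&from_array), 15);
    assert_eq!(sum_costs(&from_vec[1..]), 5); // sub-slice works too
    println!("ok");
}
```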
diff --git a/aptos-move/block-executor/src/captured_reads.rs b/aptos-move/block-executor/src/captured_reads.rs
index 277422a4e642a..aaec491043d24 100644
--- a/aptos-move/block-executor/src/captured_reads.rs
+++ b/aptos-move/block-executor/src/captured_reads.rs
@@ -1204,7 +1204,7 @@ mod test {
         let with_metadata_reads = with_metadata_reads_by_kind();
         let resolved = DataRead::Resolved::<ValueType>(200);
 
-        let mixed_reads = vec![
+        let mixed_reads = [
             deletion_reads[0].clone(),
             with_metadata_reads[1].clone(),
             resolved,
diff --git a/aptos-move/e2e-tests/src/executor.rs b/aptos-move/e2e-tests/src/executor.rs
index 6b97f2a576a5b..45294c4d442dd 100644
--- a/aptos-move/e2e-tests/src/executor.rs
+++ b/aptos-move/e2e-tests/src/executor.rs
@@ -1268,9 +1268,9 @@ impl FakeExecutor {
     }
 
     pub fn assert_outputs_equal(
-        txns_output_1: &Vec<TransactionOutput>,
+        txns_output_1: &[TransactionOutput],
         name1: &str,
-        txns_output_2: &Vec<TransactionOutput>,
+        txns_output_2: &[TransactionOutput],
         name2: &str,
     ) {
         assert_eq!(
diff --git a/aptos-node/src/tests.rs b/aptos-node/src/tests.rs
index 36ab33550d2a5..f41079d37c2f5 100644
--- a/aptos-node/src/tests.rs
+++ b/aptos-node/src/tests.rs
@@ -80,7 +80,7 @@ fn test_create_single_node_test_config() {
         .unwrap();
     let f = std::fs::OpenOptions::new()
         .write(true)
-        .create(true)
+        .create_new(true)
         .open(&config_override_path)
         .expect("Couldn't open file");
     serde_yaml::to_writer(f, &config_override).unwrap();
diff --git a/config/src/config/node_config.rs b/config/src/config/node_config.rs
index de05334025249..ecdcfde372271 100644
--- a/config/src/config/node_config.rs
+++ b/config/src/config/node_config.rs
@@ -121,7 +121,7 @@ impl NodeConfig {
     /// Sets the data directory for this config
     pub fn set_data_dir(&mut self, data_dir: PathBuf) {
         // Set the base directory
-        self.base.data_dir = data_dir.clone();
+        self.base.data_dir.clone_from(&data_dir);
 
         // Set the data directory for each sub-module
         self.consensus.set_data_dir(data_dir.clone());
diff --git a/config/src/config/storage_config.rs b/config/src/config/storage_config.rs
index 54f8d4570bd5b..0234d868489c5 100644
--- a/config/src/config/storage_config.rs
+++ b/config/src/config/storage_config.rs
@@ -344,7 +344,9 @@ impl StorageConfig {
         let mut state_merkle_db_paths = ShardedDbPaths::default();
 
         if let Some(db_path_overrides) = self.db_path_overrides.as_ref() {
-            ledger_db_path = db_path_overrides.ledger_db_path.clone();
+            db_path_overrides
+                .ledger_db_path
+                .clone_into(&mut ledger_db_path);
 
             if let Some(state_kv_db_path) = db_path_overrides.state_kv_db_path.as_ref() {
                 state_kv_db_paths = ShardedDbPaths::new(state_kv_db_path);
diff --git a/config/src/generator.rs b/config/src/generator.rs
index 7fc4aa9f53018..0035830f70bc4 100644
--- a/config/src/generator.rs
+++ b/config/src/generator.rs
@@ -47,7 +47,7 @@ pub fn validator_swarm(
     let seeds = build_seed_for_network(seed_config, PeerRole::Validator);
     for node in &mut nodes {
         let network = node.validator_network.as_mut().unwrap();
-        network.seeds = seeds.clone();
+        network.seeds.clone_from(&seeds);
     }
 
     nodes.sort_by(|a, b| {
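Two `OpenOptions` cleanups appear in these hunks. Dropping `.write(true)` next to `.append(true)` removes a redundancy (append implies write access), and the test that wants a brand-new file switches to `.create_new(true)`, which fails with `AlreadyExists` rather than silently reusing an old file. These match newer clippy lints (likely `ineffective_open_options` and the `create(true)`-without-`truncate` warning, `suspicious_open_options`). A minimal sketch, assuming a scratch file named `demo.log`:

```rust
use std::fs::OpenOptions;
use std::io::Write;

fn main() -> std::io::Result<()> {
    // Append implies write access, so `.write(true)` would be redundant here.
    let mut log = OpenOptions::new().append(true).create(true).open("demo.log")?;
    writeln!(log, "appended line")?;

    // `create_new` errors instead of reusing an existing file, which is what
    // a test writing a one-off config usually wants.
    match OpenOptions::new().write(true).create_new(true).open("demo.log") {
        Ok(_) => println!("created fresh file"),
        Err(e) => println!("not overwriting: {e}"),
    }
    Ok(())
}
```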
diff --git a/config/src/network_id.rs b/config/src/network_id.rs
index bbdf8b0cd80fd..4fd7ea23a36d6 100644
--- a/config/src/network_id.rs
+++ b/config/src/network_id.rs
@@ -115,6 +115,7 @@ impl<'de> Deserialize<'de> for NetworkId {
     enum ConvertNetworkId {
         Validator,
         Public,
+        #[allow(dead_code)]
         Private(String),
         // These are here for migration, since both need to have their representation changed
         // in the 2nd step of migration, we can move to these identifiers
diff --git a/config/src/utils.rs b/config/src/utils.rs
index fe68ff87b6eef..65b5dd443a791 100644
--- a/config/src/utils.rs
+++ b/config/src/utils.rs
@@ -164,6 +164,7 @@ fn open_counter_file() -> PortCounterFiles {
             .read(true)
             .write(true)
             .create(true)
+            .truncate(false)
             .open(counter_path())
         {
             Ok(counter_file) => return PortCounterFiles::new(counter_file, lock_file),
diff --git a/consensus/src/block_storage/block_tree.rs b/consensus/src/block_storage/block_tree.rs
index 8b8201c0f02cb..5a9fc47f14d0a 100644
--- a/consensus/src/block_storage/block_tree.rs
+++ b/consensus/src/block_storage/block_tree.rs
@@ -175,7 +175,7 @@ impl BlockTree {
 
     pub(super) fn get_block(&self, block_id: &HashValue) -> Option<Arc<PipelinedBlock>> {
         self.get_linkable_block(block_id)
-            .map(|lb| Arc::clone(lb.executed_block()))
+            .map(|lb| lb.executed_block().clone())
     }
 
     pub(super) fn ordered_root(&self) -> Arc<PipelinedBlock> {
diff --git a/consensus/src/dag/tests/helpers.rs b/consensus/src/dag/tests/helpers.rs
index 5b9f2ff881b79..9cd77cc915274 100644
--- a/consensus/src/dag/tests/helpers.rs
+++ b/consensus/src/dag/tests/helpers.rs
@@ -97,7 +97,7 @@ pub(crate) fn generate_dag_nodes(
                 nodes_at_round.push(None);
             }
         }
-        previous_round = nodes_at_round.clone();
+        previous_round.clone_from(&nodes_at_round);
         nodes.push(nodes_at_round);
     }
     nodes
diff --git a/consensus/src/dag/tests/order_rule_tests.rs b/consensus/src/dag/tests/order_rule_tests.rs
index fcd6dac5d0c16..a46ed9b7f0499 100644
--- a/consensus/src/dag/tests/order_rule_tests.rs
+++ b/consensus/src/dag/tests/order_rule_tests.rs
@@ -238,7 +238,7 @@ fn test_order_rule_basic() {
     for node in nodes.iter().flatten().flatten() {
         order_rule.process_new_node(node.metadata());
     }
-    let expected_order = vec![
+    let expected_order = [
         // anchor (1, 0) has 1 votes, anchor (3, 1) has 2 votes and a path to (1, 0)
         vec![(1, 0)],
         // anchor (2, 1) has 3 votes
diff --git a/consensus/src/quorum_store/utils.rs b/consensus/src/quorum_store/utils.rs
index 7ee291da35e6c..d199101d2425a 100644
--- a/consensus/src/quorum_store/utils.rs
+++ b/consensus/src/quorum_store/utils.rs
@@ -248,7 +248,7 @@ impl ProofQueue {
             return;
         }
         let batch_key = BatchKey::from_info(proof.info());
-        if self.batch_to_proof.get(&batch_key).is_some() {
+        if self.batch_to_proof.contains_key(&batch_key) {
             counters::inc_rejected_pos_count(counters::POS_DUPLICATE_LABEL);
             return;
         }
diff --git a/consensus/src/rand/rand_gen/rand_store.rs b/consensus/src/rand/rand_gen/rand_store.rs
index e107245d744c2..99c63b7a40b0f 100644
--- a/consensus/src/rand/rand_gen/rand_store.rs
+++ b/consensus/src/rand/rand_gen/rand_store.rs
@@ -486,7 +486,7 @@ mod tests {
     async fn test_rand_item() {
         let ctxt = TestContext::new(vec![1, 2, 3], 1);
         let (tx, _rx) = unbounded();
-        let shares = vec![
+        let shares = [
            create_share_for_round(ctxt.target_epoch, 2, ctxt.authors[0]),
             create_share_for_round(ctxt.target_epoch, 1, ctxt.authors[1]),
             create_share_for_round(ctxt.target_epoch, 1, ctxt.authors[2]),
@@ -528,7 +528,7 @@ mod tests {
             decision_tx,
         );
 
-        let rounds = vec![vec![1], vec![2, 3], vec![5, 8, 13]];
+        let rounds = [vec![1], vec![2, 3], vec![5, 8, 13]];
         let blocks_1 = QueueItem::new(create_ordered_blocks(rounds[0].clone()), None);
         let blocks_2 = QueueItem::new(create_ordered_blocks(rounds[1].clone()), None);
         let metadata_1 = blocks_1.all_rand_metadata();
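The quorum-store hunk replaces `map.get(&key).is_some()` with `map.contains_key(&key)`: same behavior, but the intent ("is it a duplicate?") reads directly and no value reference is materialized. A minimal sketch with hypothetical names:

```rust
use std::collections::HashMap;

fn main() {
    let mut batches: HashMap<&str, u64> = HashMap::new();
    batches.insert("batch-1", 42);

    // `contains_key` states the membership check directly, replacing
    // the `get(..).is_some()` form changed in the diff above.
    if batches.contains_key("batch-1") {
        println!("duplicate batch, rejecting");
    }
}
```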
diff --git a/consensus/src/transaction_shuffler/fairness/conflict_key/mod.rs b/consensus/src/transaction_shuffler/fairness/conflict_key/mod.rs
index 63dc3d58cedee..caaa2d75a37ad 100644
--- a/consensus/src/transaction_shuffler/fairness/conflict_key/mod.rs
+++ b/consensus/src/transaction_shuffler/fairness/conflict_key/mod.rs
@@ -71,7 +71,7 @@ impl MapByKeyId {
 }
 
 impl ConflictKeyRegistry {
-    pub fn build<K: ConflictKey<Txn>, Txn>(txns: &[Txn]) -> Self
+    pub fn build<K, Txn>(txns: &[Txn]) -> Self
     where
         K: ConflictKey<Txn>,
     {
diff --git a/crates/aptos-api-tester/src/utils.rs b/crates/aptos-api-tester/src/utils.rs
index 3141583b6e632..fb1ad99d72ae0 100644
--- a/crates/aptos-api-tester/src/utils.rs
+++ b/crates/aptos-api-tester/src/utils.rs
@@ -16,7 +16,7 @@ use aptos_logger::{error, info};
 use aptos_rest_client::{error::RestError, Client, FaucetClient};
 use aptos_sdk::types::LocalAccount;
 use aptos_types::account_address::AccountAddress;
-use std::{env, num::ParseIntError, str::FromStr};
+use std::{env, fmt::Display, num::ParseIntError, str::FromStr};
 
 // Test failure
 
@@ -71,14 +71,14 @@ impl TestName {
     }
 }
 
-impl ToString for TestName {
-    fn to_string(&self) -> String {
+impl Display for TestName {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         match &self {
-            TestName::NewAccount => "new_account".to_string(),
-            TestName::CoinTransfer => "coin_transfer".to_string(),
-            TestName::TokenV1Transfer => "tokenv1_transfer".to_string(),
-            TestName::PublishModule => "publish_module".to_string(),
-            TestName::ViewFunction => "view_function".to_string(),
+            TestName::NewAccount => write!(f, "new_account"),
+            TestName::CoinTransfer => write!(f, "coin_transfer"),
+            TestName::TokenV1Transfer => write!(f, "tokenv1_transfer"),
+            TestName::PublishModule => write!(f, "publish_module"),
+            TestName::ViewFunction => write!(f, "view_function"),
         }
     }
 }
@@ -91,11 +91,11 @@ pub enum NetworkName {
     Devnet,
 }
 
-impl ToString for NetworkName {
-    fn to_string(&self) -> String {
+impl Display for NetworkName {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         match &self {
-            NetworkName::Testnet => "testnet".to_string(),
-            NetworkName::Devnet => "devnet".to_string(),
+            NetworkName::Testnet => write!(f, "testnet"),
+            NetworkName::Devnet => write!(f, "devnet"),
         }
     }
 }
diff --git a/crates/aptos-compression/src/metrics.rs b/crates/aptos-compression/src/metrics.rs
index 9dda43025df71..e6869c2018b4b 100644
--- a/crates/aptos-compression/src/metrics.rs
+++ b/crates/aptos-compression/src/metrics.rs
@@ -95,8 +95,8 @@ fn observe_operation_time(operation: &str, client: &CompressionClient, start_tim
 /// Updates the compression metrics for the given data sets
 pub fn update_compression_metrics(
     client: &CompressionClient,
-    raw_data: &Vec<u8>,
-    compressed_data: &Vec<u8>,
+    raw_data: &[u8],
+    compressed_data: &[u8],
 ) {
     update_operation_metrics(COMPRESS, client, raw_data, compressed_data);
 }
@@ -104,8 +104,8 @@ pub fn update_compression_metrics(
 /// Updates the decompression metrics for the given data sets
 pub fn update_decompression_metrics(
     client: &CompressionClient,
-    compressed_data: &Vec<u8>,
-    raw_data: &Vec<u8>,
+    compressed_data: &[u8],
+    raw_data: &[u8],
 ) {
     update_operation_metrics(DECOMPRESS, client, raw_data, compressed_data);
 }
@@ -115,8 +115,8 @@ pub fn update_decompression_metrics(
 fn update_operation_metrics(
     operation: &str,
     client: &CompressionClient,
-    raw_data: &Vec<u8>,
-    compressed_data: &Vec<u8>,
+    raw_data: &[u8],
+    compressed_data: &[u8],
 ) {
     increment_compression_byte_count(operation, RAW_BYTES, client, raw_data.len() as u64);
     increment_compression_byte_count(
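The `impl ToString` → `impl Display` rewrites above follow newer clippy guidance (`to_string_trait_impl`): implementing `Display` yields `.to_string()` for free through the blanket `impl<T: Display> ToString for T`, and additionally enables `{}` formatting. A self-contained sketch mirroring the `NetworkName` change:

```rust
use std::fmt;

enum NetworkName {
    Testnet,
    Devnet,
}

// Implementing Display (rather than ToString directly) also provides
// `.to_string()` via the blanket impl, plus use in format strings.
impl fmt::Display for NetworkName {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            NetworkName::Testnet => write!(f, "testnet"),
            NetworkName::Devnet => write!(f, "devnet"),
        }
    }
}

fn main() {
    println!("{}", NetworkName::Testnet);
    assert_eq!(NetworkName::Devnet.to_string(), "devnet");
}
```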
diff --git a/crates/aptos-crypto/src/unit_tests/bls12381_test.rs b/crates/aptos-crypto/src/unit_tests/bls12381_test.rs
index 2977c398b1c72..40be873cfa414 100644
--- a/crates/aptos-crypto/src/unit_tests/bls12381_test.rs
+++ b/crates/aptos-crypto/src/unit_tests/bls12381_test.rs
@@ -204,19 +204,19 @@ fn bls12381_aggsig_zero_messages_or_pks_does_not_verify() {
 
     // aggsig should NOT verify on zero messages and zero PKs
     let pubkeys: Vec<&PublicKey> = vec![];
-    let messages = vec![];
+    let messages = [];
     let msgs_refs = messages.iter().collect::<Vec<&TestAptosCrypto>>();
     assert!(aggsig.verify_aggregate(&msgs_refs, &pubkeys).is_err());
 
     // aggsig should NOT verify on zero PKs
     let pubkeys: Vec<&PublicKey> = vec![];
-    let messages = vec![message];
+    let messages = [message];
     let msgs_refs = messages.iter().collect::<Vec<&TestAptosCrypto>>();
     assert!(aggsig.verify_aggregate(&msgs_refs, &pubkeys).is_err());
 
     // aggsig should NOT verify on zero messages
     let pubkeys: Vec<&PublicKey> = vec![&key_pair.public_key];
-    let messages = vec![];
+    let messages = [];
     let msgs_refs = messages.iter().collect::<Vec<&TestAptosCrypto>>();
     assert!(aggsig.verify_aggregate(&msgs_refs, &pubkeys).is_err());
 }
diff --git a/crates/aptos-dkg/src/pvss/fiat_shamir.rs b/crates/aptos-dkg/src/pvss/fiat_shamir.rs
index 30fe2c06eb725..57929b397b414 100644
--- a/crates/aptos-dkg/src/pvss/fiat_shamir.rs
+++ b/crates/aptos-dkg/src/pvss/fiat_shamir.rs
@@ -46,6 +46,8 @@ pub trait FiatShamirProtocol {
     /// Returns one or more scalars `r` useful for doing linear combinations (e.g., combining
     /// pairings in the SCRAPE multipairing check using coefficients $1, r, r^2, r^3, \ldots$
     fn challenge_linear_combination_scalars(&mut self, num_scalars: usize) -> Vec<Scalar>;
+
+    #[allow(dead_code)]
     fn challenge_linear_combination_128bit(&mut self, num_scalars: usize) -> Vec<Scalar>;
 }
 
diff --git a/crates/aptos-dkg/src/pvss/schnorr.rs b/crates/aptos-dkg/src/pvss/schnorr.rs
index 03a26da2b62d2..d8d51783908ab 100644
--- a/crates/aptos-dkg/src/pvss/schnorr.rs
+++ b/crates/aptos-dkg/src/pvss/schnorr.rs
@@ -47,7 +47,7 @@ where
 /// Computes the Fiat-Shamir challenge in the Schnorr PoK protocol given an instance $(g, pk = g^a)$
 /// and the commitment $R = g^r$.
 #[allow(non_snake_case)]
-fn schnorr_hash<Gr: Serialize>(c: Challenge<Gr>) -> Scalar
+fn schnorr_hash<Gr>(c: Challenge<Gr>) -> Scalar
 where
     Gr: Serialize,
 {
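The `schnorr_hash` and `ConflictKeyRegistry::build` changes keep each generic bound in a single place. Clippy's `multiple_bound_locations` (added around the 1.78 toolchain, if memory serves) warns when a type parameter is bounded both inline and in the `where` clause. A std-only sketch of the before/after shape, with hypothetical names:

```rust
use std::fmt::Debug;

// Before (flagged): the bound appeared both inline and in `where`:
// fn digest<G: Debug>(value: &G) -> String where G: Debug { ... }

// After: one bound, one location.
fn digest<G>(value: &G) -> String
where
    G: Debug,
{
    format!("{value:?}")
}

fn main() {
    println!("{}", digest(&("g", 42)));
}
```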
diff --git a/crates/aptos-id-generator/src/lib.rs b/crates/aptos-id-generator/src/lib.rs
index 96f11c5804198..30778048c04b1 100644
--- a/crates/aptos-id-generator/src/lib.rs
+++ b/crates/aptos-id-generator/src/lib.rs
@@ -35,6 +35,7 @@ impl U32IdGenerator {
         }
     }
 }
+
 impl IdGenerator<u32> for U32IdGenerator {
     /// Retrieves the next ID, wrapping on overflow
     #[inline]
@@ -43,6 +44,12 @@ impl IdGenerator<u32> for U32IdGenerator {
     }
 }
 
+impl Default for U32IdGenerator {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
 /// A generic in order [`IdGenerator`] using an [`AtomicU64`] to guarantee uniqueness
 #[derive(Debug, Default)]
 pub struct U64IdGenerator {
diff --git a/crates/aptos-rest-client/src/lib.rs b/crates/aptos-rest-client/src/lib.rs
index 153b745ff6a68..199023c8771e6 100644
--- a/crates/aptos-rest-client/src/lib.rs
+++ b/crates/aptos-rest-client/src/lib.rs
@@ -1605,7 +1605,7 @@ impl Client {
         base: &str,
         limit_per_request: u64,
         ledger_version: Option<u64>,
-        cursor: Option<String>,
+        cursor: &Option<String>,
     ) -> AptosResult<Url> {
         let mut path = format!("{}?limit={}", base, limit_per_request);
         if let Some(ledger_version) = ledger_version {
@@ -1637,11 +1637,11 @@ impl Client {
                 base_path,
                 limit_per_request,
                 ledger_version,
-                cursor,
+                &cursor,
             )?;
             let raw_response = self.inner.get(url).send().await?;
             let response: Response<Vec<Resource>> = self.json(raw_response).await?;
-            cursor = response.state().cursor.clone();
+            cursor.clone_from(&response.state().cursor);
             if cursor.is_none() {
                 break Ok(response.map(|mut v| {
                     result.append(&mut v);
@@ -1670,13 +1670,13 @@ impl Client {
                 base_path,
                 limit_per_request,
                 ledger_version,
-                cursor,
+                &cursor,
             )?;
             let response: Response<BTreeMap<StructTag, Vec<u8>>> = self
                 .get_bcs(url)
                 .await?
                 .and_then(|inner| bcs::from_bytes(&inner))?;
-            cursor = response.state().cursor.clone();
+            cursor.clone_from(&response.state().cursor);
             if cursor.is_none() {
                 break Ok(response.map(|mut v| {
                     result.append(&mut v);
diff --git a/crates/aptos-speculative-state-helper/src/tests/proptests.rs b/crates/aptos-speculative-state-helper/src/tests/proptests.rs
index a07c79dde1bc3..f0d7aa2b3f9f5 100644
--- a/crates/aptos-speculative-state-helper/src/tests/proptests.rs
+++ b/crates/aptos-speculative-state-helper/src/tests/proptests.rs
@@ -74,7 +74,7 @@ fn prepare_work(
     let mut counter_values = vec![0; num_counters];
 
     // Pre-process indices and deltas and put them into worker tasks.
-    let mut worker_tasks: Vec<Vec<_>> = vec![Vec::with_capacity(200); 4];
+    let mut worker_tasks: Vec<Vec<_>> = (0..4).map(|_| Vec::with_capacity(200)).collect();
     for (i, (mut idx, mut delta)) in counter_ops.iter().enumerate() {
         // Group based on modulo (num_counters + 1). Last group is for testing out of bounds.
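The `Default for U32IdGenerator` impl above satisfies clippy's `new_without_default`: when a type has an argument-free `new()`, providing `Default` lets generic code that requires `T: Default` (and `#[derive(Default)]` on containing structs) construct it too. A minimal sketch with a hypothetical type:

```rust
pub struct IdGen {
    next: u32,
}

impl IdGen {
    pub fn new() -> Self {
        Self { next: 0 }
    }
}

// Clippy's `new_without_default` asks for this alongside `new()`.
impl Default for IdGen {
    fn default() -> Self {
        Self::new()
    }
}

fn main() {
    let a = IdGen::new();
    let b = IdGen::default();
    assert_eq!(a.next, b.next);
    println!("ok");
}
```

Separately, note the proptests change just above: `vec![Vec::with_capacity(200); 4]` clones the template vector, and `Vec::clone` does not preserve capacity, so the `(0..4).map(|_| Vec::with_capacity(200)).collect()` form actually reserves the intended capacity in every element.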
diff --git a/crates/aptos/src/common/init.rs b/crates/aptos/src/common/init.rs
index ae9c12a040489..904ca225706ec 100644
--- a/crates/aptos/src/common/init.rs
+++ b/crates/aptos/src/common/init.rs
@@ -183,7 +183,7 @@ impl CliCommand<()> for InitTool {
         };
 
         // Set the derivation_path to the one user chose
-        profile_config.derivation_path = derivation_path.clone();
+        profile_config.derivation_path.clone_from(&derivation_path);
 
         // Private key
         let private_key = if self.is_hardware_wallet() {
diff --git a/crates/bounded-executor/src/concurrent_stream.rs b/crates/bounded-executor/src/concurrent_stream.rs
index 30463cf46eca4..890da0e2b46fd 100644
--- a/crates/bounded-executor/src/concurrent_stream.rs
+++ b/crates/bounded-executor/src/concurrent_stream.rs
@@ -35,6 +35,7 @@ where
 }
 
 #[rustversion::since(1.75)]
+#[allow(dead_code)]
 pub trait ConcurrentStream: Stream {
     fn concurrent_map(
         self,
diff --git a/crates/channel/src/message_queues_test.rs b/crates/channel/src/message_queues_test.rs
index 8e77906e021da..7a7e474c2f04f 100644
--- a/crates/channel/src/message_queues_test.rs
+++ b/crates/channel/src/message_queues_test.rs
@@ -12,12 +12,6 @@ struct ProposalMsg {
     msg: String,
 }
 
-/// This represents a vote message from a validator
-#[derive(Debug, PartialEq, Eq)]
-struct VoteMsg {
-    msg: String,
-}
-
 #[test]
 fn test_fifo() {
     let mut q = PerKeyQueue::new(QueueStyle::FIFO, NonZeroUsize!(3), None);
diff --git a/crates/indexer/src/indexer/fetcher.rs b/crates/indexer/src/indexer/fetcher.rs
index 9e4d68e916851..ba3615d46e1cd 100644
--- a/crates/indexer/src/indexer/fetcher.rs
+++ b/crates/indexer/src/indexer/fetcher.rs
@@ -264,7 +264,7 @@ async fn fetch_nexts(
             block_height_bcs = aptos_api_types::U64::from(block_height);
         }
     }
-    match converter
+    let res = converter
         .try_into_onchain_transaction(timestamp, raw_txn)
         .map(|mut txn| {
             match txn {
@@ -297,7 +297,8 @@ async fn fetch_nexts(
                 },
             };
             txn
-        }) {
+        });
+    match res {
         Ok(transaction) => transactions.push(transaction),
         Err(err) => {
             UNABLE_TO_FETCH_TRANSACTION.inc();
diff --git a/crates/reliable-broadcast/src/tests.rs b/crates/reliable-broadcast/src/tests.rs
index a44868436a539..826f0607a4f1d 100644
--- a/crates/reliable-broadcast/src/tests.rs
+++ b/crates/reliable-broadcast/src/tests.rs
@@ -29,6 +29,7 @@ use tokio_retry::strategy::FixedInterval;
 #[derive(Clone)]
 struct TestMessage(Vec<u8>);
 
+#[allow(unused)]
 #[derive(Clone)]
 struct TestAck(Vec<u8>);
 
diff --git a/crates/transaction-generator-lib/src/call_custom_modules.rs b/crates/transaction-generator-lib/src/call_custom_modules.rs
index 71537fee71213..b540478e28966 100644
--- a/crates/transaction-generator-lib/src/call_custom_modules.rs
+++ b/crates/transaction-generator-lib/src/call_custom_modules.rs
@@ -161,7 +161,7 @@ impl CustomModulesDelegationGeneratorCreator {
         init_txn_factory: TransactionFactory,
         root_account: &dyn RootAccountHandle,
         txn_executor: &dyn ReliableTransactionSubmitter,
-        packages: &mut Vec<(Package, LocalAccount)>,
+        packages: &mut [(Package, LocalAccount)],
         workload: &mut dyn UserModuleTransactionGenerator,
     ) -> Arc<dyn TransactionGenerator> {
         let mut rng = StdRng::from_entropy();
diff --git a/devtools/aptos-cargo-cli/src/common.rs b/devtools/aptos-cargo-cli/src/common.rs
index 7c881770a8623..f00598dab3efe 100644
--- a/devtools/aptos-cargo-cli/src/common.rs
+++ b/devtools/aptos-cargo-cli/src/common.rs
@@ -111,7 +111,7 @@ impl SelectedPackageArgs {
         );
         let response = reqwest::blocking::get(url)?.error_for_status()?;
         let response = response.text()?;
-        contents = response.clone();
+        contents.clone_from(&response);
 
         // Write the contents of the file to the local directory
         fs::create_dir_all("target/aptos-x-tool")?;
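Many hunks in this diff (node_config, generator, init.rs, common.rs, priority.rs, spec_rewriter, and the rest-client cursor updates) rewrite `x = y.clone()` as `x.clone_from(&y)`, following the `assigning_clones` lint that ships with the newer clippy. `clone_from` lets types like `String` and `Vec` reuse the destination's existing allocation instead of building a fresh clone and dropping the old buffer. A minimal sketch:

```rust
fn main() {
    let mut contents = String::with_capacity(1024);
    contents.push_str("old response body");

    let response = String::from("new response body");

    // Equivalent to `contents = response.clone()`, but String's clone_from
    // can reuse `contents`' buffer when it is large enough.
    contents.clone_from(&response);

    assert_eq!(contents, "new response body");
    println!("{contents}");
}
```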
diff --git a/docker/builder/docker-bake-rust-all.hcl b/docker/builder/docker-bake-rust-all.hcl
index 0fb9d3f624a08..fed87ea776b87 100644
--- a/docker/builder/docker-bake-rust-all.hcl
+++ b/docker/builder/docker-bake-rust-all.hcl
@@ -69,7 +69,7 @@ target "debian-base" {
   dockerfile = "docker/builder/debian-base.Dockerfile"
   contexts = {
     # Run `docker buildx imagetools inspect debian:bullseye` to find the latest multi-platform hash
-    debian = "docker-image://debian:bullseye@sha256:2c7a92a41cb814c00e7d455b2bc0c90ccdb9a4ced2ffdc10e562c7a84a186032"
+    debian = "docker-image://debian:bullseye@sha256:d584e02c85bc9b3bd8df01662e4f605a66e1b9a04f9dea0e288f56da474269a0"
   }
 }
 
@@ -78,8 +78,8 @@ target "builder-base" {
   target = "builder-base"
   context = "."
   contexts = {
-    # Run `docker buildx imagetools inspect rust:1.75.0-bullseye` to find the latest multi-platform hash
-    rust = "docker-image://rust:1.75.0-bullseye@sha256:2576095c947f6d9cfb5f19d51b822cdeb557c5a5b6de9460f2bf1f913d0434ca"
+    # Run `docker buildx imagetools inspect rust:1.78.0-bullseye` to find the latest multi-platform hash
+    rust = "docker-image://rust:1.78.0-bullseye@sha256:c8f85185bd2e482d88e1b8a90705435309ca9d54ccc3bcccf24a32378b8ff1a8"
   }
   args = {
     PROFILE = "${PROFILE}"
diff --git a/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/stream_coordinator.rs b/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/stream_coordinator.rs
index c6bce7af37f02..3964f65ac2401 100644
--- a/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/stream_coordinator.rs
+++ b/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/stream_coordinator.rs
@@ -379,7 +379,7 @@ impl IndexerStreamCoordinator {
                 }
             }
             let size_info = Self::get_size_info(&raw_txn);
-            match converter
+            let res = converter
                 .try_into_onchain_transaction(timestamp, raw_txn)
                 .map(|mut txn| {
                     match txn {
@@ -414,7 +414,8 @@ impl IndexerStreamCoordinator {
                         },
                     };
                     txn
-                }) {
+                });
+            match res {
                 Ok(transaction) => transactions.push((transaction, size_info)),
                 Err(err) => {
                     UNABLE_TO_FETCH_TRANSACTION.inc();
diff --git a/execution/block-partitioner/src/test_utils.rs b/execution/block-partitioner/src/test_utils.rs
index 24f21261014bc..28738795ca6c9 100644
--- a/execution/block-partitioner/src/test_utils.rs
+++ b/execution/block-partitioner/src/test_utils.rs
@@ -150,10 +150,7 @@ impl P2PBlockGenerator {
 ///
 /// Also print a summary of the partitioning result.
 #[cfg(test)]
-pub fn verify_partitioner_output(
-    input: &Vec<AnalyzedTransaction>,
-    output: &PartitionedTransactions,
-) {
+pub fn verify_partitioner_output(input: &[AnalyzedTransaction], output: &PartitionedTransactions) {
     let old_txn_id_by_txn_hash: HashMap<HashValue, usize> = HashMap::from_iter(
         input
             .iter()
@@ -309,7 +306,7 @@ pub fn verify_partitioner_output(
 }
 
 #[cfg(test)]
-fn is_sorted(arr: &Vec<usize>) -> bool {
+fn is_sorted(arr: &[usize]) -> bool {
     let num = arr.len();
     for i in 1..num {
         if arr[i - 1] >= arr[i] {
diff --git a/execution/block-partitioner/src/v2/load_balance.rs b/execution/block-partitioner/src/v2/load_balance.rs
index 16d2898b892ba..b40af19ac48c1 100644
--- a/execution/block-partitioner/src/v2/load_balance.rs
+++ b/execution/block-partitioner/src/v2/load_balance.rs
@@ -8,10 +8,7 @@ use std::collections::BinaryHeap;
 /// Time complexity: O(num_tasks * log2(num_workers))
 ///
 /// Read more at https://en.wikipedia.org/wiki/Longest-processing-time-first_scheduling.
-pub fn longest_processing_time_first(
-    task_costs: &Vec<u64>,
-    num_workers: usize,
-) -> (u64, Vec<usize>) {
+pub fn longest_processing_time_first(task_costs: &[u64], num_workers: usize) -> (u64, Vec<usize>) {
     assert!(num_workers >= 1);
     let num_tasks = task_costs.len();
     let mut cost_tid_pairs: Vec<(u64, usize)> = task_costs
@@ -39,22 +36,22 @@ pub fn longest_processing_time_first(
 
 #[test]
 fn test_longest_processing_time_first() {
-    let (actual, assignment) = longest_processing_time_first(&vec![1, 2, 3, 4, 5], 1);
+    let (actual, assignment) = longest_processing_time_first(&[1, 2, 3, 4, 5], 1);
     assert_eq!(15, actual);
     println!("{:?}", assignment);
-    let (actual, assignment) = longest_processing_time_first(&vec![1, 2, 3, 4, 5], 2);
+    let (actual, assignment) = longest_processing_time_first(&[1, 2, 3, 4, 5], 2);
     assert_eq!(8, actual);
     println!("{:?}", assignment);
-    let (actual, assignment) = longest_processing_time_first(&vec![1, 2, 3, 4, 5], 3);
+    let (actual, assignment) = longest_processing_time_first(&[1, 2, 3, 4, 5], 3);
     assert_eq!(5, actual);
     println!("{:?}", assignment);
-    let (actual, assignment) = longest_processing_time_first(&vec![1, 2, 3, 4, 5], 4);
+    let (actual, assignment) = longest_processing_time_first(&[1, 2, 3, 4, 5], 4);
     assert_eq!(5, actual);
     println!("{:?}", assignment);
-    let (actual, assignment) = longest_processing_time_first(&vec![1, 2, 3, 4, 5], 5);
+    let (actual, assignment) = longest_processing_time_first(&[1, 2, 3, 4, 5], 5);
     assert_eq!(5, actual);
     println!("{:?}", assignment);
-    let (actual, assignment) = longest_processing_time_first(&vec![6, 7, 8, 4, 5], 2);
+    let (actual, assignment) = longest_processing_time_first(&[6, 7, 8, 4, 5], 2);
     assert_eq!(17, actual);
     println!("{:?}", assignment);
 }
diff --git a/experimental/execution/ptx-executor/src/common.rs b/experimental/execution/ptx-executor/src/common.rs
index fd742dd6fa231..b1c9b21adc2bb 100644
--- a/experimental/execution/ptx-executor/src/common.rs
+++ b/experimental/execution/ptx-executor/src/common.rs
@@ -21,6 +21,7 @@ pub(crate) use hashbrown::{hash_map::Entry, HashMap, HashSet};
 pub(crate) trait VersionedKeyHelper {
     fn key(&self) -> &StateKey;
 
+    #[allow(unused)]
     fn txn_idx(&self) -> TxnIdx;
 
     fn txn_idx_shifted(&self) -> TxnIdx;
diff --git a/keyless/circuit/src/rsa.rs b/keyless/circuit/src/rsa.rs
index 1971f229bb5f6..33905f3832e4f 100644
--- a/keyless/circuit/src/rsa.rs
+++ b/keyless/circuit/src/rsa.rs
@@ -127,7 +127,7 @@ fn common<F: Fn(&mut Vec<u64>, &mut Vec<u64>, &mut Vec<u64>)>(
     }
 }
 
-fn flip_random_bit(limbs: &mut Vec<u64>) {
+fn flip_random_bit(limbs: &mut [u64]) {
     let mut rng = thread_rng();
     let limb_idx = rng.gen_range(0, limbs.len());
     let bit_idx = rng.gen_range(0, 64);
diff --git a/mempool/src/core_mempool/index.rs b/mempool/src/core_mempool/index.rs
index e2d4edc2b303e..6f55a3c47962f 100644
--- a/mempool/src/core_mempool/index.rs
+++ b/mempool/src/core_mempool/index.rs
@@ -346,7 +346,7 @@ impl MultiBucketTimelineIndex {
     /// Read transactions from the timeline from `start_id` (exclusive) to `end_id` (inclusive).
     pub(crate) fn timeline_range(
         &self,
-        start_end_pairs: &Vec<(u64, u64)>,
+        start_end_pairs: &[(u64, u64)],
     ) -> Vec<(AccountAddress, u64)> {
         assert_eq!(start_end_pairs.len(), self.timelines.len());
 
diff --git a/mempool/src/core_mempool/mempool.rs b/mempool/src/core_mempool/mempool.rs
index 5a7fc6d289f81..c39b6f1bebc06 100644
--- a/mempool/src/core_mempool/mempool.rs
+++ b/mempool/src/core_mempool/mempool.rs
@@ -465,10 +465,7 @@ impl Mempool {
     }
 
     /// Read transactions from timeline from `start_id` (exclusive) to `end_id` (inclusive).
-    pub(crate) fn timeline_range(
-        &self,
-        start_end_pairs: &Vec<(u64, u64)>,
-    ) -> Vec<SignedTransaction> {
+    pub(crate) fn timeline_range(&self, start_end_pairs: &[(u64, u64)]) -> Vec<SignedTransaction> {
         self.transactions.timeline_range(start_end_pairs)
     }
 
diff --git a/mempool/src/core_mempool/transaction_store.rs b/mempool/src/core_mempool/transaction_store.rs
index 57b36e097c59c..1c674538263dd 100644
--- a/mempool/src/core_mempool/transaction_store.rs
+++ b/mempool/src/core_mempool/transaction_store.rs
@@ -605,10 +605,7 @@ impl TransactionStore {
         (batch, last_timeline_id.into())
     }
 
-    pub(crate) fn timeline_range(
-        &self,
-        start_end_pairs: &Vec<(u64, u64)>,
-    ) -> Vec<SignedTransaction> {
+    pub(crate) fn timeline_range(&self, start_end_pairs: &[(u64, u64)]) -> Vec<SignedTransaction> {
         self.timeline_index
             .timeline_range(start_end_pairs)
             .iter()
diff --git a/mempool/src/shared_mempool/priority.rs b/mempool/src/shared_mempool/priority.rs
index 0981e74ad14a6..33aec7e08f644 100644
--- a/mempool/src/shared_mempool/priority.rs
+++ b/mempool/src/shared_mempool/priority.rs
@@ -506,7 +506,10 @@ mod test {
 
         // Set the prioritized peers
         let prioritized_peers = vec![validator_peer, vfn_peer, public_peer];
-        *prioritized_peers_state.prioritized_peers.write() = prioritized_peers.clone();
+        prioritized_peers_state
+            .prioritized_peers
+            .write()
+            .clone_from(&prioritized_peers);
 
         // Verify that the peer priorities are correct
         for (index, peer) in prioritized_peers.iter().enumerate() {
diff --git a/mempool/src/tests/core_mempool_test.rs b/mempool/src/tests/core_mempool_test.rs
index 975acd02d38c5..d13628b969085 100644
--- a/mempool/src/tests/core_mempool_test.rs
+++ b/mempool/src/tests/core_mempool_test.rs
@@ -317,7 +317,7 @@ fn test_commit_callback() {
 #[test]
 fn test_reset_sequence_number_on_failure() {
     let mut pool = setup_mempool().0;
-    let txns = vec![TestTransaction::new(1, 0, 1), TestTransaction::new(1, 1, 1)];
+    let txns = [TestTransaction::new(1, 0, 1), TestTransaction::new(1, 1, 1)];
     let hashes: Vec<_> = txns
         .iter()
         .cloned()
diff --git a/network/framework/src/protocols/rpc/mod.rs b/network/framework/src/protocols/rpc/mod.rs
index 2769993da793c..ee1ac3eed98d0 100644
--- a/network/framework/src/protocols/rpc/mod.rs
+++ b/network/framework/src/protocols/rpc/mod.rs
@@ -70,7 +70,7 @@ use bytes::Bytes;
 use error::RpcError;
 use futures::{
     channel::oneshot,
-    future::{BoxFuture, FusedFuture, Future, FutureExt},
+    future::{BoxFuture, FusedFuture, FutureExt},
     sink::SinkExt,
     stream::{FuturesUnordered, StreamExt},
 };
@@ -317,7 +317,7 @@ impl InboundRpcs {
     /// `futures::select!`.
     pub fn next_completed_response(
         &mut self,
-    ) -> impl Future<Output = Result<(), RpcError>> + FusedFuture + '_ {
+    ) -> impl FusedFuture<Output = Result<(), RpcError>> + '_ {
         self.inbound_rpc_tasks.select_next_some()
     }
 
@@ -598,7 +598,7 @@ impl OutboundRpcs {
     /// `futures::select!`.
     pub fn next_completed_request(
         &mut self,
-    ) -> impl Future<Output = (PeerId, Result<Bytes, RpcError>)> + FusedFuture + '_ {
+    ) -> impl FusedFuture<Output = (PeerId, Result<Bytes, RpcError>)> + '_ {
         self.outbound_rpc_tasks.select_next_some()
     }
 
diff --git a/rust-toolchain.toml b/rust-toolchain.toml
index 0b13a21a22d01..30329882ad821 100644
--- a/rust-toolchain.toml
+++ b/rust-toolchain.toml
@@ -1,6 +1,6 @@
 [toolchain]
-channel = "1.75.0"
+channel = "1.78.0"
 
 # Note: we don't specify cargofmt in our toolchain because we rely on
 # the nightly version of cargofmt and verify formatting in CI/CD.
-components = [ "cargo", "clippy", "rustc", "rust-docs", "rust-std" ] +components = ["cargo", "clippy", "rustc", "rust-docs", "rust-std"] diff --git a/scripts/update_docker_images.py b/scripts/update_docker_images.py index f85afdc825817..035e9ae2cc869 100755 --- a/scripts/update_docker_images.py +++ b/scripts/update_docker_images.py @@ -10,7 +10,7 @@ IMAGES = { "debian": "debian:bullseye", - "rust": "rust:1.75.0-bullseye", + "rust": "rust:1.78.0-bullseye", } diff --git a/state-sync/data-streaming-service/src/data_stream.rs b/state-sync/data-streaming-service/src/data_stream.rs index 0a949a3cea490..cb912221eb91b 100644 --- a/state-sync/data-streaming-service/src/data_stream.rs +++ b/state-sync/data-streaming-service/src/data_stream.rs @@ -228,8 +228,7 @@ impl DataStream { } self.notifications_to_responses - .get(notification_id) - .is_some() + .contains_key(notification_id) } /// Notifies the Aptos data client of a bad client response diff --git a/storage/aptosdb/src/db/include/aptosdb_internal.rs b/storage/aptosdb/src/db/include/aptosdb_internal.rs index fbf2ff0dd721a..ee327fa3cec29 100644 --- a/storage/aptosdb/src/db/include/aptosdb_internal.rs +++ b/storage/aptosdb/src/db/include/aptosdb_internal.rs @@ -219,19 +219,31 @@ impl AptosDB { fn get_raw_block_info_by_height(&self, block_height: u64) -> Result { if !self.skip_index_and_usage { - let (first_version, new_block_event) = self.event_store.get_event_by_key(&new_block_event_key(), block_height, self.get_synced_version()?)?; + let (first_version, new_block_event) = self.event_store.get_event_by_key( + &new_block_event_key(), + block_height, + self.get_synced_version()?, + )?; let new_block_event = bcs::from_bytes(new_block_event.event_data())?; - Ok(BlockInfo::from_new_block_event(first_version, &new_block_event)) + Ok(BlockInfo::from_new_block_event( + first_version, + &new_block_event, + )) } else { Ok(self .ledger_db .metadata_db() .get_block_info(block_height)? - .ok_or(AptosDbError::NotFound(format!("BlockInfo not found at height {block_height}")))?) + .ok_or(AptosDbError::NotFound(format!( + "BlockInfo not found at height {block_height}" + )))?) } } - fn get_raw_block_info_by_version(&self, version: Version) -> Result<(u64 /* block_height */, BlockInfo)> { + fn get_raw_block_info_by_version( + &self, + version: Version, + ) -> Result<(u64 /* block_height */, BlockInfo)> { let synced_version = self.get_synced_version()?; ensure!( version <= synced_version, @@ -239,12 +251,18 @@ impl AptosDB { ); if !self.skip_index_and_usage { - let (first_version, event_index, block_height) = self.event_store + let (first_version, event_index, block_height) = self + .event_store .lookup_event_before_or_at_version(&new_block_event_key(), version)? 
diff --git a/storage/aptosdb/src/pruner/db_pruner.rs b/storage/aptosdb/src/pruner/db_pruner.rs
index 2372c13aee5d0..17cb40778ef5c 100644
--- a/storage/aptosdb/src/pruner/db_pruner.rs
+++ b/storage/aptosdb/src/pruner/db_pruner.rs
@@ -24,6 +24,7 @@ pub trait DBPruner: Send + Sync {
 
     /// Returns the target version for the current pruning round - this might be different from the
     /// target_version() because we need to keep max_version in account.
+    #[allow(unused)]
     fn get_current_batch_target(&self, max_versions: Version) -> Version {
         // Current target version might be less than the target version to ensure we don't prune
         // more than max_version in one go.
diff --git a/storage/aptosdb/src/pruner/pruner_manager.rs b/storage/aptosdb/src/pruner/pruner_manager.rs
index 703b06c9a8ed0..41086d9258460 100644
--- a/storage/aptosdb/src/pruner/pruner_manager.rs
+++ b/storage/aptosdb/src/pruner/pruner_manager.rs
@@ -34,6 +34,7 @@ pub trait PrunerManager: Sync {
     // in memory progress.
     fn save_min_readable_version(&self, min_readable_version: Version) -> Result<()>;
 
+    #[allow(unused)]
     fn is_pruning_pending(&self) -> bool;
 
     /// (For tests only.) Notifies the worker thread and waits for it to finish its job by polling
diff --git a/storage/backup/backup-cli/src/metadata/view.rs b/storage/backup/backup-cli/src/metadata/view.rs
index fd9c5bd3be643..d42c3a39061a3 100644
--- a/storage/backup/backup-cli/src/metadata/view.rs
+++ b/storage/backup/backup-cli/src/metadata/view.rs
@@ -115,14 +115,14 @@ impl MetadataView {
             .sorted()
             .rev()
             .find(|m| m.version <= target_version)
-            .map(Clone::clone))
+            .cloned())
     }
 
     pub fn expect_state_snapshot(&self, version: Version) -> Result<StateSnapshotBackupMeta> {
         self.state_snapshot_backups
             .iter()
             .find(|m| m.version == version)
-            .map(Clone::clone)
+            .cloned()
             .ok_or_else(|| anyhow!("State snapshot not found at version {}", version))
     }
 
diff --git a/testsuite/forge/src/backend/k8s/cluster_helper.rs b/testsuite/forge/src/backend/k8s/cluster_helper.rs
index 28588cd2ba3fd..a75996c0e4ab3 100644
--- a/testsuite/forge/src/backend/k8s/cluster_helper.rs
+++ b/testsuite/forge/src/backend/k8s/cluster_helper.rs
@@ -161,13 +161,13 @@ async fn wait_nodes_stateful_set(
 }
 
 /// Deletes a collection of resources in k8s as part of aptos-node
-async fn delete_k8s_collection<T: kube::Resource>(
+async fn delete_k8s_collection<T>(
     api: Api<T>,
     name: &'static str,
     label_selector: &str,
 ) -> Result<()>
 where
-    T: Clone + DeserializeOwned + Debug,
+    T: kube::Resource + Clone + DeserializeOwned + Debug,
     <T as kube::Resource>::DynamicType: Default,
 {
     match api
diff --git a/testsuite/forge/src/backend/local/node.rs b/testsuite/forge/src/backend/local/node.rs
index a2384d0febd2b..92816a780218c 100644
--- a/testsuite/forge/src/backend/local/node.rs
+++ b/testsuite/forge/src/backend/local/node.rs
@@ -121,7 +121,6 @@ impl LocalNode {
         // Ensure log file exists
         let log_file = OpenOptions::new()
             .create(true)
-            .write(true)
             .append(true)
             .open(self.log_path())?;
 
diff --git a/testsuite/forge/src/success_criteria.rs b/testsuite/forge/src/success_criteria.rs
index 6283fbf8a57dc..fa92684e407fd 100644
--- a/testsuite/forge/src/success_criteria.rs
+++ b/testsuite/forge/src/success_criteria.rs
@@ -66,7 +66,7 @@ impl MetricsThreshold {
     pub fn ensure_metrics_threshold(
         &self,
         metrics_name: &str,
-        metrics: &Vec<Sample>,
+        metrics: &[Sample],
     ) -> anyhow::Result<()> {
         if self.expect_empty {
             if !metrics.is_empty() {
diff --git a/testsuite/fuzzer/fuzz.sh b/testsuite/fuzzer/fuzz.sh
index bc4181541a95b..90f14237b41dd 100755
--- a/testsuite/fuzzer/fuzz.sh
+++ b/testsuite/fuzzer/fuzz.sh
@@ -17,7 +17,7 @@ function error() {
 
 function cargo_fuzz() {
     # Nightly version control
-    NIGHTLY_VERSION="nightly-2024-01-01"
+    NIGHTLY_VERSION="nightly-2024-04-01"
     rustup install $NIGHTLY_VERSION
     if [ -z "$1" ]; then
         error "error using cargo()"
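The `metadata/view.rs` hunks above swap `.map(Clone::clone)` for `.cloned()` on iterator/`Option` chains of references (clippy's `map_clone`): identical behavior with clearer intent. A minimal sketch with hypothetical data:

```rust
fn main() {
    let backups = [100u64, 200, 300];

    // `.cloned()` replaces `.map(Clone::clone)` when the chain yields
    // references and an owned value is wanted at the end.
    let latest_before_250 = backups
        .iter()
        .filter(|v| **v <= 250)
        .max()
        .cloned();

    assert_eq!(latest_before_250, Some(200));
    println!("{latest_before_250:?}");
}
```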
diff --git a/third_party/move/evm/move-to-yul/src/dispatcher_generator.rs b/third_party/move/evm/move-to-yul/src/dispatcher_generator.rs
index 1ca6dbf6725b1..e728bce8e0f2c 100644
--- a/third_party/move/evm/move-to-yul/src/dispatcher_generator.rs
+++ b/third_party/move/evm/move-to-yul/src/dispatcher_generator.rs
@@ -67,7 +67,7 @@ impl Generator {
     ) {
         emitln!(ctx.writer, "if iszero(lt(calldatasize(), 4))");
         let mut selectors = BTreeMap::new();
-        let para_vec = vec!["calldataload(0)".to_string(), "224".to_string()];
+        let para_vec = ["calldataload(0)".to_string(), "224".to_string()];
         let shr224 = self.call_builtin_str(ctx, YulFunction::Shr, para_vec.iter().cloned());
         ctx.emit_block(|| {
             emitln!(ctx.writer, "let selector := {}", shr224);
@@ -528,7 +528,7 @@ impl Generator {
             .enumerate()
         {
             let is_static = ty.is_static();
-            let local_typ_var = vec![ret_var[stack_pos].clone()];
+            let local_typ_var = [ret_var[stack_pos].clone()];
             let abi_decode_type =
                 gen.generate_abi_decoding_type(ctx, ty, move_ty, from_memory);
             ctx.emit_block(|| {
@@ -631,7 +631,7 @@ impl Generator {
         {
             let is_static = ty.is_static();
             // TODO: consider the case size_on_stack is not 1
-            let local_typ_var = vec![ret_var[stack_pos].clone()];
+            let local_typ_var = [ret_var[stack_pos].clone()];
             let abi_decode_type =
                 gen.generate_abi_decoding_type(ctx, ty, move_ty, from_memory);
             ctx.emit_block(|| {
@@ -1453,7 +1453,7 @@ impl Generator {
             .enumerate()
         {
             let is_static = ty.is_static();
-            let local_typ_var = vec![ret_var[stack_pos].clone()];
+            let local_typ_var = [ret_var[stack_pos].clone()];
             let memory_func = ctx.memory_load_builtin_fun(move_ty);
             if local_typ_var.len() == 1 {
                 emitln!(
diff --git a/third_party/move/evm/move-to-yul/src/external_functions.rs b/third_party/move/evm/move-to-yul/src/external_functions.rs
index 13efc1c441aba..b95742686d66d 100644
--- a/third_party/move/evm/move-to-yul/src/external_functions.rs
+++ b/third_party/move/evm/move-to-yul/src/external_functions.rs
@@ -122,7 +122,7 @@ impl NativeFunctions {
         let fun_sig = format!("{}", sig);
         let function_selector =
             format!("0x{:x}", Keccak256::digest(fun_sig.as_bytes()))[..10].to_string();
-        let para_vec = vec![function_selector, "224".to_string()];
+        let para_vec = [function_selector, "224".to_string()];
         let shl224 = gen
             .parent
             .call_builtin_str(ctx, YulFunction::Shl, para_vec.iter().cloned());
diff --git a/third_party/move/move-binary-format/src/proptest_types/types.rs b/third_party/move/move-binary-format/src/proptest_types/types.rs
index a1877c703c52e..b90a97902d5e8 100644
--- a/third_party/move/move-binary-format/src/proptest_types/types.rs
+++ b/third_party/move/move-binary-format/src/proptest_types/types.rs
@@ -23,6 +23,7 @@ use proptest::{
 };
 use std::collections::BTreeSet;
 
+#[allow(dead_code)]
 #[derive(Debug)]
 struct TypeSignatureIndex(u16);
 
diff --git a/third_party/move/move-bytecode-verifier/src/control_flow_v5.rs b/third_party/move/move-bytecode-verifier/src/control_flow_v5.rs
index 701721117ef2b..bd2d39c073214 100644
--- a/third_party/move/move-bytecode-verifier/src/control_flow_v5.rs
+++ b/third_party/move/move-bytecode-verifier/src/control_flow_v5.rs
@@ -37,7 +37,7 @@ pub fn verify(
 
 fn verify_fallthrough(
     current_function: FunctionDefinitionIndex,
-    code: &Vec<Bytecode>,
+    code: &[Bytecode],
 ) -> PartialVMResult<()> {
     // Check to make sure that the bytecode vector ends with a branching instruction.
     match code.last() {
diff --git a/third_party/move/move-compiler-v2/src/cyclic_instantiation_checker.rs b/third_party/move/move-compiler-v2/src/cyclic_instantiation_checker.rs
index a143ce26602bb..c2651e2d1565e 100644
--- a/third_party/move/move-compiler-v2/src/cyclic_instantiation_checker.rs
+++ b/third_party/move/move-compiler-v2/src/cyclic_instantiation_checker.rs
@@ -175,7 +175,7 @@ impl<'a> CyclicInstantiationChecker<'a> {
         &self,
         nid: NodeId,
         callee: QualifiedInstId<FunId>,
-        callers_chain: &mut Vec<(Loc, QualifiedInstId<FunId>)>,
+        callers_chain: &mut [(Loc, QualifiedInstId<FunId>)],
     ) {
         let root = callers_chain[0].1.id;
         let mut labels = (0..callers_chain.len() - 1)
diff --git a/third_party/move/move-compiler-v2/src/env_pipeline/spec_rewriter.rs b/third_party/move/move-compiler-v2/src/env_pipeline/spec_rewriter.rs
index baeb4125b9257..5fd1f908654b8 100644
--- a/third_party/move/move-compiler-v2/src/env_pipeline/spec_rewriter.rs
+++ b/third_party/move/move-compiler-v2/src/env_pipeline/spec_rewriter.rs
@@ -198,8 +198,8 @@ pub fn run_spec_rewriter(env: &mut GlobalEnv) {
         // Store result back
         for qid in scc {
             let decl_mut = env.get_spec_fun_mut(qid);
-            decl_mut.callees = transitive_callees.clone();
-            decl_mut.used_memory = transitive_usage.clone();
+            decl_mut.callees.clone_from(&transitive_callees);
+            decl_mut.used_memory.clone_from(&transitive_usage);
         }
     }
 
diff --git a/third_party/move/move-compiler-v2/src/file_format_generator/function_generator.rs b/third_party/move/move-compiler-v2/src/file_format_generator/function_generator.rs
index 2767f6e0befc0..75e96731b18f8 100644
--- a/third_party/move/move-compiler-v2/src/file_format_generator/function_generator.rs
+++ b/third_party/move/move-compiler-v2/src/file_format_generator/function_generator.rs
@@ -720,12 +720,7 @@ impl<'a> FunctionGenerator<'a> {
         }
     }
 
-    fn gen_vector_load_push(
-        &mut self,
-        ctx: &BytecodeContext,
-        vec: &Vec<Constant>,
-        vec_type: &Type,
-    ) {
+    fn gen_vector_load_push(&mut self, ctx: &BytecodeContext, vec: &[Constant], vec_type: &Type) {
         let fun_ctx = ctx.fun_ctx;
         let elem_type = if let Type::Vector(el) = vec_type {
             el.as_ref().clone()
diff --git a/third_party/move/move-compiler-v2/src/inliner.rs b/third_party/move/move-compiler-v2/src/inliner.rs
index d989fd99be354..eebb256cae61e 100644
--- a/third_party/move/move-compiler-v2/src/inliner.rs
+++ b/third_party/move/move-compiler-v2/src/inliner.rs
@@ -978,7 +978,7 @@ impl<'env, 'rewriter> InlinedRewriter<'env, 'rewriter> {
     ///
     /// (Helper for check_pattern_args_types_need_freezeref)
     fn check_params_args_types_vectors_need_freezeref(
-        params_types: &Vec<Type>,
+        params_types: &[Type],
         args_types: &Vec<Type>,
     ) -> Option<Vec<bool>> {
         // element is Some(true) if a FreezeRef is needed, Some(false) if not, and None if types
diff --git a/third_party/move/move-compiler/src/cfgir/cfg.rs b/third_party/move/move-compiler/src/cfgir/cfg.rs
index 8fd6104b7d5e2..8691a73e11c6a 100644
--- a/third_party/move/move-compiler/src/cfgir/cfg.rs
+++ b/third_party/move/move-compiler/src/cfgir/cfg.rs
@@ -21,7 +21,7 @@ use std::{
 //**************************************************************************************************
 // CFG
 //**************************************************************************************************
-
+#[allow(dead_code)]
 pub trait CFG {
     fn successors(&self, label: Label) -> &BTreeSet<Label>
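A closing note on the recurring `vec![...]` → `[...]` test changes throughout this diff (`mixed_reads`, `expected_order`, `shares`, `rounds`, `para_vec`, `local_typ_var`): where a collection is only iterated or indexed and never grows or transfers ownership, a stack array avoids the heap allocation, per clippy's `useless_vec`. A minimal sketch:

```rust
fn main() {
    // Only iterated, never resized: a plain array suffices where
    // `vec![...]` would allocate (clippy's `useless_vec`).
    let expected_order = [vec![1, 0], vec![2, 1], vec![3, 1]];

    for (i, group) in expected_order.iter().enumerate() {
        println!("group {i}: {group:?}");
    }
}
```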