diff --git a/accounts-db/src/account_storage.rs b/accounts-db/src/account_storage.rs
index 2beebd380aa825..ba3542f0ff49e8 100644
--- a/accounts-db/src/account_storage.rs
+++ b/accounts-db/src/account_storage.rs
@@ -103,7 +103,7 @@ impl AccountStorage {
     pub fn initialize(&mut self, all_storages: AccountStorageMap) {
         assert!(self.map.is_empty());
         assert!(self.no_shrink_in_progress());
-        self.map.extend(all_storages.into_iter())
+        self.map.extend(all_storages)
     }
 
     /// remove the append vec at 'slot'
diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs
index a950dd2a705702..d9f9cffe0c7855 100644
--- a/accounts-db/src/accounts_db.rs
+++ b/accounts-db/src/accounts_db.rs
@@ -3487,8 +3487,7 @@ impl AccountsDb {
 
         let (reclaims, pubkeys_removed_from_accounts_index2) =
             self.purge_keys_exact(pubkey_to_slot_set.iter());
-        pubkeys_removed_from_accounts_index
-            .extend(pubkeys_removed_from_accounts_index2.into_iter());
+        pubkeys_removed_from_accounts_index.extend(pubkeys_removed_from_accounts_index2);
 
         // Don't reset from clean, since the pubkeys in those stores may need to be unref'ed
         // and those stores may be used for background hashing.
@@ -7358,7 +7357,7 @@ impl AccountsDb {
         let mut sort_time = Measure::start("sort_storages");
         let min_root = self.accounts_index.min_alive_root();
         let storages = SortedStorages::new_with_slots(
-            combined_maps.iter().zip(slots.into_iter()),
+            combined_maps.iter().zip(slots),
             min_root,
             Some(slot),
         );
@@ -7824,7 +7823,7 @@ impl AccountsDb {
         let (storages, slots) =
             self.get_snapshot_storages(base_slot.checked_add(1).unwrap()..=slot);
         let sorted_storages =
-            SortedStorages::new_with_slots(storages.iter().zip(slots.into_iter()), None, None);
+            SortedStorages::new_with_slots(storages.iter().zip(slots), None, None);
         let calculated_incremental_accounts_hash = self.calculate_incremental_accounts_hash(
             &calc_config,
             &sorted_storages,
diff --git a/accounts-db/src/sorted_storages.rs b/accounts-db/src/sorted_storages.rs
index b1dfa8a025b721..26741b321f7a9e 100644
--- a/accounts-db/src/sorted_storages.rs
+++ b/accounts-db/src/sorted_storages.rs
@@ -67,7 +67,7 @@ impl<'a> SortedStorages<'a> {
         let slots = source.iter().map(|storage| {
            storage.slot() // this must be unique. Will be enforced in new_with_slots
        });
-        Self::new_with_slots(source.iter().zip(slots.into_iter()), None, None)
+        Self::new_with_slots(source.iter().zip(slots), None, None)
     }
 
     /// create [`SortedStorages`] from `source` iterator.
diff --git a/cli-output/src/cli_output.rs b/cli-output/src/cli_output.rs
index c70f0b9d52f35e..68e12e9c803d55 100644
--- a/cli-output/src/cli_output.rs
+++ b/cli-output/src/cli_output.rs
@@ -2385,7 +2385,7 @@ pub fn return_signers_data(tx: &Transaction, config: &ReturnSignersConfig) -> Cl
     tx.signatures
         .iter()
         .zip(tx.message.account_keys.iter())
-        .zip(verify_results.into_iter())
+        .zip(verify_results)
         .for_each(|((sig, key), res)| {
             if res {
                 signers.push(format!("{key}={sig}"))
diff --git a/cli/src/cluster_query.rs b/cli/src/cluster_query.rs
index 262a36781850bd..95e4377a5d2a6e 100644
--- a/cli/src/cluster_query.rs
+++ b/cli/src/cluster_query.rs
@@ -1980,7 +1980,7 @@ pub fn process_show_validators(
 
     let validators: Vec<_> = current_validators
         .into_iter()
-        .chain(delinquent_validators.into_iter())
+        .chain(delinquent_validators)
         .collect();
 
     let (average_skip_rate, average_stake_weighted_skip_rate) = {
diff --git a/cli/src/vote.rs b/cli/src/vote.rs
index 22cef36d7bd509..bc0a9d4072472e 100644
--- a/cli/src/vote.rs
+++ b/cli/src/vote.rs
@@ -1385,7 +1385,7 @@ pub fn process_close_vote_account(
     if let Some(vote_account) = vote_account_status
         .current
         .into_iter()
-        .chain(vote_account_status.delinquent.into_iter())
+        .chain(vote_account_status.delinquent)
         .next()
     {
         if vote_account.activated_stake != 0 {
diff --git a/core/src/shred_fetch_stage.rs b/core/src/shred_fetch_stage.rs
index a65ae2ea470ad1..3a5e5ee5dcfd4b 100644
--- a/core/src/shred_fetch_stage.rs
+++ b/core/src/shred_fetch_stage.rs
@@ -208,7 +208,7 @@ impl ShredFetchStage {
             turbine_disabled.clone(),
         );
 
-        tvu_threads.extend(repair_receiver.into_iter());
+        tvu_threads.extend(repair_receiver);
         tvu_threads.push(tvu_filter);
         tvu_threads.push(repair_handler);
 
diff --git a/gossip/src/cluster_info_metrics.rs b/gossip/src/cluster_info_metrics.rs
index 7c6a61db158c43..095848fd2932ca 100644
--- a/gossip/src/cluster_info_metrics.rs
+++ b/gossip/src/cluster_info_metrics.rs
@@ -676,7 +676,7 @@ pub(crate) fn submit_gossip_stats(
         .pull
         .votes
         .into_iter()
-        .chain(crds_stats.push.votes.into_iter())
+        .chain(crds_stats.push.votes)
         .into_grouping_map()
         .aggregate(|acc, _slot, num_votes| Some(acc.unwrap_or_default() + num_votes));
     submit_vote_stats("cluster_info_crds_stats_votes", &votes);
diff --git a/local-cluster/src/cluster_tests.rs b/local-cluster/src/cluster_tests.rs
index 63e1b4be15c5f4..dc44a307c48f3c 100644
--- a/local-cluster/src/cluster_tests.rs
+++ b/local-cluster/src/cluster_tests.rs
@@ -516,7 +516,7 @@ pub fn start_gossip_voter(
             let (labels, votes) = cluster_info.get_votes_with_labels(&mut cursor);
             let mut parsed_vote_iter: Vec<_> = labels
                 .into_iter()
-                .zip(votes.into_iter())
+                .zip(votes)
                 .filter_map(&vote_filter)
                 .collect();
 
diff --git a/metrics/src/metrics.rs b/metrics/src/metrics.rs
index c4cde872398392..df761a6ac2b9b0 100644
--- a/metrics/src/metrics.rs
+++ b/metrics/src/metrics.rs
@@ -527,10 +527,7 @@ pub mod test_mocks {
             assert!(!points.is_empty());
             let new_points = points.len();
-            self.points_written
-                .lock()
-                .unwrap()
-                .extend(points.into_iter());
+            self.points_written.lock().unwrap().extend(points);
 
             info!(
                 "Writing {} points ({} total)",
diff --git a/programs/bpf_loader/src/lib.rs b/programs/bpf_loader/src/lib.rs
index ed45290dd84df5..94990c216488ef 100644
--- a/programs/bpf_loader/src/lib.rs
+++ b/programs/bpf_loader/src/lib.rs
@@ -332,7 +332,7 @@ fn create_memory_mapping<'a, 'b, C: ContextObject>(
         MemoryRegion::new_writable(heap.as_slice_mut(), MM_HEAP_START),
     ]
     .into_iter()
-    .chain(additional_regions.into_iter())
+    .chain(additional_regions)
     .collect();
 
     Ok(if let Some(cow_cb) = cow_cb {
diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs
index c929ae2f14bd25..e34e3ab8fbe704 100644
--- a/runtime/src/serde_snapshot.rs
+++ b/runtime/src/serde_snapshot.rs
@@ -176,7 +176,7 @@ impl SnapshotAccountsDbFields {
                 })?;
 
                 let mut combined_storages = full_snapshot_storages;
-                combined_storages.extend(incremental_snapshot_storages.into_iter());
+                combined_storages.extend(incremental_snapshot_storages);
 
                 Ok(AccountsDbFields(
                     combined_storages,
diff --git a/runtime/src/snapshot_bank_utils.rs b/runtime/src/snapshot_bank_utils.rs
index 83b65a962188d6..82d6f354a4e998 100644
--- a/runtime/src/snapshot_bank_utils.rs
+++ b/runtime/src/snapshot_bank_utils.rs
@@ -300,7 +300,7 @@ pub fn bank_from_snapshot_archives(
     if let Some(ref mut unarchive_preparation_result) = unarchived_incremental_snapshot {
         let incremental_snapshot_storages =
             std::mem::take(&mut unarchive_preparation_result.storage);
-        storage.extend(incremental_snapshot_storages.into_iter());
+        storage.extend(incremental_snapshot_storages);
     }
 
     let storage_and_next_append_vec_id = StorageAndNextAppendVecId {
diff --git a/stake-accounts/src/stake_accounts.rs b/stake-accounts/src/stake_accounts.rs
index 0f8a913e400d15..b40cb1dcec5b30 100644
--- a/stake-accounts/src/stake_accounts.rs
+++ b/stake-accounts/src/stake_accounts.rs
@@ -138,7 +138,7 @@ fn move_stake_account(
         new_withdraw_authority_pubkey,
     );
 
-    instructions.extend(authorize_instructions.into_iter());
+    instructions.extend(authorize_instructions);
     let message = Message::new(&instructions, Some(fee_payer_pubkey));
     Some(message)
 }
diff --git a/storage-bigtable/src/compression.rs b/storage-bigtable/src/compression.rs
index e14a3b48462d8d..1609cc2c534d4a 100644
--- a/storage-bigtable/src/compression.rs
+++ b/storage-bigtable/src/compression.rs
@@ -48,28 +48,24 @@ pub fn decompress(data: &[u8]) -> Result<Vec<u8>, io::Error> {
 
 pub fn compress(method: CompressionMethod, data: &[u8]) -> Result<Vec<u8>, io::Error> {
     let mut compressed_data = bincode::serialize(&method).unwrap();
-    compressed_data.extend(
-        match method {
-            CompressionMethod::Bzip2 => {
-                let mut e = bzip2::write::BzEncoder::new(Vec::new(), bzip2::Compression::best());
-                e.write_all(data)?;
-                e.finish()?
-            }
-            CompressionMethod::Gzip => {
-                let mut e =
-                    flate2::write::GzEncoder::new(Vec::new(), flate2::Compression::default());
-                e.write_all(data)?;
-                e.finish()?
-            }
-            CompressionMethod::Zstd => {
-                let mut e = zstd::stream::write::Encoder::new(Vec::new(), 0).unwrap();
-                e.write_all(data)?;
-                e.finish()?
-            }
-            CompressionMethod::NoCompression => data.to_vec(),
+    compressed_data.extend(match method {
+        CompressionMethod::Bzip2 => {
+            let mut e = bzip2::write::BzEncoder::new(Vec::new(), bzip2::Compression::best());
+            e.write_all(data)?;
+            e.finish()?
         }
-        .into_iter(),
-    );
+        CompressionMethod::Gzip => {
+            let mut e = flate2::write::GzEncoder::new(Vec::new(), flate2::Compression::default());
+            e.write_all(data)?;
+            e.finish()?
+        }
+        CompressionMethod::Zstd => {
+            let mut e = zstd::stream::write::Encoder::new(Vec::new(), 0).unwrap();
+            e.write_all(data)?;
+            e.finish()?
+        }
+        CompressionMethod::NoCompression => data.to_vec(),
+    });
 
     Ok(compressed_data)
 }
diff --git a/validator/src/admin_rpc_service.rs b/validator/src/admin_rpc_service.rs
index 106af671502c91..125137c863f11f 100644
--- a/validator/src/admin_rpc_service.rs
+++ b/validator/src/admin_rpc_service.rs
@@ -485,7 +485,7 @@ impl AdminRpc for AdminRpcImpl {
             .staked_map_id;
         let mut write_staked_nodes = meta.staked_nodes_overrides.write().unwrap();
         write_staked_nodes.clear();
-        write_staked_nodes.extend(loaded_config.into_iter());
+        write_staked_nodes.extend(loaded_config);
         info!("Staked nodes overrides loaded from {}", path);
         debug!("overrides map: {:?}", write_staked_nodes);
         Ok(())
diff --git a/validator/src/main.rs b/validator/src/main.rs
index 1af9e2eafab9e9..f27d4fcc6537cb 100644
--- a/validator/src/main.rs
+++ b/validator/src/main.rs
@@ -1450,7 +1450,7 @@ pub fn main() {
         if let Some(account_shrink_snapshot_paths) = account_shrink_snapshot_paths {
             account_snapshot_paths
                 .into_iter()
-                .chain(account_shrink_snapshot_paths.into_iter())
+                .chain(account_shrink_snapshot_paths)
                 .collect()
         } else {
             account_snapshot_paths
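
Note (not part of the patch): every hunk above applies the same simplification. `Extend::extend`, `Iterator::chain`, and `Iterator::zip` already take any `impl IntoIterator`, so wrapping the argument in an explicit `.into_iter()` is redundant and clippy flags it as `useless_conversion`. A minimal standalone sketch of the equivalence, using placeholder vectors rather than the types touched in this diff:

// Standalone sketch: extend/chain/zip accept `impl IntoIterator`,
// so passing a collection directly is equivalent to passing `collection.into_iter()`.
fn main() {
    let extra = vec![4, 5, 6];
    let more = vec![7, 8, 9];
    let slots = vec![10, 20, 30];

    // Extend::extend<I: IntoIterator<Item = T>>
    let mut all = vec![1, 2, 3];
    all.extend(extra); // same as `all.extend(extra.into_iter())`

    // Iterator::chain<U: IntoIterator<Item = Self::Item>>
    let chained: Vec<i32> = all.iter().copied().chain(more).collect();

    // Iterator::zip<U: IntoIterator>
    let zipped: Vec<(i32, i32)> = chained.iter().copied().zip(slots).collect();

    println!("{zipped:?}");
}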