fixes errors from clippy::useless_conversion (solana-labs#29534)
behzadnouri authored and gnapoli23 committed Jan 10, 2023
1 parent 8740a3c commit 5910cde
Showing 33 changed files with 46 additions and 135 deletions.
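All of the changes below follow one pattern: clippy::useless_conversion fires when a value is converted into the type it already has, most commonly an `.into_iter()` call on a `Range`, which already implements `Iterator`. A minimal standalone sketch of the warning and its fix (illustrative code, not taken from this repository):

fn main() {
    // Before: clippy::useless_conversion warns here, because the range
    // is already an iterator and .into_iter() just returns it unchanged.
    let squares: Vec<u64> = (0..10u64).into_iter().map(|i| i * i).collect();

    // After: the adapter chain applies to the range directly.
    let fixed: Vec<u64> = (0..10u64).map(|i| i * i).collect();
    assert_eq!(squares, fixed);
}

These calls are harmless at runtime; they break CI only when clippy runs with warnings denied, e.g. something like `cargo clippy -- -D warnings`, which is presumably how this lint surfaced as an error.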
2 changes: 0 additions & 2 deletions accounts-cluster-bench/src/main.rs
@@ -155,7 +155,6 @@ fn make_create_message(
     let space = maybe_space.unwrap_or_else(|| thread_rng().gen_range(0, 1000));

     let instructions: Vec<_> = (0..num_instructions)
-        .into_iter()
         .flat_map(|_| {
             let program_id = if mint.is_some() {
                 inline_spl_token::id()
@@ -203,7 +202,6 @@ fn make_close_message(
     spl_token: bool,
 ) -> Message {
     let instructions: Vec<_> = (0..num_instructions)
-        .into_iter()
         .filter_map(|_| {
             let program_id = if spl_token {
                 inline_spl_token::id()
1 change: 0 additions & 1 deletion bench-streamer/src/main.rs
@@ -120,7 +120,6 @@ fn main() -> Result<()> {
     }

     let producer_threads: Vec<_> = (0..num_producers)
-        .into_iter()
         .map(|_| producer(&addr, exit.clone()))
         .collect();

8 changes: 4 additions & 4 deletions bucket_map/benches/bucket_map.rs
@@ -39,15 +39,15 @@ DEFINE_NxM_BENCH!(dim_32x64, 32, 64);
 /// Benchmark insert with Hashmap as baseline for N threads inserting M keys each
 fn do_bench_insert_baseline_hashmap(bencher: &mut Bencher, n: usize, m: usize) {
     let index = RwLock::new(HashMap::new());
-    (0..n).into_iter().into_par_iter().for_each(|i| {
+    (0..n).into_par_iter().for_each(|i| {
         let key = Pubkey::new_unique();
         index
             .write()
             .unwrap()
             .insert(key, vec![(i, IndexValue::default())]);
     });
     bencher.iter(|| {
-        (0..n).into_iter().into_par_iter().for_each(|_| {
+        (0..n).into_par_iter().for_each(|_| {
             for j in 0..m {
                 let key = Pubkey::new_unique();
                 index
@@ -62,12 +62,12 @@ fn do_bench_insert_baseline_hashmap(bencher: &mut Bencher, n: usize, m: usize) {
 /// Benchmark insert with BucketMap with N buckets for N threads inserting M keys each
 fn do_bench_insert_bucket_map(bencher: &mut Bencher, n: usize, m: usize) {
     let index = BucketMap::new(BucketMapConfig::new(n));
-    (0..n).into_iter().into_par_iter().for_each(|i| {
+    (0..n).into_par_iter().for_each(|i| {
         let key = Pubkey::new_unique();
         index.update(&key, |_| Some((vec![(i, IndexValue::default())], 0)));
     });
     bencher.iter(|| {
-        (0..n).into_iter().into_par_iter().for_each(|_| {
+        (0..n).into_par_iter().for_each(|_| {
             for j in 0..m {
                 let key = Pubkey::new_unique();
                 index.update(&key, |_| Some((vec![(j, IndexValue::default())], 0)));
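
The benches above stacked two conversions: rayon implements IntoParallelIterator for integer ranges, so `(0..n).into_iter().into_par_iter()` first converted the range to itself, then to a parallel iterator. A minimal sketch of the fixed form, assuming the rayon crate is available:

use rayon::prelude::*;

fn main() {
    // The range converts straight to a parallel iterator; the
    // sequential .into_iter() step added nothing.
    let sum: usize = (0..1000usize).into_par_iter().map(|i| i * 2).sum();
    assert_eq!(sum, 999_000);
}
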
7 changes: 2 additions & 5 deletions bucket_map/src/bucket_map.rs
@@ -81,7 +81,6 @@ impl<T: Clone + Copy + Debug> BucketMap<T> {

         let stats = Arc::default();
         let buckets = (0..config.max_buckets)
-            .into_iter()
             .map(|_| {
                 Arc::new(BucketApi::new(
                     Arc::clone(&drives),
@@ -320,7 +319,7 @@ mod tests {
     fn bucket_map_test_grow_read() {
         let config = BucketMapConfig::new(1 << 2);
         let index = BucketMap::new(config);
-        let keys: Vec<Pubkey> = (0..100).into_iter().map(|_| Pubkey::new_unique()).collect();
+        let keys: Vec<Pubkey> = (0..100).map(|_| Pubkey::new_unique()).collect();
         for k in 0..keys.len() {
             let key = &keys[k];
             let i = read_be_u64(key.as_ref());
@@ -339,7 +338,7 @@
     fn bucket_map_test_n_delete() {
         let config = BucketMapConfig::new(1 << 2);
         let index = BucketMap::new(config);
-        let keys: Vec<Pubkey> = (0..20).into_iter().map(|_| Pubkey::new_unique()).collect();
+        let keys: Vec<Pubkey> = (0..20).map(|_| Pubkey::new_unique()).collect();
         for key in keys.iter() {
             let i = read_be_u64(key.as_ref());
             index.update(key, |_| Some((vec![i], 0)));
@@ -366,7 +365,6 @@
         use std::sync::Mutex;
         solana_logger::setup();
         let maps = (0..2)
-            .into_iter()
             .map(|max_buckets_pow2| {
                 let config = BucketMapConfig::new(1 << max_buckets_pow2);
                 BucketMap::new(config)
@@ -379,7 +377,6 @@
         let gen_rand_value = || {
             let count = thread_rng().gen_range(0, max_slot_list_len);
             let v = (0..count)
-                .into_iter()
                 .map(|x| (x as usize, x as usize /*thread_rng().gen::<usize>()*/))
                 .collect::<Vec<_>>();
             let rc = thread_rng().gen::<RefCount>();
2 changes: 1 addition & 1 deletion bucket_map/src/bucket_storage.rs
@@ -324,7 +324,7 @@ impl BucketStorage {

         let increment = self.capacity_pow2 - old_bucket.capacity_pow2;
         let index_grow = 1 << increment;
-        (0..old_cap as usize).into_iter().for_each(|i| {
+        (0..old_cap as usize).for_each(|i| {
             let old_ix = i * old_bucket.cell_size as usize;
             let new_ix = old_ix * index_grow;
             let dst_slice: &[u8] = &self.mmap[new_ix..new_ix + old_bucket.cell_size as usize];
4 changes: 2 additions & 2 deletions bucket_map/tests/bucket_map.rs
@@ -23,12 +23,12 @@ fn bucket_map_test_mt() {
         drives: Some(paths.clone()),
         ..BucketMapConfig::default()
     });
-    (0..threads).into_iter().into_par_iter().for_each(|_| {
+    (0..threads).into_par_iter().for_each(|_| {
         let key = Pubkey::new_unique();
         index.update(&key, |_| Some((vec![0u64], 0)));
     });
     let mut timer = Measure::start("bucket_map_test_mt");
-    (0..threads).into_iter().into_par_iter().for_each(|_| {
+    (0..threads).into_par_iter().for_each(|_| {
         for _ in 0..items {
             let key = Pubkey::new_unique();
             let ix: u64 = index.bucket_ix(&key) as u64;
1 change: 0 additions & 1 deletion client/src/connection_cache.rs
@@ -531,7 +531,6 @@ mod tests {
             0
         };
         let addrs = (0..MAX_CONNECTIONS)
-            .into_iter()
             .map(|_| {
                 let addr = get_addr(&mut rng);
                 connection_cache.get_connection(&addr);
1 change: 0 additions & 1 deletion core/benches/sigverify_stage.rs
@@ -40,7 +40,6 @@ fn run_bench_packet_discard(num_ips: usize, bencher: &mut Bencher) {
     let mut total = 0;

     let ips: Vec<_> = (0..num_ips)
-        .into_iter()
         .map(|_| {
             let mut addr = [0u16; 8];
             thread_rng().fill(&mut addr);
1 change: 0 additions & 1 deletion core/src/serve_repair.rs
@@ -2185,7 +2185,6 @@ mod tests {
         let request_slot = MAX_ANCESTOR_RESPONSES as Slot;
         let repair = AncestorHashesRepairType(request_slot);
         let mut response: Vec<SlotHash> = (0..request_slot)
-            .into_iter()
             .map(|slot| (slot, Hash::new_unique()))
             .collect();
         assert!(repair.verify_response(&AncestorHashesResponse::Hashes(response.clone())));
7 changes: 1 addition & 6 deletions core/src/unprocessed_transaction_storage.rs
@@ -1082,12 +1082,7 @@ mod tests {
         let expected_ports: Vec<_> = (0..256).collect();
         let mut forwarded_ports: Vec<_> = forward_packet_batches_by_accounts
             .iter_batches()
-            .flat_map(|batch| {
-                batch
-                    .get_forwardable_packets()
-                    .into_iter()
-                    .map(|p| p.meta().port)
-            })
+            .flat_map(|batch| batch.get_forwardable_packets().map(|p| p.meta().port))
             .collect();
         forwarded_ports.sort_unstable();
         assert_eq!(expected_ports, forwarded_ports);
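
The closure above collapses because `get_forwardable_packets()` already returns an iterator, so there is nothing to convert before `map`. A simplified sketch of the shape of this fix, with a hypothetical `Batch::ports()` standing in for the real accessor:

struct Batch {
    ports: Vec<u16>,
}

impl Batch {
    // Already returns an iterator, so callers can chain .map() on it
    // directly inside flat_map, with no .into_iter() in between.
    fn ports(&self) -> impl Iterator<Item = u16> + '_ {
        self.ports.iter().copied()
    }
}

fn main() {
    let batches = vec![Batch { ports: vec![1, 2] }, Batch { ports: vec![3] }];
    let forwarded: Vec<u16> = batches.iter().flat_map(|b| b.ports()).collect();
    assert_eq!(forwarded, vec![1, 2, 3]);
}
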
1 change: 0 additions & 1 deletion core/src/verified_vote_packets.rs
@@ -717,7 +717,6 @@ mod tests {
                 slot,
                 confirmation_count,
             })
-            .into_iter()
             .collect::<VecDeque<Lockout>>();
         let vote = VoteTransaction::from(VoteStateUpdate::new(
             slots,
1 change: 0 additions & 1 deletion ledger/src/blockstore.rs
@@ -9141,7 +9141,6 @@ pub mod tests {

     fn make_large_tx_entry(num_txs: usize) -> Entry {
         let txs: Vec<_> = (0..num_txs)
-            .into_iter()
             .map(|_| {
                 let keypair0 = Keypair::new();
                 let to = solana_sdk::pubkey::new_rand();
1 change: 0 additions & 1 deletion metrics/benches/metrics.rs
@@ -17,7 +17,6 @@ use {
 #[bench]
 fn bench_write_points(bencher: &mut Bencher) {
     let points = (0..10)
-        .into_iter()
         .map(|_| {
             DataPoint::new("measurement")
                 .add_field_i64("i", 0)
21 changes: 6 additions & 15 deletions runtime/src/accounts_db.rs
@@ -3704,11 +3704,7 @@ impl AccountsDb {
         });

         // sort by pubkey to keep account index lookups close
-        let mut stored_accounts = stored_accounts
-            .drain()
-            .into_iter()
-            .map(|(_k, v)| v)
-            .collect::<Vec<_>>();
+        let mut stored_accounts = stored_accounts.drain().map(|(_k, v)| v).collect::<Vec<_>>();
         stored_accounts.sort_unstable_by(|a, b| a.pubkey().cmp(b.pubkey()));

         GetUniqueAccountsResult {
@@ -6594,7 +6590,7 @@ impl AccountsDb {
     ) -> Vec<AccountInfo> {
         let mut calc_stored_meta_time = Measure::start("calc_stored_meta");
         let slot = accounts.target_slot();
-        (0..accounts.len()).into_iter().for_each(|index| {
+        (0..accounts.len()).for_each(|index| {
             let pubkey = accounts.pubkey(index);
             self.read_only_accounts_cache.remove(*pubkey, slot);
         });
@@ -7731,7 +7727,7 @@ impl AccountsDb {
         let update = |start, end| {
             let mut reclaims = Vec::with_capacity((end - start) / 2);

-            (start..end).into_iter().for_each(|i| {
+            (start..end).for_each(|i| {
                 let info = infos[i];
                 let pubkey_account = (accounts.pubkey(i), accounts.account(i));
                 let pubkey = pubkey_account.0;
@@ -14627,7 +14623,7 @@ pub mod tests {
         accounts_db.write_cache_limit_bytes = write_cache_limit_bytes;
         let accounts_db = Arc::new(accounts_db);

-        let slots: Vec<_> = (0..num_slots as Slot).into_iter().collect();
+        let slots: Vec<_> = (0..num_slots as Slot).collect();
         let stall_slot = num_slots as Slot;
         let scan_stall_key = Pubkey::new_unique();
         let keys: Vec<Pubkey> = std::iter::repeat_with(Pubkey::new_unique)
@@ -14911,9 +14907,7 @@ pub mod tests {
             } else {
                // Slots less than `requested_flush_root` and `scan_root` were cleaned in the cache before being flushed
                // to storage, should only contain one account
-                std::iter::once(keys[*slot as usize])
-                    .into_iter()
-                    .collect::<HashSet<Pubkey>>()
+                std::iter::once(keys[*slot as usize]).collect::<HashSet<Pubkey>>()
             };

             assert_eq!(slot_accounts, expected_accounts);
@@ -15010,9 +15004,7 @@ pub mod tests {
             } else {
                 // If clean was specified, only the latest slot should have all the updates.
                 // All these other slots have been cleaned before flush
-                std::iter::once(keys[*slot as usize])
-                    .into_iter()
-                    .collect::<HashSet<Pubkey>>()
+                std::iter::once(keys[*slot as usize]).collect::<HashSet<Pubkey>>()
             };
             assert_eq!(slot_accounts, expected_accounts);
         }
@@ -17333,7 +17325,6 @@ pub mod tests {
     fn get_all_accounts(db: &AccountsDb, slots: Range<Slot>) -> Vec<(Pubkey, AccountSharedData)> {
         slots
             .clone()
-            .into_iter()
             .filter_map(|slot| {
                 let storages = db.get_storages_for_slot(slot);
                 storages.map(|storages| {
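
Two further shapes of the same lint appear in accounts_db.rs: `HashMap::drain()` already yields an iterator over its entries, and `std::iter::once` is an iterator by construction. A standalone sketch of both fixes:

use std::collections::{HashMap, HashSet};

fn main() {
    // drain() returns an iterator of (key, value) pairs; chaining
    // .into_iter() onto it converts it to itself.
    let mut stored = HashMap::from([(1u8, "a"), (2u8, "b")]);
    let mut values: Vec<&str> = stored.drain().map(|(_k, v)| v).collect();
    values.sort_unstable();
    assert_eq!(values, vec!["a", "b"]);

    // iter::once is already an iterator and collects directly.
    let single: HashSet<u8> = std::iter::once(7u8).collect();
    assert!(single.contains(&7));
}
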
31 changes: 5 additions & 26 deletions runtime/src/accounts_hash.rs
@@ -1341,7 +1341,7 @@ pub mod tests {
         let key_b = Pubkey::new(&[2u8; 32]);
         let key_c = Pubkey::new(&[3u8; 32]);
         const COUNT: usize = 6;
-        let hashes = (0..COUNT).into_iter().map(|i| Hash::new(&[i as u8; 32]));
+        let hashes = (0..COUNT).map(|i| Hash::new(&[i as u8; 32]));
         // create this vector
         // abbbcc
         let keys = [key_a, key_b, key_b, key_b, key_c, key_c];
@@ -1670,13 +1670,7 @@ pub mod tests {
         let input: Vec<Vec<Vec<u64>>> = vec![vec![vec![0, 1], vec![], vec![2, 3, 4], vec![]]];
         let cumulative = CumulativeOffsets::from_raw_2d(&input);

-        let src: Vec<_> = input
-            .clone()
-            .into_iter()
-            .flatten()
-            .into_iter()
-            .flatten()
-            .collect();
+        let src: Vec<_> = input.clone().into_iter().flatten().flatten().collect();
         let len = src.len();
         assert_eq!(cumulative.total_count, len);
         assert_eq!(cumulative.cumulative_offsets.len(), 2); // 2 non-empty vectors
@@ -1701,13 +1695,7 @@ pub mod tests {
         let input = vec![vec![vec![], vec![0, 1], vec![], vec![2, 3, 4], vec![]]];
         let cumulative = CumulativeOffsets::from_raw_2d(&input);

-        let src: Vec<_> = input
-            .clone()
-            .into_iter()
-            .flatten()
-            .into_iter()
-            .flatten()
-            .collect();
+        let src: Vec<_> = input.clone().into_iter().flatten().flatten().collect();
         let len = src.len();
         assert_eq!(cumulative.total_count, len);
         assert_eq!(cumulative.cumulative_offsets.len(), 2); // 2 non-empty vectors
@@ -1741,13 +1729,7 @@ pub mod tests {
         ];
         let cumulative = CumulativeOffsets::from_raw_2d(&input);

-        let src: Vec<_> = input
-            .clone()
-            .into_iter()
-            .flatten()
-            .into_iter()
-            .flatten()
-            .collect();
+        let src: Vec<_> = input.clone().into_iter().flatten().flatten().collect();
         let len = src.len();
         assert_eq!(cumulative.total_count, len);
         assert_eq!(cumulative.cumulative_offsets.len(), 2); // 2 non-empty vectors
@@ -1841,10 +1823,7 @@ pub mod tests {
         hash_counts.extend(threshold - 1..=threshold + target);

         for hash_count in hash_counts {
-            let hashes: Vec<_> = (0..hash_count)
-                .into_iter()
-                .map(|_| Hash::new_unique())
-                .collect();
+            let hashes: Vec<_> = (0..hash_count).map(|_| Hash::new_unique()).collect();

             test_hashing(hashes, FANOUT);
         }
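
The flatten chains above work without glue because `Flatten` is itself an `Iterator`, so a second `flatten()` applies directly; the `.into_iter()` wedged between them was converting an iterator to itself. A minimal sketch:

fn main() {
    let input: Vec<Vec<Vec<u64>>> = vec![vec![vec![0, 1], vec![], vec![2, 3, 4]]];
    // One into_iter() consumes the outer Vec, then two flatten()
    // calls chain with nothing needed in between.
    let flat: Vec<u64> = input.into_iter().flatten().flatten().collect();
    assert_eq!(flat, vec![0, 1, 2, 3, 4]);
}
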
12 changes: 1 addition & 11 deletions runtime/src/accounts_index.rs
@@ -754,7 +754,6 @@ impl<T: IndexValue> AccountsIndex<T> {
         let bin_calculator = PubkeyBinCalculator24::new(bins);
         let storage = AccountsIndexStorage::new(bins, &config, exit);
         let account_maps = (0..bins)
-            .into_iter()
             .map(|bin| Arc::clone(&storage.in_mem[bin]))
             .collect::<Vec<_>>();
         (account_maps, bin_calculator, storage)
@@ -1582,7 +1581,6 @@ impl<T: IndexValue> AccountsIndex<T> {
         let random_offset = thread_rng().gen_range(0, bins);
         let use_disk = self.storage.storage.disk.is_some();
         let mut binned = (0..bins)
-            .into_iter()
             .map(|mut pubkey_bin| {
                 // opposite of (pubkey_bin + random_offset) % bins
                 pubkey_bin = if pubkey_bin < random_offset {
@@ -1637,7 +1635,6 @@ impl<T: IndexValue> AccountsIndex<T> {
     /// return Vec<Vec<>> because the internal vecs are already allocated per bin
     pub fn retrieve_duplicate_keys_from_startup(&self) -> Vec<Vec<(Slot, Pubkey)>> {
         (0..self.bins())
-            .into_iter()
             .map(|pubkey_bin| {
                 let r_account_maps = &self.account_maps[pubkey_bin];
                 r_account_maps.retrieve_duplicate_keys_from_startup()
@@ -3961,14 +3958,7 @@ pub mod tests {
         );
         assert_eq!((bins - 1, usize::MAX), iter.bin_start_and_range());

-        assert_eq!(
-            (0..2)
-                .into_iter()
-                .skip(1)
-                .take(usize::MAX)
-                .collect::<Vec<_>>(),
-            vec![1]
-        );
+        assert_eq!((0..2).skip(1).take(usize::MAX).collect::<Vec<_>>(), vec![1]);
     }

     #[test]
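
The rewritten assertion also shows that adapters such as `skip` and `take` chain onto a range as-is:

fn main() {
    // No .into_iter() needed before skip/take: a range is an iterator.
    assert_eq!((0..2).skip(1).take(usize::MAX).collect::<Vec<_>>(), vec![1]);
}
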
2 changes: 0 additions & 2 deletions runtime/src/accounts_index_storage.rs
@@ -64,7 +64,6 @@ impl BgThreads {
         let local_exit = Arc::new(AtomicBool::default());
         let handles = Some(
             (0..threads)
-                .into_iter()
                 .map(|idx| {
                     // the first thread we start is special
                     let can_advance_age = can_advance_age && idx == 0;
@@ -164,7 +163,6 @@ impl<T: IndexValue> AccountsIndexStorage<T> {
         let storage = Arc::new(BucketMapHolder::new(bins, config, threads));

         let in_mem = (0..bins)
-            .into_iter()
             .map(|bin| Arc::new(InMemAccountsIndex::new(&storage, bin)))
             .collect::<Vec<_>>();

2 changes: 1 addition & 1 deletion runtime/src/bank.rs
@@ -6272,7 +6272,7 @@ impl Bank {
     ) {
         assert!(!self.freeze_started());
         let mut m = Measure::start("stakes_cache.check_and_store");
-        (0..accounts.len()).into_iter().for_each(|i| {
+        (0..accounts.len()).for_each(|i| {
             self.stakes_cache
                 .check_and_store(accounts.pubkey(i), accounts.account(i))
         });