ok_or and map_or audit
gelash committed Oct 6, 2024
1 parent cc50aaa commit 54c63b7
Showing 35 changed files with 138 additions and 126 deletions.
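The changes below follow a single pattern: the eager combinators Option::ok_or(err) and map_or(default, f) construct their error value or default up front, even on the Some path, while ok_or_else(|| err) and map_or_else(|| default, f) (or an explicit match) defer that work to the None path. The snippet below is an illustrative sketch only — expensive_error is a hypothetical helper, not code from this repository — showing the eager and lazy forms side by side.

// Sketch only; expensive_error stands in for the anyhow!/format!/to_string()
// expressions that this commit defers.
fn expensive_error() -> String {
    println!("building error message"); // side effect makes the eager evaluation visible
    "value was missing".to_string()
}

fn main() {
    let present: Option<u64> = Some(7);

    // Eager: expensive_error() runs even though the option is Some.
    let _eager: Result<u64, String> = present.ok_or(expensive_error());

    // Lazy: the function is only called if the option is None.
    let _lazy: Result<u64, String> = present.ok_or_else(expensive_error);

    // Same idea for defaults: map_or_else defers constructing the fallback value.
    let missing: Option<Vec<u64>> = None;
    let _items: Vec<u64> = missing.map_or_else(Vec::new, |v| v);
}

In most hunks the deferred expression is an anyhow!, format!, or to_string() call that allocates, so the lazy form skips that work on the success path.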
4 changes: 2 additions & 2 deletions api/src/context.rs
@@ -858,7 +858,7 @@ impl Context {
} else {
self.indexer_reader
.as_ref()
- .ok_or(anyhow!("Indexer reader is None"))
+ .ok_or_else(|| anyhow!("Indexer reader is None"))
.map_err(|err| {
E::internal_with_code(err, AptosErrorCode::InternalError, ledger_info)
})?
@@ -957,7 +957,7 @@ impl Context {
} else {
self.indexer_reader
.as_ref()
- .ok_or(anyhow!("Internal indexer reader doesn't exist"))?
+ .ok_or_else(|| anyhow!("Internal indexer reader doesn't exist"))?
.get_events(event_key, start, order, limit as u64, ledger_version)?
};
if order == Order::Descending {
14 changes: 9 additions & 5 deletions aptos-move/aptos-vm/src/verifier/transaction_arg_validation.rs
@@ -455,11 +455,15 @@ fn validate_and_construct(
)?;
let mut ret_vals = serialized_result.return_values;
// We know ret_vals.len() == 1
- let deserialize_error = VMStatus::error(
- StatusCode::INTERNAL_TYPE_ERROR,
- Some(String::from("Constructor did not return value")),
- );
- Ok(ret_vals.pop().ok_or(deserialize_error)?.0)
+ Ok(ret_vals
+ .pop()
+ .ok_or_else(|| {
+ VMStatus::error(
+ StatusCode::INTERNAL_TYPE_ERROR,
+ Some(String::from("Constructor did not return value")),
+ )
+ })?
+ .0)
}

// String is a vector of bytes, so both string and vector carry a length in the serialized format.
15 changes: 9 additions & 6 deletions aptos-move/block-executor/src/executor.rs
@@ -119,13 +119,16 @@ where
let sync_view = LatestView::new(base_view, ViewState::Sync(parallel_state), idx_to_execute);
let execute_result = executor.execute_transaction(&sync_view, txn, idx_to_execute);

- let mut prev_modified_keys = last_input_output
- .modified_keys::<true>(idx_to_execute)
- .map_or(HashMap::new(), |keys| keys.collect());
+ let mut prev_modified_keys = match last_input_output.modified_keys::<true>(idx_to_execute) {
+ Some(keys) => keys.collect(),
+ None => HashMap::new(),
+ };

- let mut prev_modified_delayed_fields = last_input_output
- .delayed_field_keys(idx_to_execute)
- .map_or(HashSet::new(), |keys| keys.collect());
+ let mut prev_modified_delayed_fields =
+ match last_input_output.delayed_field_keys(idx_to_execute) {
+ Some(keys) => keys.collect(),
+ None => HashSet::new(),
+ };

let mut read_set = sync_view.take_parallel_reads();
if read_set.is_incorrect_use() {
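The two executor.rs hunks above use an explicit match rather than map_or_else when the fallback is a cheap constructor and the success arm collects an iterator; both shapes are lazy. The sketch below uses hypothetical names (collect_keys is not from the repository) to show that the two forms are equivalent, with the match simply making the None arm explicit.

// Sketch with hypothetical names; both forms defer building the empty map until None is seen.
use std::collections::HashMap;

fn collect_keys<I: Iterator<Item = (u32, bool)>>(maybe_keys: Option<I>) -> HashMap<u32, bool> {
    match maybe_keys {
        Some(keys) => keys.collect(),
        None => HashMap::new(),
    }
}

fn collect_keys_alt<I: Iterator<Item = (u32, bool)>>(maybe_keys: Option<I>) -> HashMap<u32, bool> {
    // HashMap::new is passed as the lazy default and only runs on None.
    maybe_keys.map_or_else(HashMap::new, |keys| keys.collect())
}

fn main() {
    let keys = vec![(1u32, true), (2, false)];
    assert_eq!(
        collect_keys(Some(keys.clone().into_iter())),
        collect_keys_alt(Some(keys.into_iter()))
    );
    assert!(collect_keys::<std::vec::IntoIter<(u32, bool)>>(None).is_empty());
}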
10 changes: 5 additions & 5 deletions aptos-move/block-executor/src/txn_last_input_output.rs
@@ -35,7 +35,7 @@ macro_rules! forward_on_success_or_skip_rest {
$self.outputs[$txn_idx as usize]
.load()
.as_ref()
- .map_or(vec![], |txn_output| match txn_output.as_ref() {
+ .map_or_else(Vec::new, |txn_output| match txn_output.as_ref() {
ExecutionStatus::Success(t) | ExecutionStatus::SkipRest(t) => t.$f(),
ExecutionStatus::Abort(_)
| ExecutionStatus::SpeculativeExecutionAbortError(_)
@@ -367,9 +367,9 @@ impl<T: Transaction, O: TransactionOutput<Txn = T>, E: Debug + Send + Clone>
&self,
txn_idx: TxnIndex,
) -> Box<dyn Iterator<Item = (T::Event, Option<MoveTypeLayout>)>> {
- self.outputs[txn_idx as usize].load().as_ref().map_or(
- Box::new(empty::<(T::Event, Option<MoveTypeLayout>)>()),
- |txn_output| match txn_output.as_ref() {
+ match self.outputs[txn_idx as usize].load().as_ref() {
+ None => Box::new(empty::<(T::Event, Option<MoveTypeLayout>)>()),
+ Some(txn_output) => match txn_output.as_ref() {
ExecutionStatus::Success(t) | ExecutionStatus::SkipRest(t) => {
let events = t.get_events();
Box::new(events.into_iter())
@@ -380,7 +380,7 @@ impl<T: Transaction, O: TransactionOutput<Txn = T>, E: Debug + Send + Clone>
Box::new(empty::<(T::Event, Option<MoveTypeLayout>)>())
},
},
- )
+ }
}

pub(crate) fn take_resource_write_set(
20 changes: 9 additions & 11 deletions aptos-move/framework/src/natives/aggregator_natives/helpers_v1.rs
@@ -56,19 +56,17 @@ pub(crate) fn unpack_aggregator_struct(

let pop_with_err = |vec: &mut Vec<Value>, msg: &str| {
vec.pop()
- .map_or(Err(extension_error(msg)), |v| v.value_as::<u128>())
+ .map_or_else(|| Err(extension_error(msg)), |v| v.value_as::<u128>())
};

let limit = pop_with_err(&mut fields, "unable to pop 'limit' field")?;
- let key = fields
- .pop()
- .map_or(Err(extension_error("unable to pop `handle` field")), |v| {
- v.value_as::<AccountAddress>()
- })?;
- let handle = fields
- .pop()
- .map_or(Err(extension_error("unable to pop `handle` field")), |v| {
- v.value_as::<AccountAddress>()
- })?;
+ let key = fields.pop().map_or_else(
+ || Err(extension_error("unable to pop `handle` field")),
+ |v| v.value_as::<AccountAddress>(),
+ )?;
+ let handle = fields.pop().map_or_else(
+ || Err(extension_error("unable to pop `handle` field")),
+ |v| v.value_as::<AccountAddress>(),
+ )?;
Ok((TableHandle(handle), key, limit))
}
6 changes: 3 additions & 3 deletions config/src/config/node_config_loader.rs
@@ -159,9 +159,9 @@ fn get_chain_id(node_config: &NodeConfig) -> Result<ChainId, Error> {
// TODO: can we make this less hacky?

// Load the genesis transaction from disk
- let genesis_txn = get_genesis_txn(node_config).ok_or(Error::InvariantViolation(
- "The genesis transaction was not found!".to_string(),
- ))?;
+ let genesis_txn = get_genesis_txn(node_config).ok_or_else(|| {
+ Error::InvariantViolation("The genesis transaction was not found!".to_string())
+ })?;

// Extract the chain ID from the genesis transaction
match genesis_txn {
2 changes: 1 addition & 1 deletion config/src/config/secure_backend_config.rs
@@ -79,7 +79,7 @@ impl VaultConfig {
let path = self
.ca_certificate
.as_ref()
- .ok_or(Error::Missing("ca_certificate"))?;
+ .ok_or_else(|| Error::Missing("ca_certificate"))?;
read_file(path)
}
}
2 changes: 1 addition & 1 deletion consensus/consensus-types/src/sync_info.rs
@@ -42,7 +42,7 @@ impl Display for SyncInfo {
self.highest_timeout_round(),
self.highest_commit_round(),
self.highest_quorum_cert,
- self.highest_ordered_cert.as_ref().map_or("None".to_string(), |cert| cert.to_string()),
+ self.highest_ordered_cert.as_ref().map_or_else(|| "None".to_string(), |cert| cert.to_string()),
self.highest_commit_cert,
)
}
12 changes: 7 additions & 5 deletions consensus/src/dag/adapter.rs
@@ -297,11 +297,13 @@ impl StorageAdapter {
usize::try_from(*index)
.map_err(|_err| anyhow!("index {} out of bounds", index))
.and_then(|index| {
- validators.get(index).cloned().ok_or(anyhow!(
- "index {} is larger than number of validators {}",
- index,
- validators.len()
- ))
+ validators.get(index).cloned().ok_or_else(|| {
+ anyhow!(
+ "index {} is larger than number of validators {}",
+ index,
+ validators.len()
+ )
+ })
})
})
.collect()
12 changes: 7 additions & 5 deletions consensus/src/liveness/leader_reputation.rs
@@ -283,11 +283,13 @@ impl NewBlockEventAggregation {
usize::try_from(*index)
.map_err(|_err| format!("index {} out of bounds", index))
.and_then(|index| {
- validators.get(index).ok_or(format!(
- "index {} is larger than number of validators {}",
- index,
- validators.len()
- ))
+ validators.get(index).ok_or_else(|| {
+ format!(
+ "index {} is larger than number of validators {}",
+ index,
+ validators.len()
+ )
+ })
})
})
.collect()
2 changes: 1 addition & 1 deletion consensus/src/liveness/round_state.rs
@@ -363,7 +363,7 @@ impl RoundState {
round = self.current_round,
"{:?} passed since the previous deadline.",
now.checked_sub(self.current_round_deadline)
- .map_or("0 ms".to_string(), |v| format!("{:?}", v))
+ .map_or_else(|| "0 ms".to_string(), |v| format!("{:?}", v))
);
debug!(
round = self.current_round,
4 changes: 2 additions & 2 deletions consensus/src/persistent_liveness_storage.rs
@@ -436,8 +436,8 @@ impl PersistentLivenessStorage for StorageWriteProxy {
}
info!(
"Starting up the consensus state machine with recovery data - [last_vote {}], [highest timeout certificate: {}]",
- initial_data.last_vote.as_ref().map_or("None".to_string(), |v| v.to_string()),
- initial_data.highest_2chain_timeout_certificate().as_ref().map_or("None".to_string(), |v| v.to_string()),
+ initial_data.last_vote.as_ref().map_or_else(|| "None".to_string(), |v| v.to_string()),
+ initial_data.highest_2chain_timeout_certificate().as_ref().map_or_else(|| "None".to_string(), |v| v.to_string()),
);

LivenessStorageData::FullRecoveryData(initial_data)
4 changes: 2 additions & 2 deletions crates/aptos-dkg/src/weighted_vuf/pinkas/mod.rs
@@ -246,7 +246,7 @@ impl WeightedVUF for PinkasWUF {
pis.push(
apks[player.id]
.as_ref()
- .ok_or(anyhow!("Missing APK for player {}", player.get_id()))?
+ .ok_or_else(|| anyhow!("Missing APK for player {}", player.get_id()))?
.0
.pi,
);
@@ -299,7 +299,7 @@ impl PinkasWUF {

let apk = apks[player.id]
.as_ref()
- .ok_or(anyhow!("Missing APK for player {}", player.get_id()))?;
+ .ok_or_else(|| anyhow!("Missing APK for player {}", player.get_id()))?;

rks.push(&apk.0.rks);
shares.push(share);
6 changes: 3 additions & 3 deletions crates/aptos/src/account/multisig_account.rs
@@ -200,11 +200,11 @@ impl CliCommand<serde_json::Value> for VerifyProposal {
.to_hex_literal()
// If full payload not provided, get payload hash directly from transaction proposal:
} else {
- view_json_option_str(&multisig_transaction["payload_hash"])?.ok_or(
+ view_json_option_str(&multisig_transaction["payload_hash"])?.ok_or_else(|| {
CliError::UnexpectedError(
"Neither payload nor payload hash provided on-chain".to_string(),
- ),
- )?
+ )
+ })?
};
// Get verification result based on if expected and actual payload hashes match.
if expected_payload_hash.eq(&actual_payload_hash) {
6 changes: 3 additions & 3 deletions crates/aptos/src/common/init.rs
@@ -231,9 +231,9 @@ impl CliCommand<()> for InitTool {
let public_key = if self.is_hardware_wallet() {
let pub_key = match aptos_ledger::get_public_key(
derivation_path
- .ok_or(CliError::UnexpectedError(
- "Invalid derivation path".to_string(),
- ))?
+ .ok_or_else(|| {
+ CliError::UnexpectedError("Invalid derivation path".to_string())
+ })?
.as_str(),
false,
) {
4 changes: 1 addition & 3 deletions crates/aptos/src/common/types.rs
@@ -2218,9 +2218,7 @@ impl TryInto<MemberId> for &EntryFunctionArguments {
fn try_into(self) -> Result<MemberId, Self::Error> {
self.function_id
.clone()
- .ok_or(CliError::CommandArgumentError(
- "No function ID provided".to_string(),
- ))
+ .ok_or_else(|| CliError::CommandArgumentError("No function ID provided".to_string()))
}
}

21 changes: 10 additions & 11 deletions crates/aptos/src/governance/delegation_pool.rs
@@ -222,13 +222,13 @@ async fn is_partial_governance_voting_enabled_for_delegation_pool(
None,
)
.await?;
- response.inner()[0]
- .as_bool()
- .ok_or(CliError::UnexpectedError(
+ response.inner()[0].as_bool().ok_or_else(|| {
+ CliError::UnexpectedError(
"Unexpected response from node when checking if partial governance_voting is \
enabled for delegation pool"
.to_string(),
- ))
+ )
+ })
}

async fn get_remaining_voting_power(
@@ -255,14 +255,13 @@ async fn get_remaining_voting_power(
None,
)
.await?;
- let remaining_voting_power_str =
- response.inner()[0]
- .as_str()
- .ok_or(CliError::UnexpectedError(format!(
- "Unexpected response from node when getting remaining voting power of {}\
+ let remaining_voting_power_str = response.inner()[0].as_str().ok_or_else(|| {
+ CliError::UnexpectedError(format!(
+ "Unexpected response from node when getting remaining voting power of {}\
in delegation pool {}",
- pool_address, voter_address
- )))?;
+ pool_address, voter_address
+ ))
+ })?;
remaining_voting_power_str.parse().map_err(|err| {
CliError::UnexpectedError(format!(
"Unexpected response from node when getting remaining voting power of {}\
2 changes: 1 addition & 1 deletion crates/aptos/src/move_tool/mod.rs
@@ -1569,7 +1569,7 @@ async fn submit_chunked_publish_transactions(
match result {
Ok(tx_summary) => {
let tx_hash = tx_summary.transaction_hash.to_string();
- let status = tx_summary.success.map_or("".to_string(), |success| {
+ let status = tx_summary.success.map_or_else(String::new, |success| {
if success {
"Success".to_string()
} else {
4 changes: 3 additions & 1 deletion keyless/common/src/input_processing/witness_gen.rs
@@ -12,7 +12,9 @@ pub trait PathStr {

impl PathStr for NamedTempFile {
fn path_str(&self) -> Result<&str> {
- self.path().to_str().ok_or(anyhow!("tempfile path error"))
+ self.path()
+ .to_str()
+ .ok_or_else(|| anyhow!("tempfile path error"))
}
}

4 changes: 2 additions & 2 deletions mempool/src/shared_mempool/network.rs
@@ -370,7 +370,7 @@ impl<NetworkClient: NetworkClientInterface<MempoolSyncMsg>> MempoolNetworkInterf
// If we don't have any info about the node, we shouldn't broadcast to it
let state = sync_states
.get_mut(&peer)
- .ok_or(BroadcastError::PeerNotFound(peer))?;
+ .ok_or_else(|| BroadcastError::PeerNotFound(peer))?;

// If backoff mode is on for this peer, only execute broadcasts that were scheduled as a backoff broadcast.
// This is to ensure the backoff mode is actually honored (there is a chance a broadcast was scheduled
@@ -607,7 +607,7 @@ impl<NetworkClient: NetworkClientInterface<MempoolSyncMsg>> MempoolNetworkInterf
let mut sync_states = self.sync_states.write();
let state = sync_states
.get_mut(&peer)
- .ok_or(BroadcastError::PeerNotFound(peer))?;
+ .ok_or_else(|| BroadcastError::PeerNotFound(peer))?;

// Update peer sync state with info from above broadcast.
state.update(&message_id);
2 changes: 1 addition & 1 deletion secure/storage/src/on_disk.rs
@@ -42,7 +42,7 @@ impl OnDiskStorage {
// working directory provided by PathBuf::new().
let file_dir = file_path
.parent()
- .map_or(PathBuf::new(), |p| p.to_path_buf());
+ .map_or_else(PathBuf::new, |p| p.to_path_buf());

Self {
file_path,
9 changes: 6 additions & 3 deletions state-sync/data-streaming-service/src/data_stream.rs
@@ -588,9 +588,12 @@ impl<T: AptosDataClientInterface + Send + Clone + 'static> DataStream<T> {
.advertised_data
.highest_synced_ledger_info()
.map(|ledger_info| ledger_info.ledger_info().version())
- .ok_or(aptos_data_client::error::Error::UnexpectedErrorEncountered(
- "The highest synced ledger info is missing from the global data summary!".into(),
- ))?;
+ .ok_or_else(|| {
+ aptos_data_client::error::Error::UnexpectedErrorEncountered(
+ "The highest synced ledger info is missing from the global data summary!"
+ .into(),
+ )
+ })?;

// If the stream is not lagging behind, reset the lag and return
if highest_response_version >= highest_advertised_version {
2 changes: 1 addition & 1 deletion storage/aptosdb/src/db/fake_aptosdb.rs
@@ -1117,7 +1117,7 @@ mod tests {
let signed_transaction = transaction_with_proof
.transaction
.try_as_signed_user_txn()
- .ok_or(anyhow!("not user transaction"))?;
+ .ok_or_else(|| anyhow!("not user transaction"))?;

ensure!(
transaction_with_proof.version == version,