diff --git a/lib/src/builder/execute/ethereum.rs b/lib/src/builder/execute/ethereum.rs
index cf03e8a7..c3ab92dc 100644
--- a/lib/src/builder/execute/ethereum.rs
+++ b/lib/src/builder/execute/ethereum.rs
@@ -69,8 +69,8 @@ impl TxExecStrategy for EthTxExecStrategy {
             .unwrap();

         debug!("Block no. {}", header.number);
-        debug!(" EVM spec ID: {:?}", spec_id);
-        debug!(" Timestamp: {}", dt);
+        debug!(" EVM spec ID: {spec_id:?}");
+        debug!(" Timestamp: {dt}");
         trace!(
             " Transactions: {}",
             block_builder.input.state_input.transactions.len()
@@ -97,7 +97,7 @@ impl TxExecStrategy for EthTxExecStrategy {
             .with_spec_id(spec_id)
             .modify_block_env(|blk_env| {
                 // set the EVM block environment
-                blk_env.number = header.number.try_into().unwrap();
+                blk_env.number = U256::from(header.number);
                 blk_env.coinbase = block_builder.input.state_input.beneficiary;
                 blk_env.timestamp = header.timestamp;
                 blk_env.difficulty = U256::ZERO;
@@ -131,9 +131,9 @@ impl TxExecStrategy for EthTxExecStrategy {
            #[cfg(not(target_os = "zkvm"))]
            {
                let tx_hash = tx.hash();
-                trace!("Tx no. {} (hash: {})", tx_no, tx_hash);
+                trace!("Tx no. {tx_no} (hash: {tx_hash})");
                trace!(" Type: {}", tx.essence.tx_type());
-                trace!(" Fr: {:?}", tx_from);
+                trace!(" Fr: {tx_from:?}");
                trace!(" To: {:?}", tx.essence.to().unwrap_or_default());
            }

@@ -141,14 +141,14 @@ impl TxExecStrategy for EthTxExecStrategy {
            let block_available_gas =
                block_builder.input.state_input.gas_limit - cumulative_gas_used;
            if block_available_gas < tx.essence.gas_limit() {
-                bail!("Error at transaction {}: gas exceeds block limit", tx_no);
+                bail!("Error at transaction {tx_no}: gas exceeds block limit");
            }

            // process the transaction
            fill_eth_tx_env(&mut evm.env_mut().tx, &tx.essence, tx_from);
            let ResultAndState { result, state } = evm
                .transact()
-                .map_err(|evm_err| anyhow!("Error at transaction {}: {:?}", tx_no, evm_err))
+                .map_err(|evm_err| anyhow!("Error at transaction {tx_no}: {evm_err:?}"))
                // todo: change unrecoverable panic to host-side recoverable `Result`
                .expect("Block construction failure.");

@@ -156,7 +156,7 @@ impl TxExecStrategy for EthTxExecStrategy {
            cumulative_gas_used = cumulative_gas_used.checked_add(gas_used).unwrap();

            #[cfg(not(target_os = "zkvm"))]
-            trace!(" Ok: {:?}", result);
+            trace!(" Ok: {result:?}");

            // create the receipt from the EVM result
            let receipt = Receipt::new(
@@ -186,8 +186,7 @@ impl TxExecStrategy for EthTxExecStrategy {
                if account.is_touched() {
                    // log account
                    trace!(
-                        " State {:?} (is_selfdestructed={}, is_loaded_as_not_existing={}, is_created={}, is_empty={})",
-                        address,
+                        " State {address:?} (is_selfdestructed={}, is_loaded_as_not_existing={}, is_created={}, is_empty={})",
                        account.is_selfdestructed(),
                        account.is_loaded_as_not_existing(),
                        account.is_created(),
@@ -203,7 +202,7 @@ impl TxExecStrategy for EthTxExecStrategy {
                    // log state changes
                    for (addr, slot) in &account.storage {
                        if slot.is_changed() {
-                            trace!(" Storage address: {:?}", addr);
+                            trace!(" Storage address: {addr:?}");
                            trace!(" Before: {:?}", slot.original_value());
                            trace!(" After: {:?}", slot.present_value());
                        }
@@ -229,7 +228,7 @@ impl TxExecStrategy for EthTxExecStrategy {
            {
                trace!("Withdrawal no. {}", withdrawal.index);
                trace!(" Recipient: {:?}", withdrawal.address);
-                trace!(" Value: {}", amount_wei);
+                trace!(" Value: {amount_wei}");
            }
            // Credit withdrawal amount
            increase_account_balance(&mut evm.context.evm.db, withdrawal.address, amount_wei)
@@ -247,11 +246,7 @@ impl TxExecStrategy for EthTxExecStrategy {
        header.receipts_root = receipt_trie.hash();
        header.logs_bloom = logs_bloom;
        header.gas_used = cumulative_gas_used;
-        header.withdrawals_root = if spec_id < SpecId::SHANGHAI {
-            None
-        } else {
-            Some(withdrawals_trie.hash())
-        };
+        header.withdrawals_root = (spec_id >= SpecId::SHANGHAI).then_some(withdrawals_trie.hash());

        // Leak memory, save cycles
        guest_mem_forget([tx_trie, receipt_trie, withdrawals_trie]);
@@ -325,13 +320,7 @@ where
    // Read account from database
    let mut account: Account = db
        .basic(address)
-        .map_err(|db_err| {
-            anyhow!(
-                "Error increasing account balance for {}: {:?}",
-                address,
-                db_err
-            )
-        })?
+        .map_err(|db_err| anyhow!("Error increasing account balance for {address}: {db_err:?}"))?
        .unwrap_or_default()
        .into();
    // Credit withdrawal amount
diff --git a/lib/src/builder/execute/optimism.rs b/lib/src/builder/execute/optimism.rs
index b72b39a1..f31bc2f7 100644
--- a/lib/src/builder/execute/optimism.rs
+++ b/lib/src/builder/execute/optimism.rs
@@ -70,8 +70,8 @@ impl TxExecStrategy for OpTxExecStrategy {
            .unwrap();

        trace!("Block no. {}", header.number);
-        trace!(" EVM spec ID: {:?}", spec_id);
-        trace!(" Timestamp: {}", dt);
+        trace!(" EVM spec ID: {spec_id:?}");
+        trace!(" Timestamp: {dt}");
        trace!(
            " Transactions: {}",
            block_builder.input.state_input.transactions.len()
@@ -95,7 +95,7 @@ impl TxExecStrategy for OpTxExecStrategy {
            .with_spec_id(spec_id)
            .modify_block_env(|blk_env| {
                // set the EVM block environment
-                blk_env.number = header.number.try_into().unwrap();
+                blk_env.number = U256::from(header.number);
                blk_env.coinbase = block_builder.input.state_input.beneficiary;
                blk_env.timestamp = header.timestamp;
                blk_env.difficulty = U256::ZERO;
@@ -129,9 +129,9 @@ impl TxExecStrategy for OpTxExecStrategy {
            #[cfg(not(target_os = "zkvm"))]
            {
                let tx_hash = tx.hash();
-                trace!("Tx no. {} (hash: {})", tx_no, tx_hash);
+                trace!("Tx no. {tx_no} (hash: {tx_hash})");
                trace!(" Type: {}", tx.essence.tx_type());
-                trace!(" Fr: {:?}", tx_from);
+                trace!(" Fr: {tx_from:?}");
                trace!(" To: {:?}", tx.essence.to().unwrap_or_default());
            }

@@ -139,7 +139,7 @@ impl TxExecStrategy for OpTxExecStrategy {
            let block_available_gas =
                block_builder.input.state_input.gas_limit - cumulative_gas_used;
            if block_available_gas < tx.essence.gas_limit() {
-                bail!("Error at transaction {}: gas exceeds block limit", tx_no);
+                bail!("Error at transaction {tx_no}: gas exceeds block limit");
            }

            // cache account nonce if the transaction is a deposit, starting with Canyon
@@ -176,15 +176,15 @@ impl TxExecStrategy for OpTxExecStrategy {
            // process the transaction
            let ResultAndState { result, state } = evm
                .transact()
-                .map_err(|evm_err| anyhow!("Error at transaction {}: {:?}", tx_no, evm_err))
+                .map_err(|evm_err| anyhow!("Error at transaction {tx_no}: {evm_err:?}"))
                // todo: change unrecoverable panic to host-side recoverable `Result`
                .expect("Block construction failure.");

-            let gas_used = result.gas_used().try_into().unwrap();
+            let gas_used = U256::from(result.gas_used());
            cumulative_gas_used = cumulative_gas_used.checked_add(gas_used).unwrap();

            #[cfg(not(target_os = "zkvm"))]
-            trace!(" Ok: {:?}", result);
+            trace!(" Ok: {result:?}");

            // create the receipt from the EVM result
            let mut receipt = Receipt::new(
@@ -203,8 +203,7 @@ impl TxExecStrategy for OpTxExecStrategy {
                if account.is_touched() {
                    // log account
                    trace!(
-                        " State {:?} (is_selfdestructed={}, is_loaded_as_not_existing={}, is_created={})",
-                        address,
+                        " State {address:?} (is_selfdestructed={}, is_loaded_as_not_existing={}, is_created={})",
                        account.is_selfdestructed(),
                        account.is_loaded_as_not_existing(),
                        account.is_created()
@@ -219,7 +218,7 @@ impl TxExecStrategy for OpTxExecStrategy {
                    // log state changes
                    for (addr, slot) in &account.storage {
                        if slot.is_changed() {
-                            trace!(" Storage address: {:?}", addr);
+                            trace!(" Storage address: {addr:?}");
                            trace!(" Before: {:?}", slot.original_value());
                            trace!(" After: {:?}", slot.present_value());
                        }
@@ -249,11 +248,7 @@ impl TxExecStrategy for OpTxExecStrategy {
        header.receipts_root = receipt_trie.hash();
        header.logs_bloom = logs_bloom;
        header.gas_used = cumulative_gas_used;
-        header.withdrawals_root = if spec_id < SpecId::CANYON {
-            None
-        } else {
-            Some(EMPTY_ROOT)
-        };
+        header.withdrawals_root = (spec_id >= SpecId::CANYON).then_some(EMPTY_ROOT);

        // Leak memory, save cycles
        guest_mem_forget([tx_trie, receipt_trie]);
diff --git a/lib/src/builder/prepare.rs b/lib/src/builder/prepare.rs
index e8a4450d..6ad96635 100644
--- a/lib/src/builder/prepare.rs
+++ b/lib/src/builder/prepare.rs
@@ -51,16 +51,14 @@ impl HeaderPrepStrategy for EthHeaderPrepStrategy {
            block_builder.input.state_input.parent_header.gas_limit / GAS_LIMIT_BOUND_DIVISOR;
        if diff >= limit {
            bail!(
-                "Invalid gas limit: expected {} +- {}, got {}",
+                "Invalid gas limit: expected {} +- {limit}, got {}",
                block_builder.input.state_input.parent_header.gas_limit,
-                limit,
                block_builder.input.state_input.gas_limit,
            );
        }
        if block_builder.input.state_input.gas_limit < MIN_GAS_LIMIT {
            bail!(
-                "Invalid gas limit: expected >= {}, got {}",
-                MIN_GAS_LIMIT,
+                "Invalid gas limit: expected >= {MIN_GAS_LIMIT}, got {}",
                block_builder.input.state_input.gas_limit,
            );
        }
@@ -76,11 +74,7 @@ impl HeaderPrepStrategy for EthHeaderPrepStrategy {
        // Validate extra data
        let extra_data_bytes = block_builder.input.state_input.extra_data.len();
        if extra_data_bytes > MAX_EXTRA_DATA_BYTES {
-            bail!(
-                "Invalid extra data: expected <= {}, got {}",
-                MAX_EXTRA_DATA_BYTES,
-                extra_data_bytes,
-            )
+            bail!("Invalid extra data: expected <= {MAX_EXTRA_DATA_BYTES}, got {extra_data_bytes}",)
        }
        // Validate number
        let parent_number = block_builder.input.state_input.parent_header.number;
@@ -92,7 +86,7 @@ impl HeaderPrepStrategy for EthHeaderPrepStrategy {
        let spec_id = block_builder
            .chain_spec
            .active_fork(number, &timestamp)
-            .unwrap_or_else(|err| panic!("Invalid version: {:#}", err));
+            .unwrap_or_else(|err| panic!("Invalid version: {err:#}"));
        block_builder.spec_id = Some(spec_id);
        // Derive header
        block_builder.header = Some(Header {
diff --git a/lib/src/host/mpt.rs b/lib/src/host/mpt.rs
index 4cf10465..c206f45a 100644
--- a/lib/src/host/mpt.rs
+++ b/lib/src/host/mpt.rs
@@ -42,18 +42,18 @@ pub fn mpt_from_proof(proof_nodes: &[MptNode]) -> Result<MptNode> {
        // find the child that references the next node
        let resolved: MptNode = match node.as_data().clone() {
            MptNodeData::Branch(mut children) => {
-                if let Some(child) = children.iter_mut().flatten().find(
+                let Some(child) = children.iter_mut().flatten().find(
                    |child| matches!(child.as_data(), MptNodeData::Digest(d) if d == child_ref),
-                ) {
-                    *child = Box::new(replacement);
-                } else {
-                    bail!("node {} does not reference the successor", i);
-                }
+                ) else {
+                    bail!("node {i} does not reference the successor");
+                };
+                *child = Box::new(replacement);
+
                MptNodeData::Branch(children).into()
            }
            MptNodeData::Extension(prefix, child) => {
                if !matches!(child.as_data(), MptNodeData::Digest(d) if d == child_ref) {
-                    bail!("node {} does not reference the successor", i);
+                    bail!("node {i} does not reference the successor");
                }
                MptNodeData::Extension(prefix, Box::new(replacement)).into()
            }
diff --git a/lib/src/host/preflight.rs b/lib/src/host/preflight.rs
index e64f38dc..a47455ca 100644
--- a/lib/src/host/preflight.rs
+++ b/lib/src/host/preflight.rs
@@ -309,7 +309,7 @@ fn proofs_to_tries(
        let fini_proofs = proofs
            .get(&address)
-            .with_context(|| format!("missing fini_proofs for address {:#}", &address))?;
+            .with_context(|| format!("missing fini_proofs for address {address:#}"))?;

        // assure that addresses can be deleted from the state trie
        add_orphaned_leafs(address, &fini_proofs.account_proof, &mut state_nodes)?;
@@ -366,15 +366,17 @@ fn add_orphaned_leafs(
    proof: &[impl AsRef<[u8]>],
    nodes_by_reference: &mut HashMap<MptNodeReference, MptNode>,
 ) -> Result<()> {
-    if !proof.is_empty() {
-        let proof_nodes = parse_proof(proof).context("invalid proof encoding")?;
-        if is_not_included(&keccak(key), &proof_nodes)? {
-            // add the leaf node to the nodes
-            let leaf = proof_nodes.last().unwrap();
-            shorten_node_path(leaf).into_iter().for_each(|node| {
-                nodes_by_reference.insert(node.reference(), node);
-            });
-        }
+    if proof.is_empty() {
+        return Ok(());
+    }
+
+    let proof_nodes = parse_proof(proof).context("invalid proof encoding")?;
+    if is_not_included(&keccak(key), &proof_nodes)? {
+        // add the leaf node to the nodes
+        let leaf = proof_nodes.last().unwrap();
+        shorten_node_path(leaf).into_iter().for_each(|node| {
+            nodes_by_reference.insert(node.reference(), node);
+        });
    }

    Ok(())
diff --git a/lib/src/host/provider/file_provider.rs b/lib/src/host/provider/file_provider.rs
index 63307903..7ecb015b 100644
--- a/lib/src/host/provider/file_provider.rs
+++ b/lib/src/host/provider/file_provider.rs
@@ -144,56 +144,56 @@ impl Provider for FileProvider {
    fn get_full_block(&mut self, query: &BlockQuery) -> Result<Block<Transaction>> {
        match self.full_blocks.get(query) {
            Some(val) => Ok(val.clone()),
-            None => Err(anyhow!("No data for {:?}", query)),
+            None => Err(anyhow!("No data for {query:?}")),
        }
    }

    fn get_partial_block(&mut self, query: &BlockQuery) -> Result<Block<H256>> {
        match self.partial_blocks.get(query) {
            Some(val) => Ok(val.clone()),
-            None => Err(anyhow!("No data for {:?}", query)),
+            None => Err(anyhow!("No data for {query:?}")),
        }
    }

    fn get_block_receipts(&mut self, query: &BlockQuery) -> Result<Vec<TransactionReceipt>> {
        match self.receipts.get(query) {
            Some(val) => Ok(val.clone()),
-            None => Err(anyhow!("No data for {:?}", query)),
+            None => Err(anyhow!("No data for {query:?}")),
        }
    }

    fn get_proof(&mut self, query: &ProofQuery) -> Result<EIP1186ProofResponse> {
        match self.proofs.get(query) {
            Some(val) => Ok(val.clone()),
-            None => Err(anyhow!("No data for {:?}", query)),
+            None => Err(anyhow!("No data for {query:?}")),
        }
    }

    fn get_transaction_count(&mut self, query: &AccountQuery) -> Result<U256> {
        match self.transaction_count.get(query) {
            Some(val) => Ok(*val),
-            None => Err(anyhow!("No data for {:?}", query)),
+            None => Err(anyhow!("No data for {query:?}")),
        }
    }

    fn get_balance(&mut self, query: &AccountQuery) -> Result<U256> {
        match self.balance.get(query) {
            Some(val) => Ok(*val),
-            None => Err(anyhow!("No data for {:?}", query)),
+            None => Err(anyhow!("No data for {query:?}")),
        }
    }

    fn get_code(&mut self, query: &AccountQuery) -> Result<Bytes> {
        match self.code.get(query) {
            Some(val) => Ok(val.clone()),
-            None => Err(anyhow!("No data for {:?}", query)),
+            None => Err(anyhow!("No data for {query:?}")),
        }
    }

    fn get_storage(&mut self, query: &StorageQuery) -> Result<H256> {
        match self.storage.get(query) {
            Some(val) => Ok(*val),
-            None => Err(anyhow!("No data for {:?}", query)),
+            None => Err(anyhow!("No data for {query:?}")),
        }
    }
 }
diff --git a/lib/src/host/provider/rpc_provider.rs b/lib/src/host/provider/rpc_provider.rs
index 3bd5756c..1976720c 100644
--- a/lib/src/host/provider/rpc_provider.rs
+++ b/lib/src/host/provider/rpc_provider.rs
@@ -45,7 +45,7 @@ impl Provider for RpcProvider {
    }

    fn get_full_block(&mut self, query: &BlockQuery) -> Result<Block<Transaction>> {
-        debug!("Querying RPC for full block: {:?}", query);
+        debug!("Querying RPC for full block: {query:?}");

        let response = self
            .tokio_handle
@@ -53,12 +53,12 @@ impl Provider for RpcProvider {

        match response {
            Some(out) => Ok(out),
-            None => Err(anyhow!("No data for {:?}", query)),
+            None => Err(anyhow!("No data for {query:?}")),
        }
    }

    fn get_partial_block(&mut self, query: &BlockQuery) -> Result<Block<H256>> {
-        debug!("Querying RPC for partial block: {:?}", query);
+        debug!("Querying RPC for partial block: {query:?}");

        let response = self
            .tokio_handle
@@ -66,12 +66,12 @@ impl Provider for RpcProvider {

        match response {
            Some(out) => Ok(out),
-            None => Err(anyhow!("No data for {:?}", query)),
+            None => Err(anyhow!("No data for {query:?}")),
        }
    }

    fn get_block_receipts(&mut self, query: &BlockQuery) -> Result<Vec<TransactionReceipt>> {
-        debug!("Querying RPC for block receipts: {:?}", query);
+        debug!("Querying RPC for block receipts: {query:?}");

        let response = self
            .tokio_handle
@@ -81,7 +81,7 @@ impl Provider for RpcProvider {
    }

    fn get_proof(&mut self, query: &ProofQuery) -> Result<EIP1186ProofResponse> {
-        debug!("Querying RPC for inclusion proof: {:?}", query);
+        debug!("Querying RPC for inclusion proof: {query:?}");

        let out = self.tokio_handle.block_on(self.http_client.get_proof(
            query.address,
@@ -93,7 +93,7 @@ impl Provider for RpcProvider {
    }

    fn get_transaction_count(&mut self, query: &AccountQuery) -> Result<U256> {
-        debug!("Querying RPC for transaction count: {:?}", query);
+        debug!("Querying RPC for transaction count: {query:?}");

        let out = self.tokio_handle.block_on(
            self.http_client
@@ -104,7 +104,7 @@ impl Provider for RpcProvider {
    }

    fn get_balance(&mut self, query: &AccountQuery) -> Result<U256> {
-        debug!("Querying RPC for balance: {:?}", query);
+        debug!("Querying RPC for balance: {query:?}");

        let out = self.tokio_handle.block_on(
            self.http_client
@@ -115,7 +115,7 @@ impl Provider for RpcProvider {
    }

    fn get_code(&mut self, query: &AccountQuery) -> Result<Bytes> {
-        debug!("Querying RPC for code: {:?}", query);
+        debug!("Querying RPC for code: {query:?}");

        let out = self.tokio_handle.block_on(
            self.http_client
@@ -126,7 +126,7 @@ impl Provider for RpcProvider {
    }

    fn get_storage(&mut self, query: &StorageQuery) -> Result<H256> {
-        debug!("Querying RPC for storage: {:?}", query);
+        debug!("Querying RPC for storage: {query:?}");

        let out = self.tokio_handle.block_on(self.http_client.get_storage_at(
            query.address,
diff --git a/lib/src/host/verify.rs b/lib/src/host/verify.rs
index 391961e6..002d9ae9 100644
--- a/lib/src/host/verify.rs
+++ b/lib/src/host/verify.rs
@@ -70,12 +70,11 @@ impl Verifier for preflight::Data {
        for (address, address_errors) in &errors {
            error!(
-                "Verify found {:?} error(s) for address {:?}",
+                "Verify found {:?} error(s) for address {address:?}",
                address_errors.len(),
-                address
            );
            for error in address_errors {
-                error!(" Error: {:?}", error);
+                error!(" Error: {error:?}");
            }
        }

@@ -83,8 +82,7 @@ impl Verifier for preflight::Data {
        let errors_len = errors.len();
        if errors_len > 0 {
            error!(
-                "Verify found {:?} account(s) with error(s) ({}% correct)",
-                errors_len,
+                "Verify found {errors_len:?} account(s) with error(s) ({}% correct)",
                (100.0 * (accounts_len - errors_len) as f64 / accounts_len as f64)
            );
        }
@@ -132,10 +130,7 @@ fn verify_header(header: &Header, exp_header: &Header) -> Result<()> {
    let found_hash = header.hash();
    let expected_hash = exp_header.hash();
    if found_hash.as_slice() != expected_hash.as_slice() {
-        error!(
-            "Final block hash mismatch {} (expected {})",
-            found_hash, expected_hash,
-        );
+        error!("Final block hash mismatch {found_hash} (expected {expected_hash})",);
        bail!("Invalid block hash");
    }

@@ -223,7 +218,7 @@ fn verify_state_trie(
                address_errors.push(VerifyError::UnresolvedAccount);
            }
            Err(err) => {
-                bail!("Error while fetching account {:?}: {:?}", address, err);
+                bail!("Error while fetching account {address:?}: {err:?}");
            }
        }

diff --git a/lib/src/mem_db.rs b/lib/src/mem_db.rs
index b84b88c6..2f2d5f92 100644
--- a/lib/src/mem_db.rs
+++ b/lib/src/mem_db.rs
@@ -91,12 +91,11 @@ impl MemDb {
    }

    pub fn storage_keys(&self) -> HashMap<Address, Vec<U256>> {
-        let mut out = HashMap::new();
-        for (address, account) in &self.accounts {
-            out.insert(*address, account.storage.keys().cloned().collect());
-        }
-
-        out
+        HashMap::from_iter(
+            self.accounts
+                .iter()
+                .map(|(address, account)| (*address, account.storage.keys().cloned().collect())),
+        )
    }

    /// Insert account info without overriding its storage.
diff --git a/lib/src/optimism/batcher.rs b/lib/src/optimism/batcher.rs
index 0166f58e..f11fb19e 100644
--- a/lib/src/optimism/batcher.rs
+++ b/lib/src/optimism/batcher.rs
@@ -96,12 +96,12 @@ impl State {
    fn deque_next_epoch_if_none(&mut self) -> Result<()> {
        if self.next_epoch.is_none() {
            while let Some(next_epoch) = self.op_epoch_queue.pop_front() {
-                if next_epoch.number <= self.epoch.number {
-                    continue;
-                } else if next_epoch.number == self.epoch.number + 1 {
+                if next_epoch.number == self.epoch.number + 1 {
                    self.next_epoch = Some(next_epoch);
                    break;
-                } else {
+                }
+
+                if next_epoch.number > self.epoch.number + 1 {
                    bail!("Epoch gap!");
                }
            }
@@ -336,18 +336,16 @@ impl Batcher {
            Ordering::Greater => {
                #[cfg(not(target_os = "zkvm"))]
                log::trace!(
-                    "Future batch: {} = batch.timestamp > next_timestamp = {}",
+                    "Future batch: {} = batch.timestamp > next_timestamp = {next_timestamp}",
                    &batch.essence.timestamp,
-                    &next_timestamp
                );
                return BatchStatus::Future;
            }
            Ordering::Less => {
                #[cfg(not(target_os = "zkvm"))]
                log::trace!(
-                    "Batch too old: {} = batch.timestamp < next_timestamp = {}",
+                    "Batch too old: {} = batch.timestamp < next_timestamp = {next_timestamp}",
                    &batch.essence.timestamp,
-                    &next_timestamp
                );
                return BatchStatus::Drop;
            }
@@ -391,29 +389,29 @@ impl Batcher {
            return BatchStatus::Drop;
        }

-        let batch_origin = if batch.essence.epoch_num == epoch.number {
+        let batch_origin = match batch.essence.epoch_num {
            // From the spec:
            // "batch.epoch_num == epoch.number: define batch_origin as epoch"
-            epoch
-        } else if batch.essence.epoch_num == epoch.number + 1 {
+            n if n == epoch.number => epoch,
            // From the spec:
            // "batch.epoch_num == epoch.number+1:"
            // " If known, then define batch_origin as next_epoch"
            // " If next_epoch is not known -> undecided"
-            match next_epoch {
+            n if n == epoch.number + 1 => match next_epoch {
                Some(epoch) => epoch,
                None => return BatchStatus::Undecided,
-            }
-        } else {
+            },
            // From the spec:
            // "batch.epoch_num > epoch.number+1 -> drop"
-            #[cfg(not(target_os = "zkvm"))]
-            log::warn!(
-                "Batch epoch number is too large: {} > {}",
-                batch.essence.epoch_num,
-                epoch.number + 1
-            );
-            return BatchStatus::Drop;
+            _ => {
+                #[cfg(not(target_os = "zkvm"))]
+                log::warn!(
+                    "Batch epoch number is too large: {} > {}",
+                    batch.essence.epoch_num,
+                    epoch.number + 1
+                );
+                return BatchStatus::Drop;
+            }
        };

        // From the spec:
@@ -465,20 +463,20 @@ impl Batcher {
                // epoch.number == batch.epoch_num: this implies the batch does not already
                // advance the L1 origin, and must thus be checked against next_epoch."
                if epoch.number == batch.essence.epoch_num {
-                    if let Some(next_epoch) = next_epoch {
-                        // From the spec:
-                        // "If batch.timestamp >= next_epoch.time -> drop"
-                        if batch.essence.timestamp >= next_epoch.timestamp {
-                            #[cfg(not(target_os = "zkvm"))]
-                            log::warn!("Sequencer drift detected; drop; batch timestamp is too far into the future. {} >= {}", batch.essence.timestamp, next_epoch.timestamp);
-                            return BatchStatus::Drop;
-                        }
-                    } else {
+                    let Some(next_epoch) = next_epoch else {
                        // From the spec:
                        // "If next_epoch is not known -> undecided"
                        #[cfg(not(target_os = "zkvm"))]
                        log::debug!("Sequencer drift detected, but next epoch is not known; undecided");
                        return BatchStatus::Undecided;
+                    };
+
+                    // From the spec:
+                    // "If batch.timestamp >= next_epoch.time -> drop"
+                    if batch.essence.timestamp >= next_epoch.timestamp {
+                        #[cfg(not(target_os = "zkvm"))]
+                        log::warn!("Sequencer drift detected; drop; batch timestamp is too far into the future. {} >= {}", batch.essence.timestamp, next_epoch.timestamp);
+                        return BatchStatus::Drop;
                    }
                }
            }
diff --git a/lib/src/optimism/batcher_channel.rs b/lib/src/optimism/batcher_channel.rs
index cf8c14df..478ee8ef 100644
--- a/lib/src/optimism/batcher_channel.rs
+++ b/lib/src/optimism/batcher_channel.rs
@@ -85,10 +85,7 @@ impl BatcherChannels {
                Ok(frames) => frames,
                Err(_err) => {
                    #[cfg(not(target_os = "zkvm"))]
-                    log::warn!(
-                        "failed to decode all frames; skip entire batcher tx: {:#}",
-                        _err
-                    );
+                    log::warn!("failed to decode all frames; skip entire batcher tx: {_err:#}");
                    continue;
                }
            };
@@ -165,9 +162,10 @@ impl BatcherChannels {
                #[cfg(not(target_os = "zkvm"))]
                log::warn!("frame's channel is timed out; ignored");
                return;
-            } else if let Err(_err) = channel.add_frame(frame) {
+            }
+            if let Err(_err) = channel.add_frame(frame) {
                #[cfg(not(target_os = "zkvm"))]
-                log::warn!("failed to add frame to channel; ignored: {:#}", _err);
+                log::warn!("failed to add frame to channel; ignored: {_err:#}");
                return;
            }
        }
@@ -309,10 +307,7 @@ impl Channel {
        let mut batches = Vec::new();
        if let Err(_err) = self.decode_batches(block_number, &mut batches) {
            #[cfg(not(target_os = "zkvm"))]
-            log::warn!(
-                "failed to decode all batches; skipping rest of channel: {:#}",
-                _err
-            );
+            log::warn!("failed to decode all batches; skipping rest of channel: {_err:#}");
        }

        batches
@@ -385,7 +380,7 @@ impl Frame {
            .data()
            .split_first()
            .context("empty transaction data")?;
-        ensure!(version == &0, "invalid transaction version: {}", version);
+        ensure!(version == &0, "invalid transaction version: {version}");

        let mut frames = Vec::new();
        while !rollup_payload.is_empty() {
diff --git a/lib/src/optimism/deposits.rs b/lib/src/optimism/deposits.rs
index 76804f72..e2d4e3f1 100644
--- a/lib/src/optimism/deposits.rs
+++ b/lib/src/optimism/deposits.rs
@@ -89,10 +89,7 @@ pub fn can_contain(address: &Address, bloom: &Bloom) -> bool {
        return false;
    }
    let input = BloomInput::Raw(TRANSACTION_DEPOSITED_SIGNATURE.as_slice());
-    if !bloom.contains_input(input) {
-        return false;
-    }
-    true
+    bloom.contains_input(input)
 }

 /// Converts a deposit log into a transaction.
diff --git a/lib/src/optimism/mod.rs b/lib/src/optimism/mod.rs
index 9ea5f86a..5f7d2b57 100644
--- a/lib/src/optimism/mod.rs
+++ b/lib/src/optimism/mod.rs
@@ -142,9 +142,8 @@ impl DeriveMachine {
        #[cfg(not(target_os = "zkvm"))]
        log::debug!(
-            "Fetched Op head (block no {}) {}",
+            "Fetched Op head (block no {}) {op_head_block_hash}",
            derive_input.op_head_block_no,
-            op_head_block_hash
        );

        // the first transaction in a block MUST be a L1 attributes deposited transaction
@@ -154,10 +153,7 @@ impl DeriveMachine {
            .context("block is empty")?
            .essence;
        if let Err(err) = validate_l1_attributes_deposited_tx(&chain_config, l1_attributes_tx) {
-            bail!(
-                "First transaction in block is not a valid L1 attributes deposited transaction: {}",
-                err
-            )
+            bail!("First transaction in block is not a valid L1 attributes deposited transaction: {err}");
        }
        // decode the L1 attributes deposited transaction
        let set_l1_block_values = {
@@ -179,8 +175,7 @@ impl DeriveMachine {
        );
        #[cfg(not(target_os = "zkvm"))]
        log::debug!(
-            "Fetched Eth head (block no {}) {}",
-            eth_block_no,
+            "Fetched Eth head (block no {eth_block_no}) {}",
            set_l1_block_values.hash
        );
@@ -312,7 +307,7 @@ impl DeriveMachine {
                    }
                    Err(_err) => {
                        #[cfg(not(target_os = "zkvm"))]
-                        log::warn!("Skipping undecodable transaction: {:#}", _err);
+                        log::warn!("Skipping undecodable transaction: {_err:#}");
                        decoding_error = true;
                        break;
                    }
@@ -425,9 +420,8 @@ impl DeriveMachine {
            // obtain verified op block header
            #[cfg(not(target_os = "zkvm"))]
            log::info!(
-                "Derived Op block {} w/ hash {}",
+                "Derived Op block {} w/ hash {new_block_hash}",
                new_block_head.number,
-                new_block_hash
            );

            self.op_batcher.state.safe_head = L2BlockInfo {
diff --git a/lib/src/optimism/system_config.rs b/lib/src/optimism/system_config.rs
index d5574165..81112432 100644
--- a/lib/src/optimism/system_config.rs
+++ b/lib/src/optimism/system_config.rs
@@ -142,8 +142,5 @@ pub fn can_contain(address: &Address, bloom: &Bloom) -> bool {
        return false;
    }
    let input = BloomInput::Raw(CONFIG_UPDATE_SIGNATURE.as_slice());
-    if !bloom.contains_input(input) {
-        return false;
-    }
-    true
+    bloom.contains_input(input)
 }