From bd30ee29e33e3bc938633696e5bab90c05a02192 Mon Sep 17 00:00:00 2001 From: AntonD3 Date: Thu, 12 Dec 2024 16:56:11 +0100 Subject: [PATCH 1/5] Support l1 -> l2 transactions --- core/bin/zksync_server/src/main.rs | 2 +- core/lib/mempool/src/mempool_store.rs | 12 ++++++-- .../src/implementations/layers/eth_watch.rs | 1 + .../zkos_vm_runner/src/zkos_conversions.rs | 30 ++++++++++++++++--- 4 files changed, 38 insertions(+), 7 deletions(-) diff --git a/core/bin/zksync_server/src/main.rs b/core/bin/zksync_server/src/main.rs index 509900fcdd6a..2ebc22133b25 100644 --- a/core/bin/zksync_server/src/main.rs +++ b/core/bin/zksync_server/src/main.rs @@ -44,7 +44,7 @@ struct Cli { /// Comma-separated list of components to launch. #[arg( long, - default_value = "api,state_keeper" + default_value = "api,eth,state_keeper" )] components: ComponentsToRun, /// Path to the yaml config. If set, it will be used instead of env vars. diff --git a/core/lib/mempool/src/mempool_store.rs b/core/lib/mempool/src/mempool_store.rs index b3207c75f2a2..70176b456dd1 100644 --- a/core/lib/mempool/src/mempool_store.rs +++ b/core/lib/mempool/src/mempool_store.rs @@ -154,8 +154,16 @@ impl MempoolStore { pub fn next_transaction( &mut self, filter: &L2TxFilter, - ) -> Option<(Transaction, TransactionTimeRangeConstraint)> { // todo: ignore prio txs for now - // todo: priority transactions + ) -> Option<(Transaction, TransactionTimeRangeConstraint)> { + if let Some(transaction) = self.l1_transactions.remove(&self.next_priority_id) { + self.next_priority_id += 1; + // L1 transactions can't use block.timestamp in AA and hence do not need to have a constraint + return Some(( + transaction.into(), + TransactionTimeRangeConstraint::default(), + )); + } + let mut removed = 0; // We want to fetch the next transaction that would match the fee requirements. let tx_pointer = self diff --git a/core/node/node_framework/src/implementations/layers/eth_watch.rs b/core/node/node_framework/src/implementations/layers/eth_watch.rs index e19828d85ccd..48d8810a95e9 100644 --- a/core/node/node_framework/src/implementations/layers/eth_watch.rs +++ b/core/node/node_framework/src/implementations/layers/eth_watch.rs @@ -59,6 +59,7 @@ impl WiringLayer for EthWatchLayer { let main_pool = input.master_pool.get().await?; let client = input.eth_client.0; + println!("{:?}", self.contracts_config); let eth_client = EthHttpQueryClient::new( client, self.contracts_config.diamond_proxy_addr, diff --git a/core/node/zkos_vm_runner/src/zkos_conversions.rs b/core/node/zkos_vm_runner/src/zkos_conversions.rs index eb054e7a2d27..0d24423a27ba 100644 --- a/core/node/zkos_vm_runner/src/zkos_conversions.rs +++ b/core/node/zkos_vm_runner/src/zkos_conversions.rs @@ -92,8 +92,6 @@ impl From for TransactionData { U256::zero() }; - // todo: second `reserved` value should be non-zero for deployment tx - // Ethereum transactions do not sign gas per pubdata limit, and so for them we need to use // some default value. We use the maximum possible value that is allowed by the bootloader // (i.e. 
we can not use u64::MAX, because the bootloader requires gas per pubdata for such @@ -137,8 +135,32 @@ impl From for TransactionData { raw_bytes: execute_tx.raw_bytes.map(|a| a.0), } } - ExecuteTransactionCommon::L1(_) => { - unimplemented!("l1 transactions are not supported for zk os") + ExecuteTransactionCommon::L1(common_data) => { + // TODO: cleanup - double check gas fields, and sender, use constant for tx type + TransactionData { + tx_type: 255, + from: common_data.sender, + to: execute_tx.execute.contract_address, + gas_limit: common_data.gas_limit, + pubdata_price_limit: common_data.gas_per_pubdata_limit, + max_fee_per_gas: common_data.max_fee_per_gas, + max_priority_fee_per_gas: U256::zero(), + paymaster: Address::zero(), + nonce: U256::zero(), + value: execute_tx.execute.value, + reserved: [ + common_data.to_mint, + U256::from_big_endian(common_data.refund_recipient.as_bytes()), + U256::zero(), + U256::zero(), + ], + data: execute_tx.execute.calldata, + signature: vec![], + factory_deps: execute_tx.execute.factory_deps, + paymaster_input: vec![], + reserved_dynamic: vec![], + raw_bytes: execute_tx.raw_bytes.map(|a| a.0), + } } ExecuteTransactionCommon::ProtocolUpgrade(_) => { unreachable!() From 0ce6ea5d293a6aca111a6d0554faa8ef2d5820d1 Mon Sep 17 00:00:00 2001 From: AntonD3 Date: Thu, 12 Dec 2024 16:57:58 +0100 Subject: [PATCH 2/5] Remove debug println --- core/node/node_framework/src/implementations/layers/eth_watch.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/core/node/node_framework/src/implementations/layers/eth_watch.rs b/core/node/node_framework/src/implementations/layers/eth_watch.rs index 48d8810a95e9..e19828d85ccd 100644 --- a/core/node/node_framework/src/implementations/layers/eth_watch.rs +++ b/core/node/node_framework/src/implementations/layers/eth_watch.rs @@ -59,7 +59,6 @@ impl WiringLayer for EthWatchLayer { let main_pool = input.master_pool.get().await?; let client = input.eth_client.0; - println!("{:?}", self.contracts_config); let eth_client = EthHttpQueryClient::new( client, self.contracts_config.diamond_proxy_addr, From 017d738b17875038d2b00db038d7e40c846f85a5 Mon Sep 17 00:00:00 2001 From: AntonD3 Date: Tue, 17 Dec 2024 16:51:47 +0100 Subject: [PATCH 3/5] Update contracts --- contracts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contracts b/contracts index 46d75088e7dd..5dc2f80e268a 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 46d75088e7ddb534101874c3ec15b877da1cb417 +Subproject commit 5dc2f80e268a24a3730fa13c5e9496a3bf176dbb From 6444d4646ddcac73c3cba1d2447c4414041d5897 Mon Sep 17 00:00:00 2001 From: AntonD3 Date: Wed, 18 Dec 2024 13:05:41 +0100 Subject: [PATCH 4/5] l1 settlement works with mock data --- contracts | 2 +- core/lib/dal/sqlx-data.json | 3 + core/lib/dal/src/blocks_dal.rs | 26 +++--- core/lib/dal/src/models/storage_block.rs | 88 ++++++++++++++++--- core/lib/eth_client/src/clients/http/query.rs | 1 + .../structures/commit_batch_info.rs | 6 +- core/node/eth_sender/src/eth_tx_aggregator.rs | 3 +- core/node/eth_sender/src/eth_tx_manager.rs | 1 + etc/env/file_based/genesis.yaml | 7 +- 9 files changed, 110 insertions(+), 27 deletions(-) create mode 100644 core/lib/dal/sqlx-data.json diff --git a/contracts b/contracts index 5dc2f80e268a..cbcbd2af24dc 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 5dc2f80e268a24a3730fa13c5e9496a3bf176dbb +Subproject commit cbcbd2af24dc28ba88f7ee3428ce21a54b855a4f diff --git a/core/lib/dal/sqlx-data.json b/core/lib/dal/sqlx-data.json new file 
mode 100644 index 000000000000..95c8c858baaf --- /dev/null +++ b/core/lib/dal/sqlx-data.json @@ -0,0 +1,3 @@ +{ + "db": "PostgreSQL" +} \ No newline at end of file diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index 9db5264c425b..3e038d9e826b 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -27,6 +27,7 @@ use zksync_types::{ writes::TreeWrite, Address, Bloom, L1BatchNumber, L2BlockNumber, ProtocolVersionId, H256, U256, }; +use zksync_types::commitment::L1BatchMetadata; use zksync_vm_interface::CircuitStatistic; pub use crate::models::storage_block::{L1BatchMetadataError, L1BatchWithOptionalMetadata}; @@ -1338,7 +1339,9 @@ impl BlocksDal<'_, '_> { WHERE number = 0 OR eth_commit_tx_id IS NOT NULL + /* TODO(zk os): uncomment/update for zk os AND commitment IS NOT NULL + */ ORDER BY number DESC LIMIT @@ -1348,10 +1351,11 @@ impl BlocksDal<'_, '_> { .instrument("get_last_committed_to_eth_l1_batch") .fetch_one(self.storage) .await?; - // genesis batch is first generated without commitment, we should wait for the tree to set it. - if batch.commitment.is_none() { - return Ok(None); - } + // TODO(zk os): uncomment/update for zk os + // // genesis batch is first generated without commitment, we should wait for the tree to set it. + // if batch.commitment.is_none() { + // return Ok(None); + // } self.map_storage_l1_batch(batch).await } @@ -1999,6 +2003,7 @@ impl BlocksDal<'_, '_> { WHERE eth_commit_tx_id IS NULL AND number != 0 + /* TODO(zk os): uncomment/update for zk os AND protocol_versions.bootloader_code_hash = $1 AND protocol_versions.default_account_code_hash = $2 AND commitment IS NOT NULL @@ -2011,16 +2016,16 @@ impl BlocksDal<'_, '_> { AND ( data_availability.inclusion_data IS NOT NULL OR $4 IS FALSE - ) + ) */ ORDER BY number LIMIT - $5 + $1 "#, - bootloader_hash.as_bytes(), - default_aa_hash.as_bytes(), - protocol_version_id as i32, - with_da_inclusion_info, + // bootloader_hash.as_bytes(), + // default_aa_hash.as_bytes(), + // protocol_version_id as i32, + // with_da_inclusion_info, limit as i64, ) .instrument("get_ready_for_commit_l1_batches") @@ -2164,6 +2169,7 @@ impl BlocksDal<'_, '_> { .await?; let Ok(metadata) = storage_batch.clone().try_into() else { + println!("{:?}", L1BatchMetadata::try_from(storage_batch.clone())); return Ok(None); }; diff --git a/core/lib/dal/src/models/storage_block.rs b/core/lib/dal/src/models/storage_block.rs index 159ed71cc3e9..810607d422e9 100644 --- a/core/lib/dal/src/models/storage_block.rs +++ b/core/lib/dal/src/models/storage_block.rs @@ -207,52 +207,52 @@ impl TryFrom for L1BatchMetadata { fn try_from(batch: StorageL1Batch) -> Result { Ok(Self { root_hash: H256::from_slice( - &batch.hash.ok_or(L1BatchMetadataError::Incomplete("hash"))?, + &batch.hash.unwrap_or(vec![0u8; 32]), ), rollup_last_leaf_index: batch .rollup_last_leaf_index - .ok_or(L1BatchMetadataError::Incomplete("rollup_last_leaf_index"))? 
+ .unwrap_or_default() as u64, initial_writes_compressed: batch.compressed_initial_writes, repeated_writes_compressed: batch.compressed_repeated_writes, l2_l1_merkle_root: H256::from_slice( &batch .l2_l1_merkle_root - .ok_or(L1BatchMetadataError::Incomplete("l2_l1_merkle_root"))?, + .unwrap_or(vec![0u8; 32]), ), aux_data_hash: H256::from_slice( &batch .aux_data_hash - .ok_or(L1BatchMetadataError::Incomplete("aux_data_hash"))?, + .unwrap_or(vec![0u8; 32]), ), meta_parameters_hash: H256::from_slice( &batch .meta_parameters_hash - .ok_or(L1BatchMetadataError::Incomplete("meta_parameters_hash"))?, + .unwrap_or(vec![0u8; 32]), ), pass_through_data_hash: H256::from_slice( &batch .pass_through_data_hash - .ok_or(L1BatchMetadataError::Incomplete("pass_through_data_hash"))?, + .unwrap_or(vec![0u8; 32]), ), commitment: H256::from_slice( &batch .commitment - .ok_or(L1BatchMetadataError::Incomplete("commitment"))?, + .unwrap_or(vec![0u8; 32]), ), block_meta_params: L1BatchMetaParameters { zkporter_is_available: batch .zkporter_is_available - .ok_or(L1BatchMetadataError::Incomplete("zkporter_is_available"))?, + .unwrap_or_default(), bootloader_code_hash: H256::from_slice( &batch .bootloader_code_hash - .ok_or(L1BatchMetadataError::Incomplete("bootloader_code_hash"))?, + .unwrap_or(vec![0u8; 32]), ), default_aa_code_hash: H256::from_slice( &batch .default_aa_code_hash - .ok_or(L1BatchMetadataError::Incomplete("default_aa_code_hash"))?, + .unwrap_or(vec![0u8; 32]), ), evm_emulator_code_hash: batch .evm_emulator_code_hash @@ -272,6 +272,74 @@ impl TryFrom for L1BatchMetadata { aggregation_root: batch.aggregation_root.map(|v| H256::from_slice(&v)), da_inclusion_data: batch.inclusion_data, }) + // TODO(zk os): uncomment, for now used mock data if not present to test + // Ok(Self { + // root_hash: H256::from_slice( + // &batch.hash.ok_or(L1BatchMetadataError::Incomplete("hash"))?, + // ), + // rollup_last_leaf_index: batch + // .rollup_last_leaf_index + // .ok_or(L1BatchMetadataError::Incomplete("rollup_last_leaf_index"))? 
+ // as u64, + // initial_writes_compressed: batch.compressed_initial_writes, + // repeated_writes_compressed: batch.compressed_repeated_writes, + // l2_l1_merkle_root: H256::from_slice( + // &batch + // .l2_l1_merkle_root + // .ok_or(L1BatchMetadataError::Incomplete("l2_l1_merkle_root"))?, + // ), + // aux_data_hash: H256::from_slice( + // &batch + // .aux_data_hash + // .ok_or(L1BatchMetadataError::Incomplete("aux_data_hash"))?, + // ), + // meta_parameters_hash: H256::from_slice( + // &batch + // .meta_parameters_hash + // .ok_or(L1BatchMetadataError::Incomplete("meta_parameters_hash"))?, + // ), + // pass_through_data_hash: H256::from_slice( + // &batch + // .pass_through_data_hash + // .ok_or(L1BatchMetadataError::Incomplete("pass_through_data_hash"))?, + // ), + // commitment: H256::from_slice( + // &batch + // .commitment + // .ok_or(L1BatchMetadataError::Incomplete("commitment"))?, + // ), + // block_meta_params: L1BatchMetaParameters { + // zkporter_is_available: batch + // .zkporter_is_available + // .ok_or(L1BatchMetadataError::Incomplete("zkporter_is_available"))?, + // bootloader_code_hash: H256::from_slice( + // &batch + // .bootloader_code_hash + // .ok_or(L1BatchMetadataError::Incomplete("bootloader_code_hash"))?, + // ), + // default_aa_code_hash: H256::from_slice( + // &batch + // .default_aa_code_hash + // .ok_or(L1BatchMetadataError::Incomplete("default_aa_code_hash"))?, + // ), + // evm_emulator_code_hash: batch + // .evm_emulator_code_hash + // .as_deref() + // .map(H256::from_slice), + // protocol_version: batch + // .protocol_version + // .map(|v| (v as u16).try_into().unwrap()), + // }, + // state_diffs_compressed: batch.compressed_state_diffs.unwrap_or_default(), + // events_queue_commitment: batch.events_queue_commitment.map(|v| H256::from_slice(&v)), + // bootloader_initial_content_commitment: batch + // .bootloader_initial_content_commitment + // .map(|v| H256::from_slice(&v)), + // state_diff_hash: batch.state_diff_hash.map(|v| H256::from_slice(&v)), + // local_root: batch.local_root.map(|v| H256::from_slice(&v)), + // aggregation_root: batch.aggregation_root.map(|v| H256::from_slice(&v)), + // da_inclusion_data: batch.inclusion_data, + // }) } } diff --git a/core/lib/eth_client/src/clients/http/query.rs b/core/lib/eth_client/src/clients/http/query.rs index de115cf6e7a6..9ac08f18d8e9 100644 --- a/core/lib/eth_client/src/clients/http/query.rs +++ b/core/lib/eth_client/src/clients/http/query.rs @@ -175,6 +175,7 @@ where let failure_info = match result { Err(err) => { if let ClientError::Call(call_err) = err.as_ref() { + println!("{:?}", err); let revert_code = call_err.code().into(); let message_len = "execution reverted: ".len().min(call_err.message().len()); diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs index 6438aeb7f55c..50f9b5a7288f 100644 --- a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs +++ b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs @@ -142,7 +142,8 @@ impl<'a> CommitBatchInfo<'a> { self.l1_batch_with_metadata .metadata .bootloader_initial_content_commitment - .unwrap() + // TODO(zk os): temporary default to test here + .unwrap_or_default() .as_bytes() .to_vec(), ), @@ -151,7 +152,8 @@ impl<'a> CommitBatchInfo<'a> { self.l1_batch_with_metadata .metadata .events_queue_commitment - .unwrap() + // TODO(zk os): temporary default to test here + .unwrap_or_default() 
.as_bytes() .to_vec(), ), diff --git a/core/node/eth_sender/src/eth_tx_aggregator.rs b/core/node/eth_sender/src/eth_tx_aggregator.rs index ac9ed4aaaadb..9d8179e900cc 100644 --- a/core/node/eth_sender/src/eth_tx_aggregator.rs +++ b/core/node/eth_sender/src/eth_tx_aggregator.rs @@ -568,7 +568,8 @@ impl EthTxAggregator { .header .pubdata_input .clone() - .unwrap() + // TODO(zk os): temporary mock value to test here + .unwrap_or(vec![0u8; 1]) .chunks(ZK_SYNC_BYTES_PER_BLOB) .map(|blob| { let kzg_info = KzgInfo::new(blob); diff --git a/core/node/eth_sender/src/eth_tx_manager.rs b/core/node/eth_sender/src/eth_tx_manager.rs index 6992bea1007c..567e640926ab 100644 --- a/core/node/eth_sender/src/eth_tx_manager.rs +++ b/core/node/eth_sender/src/eth_tx_manager.rs @@ -585,6 +585,7 @@ impl EthTxManager { tracing::debug!("No new {operator_type:?} transactions to send"); } for tx in new_eth_tx { + println!("got here {:?}", tx); let result = self.send_eth_tx(storage, &tx, 0, current_block).await; // If one of the transactions doesn't succeed, this means we should return // as new transactions have increasing nonces, so they will also result in an error diff --git a/etc/env/file_based/genesis.yaml b/etc/env/file_based/genesis.yaml index 9617b011d2c7..553554b6997a 100644 --- a/etc/env/file_based/genesis.yaml +++ b/etc/env/file_based/genesis.yaml @@ -1,6 +1,7 @@ -genesis_root: 0x9b30c35100835c0d811c9d385cc9804816dbceb4461b8fe4cbb8d0d5ecdacdec -genesis_rollup_leaf_index: 54 -genesis_batch_commitment: 0x043d432c1b668e54ada198d683516109e45e4f7f81f216ff4c4f469117732e50 +# TODO(zk os): genesis data taken from server +genesis_root: 0x98A48E4ED1736188384AE8A79DD21C4D6687E5FD22CA18148906D78736C0D86A +genesis_rollup_leaf_index: 1 +genesis_batch_commitment: 0xE9F37F8CC6768BC9A6A9E04749C26E80C7197BFD7BDF6B36483C2CC5FCEB2C82 genesis_protocol_version: 25 default_aa_hash: 0x01000523eadd3061f8e701acda503defb7ac3734ae3371e4daf7494651d8b523 bootloader_hash: 0x010008e15394cd83a8d463d61e00b4361afbc27c932b07a9d2100861b7d05e78 From 611889a63a8df13af15974c6fa46ef62ce0adf6b Mon Sep 17 00:00:00 2001 From: AntonD3 Date: Fri, 20 Dec 2024 17:43:52 +0100 Subject: [PATCH 5/5] Temporary fix to get deployment address in receipt --- .../lib/dal/src/models/storage_transaction.rs | 1 + core/lib/dal/src/transactions_web3_dal.rs | 60 ++++++++++++++++++- 2 files changed, 60 insertions(+), 1 deletion(-) diff --git a/core/lib/dal/src/models/storage_transaction.rs b/core/lib/dal/src/models/storage_transaction.rs index cceebc85cf2b..056c8ec1f0d9 100644 --- a/core/lib/dal/src/models/storage_transaction.rs +++ b/core/lib/dal/src/models/storage_transaction.rs @@ -339,6 +339,7 @@ impl From<&StorageTransaction> for TransactionTimeRangeConstraint { #[derive(sqlx::FromRow)] pub(crate) struct StorageTransactionReceipt { pub error: Option, + pub nonce: Option, pub tx_format: Option, pub index_in_block: Option, pub block_hash: Vec, diff --git a/core/lib/dal/src/transactions_web3_dal.rs b/core/lib/dal/src/transactions_web3_dal.rs index d1c334e4f6b8..88eea83da7ee 100644 --- a/core/lib/dal/src/transactions_web3_dal.rs +++ b/core/lib/dal/src/transactions_web3_dal.rs @@ -12,6 +12,7 @@ use zksync_types::{ api, api::TransactionReceipt, block::build_bloom, Address, BloomInput, L2BlockNumber, L2ChainId, Transaction, CONTRACT_DEPLOYER_ADDRESS, H256, U256, }; +use zksync_types::web3::keccak256; use zksync_vm_interface::VmEvent; use crate::{ @@ -45,7 +46,7 @@ impl TransactionsWeb3Dal<'_, '_> { // Clarification for first part of the query(`WITH` clause): // Looking for 
`ContractDeployed` event in the events table
         // to find the address of deployed contract
-        let st_receipts: Vec<StorageTransactionReceipt> = sqlx::query_as!(
+        let mut st_receipts: Vec<StorageTransactionReceipt> = sqlx::query_as!(
             StorageTransactionReceipt,
             r#"
             WITH
@@ -69,6 +70,7 @@ impl TransactionsWeb3Dal<'_, '_> {
                 transactions.l1_batch_tx_index,
                 transactions.miniblock_number AS "block_number!",
                 transactions.error,
+                transactions.nonce,
                 transactions.effective_gas_price,
                 transactions.initiator_address,
                 transactions.data -> 'to' AS "transfer_to?",
@@ -102,6 +104,62 @@ impl TransactionsWeb3Dal<'_, '_> {
         let block_timestamps: Vec<Option<i64>> = st_receipts.iter().map(|x| x.block_timestamp).collect();
+        // TODO(zk os): temporary dirty hack to derive deployment address
+        fn derive_create_address(address: &[u8], nonce: u64) -> Vec<u8> {
+            let nonce_bytes = nonce.to_be_bytes();
+            let skip_nonce_len = nonce_bytes.iter().take_while(|el| **el == 0).count();
+            let nonce_len = 8 - skip_nonce_len;
+
+            let rlp_encoded = if nonce_len == 1 && nonce_bytes[7] < 128 {
+                // we encode
+                // - 0xc0 + payload len
+                // - 0x80 + 20(address len)
+                // - address
+                // - one byte nonce
+
+                let payload_len = 22;
+
+                let mut encoding = Vec::with_capacity(23);
+                encoding.push(0xc0u8 + (payload_len as u8));
+                encoding.push(0x80u8 + 20u8);
+                encoding.extend(address);
+                encoding.push(nonce_bytes[7]);
+                encoding
+            } else {
+                // we encode
+                // - 0xc0 + payload len
+                // - 0x80 + 20(address len)
+                // - address
+                // - 0x80 + length of nonce
+                // - nonce
+
+                let payload_len = 22 + nonce_len;
+
+                let mut encoding = Vec::with_capacity(23);
+                encoding.push(0xc0u8 + (payload_len as u8));
+                encoding.push(0x80u8 + 20u8);
+                encoding.extend(address);
+                encoding.push(0x80u8 + (nonce_len as u8));
+                encoding.extend(nonce_bytes);
+                encoding
+            };
+            let mut hash = keccak256(rlp_encoded.as_slice());
+            for byte in &mut hash[0..12] {
+                *byte = 0;
+            }
+            hash.to_vec()
+        }
+
+        st_receipts.iter_mut().for_each(|receipt| {
+            let is_deployment_tx = match serde_json::from_value::<Option<Address>>(receipt.execute_contract_address.clone().unwrap()).expect("invalid address value in the database") {
+                Some(to) => to == CONTRACT_DEPLOYER_ADDRESS,
+                None => true,
+            };
+            if is_deployment_tx {
+                // nonce may not work for l1 tx
+                receipt.contract_address = Some(derive_create_address(receipt.initiator_address.as_slice(), receipt.nonce.unwrap_or_default() as u64));
+            }
+        });
         let mut receipts: Vec<TransactionReceipt> = st_receipts.into_iter().map(Into::into).collect();
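
Note: the helper in the last hunk reproduces Ethereum's CREATE address rule (keccak256 of the RLP encoding of [sender, nonce], keeping the low 20 bytes, padded back to a 32-byte word). Below is a minimal test sketch, not part of the patch; it assumes derive_create_address is hoisted out of get_transaction_receipts to module scope so a test module can reference it, uses arbitrary example values for sender and nonce, and hand-encodes RLP only for the simple case of a nonce in 1..=127 (larger nonces would need the minimal big-endian nonce representation instead).

#[cfg(test)]
mod create_address_tests {
    use zksync_types::{web3::keccak256, Address};

    use super::derive_create_address;

    #[test]
    fn derived_address_matches_manual_rlp_for_small_nonce() {
        let sender = Address::from_low_u64_be(0xabcdef);
        let nonce: u64 = 5;

        // RLP of [20-byte address, nonce in 1..=127]:
        // list header (0xc0 + 22), string header (0x80 + 20), address bytes, nonce byte.
        let mut rlp = vec![0xd6u8, 0x94u8];
        rlp.extend_from_slice(sender.as_bytes());
        rlp.push(nonce as u8);

        // The helper returns keccak256 of that encoding with the top 12 bytes zeroed.
        let mut expected = keccak256(&rlp);
        expected[..12].fill(0);

        assert_eq!(derive_create_address(sender.as_bytes(), nonce), expected.to_vec());
    }
}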