feat: Add Unit Tests for get_batch_pubdata endpoint #160

Open · wants to merge 15 commits into base: feat_get_pubdata_endpoint
@@ -51,7 +51,7 @@ impl<'a> Tokenizable for CommitBatchInfoRollup<'a> {
        Token::Tuple(encode_l1_commit(
            &self.l1_batch_with_metadata.header,
            &self.l1_batch_with_metadata.metadata,
-            Some(&self.l1_batch_with_metadata),
+            Some(self.l1_batch_with_metadata),
        ))
    }
}
11 changes: 10 additions & 1 deletion core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs
@@ -648,7 +648,16 @@ impl ZksNamespace {
            .get_l1_batch_metadata(l1_batch_number)
            .await
            .map_err(|err| internal_error(METHOD_NAME, err))?
-            .map(|l1_batch_with_metadata| l1_batch_with_metadata.construct_pubdata().into());
+            .map(|l1_batch_with_metadata| {
+                let partial_pubdata = l1_batch_with_metadata.construct_pubdata();
+                // The encoding of empty pubdata results in a vector of zeroes. We should return
+                // `None in this case.

Review comment: I think you accidentally left out the closing backtick after `None`.

+                if partial_pubdata == vec![0u8; partial_pubdata.len()] {

Review comment: Could we consider using iter().all() to check whether all elements of partial_pubdata are zeroes? This avoids allocating a new vector of zeroes just for the comparison, which is particularly beneficial for larger vectors. The suggested change would look like:

if partial_pubdata.iter().all(|&x| x == 0) {
    Bytes::default()
} else {
    partial_pubdata.into()
}

Your thoughts on this optimization would be appreciated. Thank you!
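
As an editorial aside, here is a minimal standalone sketch of the reviewer's point; the helper names `is_all_zeroes_alloc` and `is_all_zeroes_iter` are hypothetical and not part of the PR. The iterator form short-circuits on the first non-zero byte and performs no allocation:

// Hypothetical helpers contrasting the two zero-checks discussed above;
// `pubdata` stands in for `partial_pubdata` from the diff.
fn is_all_zeroes_alloc(pubdata: &[u8]) -> bool {
    // Allocates a fresh vector of zeroes just to compare against.
    pubdata == vec![0u8; pubdata.len()].as_slice()
}

fn is_all_zeroes_iter(pubdata: &[u8]) -> bool {
    // Short-circuits on the first non-zero byte; no allocation.
    pubdata.iter().all(|&x| x == 0)
}

fn main() {
    assert!(is_all_zeroes_alloc(&[0u8; 32]));
    assert!(is_all_zeroes_iter(&[0u8; 32]));
    assert!(!is_all_zeroes_iter(&[0, 1, 2, 3]));
}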

+                    Bytes::default()
+                } else {
+                    partial_pubdata.into()
+                }
+            });

        method_latency.observe();
        Ok(pubdata)
119 changes: 111 additions & 8 deletions core/lib/zksync_core/src/api_server/web3/tests/mod.rs
@@ -5,27 +5,26 @@ use async_trait::async_trait;
use jsonrpsee::core::ClientError;
use multivm::zk_evm_latest::ethereum_types::U256;
use tokio::sync::watch;
-use zksync_config::configs::{
-    api::Web3JsonRpcConfig,
-    chain::{NetworkConfig, StateKeeperConfig},
-    ContractsConfig,
-};
+use zksync_config::configs::{api::Web3JsonRpcConfig, chain::NetworkConfig, ContractsConfig};
use zksync_dal::{transactions_dal::L2TxSubmissionResult, ConnectionPool, StorageProcessor};
use zksync_health_check::CheckHealth;
use zksync_types::{
    api,
-    block::MiniblockHeader,
+    block::{L1BatchHeader, MiniblockHeader},
+    commitment::{L1BatchMetadata, L1BatchWithMetadata},
    fee::TransactionExecutionMetrics,
    get_nonce_key,
    l2::L2Tx,
+    l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log},
    storage::get_code_key,
    tokens::{TokenInfo, TokenMetadata},
    tx::{
        tx_execution_info::TxExecutionStatus, ExecutionMetrics, IncludedTxLocation,
        TransactionExecutionResult,
    },
    utils::{storage_key_for_eth_balance, storage_key_for_standard_token_balance},
-    AccountTreeId, Address, L1BatchNumber, Nonce, StorageKey, StorageLog, VmEvent, H256, U64,
+    AccountTreeId, Address, Bytes, L1BatchNumber, Nonce, StorageKey, StorageLog, VmEvent, H256,
+    U64,
};
use zksync_utils::u256_to_h256;
use zksync_web3_decl::{
@@ -254,7 +253,7 @@ impl StorageInitialization {
async fn test_http_server(test: impl HttpTest) {
    let pool = ConnectionPool::test_pool().await;
    let network_config = NetworkConfig::for_tests();
-    let mut storage = pool.access_storage().await.unwrap();
+    let mut storage: StorageProcessor<'_> = pool.access_storage().await.unwrap();
    test.storage_initialization()
        .prepare_storage(&network_config, &mut storage)
        .await
@@ -933,3 +932,107 @@ impl HttpTest for AllAccountBalancesTest {
async fn getting_all_account_balances() {
    test_http_server(AllAccountBalancesTest).await;
}

#[derive(Debug)]
struct GetPubdataTest;

impl GetPubdataTest {
    async fn insert_l1_batch_with_metadata(
        storage: &mut StorageProcessor<'_>,
        l1_batch_header: L1BatchHeader,
        l1_batch_metadata: L1BatchMetadata,
    ) -> L1BatchHeader {
        // Save the L1 batch to the database.
        storage
            .blocks_dal()
            .insert_mock_l1_batch(&l1_batch_header)
            .await
            .unwrap();

        storage
            .blocks_dal()
            .save_l1_batch_tree_data(l1_batch_header.number, &l1_batch_metadata.tree_data())
            .await
            .unwrap();
        storage
            .blocks_dal()
            .save_l1_batch_commitment_artifacts(
                l1_batch_header.number,
                &l1_batch_metadata_to_commitment_artifacts(&l1_batch_metadata),
            )
            .await
            .unwrap();

        storage
            .blocks_dal()
            .insert_miniblock(&create_miniblock(l1_batch_header.number.0))
            .await
            .unwrap();

        storage
            .blocks_dal()
            .mark_miniblocks_as_executed_in_l1_batch(l1_batch_header.number)
            .await
            .unwrap();

        l1_batch_header
    }

    fn build_l2_to_l1_logs() -> Vec<UserL2ToL1Log> {
        let l2_to_l1_log = L2ToL1Log {
            shard_id: 1,
            tx_number_in_block: 1,
            is_service: false,
            sender: Address::repeat_byte(1),
            key: H256::repeat_byte(1),
            value: H256::repeat_byte(1),
        };
        let user_l2_to_l1_log = UserL2ToL1Log(l2_to_l1_log);

        vec![user_l2_to_l1_log]
    }
}

#[async_trait]
impl HttpTest for GetPubdataTest {
    async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> {
        let l1_batch_number = L1BatchNumber(1);
        // Assert that pubdata is None if the L1BatchWithMetadata is not found.
        let pubdata = client.get_batch_pubdata(l1_batch_number).await?;
        assert_eq!(pubdata, None);

        // Assert that pubdata matches the expected value once the L1BatchWithMetadata is found.
        // Create an L1 batch header and add pubdata (l2_to_l1_logs, l2_to_l1_messages).
        let mut l1_batch_header: L1BatchHeader = create_l1_batch(l1_batch_number.0);
        l1_batch_header.l2_to_l1_logs = Self::build_l2_to_l1_logs();
        l1_batch_header.l2_to_l1_messages = vec![vec![1, 2, 3]];

        // Create metadata.
        let l1_batch_metadata: L1BatchMetadata = create_l1_batch_metadata(l1_batch_number.0);

        // Build the L1BatchWithMetadata.
        let l1_batch_with_metadata = L1BatchWithMetadata {
            header: l1_batch_header.clone(),
            metadata: l1_batch_metadata.clone(),
            raw_published_factory_deps: vec![],
        };

        let expected_pubdata: Bytes = l1_batch_with_metadata.construct_pubdata().into();

        let mut storage = pool.access_storage().await?;
        Self::insert_l1_batch_with_metadata(&mut storage, l1_batch_header, l1_batch_metadata)
            .await;

        // Call the endpoint.
        let pubdata = client.get_batch_pubdata(l1_batch_number).await?;

        // Assert that the pubdata equals the expected pubdata.
        assert_eq!(pubdata, Some(expected_pubdata));

        Ok(())
    }
}

#[tokio::test]
async fn get_batch_pubdata_impl() {
    test_http_server(GetPubdataTest).await;
}
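
For context, a hedged sketch of how the new endpoint might be exercised against a running node over raw JSON-RPC. The wire-level method name `zks_getBatchPubdata` and the node URL are assumptions inferred from the `zks` namespace, not confirmed by this diff:

use jsonrpsee::{core::client::ClientT, http_client::HttpClientBuilder, rpc_params};
use zksync_types::Bytes;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Assumed local node URL; adjust to your setup.
    let client = HttpClientBuilder::default().build("http://localhost:3050")?;
    // Mirrors the test above: `None` is expected for a batch without metadata.
    let pubdata: Option<Bytes> = client
        .request("zks_getBatchPubdata", rpc_params![1_u32])
        .await?;
    println!("pubdata for batch 1: {pubdata:?}");
    Ok(())
}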
1 change: 1 addition & 0 deletions core/lib/zksync_core/src/api_server/web3/tests/vm.rs
@@ -3,6 +3,7 @@
use std::sync::atomic::{AtomicU32, Ordering};

use multivm::interface::{ExecutionResult, VmRevertReason};
+use zksync_config::configs::chain::StateKeeperConfig;
use zksync_types::{
get_intrinsic_constants, transaction_request::CallRequest, L2ChainId, PackedEthSignature, U256,
};