diff --git a/Cargo.lock b/Cargo.lock index 7e0f7136da1e..98b751295415 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -128,9 +128,9 @@ dependencies = [ [[package]] name = "alloy-chains" -version = "0.1.46" +version = "0.1.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "836cf02383d9ebb35502d379bcd1ae803155094077eaab9c29131d888cd5fa3e" +checksum = "18c5c520273946ecf715c0010b4e3503d7eba9893cd9ce6b7fff5654c4a3c470" dependencies = [ "alloy-primitives", "num_enum 0.7.3", @@ -174,9 +174,9 @@ dependencies = [ [[package]] name = "alloy-core" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b72bf30967a232bec83809bea1623031f6285a013096229330c68c406192a4ca" +checksum = "47ef9e96462d0b9fee9008c53c1f3d017b9498fcdef3ad8d728db98afef47955" dependencies = [ "alloy-dyn-abi", "alloy-json-abi", @@ -187,9 +187,9 @@ dependencies = [ [[package]] name = "alloy-dyn-abi" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5228b189b18b85761340dc9eaac0141148a8503657b36f9bc3a869413d987ca" +checksum = "85132f2698b520fab3f54beed55a44389f7006a7b557a0261e1e69439dcc1572" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -256,9 +256,9 @@ dependencies = [ [[package]] name = "alloy-json-abi" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31a0f0d51db8a1a30a4d98a9f90e090a94c8f44cb4d9eafc7e03aa6d00aae984" +checksum = "ded610181f3dad5810f6ff12d1a99994cf9b42d2fcb7709029352398a5da5ae6" dependencies = [ "alloy-primitives", "alloy-sol-type-parser", @@ -315,9 +315,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8edae627382349b56cd6a7a2106f4fd69b243a9233e560c55c2e03cabb7e1d3c" +checksum = "fd58d377699e6cfeab52c4a9d28bdc4ef37e2bd235ff2db525071fe37a2e9af5" dependencies = [ "alloy-rlp", "bytes", @@ -533,9 +533,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "841eabaa4710f719fddbc24c95d386eae313f07e6da4babc25830ee37945be0c" +checksum = "8a1b42ac8f45e2f49f4bcdd72cbfde0bb148f5481d403774ffa546e48b83efc1" dependencies = [ "alloy-sol-macro-expander", "alloy-sol-macro-input", @@ -547,9 +547,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro-expander" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6672337f19d837b9f7073c45853aeb528ed9f7dd6a4154ce683e9e5cb7794014" +checksum = "06318f1778e57f36333e850aa71bd1bb5e560c10279e236622faae0470c50412" dependencies = [ "alloy-json-abi", "alloy-sol-macro-input", @@ -566,9 +566,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro-input" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dff37dd20bfb118b777c96eda83b2067f4226d2644c5cfa00187b3bc01770ba" +checksum = "eaebb9b0ad61a41345a22c9279975c0cdd231b97947b10d7aad1cf0a7181e4a5" dependencies = [ "alloy-json-abi", "const-hex", @@ -583,9 +583,9 @@ dependencies = [ [[package]] name = "alloy-sol-type-parser" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b853d42292dbb159671a3edae3b2750277ff130f32b726fe07dc2b17aa6f2b5" +checksum = 
"12c71028bfbfec210e24106a542aad3def7caf1a70e2c05710e92a98481980d3" dependencies = [ "serde", "winnow 0.6.20", @@ -593,9 +593,9 @@ dependencies = [ [[package]] name = "alloy-sol-types" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa828bb1b9a6dc52208fbb18084fb9ce2c30facc2bfda6a5d922349b4990354f" +checksum = "374d7fb042d68ddfe79ccb23359de3007f6d4d53c13f703b64fb0db422132111" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -4132,6 +4132,26 @@ dependencies = [ "zksync_utils", ] +[[package]] +name = "get_all_blobs" +version = "0.1.0" +dependencies = [ + "alloy", + "anyhow", + "axum 0.7.7", + "futures 0.3.31", + "hex", + "kzgpad-rs", + "prost 0.13.3", + "reqwest 0.12.9", + "rlp", + "rustls 0.23.16", + "serde", + "serde_json", + "tokio", + "tonic 0.12.3", +] + [[package]] name = "getrandom" version = "0.2.15" @@ -8050,6 +8070,19 @@ dependencies = [ "security-framework", ] +[[package]] +name = "rustls-native-certs" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcaf18a4f2be7326cd874a5fa579fae794320a0f388d365dca7e480e55f83f8a" +dependencies = [ + "openssl-probe", + "rustls-pemfile 2.2.0", + "rustls-pki-types", + "schannel", + "security-framework", +] + [[package]] name = "rustls-pemfile" version = "1.0.4" @@ -9726,9 +9759,9 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16320d4a2021ba1a32470b3759676114a918885e9800e68ad60f2c67969fba62" +checksum = "edf42e81491fb8871b74df3d222c64ae8cbc1269ea509fa768a3ed3e1b0ac8cb" dependencies = [ "paste", "proc-macro2 1.0.89", @@ -10313,8 +10346,11 @@ dependencies = [ "percent-encoding", "pin-project", "prost 0.13.3", + "rustls-native-certs 0.8.0", + "rustls-pemfile 2.2.0", "socket2", "tokio", + "tokio-rustls 0.26.0", "tokio-stream", "tower 0.4.13", "tower-layer", diff --git a/Cargo.toml b/Cargo.toml index 87e0de13129f..0be6954d525c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -79,6 +79,7 @@ members = [ "core/tests/test_account", "core/tests/loadnext", "core/tests/vm-benchmark", + "get_all_blobs", ] resolver = "2" diff --git a/contracts b/contracts index 43cc01e430dd..64ed0ab97ff4 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 43cc01e430dd24e35f80a5ae9b1ee708c1075df2 +Subproject commit 64ed0ab97ff4e9d2a265522025bdb8e1a4a4d2eb diff --git a/core/lib/dal/.sqlx/query-77864e5eb5eada8edf8f4457aa153369701d7cd5f75ca031bf77ca27d0437cb9.json b/core/lib/dal/.sqlx/query-1df6a0c44dafb0d8932e9c9162b634d167462ce5de6a9c240f990856be8c4c88.json similarity index 88% rename from core/lib/dal/.sqlx/query-77864e5eb5eada8edf8f4457aa153369701d7cd5f75ca031bf77ca27d0437cb9.json rename to core/lib/dal/.sqlx/query-1df6a0c44dafb0d8932e9c9162b634d167462ce5de6a9c240f990856be8c4c88.json index f4e08abe31c5..294799d4906c 100644 --- a/core/lib/dal/.sqlx/query-77864e5eb5eada8edf8f4457aa153369701d7cd5f75ca031bf77ca27d0437cb9.json +++ b/core/lib/dal/.sqlx/query-1df6a0c44dafb0d8932e9c9162b634d167462ce5de6a9c240f990856be8c4c88.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n 
evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n is_sealed\n AND number = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n is_sealed\n AND number = $1\n ", "describe": { "columns": [ { @@ -162,6 +162,11 @@ "ordinal": 31, "name": "inclusion_data", "type_info": "Bytea" + }, + { + "ordinal": 32, + "name": "blob_id?", + "type_info": "Text" } ], "parameters": { @@ -201,8 +206,9 @@ true, true, true, - true + true, + false ] }, - "hash": "77864e5eb5eada8edf8f4457aa153369701d7cd5f75ca031bf77ca27d0437cb9" + "hash": "1df6a0c44dafb0d8932e9c9162b634d167462ce5de6a9c240f990856be8c4c88" } diff --git a/core/lib/dal/.sqlx/query-a42121cd85daeb95ee268ba5cff1806fcc54d73216a7dc54be6ba210ef02d789.json b/core/lib/dal/.sqlx/query-47c31073d726572d282232bf550f900a8e5e705543f529e48d9fe96c35ddde75.json similarity index 80% rename from core/lib/dal/.sqlx/query-a42121cd85daeb95ee268ba5cff1806fcc54d73216a7dc54be6ba210ef02d789.json rename to core/lib/dal/.sqlx/query-47c31073d726572d282232bf550f900a8e5e705543f529e48d9fe96c35ddde75.json index 9a93ba45978e..64dbd1dcd019 100644 --- a/core/lib/dal/.sqlx/query-a42121cd85daeb95ee268ba5cff1806fcc54d73216a7dc54be6ba210ef02d789.json +++ b/core/lib/dal/.sqlx/query-47c31073d726572d282232bf550f900a8e5e705543f529e48d9fe96c35ddde75.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n system_logs,\n compressed_state_diffs,\n protocol_version,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n (\n SELECT\n l1_batches.*,\n ROW_NUMBER() OVER (\n ORDER BY\n number ASC\n ) AS row_number\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NOT NULL\n 
AND l1_batches.skip_proof = TRUE\n AND l1_batches.number > $1\n ORDER BY\n number\n LIMIT\n $2\n ) inn\n LEFT JOIN commitments ON commitments.l1_batch_number = inn.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = inn.number\n WHERE\n number - row_number = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n system_logs,\n compressed_state_diffs,\n protocol_version,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n (\n SELECT\n l1_batches.*,\n ROW_NUMBER() OVER (\n ORDER BY\n number ASC\n ) AS row_number\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND l1_batches.skip_proof = TRUE\n AND l1_batches.number > $1\n ORDER BY\n number\n LIMIT\n $2\n ) inn\n LEFT JOIN commitments ON commitments.l1_batch_number = inn.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = inn.number\n WHERE\n number - row_number = $1\n ", "describe": { "columns": [ { @@ -162,6 +162,11 @@ "ordinal": 31, "name": "inclusion_data", "type_info": "Bytea" + }, + { + "ordinal": 32, + "name": "blob_id?", + "type_info": "Text" } ], "parameters": { @@ -202,8 +207,9 @@ true, true, true, - true + true, + false ] }, - "hash": "a42121cd85daeb95ee268ba5cff1806fcc54d73216a7dc54be6ba210ef02d789" + "hash": "47c31073d726572d282232bf550f900a8e5e705543f529e48d9fe96c35ddde75" } diff --git a/core/lib/dal/.sqlx/query-c5aedd2b1871d8f6276a31482caa673e4b5bba059ebe07bbbb64578881db030b.json b/core/lib/dal/.sqlx/query-57686ab3e929331f7efafff78fa48d3973cf8ce53871a2fab4febac60bb56583.json similarity index 85% rename from core/lib/dal/.sqlx/query-c5aedd2b1871d8f6276a31482caa673e4b5bba059ebe07bbbb64578881db030b.json rename to core/lib/dal/.sqlx/query-57686ab3e929331f7efafff78fa48d3973cf8ce53871a2fab4febac60bb56583.json index f97ea8a6ccd5..f310b82954da 100644 --- a/core/lib/dal/.sqlx/query-c5aedd2b1871d8f6276a31482caa673e4b5bba059ebe07bbbb64578881db030b.json +++ b/core/lib/dal/.sqlx/query-57686ab3e929331f7efafff78fa48d3973cf8ce53871a2fab4febac60bb56583.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_prove_tx_id IS 
NOT NULL\n AND eth_execute_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_prove_tx_id IS NOT NULL\n AND eth_execute_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", "describe": { "columns": [ { @@ -162,6 +162,11 @@ "ordinal": 31, "name": "inclusion_data", "type_info": "Bytea" + }, + { + "ordinal": 32, + "name": "blob_id?", + "type_info": "Text" } ], "parameters": { @@ -201,8 +206,9 @@ true, true, true, - true + true, + false ] }, - "hash": "c5aedd2b1871d8f6276a31482caa673e4b5bba059ebe07bbbb64578881db030b" + "hash": "57686ab3e929331f7efafff78fa48d3973cf8ce53871a2fab4febac60bb56583" } diff --git a/core/lib/dal/.sqlx/query-1cb61327bed4d65a3fc81aa2229e01396dacefc0cea8cbcf5807185eb00fc0f7.json b/core/lib/dal/.sqlx/query-6d4746aab463789bdd3ccb251f6b6cc4a3da487ee4a928de1513b13b7b918575.json similarity index 86% rename from core/lib/dal/.sqlx/query-1cb61327bed4d65a3fc81aa2229e01396dacefc0cea8cbcf5807185eb00fc0f7.json rename to core/lib/dal/.sqlx/query-6d4746aab463789bdd3ccb251f6b6cc4a3da487ee4a928de1513b13b7b918575.json index 48adcd412676..2dd50bd6b4d9 100644 --- a/core/lib/dal/.sqlx/query-1cb61327bed4d65a3fc81aa2229e01396dacefc0cea8cbcf5807185eb00fc0f7.json +++ b/core/lib/dal/.sqlx/query-6d4746aab463789bdd3ccb251f6b6cc4a3da487ee4a928de1513b13b7b918575.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n number = 0\n OR eth_commit_tx_id IS NOT NULL\n AND commitment IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n 
rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n number = 0\n OR eth_commit_tx_id IS NOT NULL\n AND commitment IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -162,6 +162,11 @@ "ordinal": 31, "name": "inclusion_data", "type_info": "Bytea" + }, + { + "ordinal": 32, + "name": "blob_id?", + "type_info": "Text" } ], "parameters": { @@ -199,8 +204,9 @@ true, true, true, - true + true, + false ] }, - "hash": "1cb61327bed4d65a3fc81aa2229e01396dacefc0cea8cbcf5807185eb00fc0f7" + "hash": "6d4746aab463789bdd3ccb251f6b6cc4a3da487ee4a928de1513b13b7b918575" } diff --git a/core/lib/dal/.sqlx/query-b7d448837439a3e3dfe73070d3c20e9c138d0a6d35e9ce7fc396c5e76fbc25dd.json b/core/lib/dal/.sqlx/query-8a3f130f3b1309b30b3f23bc3cff186551207484769344d211d6c9d2fc452ef3.json similarity index 81% rename from core/lib/dal/.sqlx/query-b7d448837439a3e3dfe73070d3c20e9c138d0a6d35e9ce7fc396c5e76fbc25dd.json rename to core/lib/dal/.sqlx/query-8a3f130f3b1309b30b3f23bc3cff186551207484769344d211d6c9d2fc452ef3.json index 8a68b1a9b9bd..b95fb8c82321 100644 --- a/core/lib/dal/.sqlx/query-b7d448837439a3e3dfe73070d3c20e9c138d0a6d35e9ce7fc396c5e76fbc25dd.json +++ b/core/lib/dal/.sqlx/query-8a3f130f3b1309b30b3f23bc3cff186551207484769344d211d6c9d2fc452ef3.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n ORDER BY\n number\n LIMIT\n $4\n ", + "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n 
rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n ORDER BY\n number\n LIMIT\n $4\n ", "describe": { "columns": [ { @@ -162,6 +162,11 @@ "ordinal": 31, "name": "inclusion_data", "type_info": "Bytea" + }, + { + "ordinal": 32, + "name": "blob_id?", + "type_info": "Text" } ], "parameters": { @@ -204,8 +209,9 @@ true, true, true, - true + true, + false ] }, - "hash": "b7d448837439a3e3dfe73070d3c20e9c138d0a6d35e9ce7fc396c5e76fbc25dd" + "hash": "8a3f130f3b1309b30b3f23bc3cff186551207484769344d211d6c9d2fc452ef3" } diff --git a/core/lib/dal/.sqlx/query-4bd1a4e612d10f2ca26068c140442f38816f163a3e3fba4fdbb81076b969e970.json b/core/lib/dal/.sqlx/query-96de8839bee7d58e2807f98101271fca0e375f1309b34ce09a5beb8ed688c3ef.json similarity index 94% rename from core/lib/dal/.sqlx/query-4bd1a4e612d10f2ca26068c140442f38816f163a3e3fba4fdbb81076b969e970.json rename to core/lib/dal/.sqlx/query-96de8839bee7d58e2807f98101271fca0e375f1309b34ce09a5beb8ed688c3ef.json index 66d3e18075bf..e45f0ceb6ef9 100644 --- a/core/lib/dal/.sqlx/query-4bd1a4e612d10f2ca26068c140442f38816f163a3e3fba4fdbb81076b969e970.json +++ b/core/lib/dal/.sqlx/query-96de8839bee7d58e2807f98101271fca0e375f1309b34ce09a5beb8ed688c3ef.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n number BETWEEN $1 AND $2\n ORDER BY\n number\n LIMIT\n $3\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n 
default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n number BETWEEN $1 AND $2\n ORDER BY\n number\n LIMIT\n $3\n ", "describe": { "columns": [ { @@ -162,6 +162,11 @@ "ordinal": 31, "name": "inclusion_data", "type_info": "Bytea" + }, + { + "ordinal": 32, + "name": "blob_id?", + "type_info": "Text" } ], "parameters": { @@ -203,8 +208,9 @@ true, true, true, - true + true, + false ] }, - "hash": "4bd1a4e612d10f2ca26068c140442f38816f163a3e3fba4fdbb81076b969e970" + "hash": "96de8839bee7d58e2807f98101271fca0e375f1309b34ce09a5beb8ed688c3ef" } diff --git a/core/lib/dal/.sqlx/query-45154c2efc8d07c4f83ae3e229f9892118f5732374e62f35e27800422afb5746.json b/core/lib/dal/.sqlx/query-af2bab04895e886343f80077af31afd7240ef53d95408a0d38bff65f786b038a.json similarity index 78% rename from core/lib/dal/.sqlx/query-45154c2efc8d07c4f83ae3e229f9892118f5732374e62f35e27800422afb5746.json rename to core/lib/dal/.sqlx/query-af2bab04895e886343f80077af31afd7240ef53d95408a0d38bff65f786b038a.json index 11bff1102932..63b5a6501105 100644 --- a/core/lib/dal/.sqlx/query-45154c2efc8d07c4f83ae3e229f9892118f5732374e62f35e27800422afb5746.json +++ b/core/lib/dal/.sqlx/query-af2bab04895e886343f80077af31afd7240ef53d95408a0d38bff65f786b038a.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND (\n data_availability.inclusion_data IS NOT NULL\n OR $4 IS FALSE\n )\n ORDER BY\n number\n LIMIT\n $5\n ", + "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n 
compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND (\n data_availability.inclusion_data IS NOT NULL\n OR $4 IS FALSE\n )\n ORDER BY\n number\n LIMIT\n $5\n ", "describe": { "columns": [ { @@ -162,6 +162,11 @@ "ordinal": 31, "name": "inclusion_data", "type_info": "Bytea" + }, + { + "ordinal": 32, + "name": "blob_id?", + "type_info": "Text" } ], "parameters": { @@ -205,8 +210,9 @@ true, true, true, - true + true, + false ] }, - "hash": "45154c2efc8d07c4f83ae3e229f9892118f5732374e62f35e27800422afb5746" + "hash": "af2bab04895e886343f80077af31afd7240ef53d95408a0d38bff65f786b038a" } diff --git a/core/lib/dal/.sqlx/query-62e8330881b73917394384adbf73911add046315e5f8877bc57a34e3dadf9e37.json b/core/lib/dal/.sqlx/query-fa52ecb8ee44f02f8d5a2061266c277d67f184d29082a03bc70b9d95700e8c05.json similarity index 86% rename from core/lib/dal/.sqlx/query-62e8330881b73917394384adbf73911add046315e5f8877bc57a34e3dadf9e37.json rename to core/lib/dal/.sqlx/query-fa52ecb8ee44f02f8d5a2061266c277d67f184d29082a03bc70b9d95700e8c05.json index dfdb4b6c82e7..e2c6df469102 100644 --- a/core/lib/dal/.sqlx/query-62e8330881b73917394384adbf73911add046315e5f8877bc57a34e3dadf9e37.json +++ b/core/lib/dal/.sqlx/query-fa52ecb8ee44f02f8d5a2061266c277d67f184d29082a03bc70b9d95700e8c05.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND eth_prove_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n 
priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND eth_prove_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", "describe": { "columns": [ { @@ -162,6 +162,11 @@ "ordinal": 31, "name": "inclusion_data", "type_info": "Bytea" + }, + { + "ordinal": 32, + "name": "blob_id?", + "type_info": "Text" } ], "parameters": { @@ -201,8 +206,9 @@ true, true, true, - true + true, + false ] }, - "hash": "62e8330881b73917394384adbf73911add046315e5f8877bc57a34e3dadf9e37" + "hash": "fa52ecb8ee44f02f8d5a2061266c277d67f184d29082a03bc70b9d95700e8c05" } diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index 943aa12caf75..697514535193 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -348,7 +348,8 @@ impl BlocksDal<'_, '_> { aggregation_root, local_root, state_diff_hash, - data_availability.inclusion_data + data_availability.inclusion_data, + data_availability.blob_id AS "blob_id?" FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number @@ -1219,7 +1220,8 @@ impl BlocksDal<'_, '_> { aggregation_root, local_root, state_diff_hash, - data_availability.inclusion_data + data_availability.inclusion_data, + data_availability.blob_id AS "blob_id?" FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number @@ -1414,7 +1416,8 @@ impl BlocksDal<'_, '_> { aggregation_root, local_root, state_diff_hash, - data_availability.inclusion_data + data_availability.inclusion_data, + data_availability.blob_id AS "blob_id?" FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number @@ -1503,7 +1506,8 @@ impl BlocksDal<'_, '_> { aggregation_root, local_root, state_diff_hash, - data_availability.inclusion_data + data_availability.inclusion_data, + data_availability.blob_id AS "blob_id?" FROM ( SELECT @@ -1583,7 +1587,8 @@ impl BlocksDal<'_, '_> { aggregation_root, local_root, state_diff_hash, - data_availability.inclusion_data + data_availability.inclusion_data, + data_availability.blob_id AS "blob_id?" FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number @@ -1719,7 +1724,8 @@ impl BlocksDal<'_, '_> { aggregation_root, local_root, state_diff_hash, - data_availability.inclusion_data + data_availability.inclusion_data, + data_availability.blob_id AS "blob_id?" FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number @@ -1792,7 +1798,8 @@ impl BlocksDal<'_, '_> { aggregation_root, local_root, state_diff_hash, - data_availability.inclusion_data + data_availability.inclusion_data, + data_availability.blob_id AS "blob_id?" 
FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number @@ -1879,7 +1886,8 @@ impl BlocksDal<'_, '_> { aggregation_root, local_root, state_diff_hash, - data_availability.inclusion_data + data_availability.inclusion_data, + data_availability.blob_id AS "blob_id?" FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number
diff --git a/core/lib/dal/src/models/storage_block.rs b/core/lib/dal/src/models/storage_block.rs index 159ed71cc3e9..95625c8b2955 100644 --- a/core/lib/dal/src/models/storage_block.rs +++ b/core/lib/dal/src/models/storage_block.rs @@ -159,6 +159,7 @@ pub(crate) struct StorageL1Batch { pub local_root: Option<Vec<u8>>, pub state_diff_hash: Option<Vec<u8>>, pub inclusion_data: Option<Vec<u8>>, + pub blob_id: Option<String>, } impl StorageL1Batch { @@ -271,6 +272,7 @@ impl TryFrom<StorageL1Batch> for L1BatchMetadata { local_root: batch.local_root.map(|v| H256::from_slice(&v)), aggregation_root: batch.aggregation_root.map(|v| H256::from_slice(&v)), da_inclusion_data: batch.inclusion_data, + da_blob_id: batch.blob_id.map(|s| s.into_bytes()), }) } }
diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs index 6438aeb7f55c..3c8a3b26b935 100644 --- a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs +++ b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs @@ -216,7 +216,16 @@ impl Tokenizable for CommitBatchInfo<'_> { panic!("Custom pubdata DA is incompatible with Rollup mode") } (L1BatchCommitmentMode::Validium, PubdataSendingMode::Custom) => { - vec![PUBDATA_SOURCE_CUSTOM] + let mut operator_da_input = vec![PUBDATA_SOURCE_CUSTOM]; + operator_da_input.extend( + &self + .l1_batch_with_metadata + .metadata + .da_blob_id + .clone() + .unwrap_or_default(), + ); + operator_da_input } + (
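The branch above changes what a Validium chain with custom pubdata DA posts as its operator DA input: one `PUBDATA_SOURCE_CUSTOM` marker byte followed by the raw blob id bytes. A minimal sketch of how a consumer (such as the `get_all_blobs` tool added below) could split that input back apart; the concrete marker value and the helper name are illustrative assumptions, and the hex encoding matches the format the disperser retrieval client below takes:

```rust
// Assumed marker value, shown for illustration only.
const PUBDATA_SOURCE_CUSTOM: u8 = 2;

/// Splits an operator DA input of the form built above (one marker byte
/// followed by the raw blob id) and returns the blob id hex-encoded.
/// Returns `None` if the input is empty or does not start with the
/// custom-DA marker.
fn blob_id_hex_from_da_input(input: &[u8]) -> Option<String> {
    match input.split_first() {
        Some((&PUBDATA_SOURCE_CUSTOM, blob_id)) => Some(hex::encode(blob_id)),
        _ => None,
    }
}
```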
diff --git a/core/lib/types/src/commitment/mod.rs b/core/lib/types/src/commitment/mod.rs index 40532a1e5899..0dbea4742c3d 100644 --- a/core/lib/types/src/commitment/mod.rs +++ b/core/lib/types/src/commitment/mod.rs @@ -103,6 +103,8 @@ pub struct L1BatchMetadata { pub aggregation_root: Option<H256>, /// Data Availability inclusion proof that has to be verified on the settlement layer. pub da_inclusion_data: Option<Vec<u8>>, + /// Data Availability blob id, persisted on L1 so it can be used for chain reconstruction. + pub da_blob_id: Option<Vec<u8>>, } impl L1BatchMetadata {
diff --git a/core/node/da_clients/src/eigen/eigenda-integration.md b/core/node/da_clients/src/eigen/eigenda-integration.md index 985881d3f6c0..30a2667554bc 100644 --- a/core/node/da_clients/src/eigen/eigenda-integration.md +++ b/core/node/da_clients/src/eigen/eigenda-integration.md @@ -168,6 +168,16 @@ docker ps --filter "label=com.docker.compose.project=era-observability" -q | xar zkstack server --chain eigen_da ``` +### Get Blobs from L1 + +To retrieve the blobs sent to EigenDA whose commitments are stored on L1 (so that the chain can be rebuilt from them), run: + +```bash +cd get_all_blobs +cargo run +``` + ### Testing Modify the following flag in `core/lib/config/src/configs/da_dispatcher.rs` (then restart the server)
diff --git a/core/node/eth_sender/src/tests.rs b/core/node/eth_sender/src/tests.rs index 8e5032a69cfc..6a21767f4ea3 100644 --- a/core/node/eth_sender/src/tests.rs +++ b/core/node/eth_sender/src/tests.rs @@ -130,6 +130,7 @@ pub(crate) fn default_l1_batch_metadata() -> L1BatchMetadata { local_root: Some(H256::default()), aggregation_root: Some(H256::default()), da_inclusion_data: Some(vec![]), + da_blob_id: Some(vec![]), } }
diff --git a/core/node/test_utils/src/lib.rs b/core/node/test_utils/src/lib.rs index 2b446fff12c5..e293849f698d 100644 --- a/core/node/test_utils/src/lib.rs +++ b/core/node/test_utils/src/lib.rs @@ -102,6 +102,7 @@ pub fn create_l1_batch_metadata(number: u32) -> L1BatchMetadata { local_root: Some(H256::zero()), aggregation_root: Some(H256::zero()), da_inclusion_data: Some(vec![]), + da_blob_id: Some(vec![]), } }
diff --git a/get_all_blobs/.gitignore b/get_all_blobs/.gitignore new file mode 100644 index 000000000000..a1ee59a11803 --- /dev/null +++ b/get_all_blobs/.gitignore @@ -0,0 +1 @@ +blob_data.json
diff --git a/get_all_blobs/Cargo.toml b/get_all_blobs/Cargo.toml new file mode 100644 index 000000000000..d629650f6ff1 --- /dev/null +++ b/get_all_blobs/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "get_all_blobs" +version.workspace = true +edition.workspace = true +authors.workspace = true +homepage.workspace = true +repository.workspace = true +license.workspace = true +keywords.workspace = true +categories.workspace = true + +[dependencies] +anyhow.workspace = true +tokio = { version = "1", features = ["full"] } +axum.workspace = true +rustls.workspace = true +rlp.workspace = true +hex.workspace = true + +reqwest.workspace = true +serde = { version = "1.0", features = ["derive"] } +serde_json.workspace = true + +tonic = { version = "0.12.1", features = ["tls", "channel", "tls-roots"] } +prost = "0.13.1" +kzgpad-rs = { git = "https://github.com/Layr-Labs/kzgpad-rs.git", tag = "v0.1.0" } +alloy = { version = "0.3", features = ["full"] } +futures = "0.3"
diff --git a/get_all_blobs/abi/commitBatchesSharedBridge.json b/get_all_blobs/abi/commitBatchesSharedBridge.json new file mode 100644 index 000000000000..877ce399c1c6 --- /dev/null +++ b/get_all_blobs/abi/commitBatchesSharedBridge.json @@ -0,0 +1,119 @@ +[ + { + "inputs": [ + { + "internalType": "uint256", + "name": "_chainId", + "type": "uint256" + }, + { + "components": [ + { + "internalType": "uint64", + "name": "batchNumber", + "type": "uint64" + }, + { + "internalType": "bytes32", + "name": "batchHash", + "type": "bytes32" + }, + { + "internalType": "uint64", + "name": "indexRepeatedStorageChanges", + "type": "uint64" + }, + { + "internalType": "uint256", + "name": "numberOfLayer1Txs", + "type": "uint256" + }, + { + "internalType": "bytes32", + "name": "priorityOperationsHash", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "l2LogsTreeRoot", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "timestamp", + "type": "uint256" + }, + { + "internalType": "bytes32", + "name": "commitment", + "type": "bytes32" + } + ], + "internalType": "struct IExecutor.StoredBatchInfo", + "name": "", + "type": "tuple" + }, + { + "components": [ + { + "internalType": "uint64", + "name": "batchNumber", + "type": "uint64" + }, + { + "internalType": "uint64", + "name": "timestamp", + "type": "uint64" + }, + { + "internalType": "uint64", + "name": "indexRepeatedStorageChanges", + "type": "uint64" + }, + { + "internalType": "bytes32", + "name": "newStateRoot", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "numberOfLayer1Txs", + "type": "uint256" + }, + { + "internalType": "bytes32", + "name": "priorityOperationsHash", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "bootloaderHeapInitialContentsHash", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "eventsQueueStateHash", + "type": "bytes32" + }, + { + "internalType": "bytes", + "name": "systemLogs", + "type": "bytes" + }, + { + "internalType": "bytes", + "name": "pubdataCommitments", + "type": "bytes" + } + ], + "internalType": "struct IExecutor.CommitBatchInfo[]", + "name": "_newBatchesData", + "type": "tuple[]" + } + ], + "name": "commitBatchesSharedBridge", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + } +]
diff --git a/get_all_blobs/src/blob_info.rs b/get_all_blobs/src/blob_info.rs new file mode 100644 index 000000000000..05564e750bdd --- /dev/null +++ b/get_all_blobs/src/blob_info.rs @@ -0,0 +1,505 @@ +use std::fmt; + +use rlp::{Decodable, DecoderError, Encodable, Rlp, RlpStream}; + +use crate::generated::{ + common::G1Commitment as DisperserG1Commitment, + disperser::{ + BatchHeader as DisperserBatchHeader, BatchMetadata as DisperserBatchMetadata, + BlobHeader as DisperserBlobHeader, BlobInfo as DisperserBlobInfo, + BlobQuorumParam as DisperserBlobQuorumParam, + BlobVerificationProof as DisperserBlobVerificationProof, + }, +}; + +#[derive(Debug)] +pub enum ConversionError { + NotPresentError, +} + +impl fmt::Display for ConversionError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + ConversionError::NotPresentError => write!(f, "Failed to convert BlobInfo"), + } + } +} + +#[derive(Debug, PartialEq, Clone)] +pub struct G1Commitment { + pub x: Vec<u8>, + pub y: Vec<u8>, +} + +impl G1Commitment { + pub fn into_bytes(&self) -> Vec<u8> { + let mut bytes = vec![]; + bytes.extend(&self.x.len().to_be_bytes()); + bytes.extend(&self.x); + bytes.extend(&self.y.len().to_be_bytes()); + bytes.extend(&self.y); + + bytes + } +} + +impl Decodable for G1Commitment { + fn decode(rlp: &Rlp) -> Result<Self, DecoderError> { + let x: Vec<u8> = rlp.val_at(0)?; // Decode first element as Vec<u8> + let y: Vec<u8> = rlp.val_at(1)?; // Decode second element as Vec<u8> + + Ok(G1Commitment { x, y }) + } +} + +impl Encodable for G1Commitment { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(2); + s.append(&self.x); + s.append(&self.y); + } +} + +impl From<DisperserG1Commitment> for G1Commitment { + fn from(value: DisperserG1Commitment) -> Self { + Self { + x: value.x, + y: value.y, + } + } +} + +#[derive(Debug, PartialEq, Clone)] +pub struct BlobQuorumParam { + pub quorum_number: u32, + pub adversary_threshold_percentage: u32, + pub confirmation_threshold_percentage: u32, + pub chunk_length: u32, +} + +impl BlobQuorumParam { + pub fn into_bytes(&self) -> Vec<u8> { + let mut bytes = vec![]; + bytes.extend(&self.quorum_number.to_be_bytes()); + bytes.extend(&self.adversary_threshold_percentage.to_be_bytes()); + bytes.extend(&self.confirmation_threshold_percentage.to_be_bytes()); + bytes.extend(&self.chunk_length.to_be_bytes()); + + bytes + } +} + +impl Decodable for BlobQuorumParam { + fn decode(rlp: &Rlp) -> Result<Self, DecoderError> { + Ok(BlobQuorumParam { + quorum_number: rlp.val_at(0)?, + adversary_threshold_percentage: rlp.val_at(1)?, + confirmation_threshold_percentage: rlp.val_at(2)?, + chunk_length: rlp.val_at(3)?, + }) + } +} + +impl Encodable for BlobQuorumParam { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(4); + s.append(&self.quorum_number); + s.append(&self.adversary_threshold_percentage); + s.append(&self.confirmation_threshold_percentage); + s.append(&self.chunk_length); + } +} + +impl From<DisperserBlobQuorumParam> for BlobQuorumParam { + fn from(value: DisperserBlobQuorumParam) -> Self { + Self { + quorum_number: value.quorum_number, + adversary_threshold_percentage: value.adversary_threshold_percentage, + confirmation_threshold_percentage: value.confirmation_threshold_percentage, + chunk_length: value.chunk_length, + } + } +} + +#[derive(Debug, PartialEq, Clone)] +pub struct BlobHeader { + pub commitment: G1Commitment, + pub data_length: u32, + pub blob_quorum_params: Vec<BlobQuorumParam>, +} + +impl BlobHeader { + pub fn into_bytes(&self) -> Vec<u8> { + let mut bytes = vec![]; + bytes.extend(self.commitment.into_bytes()); + bytes.extend(&self.data_length.to_be_bytes()); + bytes.extend(&self.blob_quorum_params.len().to_be_bytes()); + + for quorum in &self.blob_quorum_params { + bytes.extend(quorum.into_bytes()); + } + + bytes + } +} + +impl Decodable for BlobHeader { + fn decode(rlp: &Rlp) -> Result<Self, DecoderError> { + let commitment: G1Commitment = rlp.val_at(0)?; + let data_length: u32 = rlp.val_at(1)?; + let blob_quorum_params: Vec<BlobQuorumParam> = rlp.list_at(2)?; + + Ok(BlobHeader { + commitment, + data_length, + blob_quorum_params, + }) + } +} + +impl Encodable for BlobHeader { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(3); + s.append(&self.commitment); + s.append(&self.data_length); + s.append_list(&self.blob_quorum_params); + } +} + +impl TryFrom<DisperserBlobHeader> for BlobHeader { + type Error = ConversionError; + fn try_from(value: DisperserBlobHeader) -> Result<Self, Self::Error> { + if value.commitment.is_none() { + return Err(ConversionError::NotPresentError); + } + let blob_quorum_params: Vec<BlobQuorumParam> = value + .blob_quorum_params + .iter() + .map(|param| BlobQuorumParam::from(param.clone())) + .collect(); + Ok(Self { + commitment: G1Commitment::from(value.commitment.unwrap()), + data_length: value.data_length, + blob_quorum_params, + }) + } +} + +#[derive(Debug, PartialEq, Clone)] +pub struct BatchHeader { + pub batch_root: Vec<u8>, + pub quorum_numbers: Vec<u8>, + pub quorum_signed_percentages: Vec<u8>, + pub reference_block_number: u32, +} + +impl BatchHeader { + pub fn into_bytes(&self) -> Vec<u8> { + let mut bytes = vec![]; + bytes.extend(&self.batch_root.len().to_be_bytes()); + bytes.extend(&self.batch_root); + bytes.extend(&self.quorum_numbers.len().to_be_bytes()); + bytes.extend(&self.quorum_numbers); + bytes.extend(&self.quorum_signed_percentages.len().to_be_bytes()); + bytes.extend(&self.quorum_signed_percentages); + bytes.extend(&self.reference_block_number.to_be_bytes()); + + bytes + } +} + +impl Decodable for BatchHeader { + fn decode(rlp: &Rlp) -> Result<Self, DecoderError> { + Ok(BatchHeader { + batch_root: rlp.val_at(0)?, + quorum_numbers: rlp.val_at(1)?, + quorum_signed_percentages: rlp.val_at(2)?, + reference_block_number: rlp.val_at(3)?, + }) + } +} + +impl Encodable for BatchHeader { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(4); + s.append(&self.batch_root); + s.append(&self.quorum_numbers); + s.append(&self.quorum_signed_percentages); + s.append(&self.reference_block_number); + } +} + +impl From<DisperserBatchHeader> for BatchHeader { + fn from(value: DisperserBatchHeader) -> Self { + Self { + batch_root: value.batch_root, + quorum_numbers: value.quorum_numbers, + quorum_signed_percentages: value.quorum_signed_percentages, + reference_block_number: value.reference_block_number, + } + } +} + +#[derive(Debug, PartialEq, Clone)] +pub struct BatchMetadata { + pub batch_header: BatchHeader, + pub signatory_record_hash: Vec<u8>, + pub fee: Vec<u8>, + pub confirmation_block_number: u32, + pub batch_header_hash: Vec<u8>, +} + +impl BatchMetadata { + pub fn into_bytes(&self) -> Vec<u8> { + let mut bytes = vec![]; + bytes.extend(self.batch_header.into_bytes()); + bytes.extend(&self.signatory_record_hash); + bytes.extend(&self.confirmation_block_number.to_be_bytes()); + + bytes + } +} + +impl Decodable for BatchMetadata { + fn decode(rlp: &Rlp) -> Result<Self, DecoderError> { + let batch_header: BatchHeader = rlp.val_at(0)?; + + Ok(BatchMetadata { + batch_header, + signatory_record_hash: rlp.val_at(1)?, + fee: rlp.val_at(2)?, + confirmation_block_number: rlp.val_at(3)?, + batch_header_hash: rlp.val_at(4)?, + }) + } +} + +impl Encodable for BatchMetadata { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(5); + s.append(&self.batch_header); + s.append(&self.signatory_record_hash); + s.append(&self.fee); + s.append(&self.confirmation_block_number); + s.append(&self.batch_header_hash); + } +} + +impl TryFrom<DisperserBatchMetadata> for BatchMetadata { + type Error = ConversionError; + fn try_from(value: DisperserBatchMetadata) -> Result<Self, Self::Error> { + if value.batch_header.is_none() { + return Err(ConversionError::NotPresentError); + } + Ok(Self { + batch_header: BatchHeader::from(value.batch_header.unwrap()), + signatory_record_hash: value.signatory_record_hash, + fee: value.fee, + confirmation_block_number: value.confirmation_block_number, + batch_header_hash: value.batch_header_hash, + }) + } +} + +#[derive(Debug, PartialEq, Clone)] +pub struct BlobVerificationProof { + pub batch_id: u32, + pub blob_index: u32, + pub batch_medatada: BatchMetadata, + pub inclusion_proof: Vec<u8>, + pub quorum_indexes: Vec<u8>, +} + +impl BlobVerificationProof { + pub fn into_bytes(&self) -> Vec<u8> { + let mut bytes = vec![]; + bytes.extend(&self.batch_id.to_be_bytes()); + bytes.extend(&self.blob_index.to_be_bytes()); + bytes.extend(self.batch_medatada.into_bytes()); + bytes.extend(&self.inclusion_proof.len().to_be_bytes()); + bytes.extend(&self.inclusion_proof); + bytes.extend(&self.quorum_indexes.len().to_be_bytes()); + bytes.extend(&self.quorum_indexes); + + bytes + } +} + +impl Decodable for BlobVerificationProof { + fn decode(rlp: &Rlp) -> Result<Self, DecoderError> { + Ok(BlobVerificationProof { + batch_id: rlp.val_at(0)?, + blob_index: rlp.val_at(1)?, + batch_medatada: rlp.val_at(2)?, + inclusion_proof: rlp.val_at(3)?, + quorum_indexes: rlp.val_at(4)?, + }) + } +} + +impl Encodable for BlobVerificationProof { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(5); + s.append(&self.batch_id); + s.append(&self.blob_index); + s.append(&self.batch_medatada); + s.append(&self.inclusion_proof); + s.append(&self.quorum_indexes); + } +} + +impl TryFrom<DisperserBlobVerificationProof> for BlobVerificationProof { + type Error = ConversionError; + fn try_from(value: DisperserBlobVerificationProof) -> Result<Self, Self::Error> { + if value.batch_metadata.is_none() { + return Err(ConversionError::NotPresentError); + } + Ok(Self { + batch_id: value.batch_id, + blob_index: value.blob_index, + batch_medatada: BatchMetadata::try_from(value.batch_metadata.unwrap())?, + inclusion_proof: value.inclusion_proof, + quorum_indexes: value.quorum_indexes, + }) + } +} + +#[derive(Debug, PartialEq, Clone)] +pub struct BlobInfo { + pub blob_header: BlobHeader, + pub blob_verification_proof: BlobVerificationProof, +} + +impl BlobInfo { + pub fn into_bytes(&self) -> Vec<u8> { + let mut bytes = vec![]; + let blob_header_bytes = self.blob_header.into_bytes(); + bytes.extend(blob_header_bytes.len().to_be_bytes()); + bytes.extend(blob_header_bytes); + let blob_verification_proof_bytes = self.blob_verification_proof.into_bytes(); + bytes.extend(blob_verification_proof_bytes); + bytes + } +} + +impl Decodable for BlobInfo { + fn decode(rlp: &Rlp) -> Result<Self, DecoderError> { + let blob_header: BlobHeader = rlp.val_at(0)?; + let blob_verification_proof: BlobVerificationProof = rlp.val_at(1)?; + + Ok(BlobInfo { + blob_header, + blob_verification_proof, + }) + } +} + +impl Encodable for BlobInfo { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(2); + s.append(&self.blob_header); + s.append(&self.blob_verification_proof); + } +} + +impl TryFrom<DisperserBlobInfo> for BlobInfo { + type Error = ConversionError; + fn try_from(value: DisperserBlobInfo) -> Result<Self, Self::Error> { + if value.blob_header.is_none() || value.blob_verification_proof.is_none() { + return Err(ConversionError::NotPresentError); + } + Ok(Self { + blob_header: BlobHeader::try_from(value.blob_header.unwrap())?, + blob_verification_proof: BlobVerificationProof::try_from( + value.blob_verification_proof.unwrap(), + )?, + }) + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_blob_info_encoding_and_decoding() { + let blob_info = BlobInfo { + blob_header: BlobHeader { + commitment: G1Commitment { + x: vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, + ], + y: vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, + ], + }, + data_length: 4, + blob_quorum_params: vec![ + BlobQuorumParam { + quorum_number: 0, + adversary_threshold_percentage: 33, + confirmation_threshold_percentage: 55, + chunk_length: 1, + }, + BlobQuorumParam { + quorum_number: 1, + adversary_threshold_percentage: 33, + confirmation_threshold_percentage: 55, + chunk_length: 1, + }, + ], + }, + blob_verification_proof: BlobVerificationProof { + batch_id: 66507, + blob_index: 92, + batch_medatada: BatchMetadata { + batch_header: BatchHeader { + batch_root: vec![ + 179, 187, 53, 98, 192, 80, 151, 28, 125, 192, 115, 29, 129, 238, 216, + 8, 213, 210, 203, 143, 181, 19, 146, 113, 98, 131, 39, 238, 149, 248, + 211, 43, + ], + quorum_numbers: vec![0, 1], + quorum_signed_percentages: vec![100, 100], + reference_block_number: 2624794, + }, + signatory_record_hash: vec![ + 172, 32, 172, 142, 197, 52, 84, 143, 120, 26, 190, 9, 143, 217, 62, 19, 17, + 107, 105, 67, 203, 5, 172, 249, 6, 60, 105, 240, 134, 34, 66, 133, + ], + fee: vec![0], + confirmation_block_number: 2624876, + batch_header_hash: vec![ + 122, 115, 2, 85, 233, 75, 121, 85, 51, 81, 248, 170, 198, 252, 42, 16, 1, + 146, 96, 218, 159, 44, 41, 40, 94, 247, 147, 11, 255, 68, 40, 177, + ], + }, + inclusion_proof: vec![ + 203, 160, 237, 48, 117, 255, 75, 254, 117, 144, 164, 77, 29, 146, 36, 48, 190, + 140, 50, 100, 144, 237, 125, 125, 75, 54, 210, 247, 147, 23, 48, 189, 120, 4, + 125, 123, 195, 244, 207, 239, 145, 109, 0, 21, 11, 162, 109, 79, 192, 100, 138, + 157, 203, 22, 17, 114, 234, 72, 174, 231, 209, 133, 99, 118, 201, 160, 137, + 128, 112, 84, 34, 136, 174, 139, 96, 26, 246, 148, 134, 52, 200, 229, 160, 145, + 5, 120, 18, 187, 51, 11, 109, 91, 237, 171, 215, 207, 90, 95, 146, 54, 135, + 166, 66, 157, 255, 237, 69, 183, 141, 45, 162, 145, 71, 16, 87, 184, 120, 84, + 156, 220, 159, 4, 99, 48, 191, 203, 136, 112, 127, 226, 192, 184, 110, 6, 177, + 182, 109, 207, 197, 239, 161, 132, 17, 89, 56, 137, 205, 202, 101, 97, 60, 162, + 253, 23, 169, 75, 236, 211, 126, 121, 132, 191, 68, 167, 200, 16, 154, 149, + 202, 197, 7, 191, 26, 8, 67, 3, 37, 137, 16, 153, 30, 209, 238, 53, 233, 148, + 198, 253, 94, 216, 73, 25, 190, 205, 132, 208, 255, 219, 170, 98, 17, 160, 179, + 183, 200, 17, 99, 36, 130, 216, 223, 72, 222, 250, 73, 78, 79, 72, 253, 105, + 245, 84, 244, 196, + ], + quorum_indexes: vec![0, 1], + }, + }; + + let encoded_blob_info = rlp::encode(&blob_info); + let decoded_blob_info: BlobInfo = rlp::decode(&encoded_blob_info).unwrap(); + + assert_eq!(blob_info, decoded_blob_info); + } +}
diff --git a/get_all_blobs/src/client.rs b/get_all_blobs/src/client.rs new file mode 100644 index 000000000000..2b95d72b9400 --- /dev/null +++ b/get_all_blobs/src/client.rs @@ -0,0 +1,52 @@ +use std::str::FromStr; + +use tonic::transport::{Channel, ClientTlsConfig, Endpoint}; + +use crate::{ + blob_info::BlobInfo, + generated::{disperser, disperser::disperser_client::DisperserClient}, +}; + +#[derive(Debug, Clone)] +pub struct EigenClientRetriever { + client: DisperserClient<Channel>, +} + +impl EigenClientRetriever { + pub async fn new(disperser_rpc: &str) -> anyhow::Result<Self> { + let endpoint = Endpoint::from_str(disperser_rpc)?.tls_config(ClientTlsConfig::new())?; + let client = DisperserClient::connect(endpoint) + .await + .map_err(|e| anyhow::anyhow!("Failed to connect to Disperser server: {}", e))?; + + Ok(EigenClientRetriever { client }) + } + + pub async fn get_blob_data(&self, blob_id: &str) -> anyhow::Result<Option<Vec<u8>>> { + // The blob id is the hex-encoded RLP of the `BlobInfo` persisted on L1. + let commit = hex::decode(blob_id)?; + + let blob_info: BlobInfo = rlp::decode(&commit)?; + let blob_index = blob_info.blob_verification_proof.blob_index; + let batch_header_hash = blob_info + .blob_verification_proof + .batch_medatada + .batch_header_hash; + let get_response = self + .client + .clone() + .retrieve_blob(disperser::RetrieveBlobRequest { + batch_header_hash, + blob_index, + }) + .await? + .into_inner(); + + if get_response.data.is_empty() { + anyhow::bail!("Empty data returned from Disperser"); + } + + let data = kzgpad_rs::remove_empty_byte_from_padded_bytes(&get_response.data); + Ok(Some(data)) + } +}
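A minimal usage sketch for the retriever above; the disperser endpoint URL is illustrative (not part of this PR), and the blob id is assumed to be the hex-encoded RLP of a `BlobInfo` recovered from L1 calldata as described earlier:

```rust
// Hypothetical driver; the endpoint and blob id are placeholders.
async fn fetch_one(blob_id_hex: &str) -> anyhow::Result<()> {
    let retriever =
        EigenClientRetriever::new("https://disperser-holesky.eigenda.xyz:443").await?;
    if let Some(data) = retriever.get_blob_data(blob_id_hex).await? {
        println!("retrieved {} bytes for blob {blob_id_hex}", data.len());
    }
    Ok(())
}
```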
+    #[prost(bytes = "vec", tag = "2")]
+    pub y: ::prost::alloc::vec::Vec<u8>,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct G2Commitment {
+    /// The A0 element of the X coordinate of G2 point.
+    #[prost(bytes = "vec", tag = "1")]
+    pub x_a0: ::prost::alloc::vec::Vec<u8>,
+    /// The A1 element of the X coordinate of G2 point.
+    #[prost(bytes = "vec", tag = "2")]
+    pub x_a1: ::prost::alloc::vec::Vec<u8>,
+    /// The A0 element of the Y coordinate of G2 point.
+    #[prost(bytes = "vec", tag = "3")]
+    pub y_a0: ::prost::alloc::vec::Vec<u8>,
+    /// The A1 element of the Y coordinate of G2 point.
+    #[prost(bytes = "vec", tag = "4")]
+    pub y_a1: ::prost::alloc::vec::Vec<u8>,
+}
+/// BlobCommitment represents commitment of a specific blob, containing its
+/// KZG commitment, degree proof, the actual degree, and data length in number of symbols.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BlobCommitment {
+    #[prost(message, optional, tag = "1")]
+    pub commitment: ::core::option::Option<G1Commitment>,
+    #[prost(message, optional, tag = "2")]
+    pub length_commitment: ::core::option::Option<G2Commitment>,
+    #[prost(message, optional, tag = "3")]
+    pub length_proof: ::core::option::Option<G2Commitment>,
+    #[prost(uint32, tag = "4")]
+    pub data_length: u32,
+}
+/// BlobCertificate is what gets attested by the network
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BlobCertificate {
+    #[prost(uint32, tag = "1")]
+    pub version: u32,
+    #[prost(bytes = "vec", tag = "2")]
+    pub blob_key: ::prost::alloc::vec::Vec<u8>,
+    #[prost(message, optional, tag = "3")]
+    pub blob_commitment: ::core::option::Option<BlobCommitment>,
+    #[prost(uint32, repeated, tag = "4")]
+    pub quorum_numbers: ::prost::alloc::vec::Vec<u32>,
+    #[prost(uint32, tag = "5")]
+    pub reference_block_number: u32,
+}
+/// A chunk of a blob.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ChunkData {
+    #[prost(bytes = "vec", tag = "1")]
+    pub data: ::prost::alloc::vec::Vec<u8>,
+}
diff --git a/get_all_blobs/src/generated/disperser.rs b/get_all_blobs/src/generated/disperser.rs
new file mode 100644
index 000000000000..16c330ae8e50
--- /dev/null
+++ b/get_all_blobs/src/generated/disperser.rs
@@ -0,0 +1,486 @@
+// This file is @generated by prost-build.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AuthenticatedRequest {
+    #[prost(oneof = "authenticated_request::Payload", tags = "1, 2")]
+    pub payload: ::core::option::Option<authenticated_request::Payload>,
+}
+/// Nested message and enum types in `AuthenticatedRequest`.
+pub mod authenticated_request {
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum Payload {
+        #[prost(message, tag = "1")]
+        DisperseRequest(super::DisperseBlobRequest),
+        #[prost(message, tag = "2")]
+        AuthenticationData(super::AuthenticationData),
+    }
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AuthenticatedReply {
+    #[prost(oneof = "authenticated_reply::Payload", tags = "1, 2")]
+    pub payload: ::core::option::Option<authenticated_reply::Payload>,
+}
+/// Nested message and enum types in `AuthenticatedReply`.
+pub mod authenticated_reply {
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum Payload {
+        #[prost(message, tag = "1")]
+        BlobAuthHeader(super::BlobAuthHeader),
+        #[prost(message, tag = "2")]
+        DisperseReply(super::DisperseBlobReply),
+    }
+}
+/// BlobAuthHeader contains information about the blob for the client to verify and sign.
+/// - Once payments are enabled, the BlobAuthHeader will contain the KZG commitment to the blob, which the client
+///   will verify and sign. Having the client verify the KZG commitment instead of calculating it avoids
+///   the need for the client to have the KZG structured reference string (SRS), which can be large.
+///   The signed KZG commitment prevents the disperser from sending a different blob to the DA Nodes
+///   than the one the client sent.
+/// - In the meantime, the BlobAuthHeader contains a simple challenge parameter that is used to prevent
+///   replay attacks in the event that a signature is leaked.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BlobAuthHeader {
+    #[prost(uint32, tag = "1")]
+    pub challenge_parameter: u32,
+}
+/// AuthenticationData contains the signature of the BlobAuthHeader.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AuthenticationData {
+    #[prost(bytes = "vec", tag = "1")]
+    pub authentication_data: ::prost::alloc::vec::Vec<u8>,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct DisperseBlobRequest {
+    /// The data to be dispersed.
+    /// The size of data must be <= 2MiB. Every 32-byte chunk of the data is interpreted as an integer in big-endian format
+    /// where the lower address has more significant bits. The integer must stay in the valid range to be interpreted
+    /// as a field element on the bn254 curve. The valid range is
+    /// 0 <= x < 21888242871839275222246405745257275088548364400416034343698204186575808495617
+    /// containing slightly less than 254 bits and more than 253 bits. If any one of the 32-byte chunks is outside the range,
+    /// the whole request is deemed invalid and rejected.
+    #[prost(bytes = "vec", tag = "1")]
+    pub data: ::prost::alloc::vec::Vec<u8>,
+    /// The quorums to which the blob will be sent, in addition to the required quorums which are configured
+    /// on the EigenDA smart contract. If required quorums are included here, an error will be returned.
+    /// The disperser will ensure that the encoded blobs for each quorum are all processed
+    /// within the same batch.
+    #[prost(uint32, repeated, tag = "2")]
+    pub custom_quorum_numbers: ::prost::alloc::vec::Vec<u32>,
+    /// The account ID of the client. This should be a hex-encoded string of the ECDSA public key
+    /// corresponding to the key used by the client to sign the BlobAuthHeader.
+    #[prost(string, tag = "3")]
+    pub account_id: ::prost::alloc::string::String,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct DisperseBlobReply {
+    /// The status of the blob associated with the request_id.
+    #[prost(enumeration = "BlobStatus", tag = "1")]
+    pub result: i32,
+    /// The request ID generated by the disperser.
+    /// Once a request is accepted (although not processed), a unique request ID will be
+    /// generated.
+    /// Two different DisperseBlobRequests (determined by the hash of the DisperseBlobRequest)
+    /// will have different IDs, and the same DisperseBlobRequest sent repeatedly at different
+    /// times will also have different IDs.
+    /// The client should use this ID to query the processing status of the request (via
+    /// the GetBlobStatus API).
+    #[prost(bytes = "vec", tag = "2")]
+    pub request_id: ::prost::alloc::vec::Vec<u8>,
+}
+/// BlobStatusRequest is used to query the status of a blob.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BlobStatusRequest {
+    #[prost(bytes = "vec", tag = "1")]
+    pub request_id: ::prost::alloc::vec::Vec<u8>,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BlobStatusReply {
+    /// The status of the blob.
+    #[prost(enumeration = "BlobStatus", tag = "1")]
+    pub status: i32,
+    /// The blob info needed for clients to confirm the blob against the EigenDA contracts.
+    #[prost(message, optional, tag = "2")]
+    pub info: ::core::option::Option<BlobInfo>,
+}
+/// RetrieveBlobRequest contains parameters to retrieve the blob.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct RetrieveBlobRequest {
+    #[prost(bytes = "vec", tag = "1")]
+    pub batch_header_hash: ::prost::alloc::vec::Vec<u8>,
+    #[prost(uint32, tag = "2")]
+    pub blob_index: u32,
+}
+/// RetrieveBlobReply contains the retrieved blob data
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct RetrieveBlobReply {
+    #[prost(bytes = "vec", tag = "1")]
+    pub data: ::prost::alloc::vec::Vec<u8>,
+}
+/// BlobInfo contains information needed to confirm the blob against the EigenDA contracts
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BlobInfo {
+    #[prost(message, optional, tag = "1")]
+    pub blob_header: ::core::option::Option<BlobHeader>,
+    #[prost(message, optional, tag = "2")]
+    pub blob_verification_proof: ::core::option::Option<BlobVerificationProof>,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BlobHeader {
+    /// KZG commitment of the blob.
+    #[prost(message, optional, tag = "1")]
+    pub commitment: ::core::option::Option<super::common::G1Commitment>,
+    /// The length of the blob in symbols (each symbol is 32 bytes).
+    #[prost(uint32, tag = "2")]
+    pub data_length: u32,
+    /// The params of the quorums that this blob participates in.
+    #[prost(message, repeated, tag = "3")]
+    pub blob_quorum_params: ::prost::alloc::vec::Vec<BlobQuorumParam>,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BlobQuorumParam {
+    /// The ID of the quorum.
+    #[prost(uint32, tag = "1")]
+    pub quorum_number: u32,
+    /// The max percentage of stake within the quorum that can be held by or delegated
+    /// to adversarial operators. Currently, this and the next parameter are standardized
+    /// across the quorum using values read from the EigenDA contracts.
+    #[prost(uint32, tag = "2")]
+    pub adversary_threshold_percentage: u32,
+    /// The min percentage of stake that must attest in order to consider
+    /// the dispersal successful.
+    #[prost(uint32, tag = "3")]
+    pub confirmation_threshold_percentage: u32,
+    /// The length of each chunk.
+    #[prost(uint32, tag = "4")]
+    pub chunk_length: u32,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BlobVerificationProof {
+    /// batch_id is an incremental ID assigned to a batch by EigenDAServiceManager
+    #[prost(uint32, tag = "1")]
+    pub batch_id: u32,
+    /// The index of the blob in the batch (which is logically an ordered list of blobs).
+    #[prost(uint32, tag = "2")]
+    pub blob_index: u32,
+    #[prost(message, optional, tag = "3")]
+    pub batch_metadata: ::core::option::Option<BatchMetadata>,
+    /// inclusion_proof is a merkle proof for a blob header's inclusion in a batch
+    #[prost(bytes = "vec", tag = "4")]
+    pub inclusion_proof: ::prost::alloc::vec::Vec<u8>,
+    /// indexes of quorums in BatchHeader.quorum_numbers that match the quorums in BlobHeader.blob_quorum_params
+    /// Ex. BlobHeader.blob_quorum_params = [
+    ///     {
+    ///         quorum_number = 0,
+    ///         ...
+    ///     },
+    ///     {
+    ///         quorum_number = 3,
+    ///         ...
+    ///     },
+    ///     {
+    ///         quorum_number = 5,
+    ///         ...
+    ///     },
+    /// ]
+    /// BatchHeader.quorum_numbers = \[0, 5, 3\] => 0x000503
+    /// Then, quorum_indexes = \[0, 2, 1\] => 0x000201
+    #[prost(bytes = "vec", tag = "5")]
+    pub quorum_indexes: ::prost::alloc::vec::Vec<u8>,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BatchMetadata {
+    #[prost(message, optional, tag = "1")]
+    pub batch_header: ::core::option::Option<BatchHeader>,
+    /// The hash of all public keys of the operators that did not sign the batch.
+    #[prost(bytes = "vec", tag = "2")]
+    pub signatory_record_hash: ::prost::alloc::vec::Vec<u8>,
+    /// The fee payment paid by users for dispersing this batch. It's the bytes
+    /// representation of a big.Int value.
+    #[prost(bytes = "vec", tag = "3")]
+    pub fee: ::prost::alloc::vec::Vec<u8>,
+    /// The Ethereum block number at which the batch is confirmed onchain.
+    #[prost(uint32, tag = "4")]
+    pub confirmation_block_number: u32,
+    /// This is the hash of the ReducedBatchHeader defined onchain, see:
+    /// This is the message that the operators will sign their signatures on.
+    #[prost(bytes = "vec", tag = "5")]
+    pub batch_header_hash: ::prost::alloc::vec::Vec<u8>,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BatchHeader {
+    /// The root of the merkle tree with the hashes of blob headers as leaves.
+    #[prost(bytes = "vec", tag = "1")]
+    pub batch_root: ::prost::alloc::vec::Vec<u8>,
+    /// All quorums associated with blobs in this batch. Sorted in ascending order.
+    /// Ex. \[0, 2, 1\] => 0x000102
+    #[prost(bytes = "vec", tag = "2")]
+    pub quorum_numbers: ::prost::alloc::vec::Vec<u8>,
+    /// The percentage of stake that has signed for this batch.
+    /// The quorum_signed_percentages\[i\] is the percentage for the quorum_numbers\[i\].
+    #[prost(bytes = "vec", tag = "3")]
+    pub quorum_signed_percentages: ::prost::alloc::vec::Vec<u8>,
+    /// The Ethereum block number at which the batch was created.
+    /// The Disperser will encode and disperse the blobs based on the onchain info
+    /// (e.g. operator stakes) at this block number.
+    #[prost(uint32, tag = "4")]
+    pub reference_block_number: u32,
+}
+/// BlobStatus represents the status of a blob.
+/// The status of a blob is updated as the blob is processed by the disperser.
+/// The status of a blob can be queried by the client using the GetBlobStatus API.
+/// Intermediate states are states that the blob can be in while being processed, and it can be updated to a different state:
+/// - PROCESSING
+/// - DISPERSING
+/// - CONFIRMED
+/// Terminal states are states that will not be updated to a different state:
+/// - FAILED
+/// - FINALIZED
+/// - INSUFFICIENT_SIGNATURES
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum BlobStatus {
+    Unknown = 0,
+    /// PROCESSING means that the blob is currently being processed by the disperser
+    Processing = 1,
+    /// CONFIRMED means that the blob has been dispersed to DA Nodes and the dispersed
+    /// batch containing the blob has been confirmed onchain
+    Confirmed = 2,
+    /// FAILED means that the blob has failed permanently (for reasons other than insufficient
+    /// signatures, which is a separate state)
+    Failed = 3,
+    /// FINALIZED means that the block containing the blob's confirmation transaction has been finalized on Ethereum
+    Finalized = 4,
+    /// INSUFFICIENT_SIGNATURES means that the confirmation threshold for the blob was not met
+    /// for at least one quorum.
+    InsufficientSignatures = 5,
+    /// DISPERSING means that the blob is currently being dispersed to DA Nodes and being confirmed onchain
+    Dispersing = 6,
+}
+impl BlobStatus {
+    /// String value of the enum field names used in the ProtoBuf definition.
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            BlobStatus::Unknown => "UNKNOWN",
+            BlobStatus::Processing => "PROCESSING",
+            BlobStatus::Confirmed => "CONFIRMED",
+            BlobStatus::Failed => "FAILED",
+            BlobStatus::Finalized => "FINALIZED",
+            BlobStatus::InsufficientSignatures => "INSUFFICIENT_SIGNATURES",
+            BlobStatus::Dispersing => "DISPERSING",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "UNKNOWN" => Some(Self::Unknown),
+            "PROCESSING" => Some(Self::Processing),
+            "CONFIRMED" => Some(Self::Confirmed),
+            "FAILED" => Some(Self::Failed),
+            "FINALIZED" => Some(Self::Finalized),
+            "INSUFFICIENT_SIGNATURES" => Some(Self::InsufficientSignatures),
+            "DISPERSING" => Some(Self::Dispersing),
+            _ => None,
+        }
+    }
+}
+/// Generated client implementations.
+pub mod disperser_client {
+    #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]
+    use tonic::codegen::{http::Uri, *};
+    /// Disperser defines the public APIs for dispersing blobs.
+    #[derive(Debug, Clone)]
+    pub struct DisperserClient<T> {
+        inner: tonic::client::Grpc<T>,
+    }
+    impl DisperserClient<tonic::transport::Channel> {
+        /// Attempt to create a new client by connecting to a given endpoint.
+        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
+        where
+            D: TryInto<tonic::transport::Endpoint>,
+            D::Error: Into<StdError>,
+        {
+            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
+            Ok(Self::new(conn))
+        }
+    }
+    impl<T> DisperserClient<T>
+    where
+        T: tonic::client::GrpcService<tonic::body::BoxBody>,
+        T::Error: Into<StdError>,
+        T::ResponseBody: Body<Data = Bytes> + Send + 'static,
+        <T::ResponseBody as Body>::Error: Into<StdError> + Send,
+    {
+        pub fn new(inner: T) -> Self {
+            let inner = tonic::client::Grpc::new(inner);
+            Self { inner }
+        }
+        pub fn with_origin(inner: T, origin: Uri) -> Self {
+            let inner = tonic::client::Grpc::with_origin(inner, origin);
+            Self { inner }
+        }
+        pub fn with_interceptor<F>(
+            inner: T,
+            interceptor: F,
+        ) -> DisperserClient<InterceptedService<T, F>>
+        where
+            F: tonic::service::Interceptor,
+            T::ResponseBody: Default,
+            T: tonic::codegen::Service<
+                http::Request<tonic::body::BoxBody>,
+                Response = http::Response<
+                    <T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody,
+                >,
+            >,
+            <T as tonic::codegen::Service<http::Request<tonic::body::BoxBody>>>::Error:
+                Into<StdError> + Send + Sync,
+        {
+            DisperserClient::new(InterceptedService::new(inner, interceptor))
+        }
+        /// Compress requests with the given encoding.
+        ///
+        /// This requires the server to support it otherwise it might respond with an
+        /// error.
+        #[must_use]
+        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
+            self.inner = self.inner.send_compressed(encoding);
+            self
+        }
+        /// Enable decompressing responses.
+        #[must_use]
+        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
+            self.inner = self.inner.accept_compressed(encoding);
+            self
+        }
+        /// Limits the maximum size of a decoded message.
+        ///
+        /// Default: `4MB`
+        #[must_use]
+        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
+            self.inner = self.inner.max_decoding_message_size(limit);
+            self
+        }
+        /// Limits the maximum size of an encoded message.
+        ///
+        /// Default: `usize::MAX`
+        #[must_use]
+        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
+            self.inner = self.inner.max_encoding_message_size(limit);
+            self
+        }
+        /// This API accepts blobs to disperse from clients.
+        /// This executes the dispersal async, i.e. it returns once the request
+        /// is accepted. The client could use the GetBlobStatus() API to poll the
+        /// processing status of the blob.
+        pub async fn disperse_blob(
+            &mut self,
+            request: impl tonic::IntoRequest<super::DisperseBlobRequest>,
+        ) -> std::result::Result<tonic::Response<super::DisperseBlobReply>, tonic::Status> {
+            self.inner.ready().await.map_err(|e| {
+                tonic::Status::new(
+                    tonic::Code::Unknown,
+                    format!("Service was not ready: {}", e.into()),
+                )
+            })?;
+            let codec = tonic::codec::ProstCodec::default();
+            let path = http::uri::PathAndQuery::from_static("/disperser.Disperser/DisperseBlob");
+            let mut req = request.into_request();
+            req.extensions_mut()
+                .insert(GrpcMethod::new("disperser.Disperser", "DisperseBlob"));
+            self.inner.unary(req, path, codec).await
+        }
+        /// DisperseBlobAuthenticated is similar to DisperseBlob, except that it requires the
+        /// client to authenticate itself via the AuthenticationData message. The protocol is as follows:
+        /// 1. The client sends a DisperseBlobAuthenticated request with the DisperseBlobRequest message
+        /// 2. The Disperser sends back a BlobAuthHeader message containing information for the client to
+        ///    verify and sign.
+        /// 3. The client verifies the BlobAuthHeader and sends back the signed BlobAuthHeader in an
+        ///    AuthenticationData message.
+        /// 4. The Disperser verifies the signature and returns a DisperseBlobReply message.
+        pub async fn disperse_blob_authenticated(
+            &mut self,
+            request: impl tonic::IntoStreamingRequest<Message = super::AuthenticatedRequest>,
+        ) -> std::result::Result<
+            tonic::Response<tonic::codec::Streaming<super::AuthenticatedReply>>,
+            tonic::Status,
+        > {
+            self.inner.ready().await.map_err(|e| {
+                tonic::Status::new(
+                    tonic::Code::Unknown,
+                    format!("Service was not ready: {}", e.into()),
+                )
+            })?;
+            let codec = tonic::codec::ProstCodec::default();
+            let path = http::uri::PathAndQuery::from_static(
+                "/disperser.Disperser/DisperseBlobAuthenticated",
+            );
+            let mut req = request.into_streaming_request();
+            req.extensions_mut().insert(GrpcMethod::new(
+                "disperser.Disperser",
+                "DisperseBlobAuthenticated",
+            ));
+            self.inner.streaming(req, path, codec).await
+        }
+        /// This API is meant to be polled for the blob status.
+        pub async fn get_blob_status(
+            &mut self,
+            request: impl tonic::IntoRequest<super::BlobStatusRequest>,
+        ) -> std::result::Result<tonic::Response<super::BlobStatusReply>, tonic::Status> {
+            self.inner.ready().await.map_err(|e| {
+                tonic::Status::new(
+                    tonic::Code::Unknown,
+                    format!("Service was not ready: {}", e.into()),
+                )
+            })?;
+            let codec = tonic::codec::ProstCodec::default();
+            let path = http::uri::PathAndQuery::from_static("/disperser.Disperser/GetBlobStatus");
+            let mut req = request.into_request();
+            req.extensions_mut()
+                .insert(GrpcMethod::new("disperser.Disperser", "GetBlobStatus"));
+            self.inner.unary(req, path, codec).await
+        }
+        /// This retrieves the requested blob from the Disperser's backend.
+        /// This is a more efficient way to retrieve blobs than directly retrieving
+        /// from the DA Nodes (see detail about this approach in
+        /// api/proto/retriever/retriever.proto).
+        /// The blob should have been initially dispersed via this Disperser service
+        /// for this API to work.
+        pub async fn retrieve_blob(
+            &mut self,
+            request: impl tonic::IntoRequest<super::RetrieveBlobRequest>,
+        ) -> std::result::Result<tonic::Response<super::RetrieveBlobReply>, tonic::Status> {
+            self.inner.ready().await.map_err(|e| {
+                tonic::Status::new(
+                    tonic::Code::Unknown,
+                    format!("Service was not ready: {}", e.into()),
+                )
+            })?;
+            let codec = tonic::codec::ProstCodec::default();
+            let path = http::uri::PathAndQuery::from_static("/disperser.Disperser/RetrieveBlob");
+            let mut req = request.into_request();
+            req.extensions_mut()
+                .insert(GrpcMethod::new("disperser.Disperser", "RetrieveBlob"));
+            self.inner.unary(req, path, codec).await
+        }
+    }
+}
diff --git a/get_all_blobs/src/generated/mod.rs b/get_all_blobs/src/generated/mod.rs
new file mode 100644
index 000000000000..d77a351741d9
--- /dev/null
+++ b/get_all_blobs/src/generated/mod.rs
@@ -0,0 +1,3 @@
+pub(crate) mod common;
+pub(crate) mod disperser;
+// pub(crate) mod eigendaservicemanager;
diff --git a/get_all_blobs/src/main.rs b/get_all_blobs/src/main.rs
new file mode 100644
index 000000000000..87a8ab21d420
--- /dev/null
+++ b/get_all_blobs/src/main.rs
@@ -0,0 +1,165 @@
+use std::{fs, str::FromStr};
+
+use alloy::{
+    dyn_abi::JsonAbiExt,
+    json_abi::JsonAbi,
+    network::Ethereum,
+    primitives::Address,
+    providers::{Provider, RootProvider},
+};
+use client::EigenClientRetriever;
+use serde::{Deserialize, Serialize};
+
+mod blob_info;
+mod client;
+mod generated;
+
+#[derive(Debug, Serialize, Deserialize)]
+struct BlobData {
+    pub commitment: String,
+    pub blob: String,
+}
+
+const EIGENDA_API_URL: &str = "https://disperser-holesky.eigenda.xyz:443";
+const BLOB_DATA_JSON: &str = "blob_data.json";
+const ABI_JSON: &str = "./abi/commitBatchesSharedBridge.json";
+const COMMIT_BATCHES_SELECTOR: &str = "6edd4f12";
+
+async fn get_blob(commitment: &str) -> anyhow::Result<Vec<u8>> {
+    let client = EigenClientRetriever::new(EIGENDA_API_URL).await?;
+    let data = client
+        .get_blob_data(commitment)
+        .await?
+        .ok_or_else(|| anyhow::anyhow!("Blob not found"))?;
+    Ok(data)
+}
+
+async fn get_transactions(
+    provider: &RootProvider<
+        alloy::transports::http::Http<alloy::transports::http::Client>,
+        Ethereum,
+    >,
+    validator_timelock_address: Address,
+    block_start: u64,
+) -> anyhow::Result<()> {
+    let latest_block = provider.get_block_number().await?;
+    let mut json_array = Vec::new();
+
+    let mut i = 0;
+    for block_number in block_start..=latest_block {
+        i += 1;
+        if i % 50 == 0 {
+            println!(
+                "\x1b[32mProcessed up to block {} of {}\x1b[0m",
+                block_number, latest_block
+            );
+        }
+        if let Ok(Some(block)) = provider
+            .get_block_by_number(block_number.into(), true)
+            .await
+        {
+            for tx in block.transactions.into_transactions() {
+                if let Some(to) = tx.to {
+                    if to == validator_timelock_address {
+                        let input = tx.input;
+                        // Skip transactions whose calldata is too short to contain a selector.
+                        if input.len() < 4 {
+                            continue;
+                        }
+                        let selector = &input[0..4];
+                        if selector == hex::decode(COMMIT_BATCHES_SELECTOR)? {
+                            if let Ok(decoded) = decode_blob_data_input(&input[4..]).await {
+                                for blob in decoded {
+                                    json_array.push(blob);
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    if json_array.is_empty() {
+        println!("\x1b[31mNo transactions found.\x1b[0m");
+        return Ok(());
+    }
+
+    let json_string = serde_json::to_string_pretty(&json_array)?;
+    fs::write(BLOB_DATA_JSON, json_string)?;
+    println!("\x1b[32mData stored in blob_data.json file.\x1b[0m");
+
+    Ok(())
+}
+
+async fn decode_blob_data_input(input: &[u8]) -> anyhow::Result<Vec<BlobData>> {
+    let json = std::fs::read_to_string(ABI_JSON)?;
+    let json_abi: JsonAbi = serde_json::from_str(&json)?;
+    let function = json_abi
+        .functions
+        .iter()
+        .find(|f| f.0 == "commitBatchesSharedBridge")
+        .ok_or(anyhow::anyhow!("Function not found"))?
+        .1;
+
+    let decoded = function[0].abi_decode_input(input, true)?;
+    let commit_batch_info = decoded[2].as_array().ok_or(anyhow::anyhow!(
+        "CommitBatchInfo cannot be represented as an array"
+    ))?[0]
+        .as_tuple()
+        .ok_or(anyhow::anyhow!(
+            "CommitBatchInfo components cannot be represented as a tuple"
+        ))?;
+
+    let mut blobs = vec![];
+
+    for pubdata_commitments in commit_batch_info.iter() {
+        let pubdata_commitments_bytes = pubdata_commitments.as_bytes();
+        if let Ok(blob_data) = get_blob_from_pubdata_commitment(pubdata_commitments_bytes).await {
+            blobs.push(blob_data);
+        }
+    }
+
+    Ok(blobs)
+}
+
+async fn get_blob_from_pubdata_commitment(
+    pubdata_commitments_bytes: Option<&[u8]>,
+) -> anyhow::Result<BlobData> {
+    let pubdata_commitments_bytes = pubdata_commitments_bytes
+        .ok_or_else(|| anyhow::anyhow!("Pubdata commitment cannot be represented as bytes"))?;
+    let commitment = hex::decode(&pubdata_commitments_bytes[1..])?;
+    let commitment = hex::encode(&commitment);
+    let blob = get_blob(&commitment).await?;
+    Ok(BlobData {
+        commitment,
+        blob: hex::encode(blob),
+    })
+}
+
+#[tokio::main]
+async fn main() -> anyhow::Result<()> {
+    let args: Vec<String> = std::env::args().collect();
+
+    if args.len() != 4 {
+        eprintln!("Usage: cargo run -- <validator_timelock_address> <rpc_url> <block_start>");
+        std::process::exit(1);
+    }
+
+    let validator_timelock_address = Address::from_str(&args[1])?;
+
+    let _ = rustls::crypto::aws_lc_rs::default_provider().install_default();
+
+    let url = alloy::transports::http::reqwest::Url::from_str(&args[2])?;
+    let provider: RootProvider<
+        alloy::transports::http::Http<alloy::transports::http::Client>,
+        Ethereum,
+    > = RootProvider::new_http(url);
+
+    let block_start = args[3].parse::<u64>()?;
+
+    get_transactions(&provider, validator_timelock_address, block_start).await?;
+
+    Ok(())
+}
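
BlobInfo::into_bytes in blob_info.rs above length-prefixes the header bytes with usize::to_be_bytes, which is 8 bytes on 64-bit targets. A minimal sketch of the matching reader, not part of this diff, assuming a 64-bit producer (a portable format would pin the prefix to u64 explicitly):

// Splits a buffer produced by BlobInfo::into_bytes into (header, proof) parts.
// Hypothetical helper; assumes the 8-byte big-endian length prefix written
// by usize::to_be_bytes on a 64-bit target.
fn split_blob_info_bytes(bytes: &[u8]) -> Option<(&[u8], &[u8])> {
    if bytes.len() < 8 {
        return None;
    }
    let (len_bytes, rest) = bytes.split_at(8);
    let header_len = u64::from_be_bytes(len_bytes.try_into().ok()?) as usize;
    if rest.len() < header_len {
        return None;
    }
    Some(rest.split_at(header_len))
}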
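
End to end, the retrieval path in client.rs is: hex commitment string → RLP-decoded BlobInfo → RetrieveBlobRequest keyed by (batch_header_hash, blob_index) → unpadded blob bytes. A usage sketch against this PR's EigenClientRetriever, assuming the Holesky disperser endpoint from main.rs and a commitment taken from pubdata; the function name is hypothetical:

// Sketch only: driving EigenClientRetriever by hand, outside main.rs.
// `commitment_hex` is assumed to be the hex-encoded RLP BlobInfo from pubdata.
async fn fetch_one(commitment_hex: &str) -> anyhow::Result<Vec<u8>> {
    let retriever =
        EigenClientRetriever::new("https://disperser-holesky.eigenda.xyz:443").await?;
    retriever
        .get_blob_data(commitment_hex)
        .await?
        .ok_or_else(|| anyhow::anyhow!("Blob not found"))
}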
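
The quorum_indexes comment on BlobVerificationProof in generated/disperser.rs above describes a positional mapping from the blob's quorums into BatchHeader.quorum_numbers. A standalone sketch of that rule, not part of this diff and with a hypothetical helper name:

// For each quorum in BlobHeader.blob_quorum_params, find its index within
// BatchHeader.quorum_numbers; returns None if a blob quorum is missing.
fn quorum_indexes(blob_quorums: &[u8], batch_quorums: &[u8]) -> Option<Vec<u8>> {
    blob_quorums
        .iter()
        .map(|q| batch_quorums.iter().position(|b| b == q).map(|i| i as u8))
        .collect()
}

fn main() {
    // Matches the example in the generated comment:
    // blob quorums [0, 3, 5] against batch quorums [0, 5, 3] => [0, 2, 1].
    assert_eq!(quorum_indexes(&[0, 3, 5], &[0, 5, 3]), Some(vec![0, 2, 1]));
}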
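
The BlobStatus doc comment distinguishes intermediate states (PROCESSING, DISPERSING, CONFIRMED) from terminal ones. A small helper capturing that split, assuming the generated disperser module is in scope; it is a sketch mirroring the comment, not part of this diff:

use crate::generated::disperser::BlobStatus;

// True for the statuses the BlobStatus comment lists as terminal,
// i.e. the points where a status-polling loop can stop.
fn is_terminal(status: BlobStatus) -> bool {
    matches!(
        status,
        BlobStatus::Failed | BlobStatus::Finalized | BlobStatus::InsufficientSignatures
    )
}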
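
Finally, get_transactions in main.rs filters calldata by comparing the first four bytes against COMMIT_BATCHES_SELECTOR ("6edd4f12"), which is why the short-calldata guard above matters. The same check isolated as a hypothetical helper, with the selector written out as raw bytes:

// True when the calldata starts with the commitBatchesSharedBridge selector.
fn is_commit_batches_call(input: &[u8]) -> bool {
    const COMMIT_BATCHES_SELECTOR: [u8; 4] = [0x6e, 0xdd, 0x4f, 0x12];
    input.len() >= 4 && input[0..4] == COMMIT_BATCHES_SELECTOR
}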