From 546684594acd673366c0a78d2ae7009e54b7aba2 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Thu, 21 Mar 2024 11:05:13 +0100 Subject: [PATCH 01/17] first iteration of l1infotree recursive --- db/migrations/state/0019.sql | 12 +++ l1infotree/tree.go | 4 + l1infotree/tree_recursive.go | 59 +++++++++++ l1infotree/tree_recursive_test.go | 99 +++++++++++++++++++ state/interfaces.go | 7 ++ state/l1infotree_v2_feijoa.go | 40 ++++++++ state/pgstatestorage/l1infotree.go | 94 ++++++++++++------ state/pgstatestorage/l1infotree_v2_feijoa.go | 42 ++++++++ .../l1-info-tree-recursive/input.json | 17 ++++ .../smt-full-output.json | 92 +++++++++++++++++ 10 files changed, 438 insertions(+), 28 deletions(-) create mode 100644 db/migrations/state/0019.sql create mode 100644 l1infotree/tree_recursive.go create mode 100644 l1infotree/tree_recursive_test.go create mode 100644 state/l1infotree_v2_feijoa.go create mode 100644 state/pgstatestorage/l1infotree_v2_feijoa.go create mode 100644 test/vectors/src/merkle-tree/l1-info-tree-recursive/input.json create mode 100644 test/vectors/src/merkle-tree/l1-info-tree-recursive/smt-full-output.json diff --git a/db/migrations/state/0019.sql b/db/migrations/state/0019.sql new file mode 100644 index 0000000000..b6a1e51e82 --- /dev/null +++ b/db/migrations/state/0019.sql @@ -0,0 +1,12 @@ +-- +migrate Up + +-- +migrate Up +ALTER TABLE state.exit_root + ADD COLUMN IF NOT EXISTS l1_info_tree_index_feijoa BIGINT DEFAULT NULL UNIQUE; +CREATE INDEX IF NOT EXISTS idx_exit_root_l1_info_tree_index_feijoa ON state.exit_root (l1_info_tree_index_feijoa); + +-- +migrate Down +ALTER TABLE state.exit_root + DROP COLUMN IF EXISTS l1_info_tree_index_feijoa, +DROP INDEX IF EXISTS state.idx_exit_root_l1_info_tree_index_feijoa; + diff --git a/l1infotree/tree.go b/l1infotree/tree.go index 5e356c164d..0fdbf65cc3 100644 --- a/l1infotree/tree.go +++ b/l1infotree/tree.go @@ -176,6 +176,10 @@ func (mt *L1InfoTree) 
initSiblings(initialLeaves [][32]byte) ([][32]byte, common return mt.ComputeMerkleProof(mt.count, initialLeaves) } +func (mt *L1InfoTree) GetRoot() common.Hash { + return mt.currentRoot +} + // GetCurrentRootCountAndSiblings returns the latest root, count and sibblings func (mt *L1InfoTree) GetCurrentRootCountAndSiblings() (common.Hash, uint32, [][32]byte) { return mt.currentRoot, mt.count, mt.siblings diff --git a/l1infotree/tree_recursive.go b/l1infotree/tree_recursive.go new file mode 100644 index 0000000000..04c5d8cbbe --- /dev/null +++ b/l1infotree/tree_recursive.go @@ -0,0 +1,59 @@ +package l1infotree + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) + +const ( + firstLeafHistoricL1InfoTree = "0x0000000000000000000000000000000000000000000000000000000000000000" +) + +type L1InfoTreeRecursive struct { + historicL1InfoTree *L1InfoTree + l1InfoTreeDataHash *common.Hash + leaves [][32]byte +} + +func NewL1InfoTreeRecursive(height uint8) (*L1InfoTreeRecursive, error) { + historic, err := NewL1InfoTree(height, nil) + if err != nil { + return nil, err + } + // Insert first leaf, all zeros (no changes in tree, just to skip leaf with index 0) + //historic.AddLeaf(0, common.HexToHash(firstLeafHistoricL1InfoTree)) + + return &L1InfoTreeRecursive{ + historicL1InfoTree: historic, + }, nil +} + +func (mt *L1InfoTreeRecursive) AddLeaf(index uint32, leaf [32]byte) (common.Hash, error) { + + previousRoot := mt.GetRoot() + _, err := mt.historicL1InfoTree.AddLeaf(index, previousRoot) + if err != nil { + return common.Hash{}, err + } + mt.leaves = append(mt.leaves, leaf) + leafHash := common.Hash(leaf) + mt.l1InfoTreeDataHash = &leafHash + return mt.GetRoot(), nil + +} + +func (mt *L1InfoTreeRecursive) GetRoot() common.Hash { + if mt.l1InfoTreeDataHash == nil { + return common.HexToHash(firstLeafHistoricL1InfoTree) + } + return crypto.Keccak256Hash(mt.historicL1InfoTree.GetRoot().Bytes(), mt.l1InfoTreeDataHash.Bytes()) + +} + 
+func (mt *L1InfoTreeRecursive) ComputeMerkleProofFromLeaves(gerIndex uint32, leaves [][32]byte) ([][32]byte, common.Hash, error) { + return mt.historicL1InfoTree.ComputeMerkleProof(gerIndex, leaves) +} + +func (mt *L1InfoTreeRecursive) ComputeMerkleProof(gerIndex uint32) ([][32]byte, common.Hash, error) { + return mt.historicL1InfoTree.ComputeMerkleProof(gerIndex, mt.leaves) +} diff --git a/l1infotree/tree_recursive_test.go b/l1infotree/tree_recursive_test.go new file mode 100644 index 0000000000..395625f16b --- /dev/null +++ b/l1infotree/tree_recursive_test.go @@ -0,0 +1,99 @@ +package l1infotree_test + +import ( + "encoding/json" + "os" + "testing" + + "github.com/0xPolygonHermez/zkevm-node/l1infotree" + "github.com/0xPolygonHermez/zkevm-node/log" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +type testL1InfoEntry struct { + ger common.Hash + blockhash common.Hash + timestamp uint64 + index uint32 + expectedRoot common.Hash +} + +const ( + L1InfoRootRecursiveHeight = uint8(32) + EmptyL1InfoRootRecursive = "0x0000000000000000000000000000000000000000000000000000000000000000" + + root1 = "0xcc4105312818e9b7f692c9c807ea09699f4f290e5e31671a8e0c2c937f1c43f0" + filenameTestData = "../test/vectors/src/merkle-tree/l1-info-tree-recursive/smt-full-output.json" +) + +type vectorTestData struct { + GlobalExitRoot common.Hash `json:"globalExitRoot"` + BlockHash common.Hash `json:"blockHash"` + Timestamp uint64 `json:"timestamp"` + SmtProofPreviousIndex []common.Hash `json:"smtProofPreviousIndex"` + Index uint32 `json:"index"` + PreviousL1InfoTreeRoot common.Hash `json:"previousL1InfoTreeRoot"` + L1DataHash common.Hash `json:"l1DataHash"` + L1InfoTreeRoot common.Hash `json:"l1InfoTreeRoot"` + HistoricL1InfoRoot common.Hash `json:"historicL1InfoRoot"` +} + +func hash(leaf testL1InfoEntry) [32]byte { + return l1infotree.HashLeafData(leaf.ger, leaf.blockhash, leaf.timestamp) +} + +func readData(t *testing.T) []vectorTestData { + data, err 
:= os.ReadFile(filenameTestData) + require.NoError(t, err) + var mtTestVectors []vectorTestData + err = json.Unmarshal(data, &mtTestVectors) + require.NoError(t, err) + return mtTestVectors +} + +func TestBuildTreeVectorData(t *testing.T) { + data := readData(t) + sut, err := l1infotree.NewL1InfoTreeRecursive(L1InfoRootRecursiveHeight) + require.NoError(t, err) + for _, testVector := range data { + // Add leaf + leafData := l1infotree.HashLeafData(testVector.GlobalExitRoot, testVector.BlockHash, testVector.Timestamp) + leafDataHash := common.BytesToHash(leafData[:]) + root, err := sut.AddLeaf(testVector.Index-1, leafData) + require.NoError(t, err) + require.Equal(t, testVector.L1InfoTreeRoot.String(), root.String(), "Roots do not match leaf", testVector.Index) + require.Equal(t, testVector.L1DataHash.String(), leafDataHash.String(), "leafData do not match leaf", testVector.Index) + + } +} + +func TestEmptyL1InfoRootRecursive(t *testing.T) { + // empty + sut, err := l1infotree.NewL1InfoTreeRecursive(L1InfoRootRecursiveHeight) + require.NoError(t, err) + require.NotNil(t, sut) + root := sut.GetRoot() + require.Equal(t, EmptyL1InfoRootRecursive, root.String()) +} +func TestProofsTreeVectorData(t *testing.T) { + data := readData(t) + sut, err := l1infotree.NewL1InfoTreeRecursive(L1InfoRootRecursiveHeight) + require.NoError(t, err) + for _, testVector := range data { + // Add leaf + leafData := l1infotree.HashLeafData(testVector.GlobalExitRoot, testVector.BlockHash, testVector.Timestamp) + + _, err := sut.AddLeaf(testVector.Index-1, leafData) + require.NoError(t, err) + mp, _, err := sut.ComputeMerkleProof(testVector.Index) + require.NoError(t, err) + for i, v := range mp { + c := common.Hash(v) + if c.String() != testVector.SmtProofPreviousIndex[i].String() { + log.Info("MerkleProof: index ", testVector.Index, " mk:", i, " v:", c.String(), " expected:", testVector.SmtProofPreviousIndex[i].String()) + } + + } + } +} diff --git a/state/interfaces.go b/state/interfaces.go 
index 74cb9b6a74..22b5da4d69 100644 --- a/state/interfaces.go +++ b/state/interfaces.go @@ -10,6 +10,11 @@ import ( "github.com/jackc/pgx/v4" ) +type storageL1InfoTreeV2 interface { + AddL1InfoRootToExitRootV2Feijoa(ctx context.Context, exitRoot *L1InfoTreeExitRootStorageEntryV2Feijoa, dbTx pgx.Tx) error + GetAllL1InfoRootEntriesV2Feijoa(ctx context.Context, dbTx pgx.Tx) ([]L1InfoTreeExitRootStorageEntryV2Feijoa, error) + GetLatestL1InfoRootV2Feijoa(ctx context.Context, maxBlockNumber uint64) (L1InfoTreeExitRootStorageEntryV2Feijoa, error) +} type storage interface { Exec(ctx context.Context, sql string, arguments ...interface{}) (commandTag pgconn.CommandTag, err error) Query(ctx context.Context, sql string, args ...interface{}) (pgx.Rows, error) @@ -158,4 +163,6 @@ type storage interface { UpdateBatchAsChecked(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error GetNotCheckedBatches(ctx context.Context, dbTx pgx.Tx) ([]*Batch, error) GetLastL2BlockByBatchNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (*L2Block, error) + + storageL1InfoTreeV2 } diff --git a/state/l1infotree_v2_feijoa.go b/state/l1infotree_v2_feijoa.go new file mode 100644 index 0000000000..c0f4214c95 --- /dev/null +++ b/state/l1infotree_v2_feijoa.go @@ -0,0 +1,40 @@ +package state + +import ( + "context" + "fmt" + + "github.com/0xPolygonHermez/zkevm-node/l1infotree" + "github.com/0xPolygonHermez/zkevm-node/log" + "github.com/jackc/pgx/v4" +) + +type L1InfoTreeExitRootStorageEntryV2Feijoa L1InfoTreeExitRootStorageEntry + +type StateL1InfoTreeV2 struct { + storageL1InfoTreeV2 storageL1InfoTreeV2 + l1InfoTreeV2 *l1infotree.L1InfoTree +} + +func (s *StateL1InfoTreeV2) buildL1InfoTreeV2CacheIfNeed(ctx context.Context, dbTx pgx.Tx) error { + if s.l1InfoTreeV2 != nil { + return nil + } + log.Debugf("Building L1InfoTree cache") + allLeaves, err := s.storageL1InfoTreeV2.GetAllL1InfoRootEntriesV2Feijoa(ctx, dbTx) + if err != nil { + log.Error("error getting all leaves. 
Error: ", err) + return fmt.Errorf("error getting all leaves. Error: %w", err) + } + var leaves [][32]byte + for _, leaf := range allLeaves { + leaves = append(leaves, leaf.Hash()) + } + mt, err := l1infotree.NewL1InfoTree(uint8(32), leaves) //nolint:gomnd + if err != nil { + log.Error("error creating L1InfoTree. Error: ", err) + return fmt.Errorf("error creating L1InfoTree. Error: %w", err) + } + s.l1InfoTreeV2 = mt + return nil +} diff --git a/state/pgstatestorage/l1infotree.go b/state/pgstatestorage/l1infotree.go index 450124dde2..07a892f14e 100644 --- a/state/pgstatestorage/l1infotree.go +++ b/state/pgstatestorage/l1infotree.go @@ -3,33 +3,48 @@ package pgstatestorage import ( "context" "errors" + "fmt" "github.com/0xPolygonHermez/zkevm-node/state" "github.com/ethereum/go-ethereum/common" "github.com/jackc/pgx/v4" ) +const ( + l1InfoTreeIndexFieldNameV1 = "l1_info_tree_index" +) + // AddL1InfoRootToExitRoot adds a new entry in ExitRoot and returns index of L1InfoTree and error func (p *PostgresStorage) AddL1InfoRootToExitRoot(ctx context.Context, exitRoot *state.L1InfoTreeExitRootStorageEntry, dbTx pgx.Tx) error { + return p.addL1InfoRootToExitRootVx(ctx, exitRoot, dbTx, l1InfoTreeIndexFieldNameV1) +} + +func (p *PostgresStorage) addL1InfoRootToExitRootVx(ctx context.Context, exitRoot *state.L1InfoTreeExitRootStorageEntry, dbTx pgx.Tx, indexFieldName string) error { const addGlobalExitRootSQL = ` - INSERT INTO state.exit_root(block_num, timestamp, mainnet_exit_root, rollup_exit_root, global_exit_root, prev_block_hash, l1_info_root, l1_info_tree_index) + INSERT INTO state.exit_root(block_num, timestamp, mainnet_exit_root, rollup_exit_root, global_exit_root, prev_block_hash, l1_info_root, %s) VALUES ($1, $2, $3, $4, $5, $6, $7, $8); ` + sql := fmt.Sprintf(addGlobalExitRootSQL, indexFieldName) e := p.getExecQuerier(dbTx) - _, err := e.Exec(ctx, addGlobalExitRootSQL, + _, err := e.Exec(ctx, sql, exitRoot.BlockNumber, exitRoot.Timestamp, exitRoot.MainnetExitRoot, 
exitRoot.RollupExitRoot, exitRoot.GlobalExitRoot.GlobalExitRoot, exitRoot.PreviousBlockHash, exitRoot.L1InfoTreeRoot, exitRoot.L1InfoTreeIndex) return err } func (p *PostgresStorage) GetAllL1InfoRootEntries(ctx context.Context, dbTx pgx.Tx) ([]state.L1InfoTreeExitRootStorageEntry, error) { - const getL1InfoRootSQL = `SELECT block_num, timestamp, mainnet_exit_root, rollup_exit_root, global_exit_root, prev_block_hash, l1_info_root, l1_info_tree_index + return p.GetAllL1InfoRootEntriesVx(ctx, dbTx, l1InfoTreeIndexFieldNameV1) +} + +func (p *PostgresStorage) GetAllL1InfoRootEntriesVx(ctx context.Context, dbTx pgx.Tx, indexFieldName string) ([]state.L1InfoTreeExitRootStorageEntry, error) { + const getL1InfoRootSQL = `SELECT block_num, timestamp, mainnet_exit_root, rollup_exit_root, global_exit_root, prev_block_hash, l1_info_root, %s FROM state.exit_root - WHERE l1_info_tree_index IS NOT NULL - ORDER BY l1_info_tree_index` + WHERE %s IS NOT NULL + ORDER BY %s` + sql := fmt.Sprintf(getL1InfoRootSQL, indexFieldName, indexFieldName, indexFieldName) e := p.getExecQuerier(dbTx) - rows, err := e.Query(ctx, getL1InfoRootSQL) + rows, err := e.Query(ctx, sql) if err != nil { return nil, err } @@ -50,15 +65,22 @@ func (p *PostgresStorage) GetAllL1InfoRootEntries(ctx context.Context, dbTx pgx. 
// GetLatestL1InfoRoot is used to get the latest L1InfoRoot func (p *PostgresStorage) GetLatestL1InfoRoot(ctx context.Context, maxBlockNumber uint64) (state.L1InfoTreeExitRootStorageEntry, error) { - const getL1InfoRootSQL = `SELECT block_num, timestamp, mainnet_exit_root, rollup_exit_root, global_exit_root, prev_block_hash, l1_info_root, l1_info_tree_index + return p.GetLatestL1InfoRootVx(ctx, maxBlockNumber, nil, l1InfoTreeIndexFieldNameV1) +} + +// GetLatestL1InfoRoot is used to get the latest L1InfoRoot +func (p *PostgresStorage) GetLatestL1InfoRootVx(ctx context.Context, maxBlockNumber uint64, dbTx pgx.Tx, indexFieldName string) (state.L1InfoTreeExitRootStorageEntry, error) { + const getL1InfoRootSQL = `SELECT block_num, timestamp, mainnet_exit_root, rollup_exit_root, global_exit_root, prev_block_hash, l1_info_root, %s FROM state.exit_root - WHERE l1_info_tree_index IS NOT NULL AND block_num <= $1 - ORDER BY l1_info_tree_index DESC` + WHERE %s IS NOT NULL AND block_num <= $1 + ORDER BY %s DESC` + + sql := fmt.Sprintf(getL1InfoRootSQL, indexFieldName, indexFieldName, indexFieldName) entry := state.L1InfoTreeExitRootStorageEntry{} - e := p.getExecQuerier(nil) - err := e.QueryRow(ctx, getL1InfoRootSQL, maxBlockNumber).Scan(&entry.BlockNumber, &entry.Timestamp, &entry.MainnetExitRoot, &entry.RollupExitRoot, &entry.GlobalExitRoot.GlobalExitRoot, + e := p.getExecQuerier(dbTx) + err := e.QueryRow(ctx, sql, maxBlockNumber).Scan(&entry.BlockNumber, &entry.Timestamp, &entry.MainnetExitRoot, &entry.RollupExitRoot, &entry.GlobalExitRoot.GlobalExitRoot, &entry.PreviousBlockHash, &entry.L1InfoTreeRoot, &entry.L1InfoTreeIndex) if !errors.Is(err, pgx.ErrNoRows) { @@ -68,11 +90,16 @@ func (p *PostgresStorage) GetLatestL1InfoRoot(ctx context.Context, maxBlockNumbe return entry, nil } func (p *PostgresStorage) GetLatestIndex(ctx context.Context, dbTx pgx.Tx) (uint32, error) { - const getLatestIndexSQL = `SELECT max(l1_info_tree_index) as l1_info_tree_index FROM state.exit_root - 
WHERE l1_info_tree_index IS NOT NULL` + return p.GetLatestIndexVx(ctx, dbTx, l1InfoTreeIndexFieldNameV1) +} +func (p *PostgresStorage) GetLatestIndexVx(ctx context.Context, dbTx pgx.Tx, indexFieldName string) (uint32, error) { + const getLatestIndexSQL = `SELECT max(%s) as %s FROM state.exit_root + WHERE %s IS NOT NULL` + sql := fmt.Sprintf(getLatestIndexSQL, indexFieldName, indexFieldName, indexFieldName) + var l1InfoTreeIndex *uint32 e := p.getExecQuerier(dbTx) - err := e.QueryRow(ctx, getLatestIndexSQL).Scan(&l1InfoTreeIndex) + err := e.QueryRow(ctx, sql).Scan(&l1InfoTreeIndex) if err != nil { return 0, err } @@ -83,13 +110,17 @@ func (p *PostgresStorage) GetLatestIndex(ctx context.Context, dbTx pgx.Tx) (uint } func (p *PostgresStorage) GetL1InfoRootLeafByL1InfoRoot(ctx context.Context, l1InfoRoot common.Hash, dbTx pgx.Tx) (state.L1InfoTreeExitRootStorageEntry, error) { - const getL1InfoRootSQL = `SELECT block_num, timestamp, mainnet_exit_root, rollup_exit_root, global_exit_root, prev_block_hash, l1_info_root, l1_info_tree_index - FROM state.exit_root - WHERE l1_info_tree_index IS NOT NULL AND l1_info_root=$1` + return p.GetL1InfoRootLeafByL1InfoRootVx(ctx, l1InfoRoot, dbTx, l1InfoTreeIndexFieldNameV1) +} +func (p *PostgresStorage) GetL1InfoRootLeafByL1InfoRootVx(ctx context.Context, l1InfoRoot common.Hash, dbTx pgx.Tx, indexFieldName string) (state.L1InfoTreeExitRootStorageEntry, error) { + const getL1InfoRootSQL = `SELECT block_num, timestamp, mainnet_exit_root, rollup_exit_root, global_exit_root, prev_block_hash, l1_info_root, %s + FROM state.exit_root + WHERE %s IS NOT NULL AND l1_info_root=$1` + sql := fmt.Sprintf(getL1InfoRootSQL, indexFieldName, indexFieldName) var entry state.L1InfoTreeExitRootStorageEntry e := p.getExecQuerier(dbTx) - err := e.QueryRow(ctx, getL1InfoRootSQL, l1InfoRoot).Scan(&entry.BlockNumber, &entry.Timestamp, &entry.MainnetExitRoot, &entry.RollupExitRoot, &entry.GlobalExitRoot.GlobalExitRoot, + err := e.QueryRow(ctx, sql, 
l1InfoRoot).Scan(&entry.BlockNumber, &entry.Timestamp, &entry.MainnetExitRoot, &entry.RollupExitRoot, &entry.GlobalExitRoot.GlobalExitRoot, &entry.PreviousBlockHash, &entry.L1InfoTreeRoot, &entry.L1InfoTreeIndex) if !errors.Is(err, pgx.ErrNoRows) { return entry, err @@ -98,29 +129,36 @@ func (p *PostgresStorage) GetL1InfoRootLeafByL1InfoRoot(ctx context.Context, l1I } func (p *PostgresStorage) GetL1InfoRootLeafByIndex(ctx context.Context, l1InfoTreeIndex uint32, dbTx pgx.Tx) (state.L1InfoTreeExitRootStorageEntry, error) { - const getL1InfoRootByIndexSQL = `SELECT block_num, timestamp, mainnet_exit_root, rollup_exit_root, global_exit_root, prev_block_hash, l1_info_root, l1_info_tree_index - FROM state.exit_root - WHERE l1_info_tree_index = $1` + return p.GetL1InfoRootLeafByIndexVx(ctx, l1InfoTreeIndex, dbTx, l1InfoTreeIndexFieldNameV1) +} +func (p *PostgresStorage) GetL1InfoRootLeafByIndexVx(ctx context.Context, l1InfoTreeIndex uint32, dbTx pgx.Tx, indexFieldName string) (state.L1InfoTreeExitRootStorageEntry, error) { + const getL1InfoRootByIndexSQL = `SELECT block_num, timestamp, mainnet_exit_root, rollup_exit_root, global_exit_root, prev_block_hash, l1_info_root, %s + FROM state.exit_root + WHERE %s = $1` + sql := fmt.Sprintf(getL1InfoRootByIndexSQL, indexFieldName, indexFieldName) var entry state.L1InfoTreeExitRootStorageEntry e := p.getExecQuerier(dbTx) - err := e.QueryRow(ctx, getL1InfoRootByIndexSQL, l1InfoTreeIndex).Scan(&entry.BlockNumber, &entry.Timestamp, &entry.MainnetExitRoot, &entry.RollupExitRoot, &entry.GlobalExitRoot.GlobalExitRoot, + err := e.QueryRow(ctx, sql, l1InfoTreeIndex).Scan(&entry.BlockNumber, &entry.Timestamp, &entry.MainnetExitRoot, &entry.RollupExitRoot, &entry.GlobalExitRoot.GlobalExitRoot, &entry.PreviousBlockHash, &entry.L1InfoTreeRoot, &entry.L1InfoTreeIndex) if !errors.Is(err, pgx.ErrNoRows) { return entry, err } return entry, nil } - func (p *PostgresStorage) GetLeafsByL1InfoRoot(ctx context.Context, l1InfoRoot common.Hash, dbTx 
pgx.Tx) ([]state.L1InfoTreeExitRootStorageEntry, error) { + return p.GetLeafsByL1InfoRootVx(ctx, l1InfoRoot, dbTx, l1InfoTreeIndexFieldNameV1) +} + +func (p *PostgresStorage) GetLeafsByL1InfoRootVx(ctx context.Context, l1InfoRoot common.Hash, dbTx pgx.Tx, indexFieldName string) ([]state.L1InfoTreeExitRootStorageEntry, error) { // TODO: Optimize this query - const getLeafsByL1InfoRootSQL = `SELECT block_num, timestamp, mainnet_exit_root, rollup_exit_root, global_exit_root, prev_block_hash, l1_info_root, l1_info_tree_index + const getLeafsByL1InfoRootSQL = `SELECT block_num, timestamp, mainnet_exit_root, rollup_exit_root, global_exit_root, prev_block_hash, l1_info_root, %s FROM state.exit_root - WHERE l1_info_tree_index IS NOT NULL AND l1_info_tree_index <= (SELECT l1_info_tree_index FROM state.exit_root WHERE l1_info_root=$1) - ORDER BY l1_info_tree_index ASC` - + WHERE %s IS NOT NULL AND %s <= (SELECT %s FROM state.exit_root WHERE l1_info_root=$1) + ORDER BY %s ASC` + sql := fmt.Sprintf(getLeafsByL1InfoRootSQL, indexFieldName, indexFieldName, indexFieldName, indexFieldName, indexFieldName) e := p.getExecQuerier(dbTx) - rows, err := e.Query(ctx, getLeafsByL1InfoRootSQL, l1InfoRoot) + rows, err := e.Query(ctx, sql, l1InfoRoot) if err != nil { return nil, err } diff --git a/state/pgstatestorage/l1infotree_v2_feijoa.go b/state/pgstatestorage/l1infotree_v2_feijoa.go new file mode 100644 index 0000000000..8851401e44 --- /dev/null +++ b/state/pgstatestorage/l1infotree_v2_feijoa.go @@ -0,0 +1,42 @@ +package pgstatestorage + +import ( + "context" + + "github.com/0xPolygonHermez/zkevm-node/state" + "github.com/jackc/pgx/v4" +) + +const ( + l1InfoTreeIndexFieldNameV2Feijoa = "l1_info_tree_index_feijoa" +) + +// AddL1InfoRootToExitRoot adds a new entry in ExitRoot and returns index of L1InfoTree and error +func (p *PostgresStorage) AddL1InfoRootToExitRootV2Feijoa(ctx context.Context, exitRoot *state.L1InfoTreeExitRootStorageEntryV2Feijoa, dbTx pgx.Tx) error { + exitRootOld := 
state.L1InfoTreeExitRootStorageEntry(*exitRoot) + return p.addL1InfoRootToExitRootVx(ctx, &exitRootOld, dbTx, l1InfoTreeIndexFieldNameV2Feijoa) +} + +func (p *PostgresStorage) GetAllL1InfoRootEntriesV2Feijoa(ctx context.Context, dbTx pgx.Tx) ([]state.L1InfoTreeExitRootStorageEntryV2Feijoa, error) { + res, err := p.GetAllL1InfoRootEntriesVx(ctx, dbTx, l1InfoTreeIndexFieldNameV2Feijoa) + if err != nil { + return nil, err + } + var entriesV2Feijoa []state.L1InfoTreeExitRootStorageEntryV2Feijoa + for _, entry := range res { + entriesV2Feijoa = append(entriesV2Feijoa, state.L1InfoTreeExitRootStorageEntryV2Feijoa(entry)) + } + return entriesV2Feijoa, nil +} + +func (p *PostgresStorage) GetLatestL1InfoRootV2Feijoa(ctx context.Context, maxBlockNumber uint64, dbTx pgx.Tx) (state.L1InfoTreeExitRootStorageEntryV2Feijoa, error) { + res, err := p.GetLatestL1InfoRootVx(ctx, maxBlockNumber, dbTx, l1InfoTreeIndexFieldNameV2Feijoa) + if err != nil { + return state.L1InfoTreeExitRootStorageEntryV2Feijoa{}, err + } + return state.L1InfoTreeExitRootStorageEntryV2Feijoa(res), nil +} + +func (p *PostgresStorage) GetLatestIndexV2Feijoa(ctx context.Context, dbTx pgx.Tx) (uint32, error) { + return p.GetLatestIndexVx(ctx, dbTx, l1InfoTreeIndexFieldNameV2Feijoa) +} diff --git a/test/vectors/src/merkle-tree/l1-info-tree-recursive/input.json b/test/vectors/src/merkle-tree/l1-info-tree-recursive/input.json new file mode 100644 index 0000000000..eddd444dd9 --- /dev/null +++ b/test/vectors/src/merkle-tree/l1-info-tree-recursive/input.json @@ -0,0 +1,17 @@ +[ + { + "globalExitRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "timestamp": "0" + }, + { + "globalExitRoot": "0x16994edfddddb9480667b64174fc00d3b6da7290d37b8db3a16571b4ddf0789f", + "blockHash": "0x24a5871d68723340d9eadc674aa8ad75f3e33b61d5a9db7db92af856a19270bb", + "timestamp": "42" + }, + { + "globalExitRoot": 
"0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968", + "blockHash": "0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb", + "timestamp": "42" + } +] \ No newline at end of file diff --git a/test/vectors/src/merkle-tree/l1-info-tree-recursive/smt-full-output.json b/test/vectors/src/merkle-tree/l1-info-tree-recursive/smt-full-output.json new file mode 100644 index 0000000000..e81cb2bc30 --- /dev/null +++ b/test/vectors/src/merkle-tree/l1-info-tree-recursive/smt-full-output.json @@ -0,0 +1,92 @@ +[ + { + "globalExitRoot": "0x16994edfddddb9480667b64174fc00d3b6da7290d37b8db3a16571b4ddf0789f", + "blockHash": "0x24a5871d68723340d9eadc674aa8ad75f3e33b61d5a9db7db92af856a19270bb", + "timestamp": 42, + "smtProofPreviousIndex": [ + "0xcc4105312818e9b7f692c9c807ea09699f4f290e5e31671a8e0c2c937f1c43f0", + "0xece5fbe7739fd48f4931ce884ee9cf5f373d0a6c63b80ce836bc0ae674e78540", + "0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30", + "0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85", + "0xe58769b32a1beaf1ea27375a44095a0d1fb664ce2dd358e7fcbfb78c26a19344", + "0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", + "0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968", + "0xffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f83", + "0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af", + "0xcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0", + "0xf9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5", + "0xf8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf892", + "0x3490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99c", + "0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb", + "0x5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8becc", + "0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2", + "0x2733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981f", + 
"0xe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a", + "0x5a2dce0a8a7f68bb74560f8f71837c2c2ebbcbf7fffb42ae1896f13f7c7479a0", + "0xb46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0", + "0xc65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2", + "0xf4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd9", + "0x5a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e377", + "0x4df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652", + "0xcdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef", + "0x0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618d", + "0xb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0", + "0x838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e", + "0x662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e", + "0x388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea322", + "0x93237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d735", + "0x8448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a9" + ], + "index": 1, + "previousIndex": 0, + "previousL1InfoTreeRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", + "l1DataHash": "0x0312659ccc1839f6cdc8db9cbaefabc1ee9a9c1f71b3a20ceb906d80575c5736", + "l1InfoTreeRoot": "0xcc4105312818e9b7f692c9c807ea09699f4f290e5e31671a8e0c2c937f1c43f0", + "historicL1InfoRoot": "0x27ae5ba08d7291c96c8cbddcc148bf48a6d68c7974b94356f53754ef6171d757" + }, + { + "globalExitRoot": "0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968", + "blockHash": "0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb", + "timestamp": 42, + "smtProofPreviousIndex": [ + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xece5fbe7739fd48f4931ce884ee9cf5f373d0a6c63b80ce836bc0ae674e78540", + "0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30", + 
"0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85", + "0xe58769b32a1beaf1ea27375a44095a0d1fb664ce2dd358e7fcbfb78c26a19344", + "0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", + "0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968", + "0xffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f83", + "0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af", + "0xcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0", + "0xf9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5", + "0xf8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf892", + "0x3490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99c", + "0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb", + "0x5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8becc", + "0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2", + "0x2733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981f", + "0xe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a", + "0x5a2dce0a8a7f68bb74560f8f71837c2c2ebbcbf7fffb42ae1896f13f7c7479a0", + "0xb46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0", + "0xc65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2", + "0xf4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd9", + "0x5a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e377", + "0x4df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652", + "0xcdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef", + "0x0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618d", + "0xb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0", + "0x838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e", + "0x662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e", + "0x388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea322", + 
"0x93237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d735", + "0x8448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a9" + ], + "index": 2, + "previousIndex": 1, + "previousL1InfoTreeRoot": "0xcc4105312818e9b7f692c9c807ea09699f4f290e5e31671a8e0c2c937f1c43f0", + "l1DataHash": "0xf6340c5e4f2b138f56f028a5a7fd42b4976ba3fe5fae98d040dd86b9ed59f172", + "l1InfoTreeRoot": "0x264008191dde377bbc6e99914cf1f1222143542afe28d4b7d4c9ecba02dd2273", + "historicL1InfoRoot": "0x4c9ea822b94a2367aeba9ce15cc881edfda28e1763e377b54a141068b08002f4" + } + ] \ No newline at end of file From 246b3f26629fd013e207757a78b963346702300e Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Thu, 21 Mar 2024 16:59:29 +0100 Subject: [PATCH 02/17] fix lint, and integration with state --- l1infotree/tree.go | 1 + l1infotree/tree_recursive.go | 30 +++- l1infotree/tree_recursive_test.go | 14 -- state/interfaces.go | 2 +- state/l1infotree_v2_feijoa.go | 18 +- state/mocks/mock_storage.go | 165 ++++++++++++++++++ state/state.go | 2 + .../feijoa/processor_l1_info_tree_update.go | 1 + 8 files changed, 211 insertions(+), 22 deletions(-) create mode 100644 synchronizer/actions/feijoa/processor_l1_info_tree_update.go diff --git a/l1infotree/tree.go b/l1infotree/tree.go index 0fdbf65cc3..01bf1de198 100644 --- a/l1infotree/tree.go +++ b/l1infotree/tree.go @@ -176,6 +176,7 @@ func (mt *L1InfoTree) initSiblings(initialLeaves [][32]byte) ([][32]byte, common return mt.ComputeMerkleProof(mt.count, initialLeaves) } +// GetRoot returns the root of the L1InfoTree func (mt *L1InfoTree) GetRoot() common.Hash { return mt.currentRoot } diff --git a/l1infotree/tree_recursive.go b/l1infotree/tree_recursive.go index 04c5d8cbbe..6d0b855658 100644 --- a/l1infotree/tree_recursive.go +++ b/l1infotree/tree_recursive.go @@ -9,27 +9,46 @@ const ( firstLeafHistoricL1InfoTree = "0x0000000000000000000000000000000000000000000000000000000000000000" ) +// L1InfoTreeRecursive is 
a recursive implementation of the L1InfoTree of Feijoa type L1InfoTreeRecursive struct { historicL1InfoTree *L1InfoTree l1InfoTreeDataHash *common.Hash leaves [][32]byte } +// NewL1InfoTreeRecursive creates a new empty L1InfoTreeRecursive func NewL1InfoTreeRecursive(height uint8) (*L1InfoTreeRecursive, error) { historic, err := NewL1InfoTree(height, nil) if err != nil { return nil, err } - // Insert first leaf, all zeros (no changes in tree, just to skip leaf with index 0) - //historic.AddLeaf(0, common.HexToHash(firstLeafHistoricL1InfoTree)) return &L1InfoTreeRecursive{ historicL1InfoTree: historic, }, nil } -func (mt *L1InfoTreeRecursive) AddLeaf(index uint32, leaf [32]byte) (common.Hash, error) { +// NewL1InfoTreeRecursiveFromLeaves creates a new L1InfoTreeRecursive from leaves +func NewL1InfoTreeRecursiveFromLeaves(height uint8, leaves [][32]byte) (*L1InfoTreeRecursive, error) { + historic, err := NewL1InfoTree(height, nil) + if err != nil { + return nil, err + } + res := &L1InfoTreeRecursive{ + historicL1InfoTree: historic, + } + for _, leaf := range leaves { + _, err := res.AddLeaf(uint32(len(res.leaves)), leaf) + if err != nil { + return nil, err + } + } + return res, nil +} + +// AddLeaf adds a new leaf to the L1InfoTreeRecursive +func (mt *L1InfoTreeRecursive) AddLeaf(index uint32, leaf [32]byte) (common.Hash, error) { previousRoot := mt.GetRoot() _, err := mt.historicL1InfoTree.AddLeaf(index, previousRoot) if err != nil { @@ -39,21 +58,22 @@ func (mt *L1InfoTreeRecursive) AddLeaf(index uint32, leaf [32]byte) (common.Hash leafHash := common.Hash(leaf) mt.l1InfoTreeDataHash = &leafHash return mt.GetRoot(), nil - } +// GetRoot returns the root of the L1InfoTreeRecursive func (mt *L1InfoTreeRecursive) GetRoot() common.Hash { if mt.l1InfoTreeDataHash == nil { return common.HexToHash(firstLeafHistoricL1InfoTree) } return crypto.Keccak256Hash(mt.historicL1InfoTree.GetRoot().Bytes(), mt.l1InfoTreeDataHash.Bytes()) - } +// ComputeMerkleProofFromLeaves computes the 
Merkle proof from the leaves func (mt *L1InfoTreeRecursive) ComputeMerkleProofFromLeaves(gerIndex uint32, leaves [][32]byte) ([][32]byte, common.Hash, error) { return mt.historicL1InfoTree.ComputeMerkleProof(gerIndex, leaves) } +// ComputeMerkleProof computes the Merkle proof from the current leaves func (mt *L1InfoTreeRecursive) ComputeMerkleProof(gerIndex uint32) ([][32]byte, common.Hash, error) { return mt.historicL1InfoTree.ComputeMerkleProof(gerIndex, mt.leaves) } diff --git a/l1infotree/tree_recursive_test.go b/l1infotree/tree_recursive_test.go index 395625f16b..f3b1e26463 100644 --- a/l1infotree/tree_recursive_test.go +++ b/l1infotree/tree_recursive_test.go @@ -11,14 +11,6 @@ import ( "github.com/stretchr/testify/require" ) -type testL1InfoEntry struct { - ger common.Hash - blockhash common.Hash - timestamp uint64 - index uint32 - expectedRoot common.Hash -} - const ( L1InfoRootRecursiveHeight = uint8(32) EmptyL1InfoRootRecursive = "0x0000000000000000000000000000000000000000000000000000000000000000" @@ -39,10 +31,6 @@ type vectorTestData struct { HistoricL1InfoRoot common.Hash `json:"historicL1InfoRoot"` } -func hash(leaf testL1InfoEntry) [32]byte { - return l1infotree.HashLeafData(leaf.ger, leaf.blockhash, leaf.timestamp) -} - func readData(t *testing.T) []vectorTestData { data, err := os.ReadFile(filenameTestData) require.NoError(t, err) @@ -64,7 +52,6 @@ func TestBuildTreeVectorData(t *testing.T) { require.NoError(t, err) require.Equal(t, testVector.L1InfoTreeRoot.String(), root.String(), "Roots do not match leaf", testVector.Index) require.Equal(t, testVector.L1DataHash.String(), leafDataHash.String(), "leafData do not match leaf", testVector.Index) - } } @@ -93,7 +80,6 @@ func TestProofsTreeVectorData(t *testing.T) { if c.String() != testVector.SmtProofPreviousIndex[i].String() { log.Info("MerkleProof: index ", testVector.Index, " mk:", i, " v:", c.String(), " expected:", testVector.SmtProofPreviousIndex[i].String()) } - } } } diff --git 
a/state/interfaces.go b/state/interfaces.go index 22b5da4d69..e561456e67 100644 --- a/state/interfaces.go +++ b/state/interfaces.go @@ -13,7 +13,7 @@ import ( type storageL1InfoTreeV2 interface { AddL1InfoRootToExitRootV2Feijoa(ctx context.Context, exitRoot *L1InfoTreeExitRootStorageEntryV2Feijoa, dbTx pgx.Tx) error GetAllL1InfoRootEntriesV2Feijoa(ctx context.Context, dbTx pgx.Tx) ([]L1InfoTreeExitRootStorageEntryV2Feijoa, error) - GetLatestL1InfoRootV2Feijoa(ctx context.Context, maxBlockNumber uint64) (L1InfoTreeExitRootStorageEntryV2Feijoa, error) + GetLatestL1InfoRootV2Feijoa(ctx context.Context, maxBlockNumber uint64, dbTx pgx.Tx) (L1InfoTreeExitRootStorageEntryV2Feijoa, error) } type storage interface { Exec(ctx context.Context, sql string, arguments ...interface{}) (commandTag pgconn.CommandTag, err error) diff --git a/state/l1infotree_v2_feijoa.go b/state/l1infotree_v2_feijoa.go index c0f4214c95..31da4f90c9 100644 --- a/state/l1infotree_v2_feijoa.go +++ b/state/l1infotree_v2_feijoa.go @@ -6,14 +6,17 @@ import ( "github.com/0xPolygonHermez/zkevm-node/l1infotree" "github.com/0xPolygonHermez/zkevm-node/log" + "github.com/ethereum/go-ethereum/common" "github.com/jackc/pgx/v4" ) +// L1InfoTreeExitRootStorageEntryV2Feijoa leaf of the L1InfoTreeRecurisve type L1InfoTreeExitRootStorageEntryV2Feijoa L1InfoTreeExitRootStorageEntry +// StateL1InfoTreeV2 state for L1InfoTreeV2 Feijoa Recursive Tree type StateL1InfoTreeV2 struct { storageL1InfoTreeV2 storageL1InfoTreeV2 - l1InfoTreeV2 *l1infotree.L1InfoTree + l1InfoTreeV2 *l1infotree.L1InfoTreeRecursive } func (s *StateL1InfoTreeV2) buildL1InfoTreeV2CacheIfNeed(ctx context.Context, dbTx pgx.Tx) error { @@ -26,11 +29,12 @@ func (s *StateL1InfoTreeV2) buildL1InfoTreeV2CacheIfNeed(ctx context.Context, db log.Error("error getting all leaves. Error: ", err) return fmt.Errorf("error getting all leaves. 
Error: %w", err) } + var leaves [][32]byte for _, leaf := range allLeaves { leaves = append(leaves, leaf.Hash()) } - mt, err := l1infotree.NewL1InfoTree(uint8(32), leaves) //nolint:gomnd + mt, err := l1infotree.NewL1InfoTreeRecursiveFromLeaves(uint8(32), leaves) //nolint:gomnd if err != nil { log.Error("error creating L1InfoTree. Error: ", err) return fmt.Errorf("error creating L1InfoTree. Error: %w", err) @@ -38,3 +42,13 @@ func (s *StateL1InfoTreeV2) buildL1InfoTreeV2CacheIfNeed(ctx context.Context, db s.l1InfoTreeV2 = mt return nil } + +// GetCurrentL1InfoRoot Return current L1InfoRoot +func (s *StateL1InfoTreeV2) GetCurrentL1InfoRoot(ctx context.Context, dbTx pgx.Tx) (common.Hash, error) { + err := s.buildL1InfoTreeV2CacheIfNeed(ctx, dbTx) + if err != nil { + log.Error("error building L1InfoTree cache. Error: ", err) + return ZeroHash, err + } + return s.l1InfoTreeV2.GetRoot(), nil +} diff --git a/state/mocks/mock_storage.go b/state/mocks/mock_storage.go index a9a83eab43..7555009224 100644 --- a/state/mocks/mock_storage.go +++ b/state/mocks/mock_storage.go @@ -418,6 +418,54 @@ func (_c *StorageMock_AddL1InfoRootToExitRoot_Call) RunAndReturn(run func(contex return _c } +// AddL1InfoRootToExitRootV2Feijoa provides a mock function with given fields: ctx, exitRoot, dbTx +func (_m *StorageMock) AddL1InfoRootToExitRootV2Feijoa(ctx context.Context, exitRoot *state.L1InfoTreeExitRootStorageEntryV2Feijoa, dbTx pgx.Tx) error { + ret := _m.Called(ctx, exitRoot, dbTx) + + if len(ret) == 0 { + panic("no return value specified for AddL1InfoRootToExitRootV2Feijoa") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *state.L1InfoTreeExitRootStorageEntryV2Feijoa, pgx.Tx) error); ok { + r0 = rf(ctx, exitRoot, dbTx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// StorageMock_AddL1InfoRootToExitRootV2Feijoa_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddL1InfoRootToExitRootV2Feijoa' +type 
StorageMock_AddL1InfoRootToExitRootV2Feijoa_Call struct { + *mock.Call +} + +// AddL1InfoRootToExitRootV2Feijoa is a helper method to define mock.On call +// - ctx context.Context +// - exitRoot *state.L1InfoTreeExitRootStorageEntryV2Feijoa +// - dbTx pgx.Tx +func (_e *StorageMock_Expecter) AddL1InfoRootToExitRootV2Feijoa(ctx interface{}, exitRoot interface{}, dbTx interface{}) *StorageMock_AddL1InfoRootToExitRootV2Feijoa_Call { + return &StorageMock_AddL1InfoRootToExitRootV2Feijoa_Call{Call: _e.mock.On("AddL1InfoRootToExitRootV2Feijoa", ctx, exitRoot, dbTx)} +} + +func (_c *StorageMock_AddL1InfoRootToExitRootV2Feijoa_Call) Run(run func(ctx context.Context, exitRoot *state.L1InfoTreeExitRootStorageEntryV2Feijoa, dbTx pgx.Tx)) *StorageMock_AddL1InfoRootToExitRootV2Feijoa_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*state.L1InfoTreeExitRootStorageEntryV2Feijoa), args[2].(pgx.Tx)) + }) + return _c +} + +func (_c *StorageMock_AddL1InfoRootToExitRootV2Feijoa_Call) Return(_a0 error) *StorageMock_AddL1InfoRootToExitRootV2Feijoa_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *StorageMock_AddL1InfoRootToExitRootV2Feijoa_Call) RunAndReturn(run func(context.Context, *state.L1InfoTreeExitRootStorageEntryV2Feijoa, pgx.Tx) error) *StorageMock_AddL1InfoRootToExitRootV2Feijoa_Call { + _c.Call.Return(run) + return _c +} + // AddL2Block provides a mock function with given fields: ctx, batchNumber, l2Block, receipts, txsL2Hash, txsEGPData, imStateRoots, dbTx func (_m *StorageMock) AddL2Block(ctx context.Context, batchNumber uint64, l2Block *state.L2Block, receipts []*types.Receipt, txsL2Hash []common.Hash, txsEGPData []state.StoreTxEGPData, imStateRoots []common.Hash, dbTx pgx.Tx) error { ret := _m.Called(ctx, batchNumber, l2Block, receipts, txsL2Hash, txsEGPData, imStateRoots, dbTx) @@ -1418,6 +1466,65 @@ func (_c *StorageMock_GetAllL1InfoRootEntries_Call) RunAndReturn(run func(contex return _c } +// 
GetAllL1InfoRootEntriesV2Feijoa provides a mock function with given fields: ctx, dbTx +func (_m *StorageMock) GetAllL1InfoRootEntriesV2Feijoa(ctx context.Context, dbTx pgx.Tx) ([]state.L1InfoTreeExitRootStorageEntryV2Feijoa, error) { + ret := _m.Called(ctx, dbTx) + + if len(ret) == 0 { + panic("no return value specified for GetAllL1InfoRootEntriesV2Feijoa") + } + + var r0 []state.L1InfoTreeExitRootStorageEntryV2Feijoa + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, pgx.Tx) ([]state.L1InfoTreeExitRootStorageEntryV2Feijoa, error)); ok { + return rf(ctx, dbTx) + } + if rf, ok := ret.Get(0).(func(context.Context, pgx.Tx) []state.L1InfoTreeExitRootStorageEntryV2Feijoa); ok { + r0 = rf(ctx, dbTx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]state.L1InfoTreeExitRootStorageEntryV2Feijoa) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, pgx.Tx) error); ok { + r1 = rf(ctx, dbTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// StorageMock_GetAllL1InfoRootEntriesV2Feijoa_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAllL1InfoRootEntriesV2Feijoa' +type StorageMock_GetAllL1InfoRootEntriesV2Feijoa_Call struct { + *mock.Call +} + +// GetAllL1InfoRootEntriesV2Feijoa is a helper method to define mock.On call +// - ctx context.Context +// - dbTx pgx.Tx +func (_e *StorageMock_Expecter) GetAllL1InfoRootEntriesV2Feijoa(ctx interface{}, dbTx interface{}) *StorageMock_GetAllL1InfoRootEntriesV2Feijoa_Call { + return &StorageMock_GetAllL1InfoRootEntriesV2Feijoa_Call{Call: _e.mock.On("GetAllL1InfoRootEntriesV2Feijoa", ctx, dbTx)} +} + +func (_c *StorageMock_GetAllL1InfoRootEntriesV2Feijoa_Call) Run(run func(ctx context.Context, dbTx pgx.Tx)) *StorageMock_GetAllL1InfoRootEntriesV2Feijoa_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(pgx.Tx)) + }) + return _c +} + +func (_c *StorageMock_GetAllL1InfoRootEntriesV2Feijoa_Call) Return(_a0 
[]state.L1InfoTreeExitRootStorageEntryV2Feijoa, _a1 error) *StorageMock_GetAllL1InfoRootEntriesV2Feijoa_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *StorageMock_GetAllL1InfoRootEntriesV2Feijoa_Call) RunAndReturn(run func(context.Context, pgx.Tx) ([]state.L1InfoTreeExitRootStorageEntryV2Feijoa, error)) *StorageMock_GetAllL1InfoRootEntriesV2Feijoa_Call { + _c.Call.Return(run) + return _c +} + // GetBatchByForcedBatchNum provides a mock function with given fields: ctx, forcedBatchNumber, dbTx func (_m *StorageMock) GetBatchByForcedBatchNum(ctx context.Context, forcedBatchNumber uint64, dbTx pgx.Tx) (*state.Batch, error) { ret := _m.Called(ctx, forcedBatchNumber, dbTx) @@ -4909,6 +5016,64 @@ func (_c *StorageMock_GetLatestL1InfoRoot_Call) RunAndReturn(run func(context.Co return _c } +// GetLatestL1InfoRootV2Feijoa provides a mock function with given fields: ctx, maxBlockNumber, dbTx +func (_m *StorageMock) GetLatestL1InfoRootV2Feijoa(ctx context.Context, maxBlockNumber uint64, dbTx pgx.Tx) (state.L1InfoTreeExitRootStorageEntryV2Feijoa, error) { + ret := _m.Called(ctx, maxBlockNumber, dbTx) + + if len(ret) == 0 { + panic("no return value specified for GetLatestL1InfoRootV2Feijoa") + } + + var r0 state.L1InfoTreeExitRootStorageEntryV2Feijoa + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) (state.L1InfoTreeExitRootStorageEntryV2Feijoa, error)); ok { + return rf(ctx, maxBlockNumber, dbTx) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) state.L1InfoTreeExitRootStorageEntryV2Feijoa); ok { + r0 = rf(ctx, maxBlockNumber, dbTx) + } else { + r0 = ret.Get(0).(state.L1InfoTreeExitRootStorageEntryV2Feijoa) + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, pgx.Tx) error); ok { + r1 = rf(ctx, maxBlockNumber, dbTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// StorageMock_GetLatestL1InfoRootV2Feijoa_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 
'GetLatestL1InfoRootV2Feijoa' +type StorageMock_GetLatestL1InfoRootV2Feijoa_Call struct { + *mock.Call +} + +// GetLatestL1InfoRootV2Feijoa is a helper method to define mock.On call +// - ctx context.Context +// - maxBlockNumber uint64 +// - dbTx pgx.Tx +func (_e *StorageMock_Expecter) GetLatestL1InfoRootV2Feijoa(ctx interface{}, maxBlockNumber interface{}, dbTx interface{}) *StorageMock_GetLatestL1InfoRootV2Feijoa_Call { + return &StorageMock_GetLatestL1InfoRootV2Feijoa_Call{Call: _e.mock.On("GetLatestL1InfoRootV2Feijoa", ctx, maxBlockNumber, dbTx)} +} + +func (_c *StorageMock_GetLatestL1InfoRootV2Feijoa_Call) Run(run func(ctx context.Context, maxBlockNumber uint64, dbTx pgx.Tx)) *StorageMock_GetLatestL1InfoRootV2Feijoa_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].(pgx.Tx)) + }) + return _c +} + +func (_c *StorageMock_GetLatestL1InfoRootV2Feijoa_Call) Return(_a0 state.L1InfoTreeExitRootStorageEntryV2Feijoa, _a1 error) *StorageMock_GetLatestL1InfoRootV2Feijoa_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *StorageMock_GetLatestL1InfoRootV2Feijoa_Call) RunAndReturn(run func(context.Context, uint64, pgx.Tx) (state.L1InfoTreeExitRootStorageEntryV2Feijoa, error)) *StorageMock_GetLatestL1InfoRootV2Feijoa_Call { + _c.Call.Return(run) + return _c +} + // GetLatestVirtualBatchTimestamp provides a mock function with given fields: ctx, dbTx func (_m *StorageMock) GetLatestVirtualBatchTimestamp(ctx context.Context, dbTx pgx.Tx) (time.Time, error) { ret := _m.Called(ctx, dbTx) diff --git a/state/state.go b/state/state.go index a1a754242f..06e2381b7d 100644 --- a/state/state.go +++ b/state/state.go @@ -39,6 +39,7 @@ type State struct { newL2BlockEvents chan NewL2BlockEvent newL2BlockEventHandlers []NewL2BlockEventHandler + StateL1InfoTreeV2 } // NewState creates a new State @@ -57,6 +58,7 @@ func NewState(cfg Config, storage storage, executorClient executor.ExecutorServi newL2BlockEvents: make(chan 
NewL2BlockEvent, newL2BlockEventBufferSize), newL2BlockEventHandlers: []NewL2BlockEventHandler{}, l1InfoTree: mt, + StateL1InfoTreeV2: StateL1InfoTreeV2{storageL1InfoTreeV2: storage}, } return state diff --git a/synchronizer/actions/feijoa/processor_l1_info_tree_update.go b/synchronizer/actions/feijoa/processor_l1_info_tree_update.go new file mode 100644 index 0000000000..c9f60c8b7f --- /dev/null +++ b/synchronizer/actions/feijoa/processor_l1_info_tree_update.go @@ -0,0 +1 @@ +package feijoa From 923fd73db1274265fb5200d69f6e17f47e6fd4ee Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Thu, 21 Mar 2024 17:39:04 +0100 Subject: [PATCH 03/17] fix migration --- db/migrations/state/0019.sql | 2 +- db/migrations/state/0019_test.go | 24 ++++++++++++++++++++++++ 2 files changed, 25 insertions(+), 1 deletion(-) create mode 100644 db/migrations/state/0019_test.go diff --git a/db/migrations/state/0019.sql b/db/migrations/state/0019.sql index b6a1e51e82..4628575f9c 100644 --- a/db/migrations/state/0019.sql +++ b/db/migrations/state/0019.sql @@ -7,6 +7,6 @@ CREATE INDEX IF NOT EXISTS idx_exit_root_l1_info_tree_index_feijoa ON state.exit -- +migrate Down ALTER TABLE state.exit_root - DROP COLUMN IF EXISTS l1_info_tree_index_feijoa, + DROP COLUMN IF EXISTS l1_info_tree_index_feijoa; DROP INDEX IF EXISTS state.idx_exit_root_l1_info_tree_index_feijoa; diff --git a/db/migrations/state/0019_test.go b/db/migrations/state/0019_test.go new file mode 100644 index 0000000000..ac4ce9b276 --- /dev/null +++ b/db/migrations/state/0019_test.go @@ -0,0 +1,24 @@ +package migrations_test + +import ( + "database/sql" + "testing" +) + +type migrationTest0019 struct{} + +func (m migrationTest0019) InsertData(db *sql.DB) error { + //TODO: Add insert data + return nil +} + +func (m migrationTest0019) RunAssertsAfterMigrationUp(t *testing.T, db *sql.DB) { + //TODO: Add checks +} + +func (m migrationTest0019) RunAssertsAfterMigrationDown(t *testing.T, db 
*sql.DB) { + //TODO: Add checks +} +func TestMigration0019(t *testing.T) { + runMigrationTest(t, 19, migrationTest0019{}) +} From 3e523ae7c348d5b56a04a801df41f3a774f38f9c Mon Sep 17 00:00:00 2001 From: tclemos Date: Tue, 26 Mar 2024 12:17:44 -0300 Subject: [PATCH 04/17] implement migration 19 tests and improve migration tests in general --- db/migrations/state/0013_test.go | 44 +++++---- db/migrations/state/0018_test.go | 48 +++++----- db/migrations/state/0019_test.go | 92 +++++++++++++++++-- db/migrations/state/utils_test.go | 144 ++++++++++++++++++++++++------ 4 files changed, 256 insertions(+), 72 deletions(-) diff --git a/db/migrations/state/0013_test.go b/db/migrations/state/0013_test.go index 2f597718aa..56ef9db23a 100644 --- a/db/migrations/state/0013_test.go +++ b/db/migrations/state/0013_test.go @@ -8,21 +8,21 @@ import ( "github.com/stretchr/testify/assert" ) -const ( - blockHashValue = "0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1" - mainExitRootValue = "0x83fc198de31e1b2b1a8212d2430fbb7766c13d9ad305637dea3759065606475d" - rollupExitRootValue = "0xadb91a6a1fce56eaea561002bc9a993f4e65a7710bd72f4eee3067cbd73a743c" - globalExitRootValue = "0x5bf4af1a651a2a74b36e6eb208481f94c69fc959f756223dfa49608061937585" - previousBlockHashValue = "0xe865e912b504572a4d80ad018e29797e3c11f00bf9ae2549548a25779c9d7e57" - l1InfoRootValue = "0x2b9484b83c6398033241865b015fb9430eb3e159182a6075d00c924845cc393e" -) +const () // this migration changes length of the token name -type migrationTest0013 struct{} +type migrationTest0013 struct { + blockHashValue string + mainExitRootValue string + rollupExitRootValue string + globalExitRootValue string + previousBlockHashValue string + l1InfoRootValue string +} func (m migrationTest0013) insertBlock(blockNumber uint64, db *sql.DB) error { const addBlock = "INSERT INTO state.block (block_num, received_at, block_hash) VALUES ($1, $2, $3)" - if _, err := db.Exec(addBlock, blockNumber, time.Now(), blockHashValue); err 
!= nil { + if _, err := db.Exec(addBlock, blockNumber, time.Now(), m.blockHashValue); err != nil { return err } return nil @@ -45,10 +45,10 @@ func (m migrationTest0013) InsertData(db *sql.DB) error { if err = m.insertBlock(uint64(124), db); err != nil { return err } - if err = m.insertRowInOldTable(db, []interface{}{123, time.Now(), mainExitRootValue, rollupExitRootValue, globalExitRootValue}); err != nil { + if err = m.insertRowInOldTable(db, []interface{}{123, time.Now(), m.mainExitRootValue, m.rollupExitRootValue, m.globalExitRootValue}); err != nil { return err } - if err = m.insertRowInOldTable(db, []interface{}{124, time.Now(), mainExitRootValue, rollupExitRootValue, globalExitRootValue}); err != nil { + if err = m.insertRowInOldTable(db, []interface{}{124, time.Now(), m.mainExitRootValue, m.rollupExitRootValue, m.globalExitRootValue}); err != nil { return err } @@ -113,16 +113,16 @@ func (m migrationTest0013) RunAssertsAfterMigrationUp(t *testing.T, db *sql.DB) assert.NoError(t, err) err = m.insertBlock(uint64(127), db) assert.NoError(t, err) - prevBlockHash := previousBlockHashValue - l1InfoRoot := l1InfoRootValue - err = m.insertRowInMigratedTable(db, []interface{}{125, time.Now(), mainExitRootValue, rollupExitRootValue, globalExitRootValue, prevBlockHash, l1InfoRoot, 1}) + prevBlockHash := m.previousBlockHashValue + l1InfoRoot := m.l1InfoRootValue + err = m.insertRowInMigratedTable(db, []interface{}{125, time.Now(), m.mainExitRootValue, m.rollupExitRootValue, m.globalExitRootValue, prevBlockHash, l1InfoRoot, 1}) assert.NoError(t, err) // insert duplicated l1_info_root - err = m.insertRowInMigratedTable(db, []interface{}{126, time.Now(), mainExitRootValue, rollupExitRootValue, globalExitRootValue, prevBlockHash, l1InfoRoot, 1}) + err = m.insertRowInMigratedTable(db, []interface{}{126, time.Now(), m.mainExitRootValue, m.rollupExitRootValue, m.globalExitRootValue, prevBlockHash, l1InfoRoot, 1}) assert.Error(t, err) // insert in the old way must work - err = 
m.insertRowInOldTable(db, []interface{}{127, time.Now(), mainExitRootValue, rollupExitRootValue, globalExitRootValue}) + err = m.insertRowInOldTable(db, []interface{}{127, time.Now(), m.mainExitRootValue, m.rollupExitRootValue, m.globalExitRootValue}) assert.NoError(t, err) sqlSelect := `SELECT prev_block_hash, l1_info_root FROM state.exit_root WHERE l1_info_tree_index = $1` @@ -185,5 +185,13 @@ func (m migrationTest0013) RunAssertsAfterMigrationDown(t *testing.T, db *sql.DB } func TestMigration0013(t *testing.T) { - runMigrationTest(t, 13, migrationTest0013{}) + m := migrationTest0013{ + blockHashValue: "0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1", + mainExitRootValue: "0x83fc198de31e1b2b1a8212d2430fbb7766c13d9ad305637dea3759065606475d", + rollupExitRootValue: "0xadb91a6a1fce56eaea561002bc9a993f4e65a7710bd72f4eee3067cbd73a743c", + globalExitRootValue: "0x5bf4af1a651a2a74b36e6eb208481f94c69fc959f756223dfa49608061937585", + previousBlockHashValue: "0xe865e912b504572a4d80ad018e29797e3c11f00bf9ae2549548a25779c9d7e57", + l1InfoRootValue: "0x2b9484b83c6398033241865b015fb9430eb3e159182a6075d00c924845cc393e", + } + runMigrationTest(t, 13, m) } diff --git a/db/migrations/state/0018_test.go b/db/migrations/state/0018_test.go index e92068a0be..382e2c2a62 100644 --- a/db/migrations/state/0018_test.go +++ b/db/migrations/state/0018_test.go @@ -7,7 +7,9 @@ import ( "github.com/stretchr/testify/assert" ) -type migrationTest0018 struct{} +type migrationTest0018 struct { + migrationBase +} func (m migrationTest0018) InsertData(db *sql.DB) error { const insertBatch1 = ` @@ -46,16 +48,7 @@ func (m migrationTest0018) InsertData(db *sql.DB) error { } func (m migrationTest0018) RunAssertsAfterMigrationUp(t *testing.T, db *sql.DB) { - assertTableNotExists(t, db, "state", "proof") - - assertTableExists(t, db, "state", "blob_inner") - assertTableExists(t, db, "state", "batch_proof") - assertTableExists(t, db, "state", "blob_inner_proof") - assertTableExists(t, db, 
"state", "blob_outer_proof") - - assertColumnExists(t, db, "state", "virtual_batch", "blob_inner_num") - assertColumnExists(t, db, "state", "virtual_batch", "prev_l1_it_root") - assertColumnExists(t, db, "state", "virtual_batch", "prev_l1_it_index") + m.AssertNewAndRemovedItemsAfterMigrationUp(t, db) // Insert blobInner 1 const insertBlobInner = `INSERT INTO state.blob_inner (blob_inner_num, data, block_num) VALUES (1, E'\\x1234', 1);` @@ -80,16 +73,7 @@ func (m migrationTest0018) RunAssertsAfterMigrationUp(t *testing.T, db *sql.DB) func (m migrationTest0018) RunAssertsAfterMigrationDown(t *testing.T, db *sql.DB) { var result int - assertTableExists(t, db, "state", "proof") - - assertTableNotExists(t, db, "state", "blob_inner") - assertTableNotExists(t, db, "state", "batch_proof") - assertTableNotExists(t, db, "state", "blob_inner_proof") - assertTableNotExists(t, db, "state", "blob_outer_proof") - - assertColumnNotExists(t, db, "state", "virtual_batch", "blob_inner_num") - assertColumnNotExists(t, db, "state", "virtual_batch", "prev_l1_it_root") - assertColumnNotExists(t, db, "state", "virtual_batch", "prev_l1_it_index") + m.AssertNewAndRemovedItemsAfterMigrationDown(t, db) // Check column blob_inner_num doesn't exists in state.virtual_batch table const getBlobInnerNumColumn = `SELECT count(*) FROM information_schema.columns WHERE table_name='virtual_batch' and column_name='blob_inner_num'` @@ -111,5 +95,25 @@ func (m migrationTest0018) RunAssertsAfterMigrationDown(t *testing.T, db *sql.DB } func TestMigration0018(t *testing.T) { - runMigrationTest(t, 18, migrationTest0018{}) + m := migrationTest0018{ + migrationBase: migrationBase{ + removedTables: []tableMetadata{ + {"state", "proof"}, + }, + + newTables: []tableMetadata{ + {"state", "blob_inner"}, + {"state", "batch_proof"}, + {"state", "blob_inner_proof"}, + {"state", "blob_outer_proof"}, + }, + + newColumns: []columnMetadata{ + {"state", "virtual_batch", "blob_inner_num"}, + {"state", "virtual_batch", 
"prev_l1_it_root"}, + {"state", "virtual_batch", "prev_l1_it_index"}, + }, + }, + } + runMigrationTest(t, 18, m) } diff --git a/db/migrations/state/0019_test.go b/db/migrations/state/0019_test.go index ac4ce9b276..839b114c60 100644 --- a/db/migrations/state/0019_test.go +++ b/db/migrations/state/0019_test.go @@ -3,22 +3,104 @@ package migrations_test import ( "database/sql" "testing" + "time" + + "github.com/stretchr/testify/assert" ) -type migrationTest0019 struct{} +type migrationTest0019 struct { + migrationBase + + blockHashValue string + mainExitRootValue string + rollupExitRootValue string + globalExitRootValue string + previousBlockHashValue string + l1InfoRootValue string +} + +func (m migrationTest0019) insertBlock(blockNumber uint64, db *sql.DB) error { + const addBlock = "INSERT INTO state.block (block_num, received_at, block_hash) VALUES ($1, $2, $3)" + if _, err := db.Exec(addBlock, blockNumber, time.Now(), m.blockHashValue); err != nil { + return err + } + return nil +} + +func (m migrationTest0019) insertRowInOldTable(db *sql.DB, args ...interface{}) error { + sql := ` + INSERT INTO state.exit_root (block_num, "timestamp", mainnet_exit_root, rollup_exit_root, global_exit_root, prev_block_hash, l1_info_root, l1_info_tree_index) + VALUES ( $1, $2, $3, $4, $5, $6, $7, $8);` + + _, err := db.Exec(sql, args...) + return err +} + +func (m migrationTest0019) insertRowInMigratedTable(db *sql.DB, args ...interface{}) error { + sql := ` + INSERT INTO state.exit_root (block_num, "timestamp", mainnet_exit_root, rollup_exit_root, global_exit_root, prev_block_hash, l1_info_root, l1_info_tree_index, l1_info_tree_index_feijoa) + VALUES ( $1, $2, $3, $4, $5, $6, $7, $8, $9);` + + _, err := db.Exec(sql, args...) 
+ return err +} func (m migrationTest0019) InsertData(db *sql.DB) error { - //TODO: Add insert data + var err error + for i := uint64(1); i <= 6; i++ { + if err = m.insertBlock(i, db); err != nil { + return err + } + } + return nil } func (m migrationTest0019) RunAssertsAfterMigrationUp(t *testing.T, db *sql.DB) { - //TODO: Add checks + m.AssertNewAndRemovedItemsAfterMigrationUp(t, db) + + var nilL1InfoTreeIndex *uint = nil + err := m.insertRowInOldTable(db, 1, time.Now().UTC(), m.mainExitRootValue, m.rollupExitRootValue, m.globalExitRootValue, m.previousBlockHashValue, m.l1InfoRootValue, nilL1InfoTreeIndex) + assert.NoError(t, err) + + err = m.insertRowInOldTable(db, 2, time.Now().UTC(), m.mainExitRootValue, m.rollupExitRootValue, m.globalExitRootValue, m.previousBlockHashValue, m.l1InfoRootValue, uint(1)) + assert.NoError(t, err) + + err = m.insertRowInMigratedTable(db, 3, time.Now().UTC(), m.mainExitRootValue, m.rollupExitRootValue, m.globalExitRootValue, m.previousBlockHashValue, m.l1InfoRootValue, nilL1InfoTreeIndex, 1) + assert.NoError(t, err) } func (m migrationTest0019) RunAssertsAfterMigrationDown(t *testing.T, db *sql.DB) { - //TODO: Add checks + m.AssertNewAndRemovedItemsAfterMigrationDown(t, db) + + var nilL1InfoTreeIndex *uint = nil + err := m.insertRowInOldTable(db, 4, time.Now().UTC(), m.mainExitRootValue, m.rollupExitRootValue, m.globalExitRootValue, m.previousBlockHashValue, m.l1InfoRootValue, nilL1InfoTreeIndex) + assert.NoError(t, err) + + err = m.insertRowInOldTable(db, 5, time.Now().UTC(), m.mainExitRootValue, m.rollupExitRootValue, m.globalExitRootValue, m.previousBlockHashValue, m.l1InfoRootValue, uint(2)) + assert.NoError(t, err) + + err = m.insertRowInMigratedTable(db, 6, time.Now().UTC(), m.mainExitRootValue, m.rollupExitRootValue, m.globalExitRootValue, m.previousBlockHashValue, m.l1InfoRootValue, nilL1InfoTreeIndex, 2) + assert.Error(t, err) } + func TestMigration0019(t *testing.T) { - runMigrationTest(t, 19, migrationTest0019{}) + m := 
migrationTest0019{ + migrationBase: migrationBase{ + newIndexes: []string{ + "idx_exit_root_l1_info_tree_index_feijoa", + }, + newColumns: []columnMetadata{ + {"state", "exit_root", "l1_info_tree_index_feijoa"}, + }, + }, + + blockHashValue: "0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1", + mainExitRootValue: "0x83fc198de31e1b2b1a8212d2430fbb7766c13d9ad305637dea3759065606475d", + rollupExitRootValue: "0xadb91a6a1fce56eaea561002bc9a993f4e65a7710bd72f4eee3067cbd73a743c", + globalExitRootValue: "0x5bf4af1a651a2a74b36e6eb208481f94c69fc959f756223dfa49608061937585", + previousBlockHashValue: "0xe865e912b504572a4d80ad018e29797e3c11f00bf9ae2549548a25779c9d7e57", + l1InfoRootValue: "0x2b9484b83c6398033241865b015fb9430eb3e159182a6075d00c924845cc393e", + } + runMigrationTest(t, 19, m) } diff --git a/db/migrations/state/utils_test.go b/db/migrations/state/utils_test.go index 5ad563201b..df645f85e3 100644 --- a/db/migrations/state/utils_test.go +++ b/db/migrations/state/utils_test.go @@ -2,6 +2,7 @@ package migrations_test import ( "database/sql" + "errors" "fmt" "testing" @@ -32,6 +33,28 @@ func init() { }) } +type migrationBase struct { + newIndexes []string + newTables []tableMetadata + newColumns []columnMetadata + + removedIndexes []string + removedTables []tableMetadata + removedColumns []columnMetadata +} + +func (m migrationBase) AssertNewAndRemovedItemsAfterMigrationUp(t *testing.T, db *sql.DB) { + assertTablesNotExist(t, db, m.removedTables) + assertTablesExist(t, db, m.newTables) + assertColumnsExist(t, db, m.newColumns) +} + +func (m migrationBase) AssertNewAndRemovedItemsAfterMigrationDown(t *testing.T, db *sql.DB) { + assertTablesExist(t, db, m.removedTables) + assertTablesNotExist(t, db, m.newTables) + assertColumnsNotExist(t, db, m.newColumns) +} + type migrationTester interface { // InsertData used to insert data in the affected tables of the migration that is being tested // data will be inserted with the schema as it was previous the 
migration that is being tested @@ -44,6 +67,14 @@ type migrationTester interface { RunAssertsAfterMigrationDown(*testing.T, *sql.DB) } +type tableMetadata struct { + schema, name string +} + +type columnMetadata struct { + schema, tableName, name string +} + var ( stateDBCfg = dbutils.NewStateConfigFromEnv() packrMigrations = map[string]*packr.Box{ @@ -118,62 +149,121 @@ func runMigrationsDown(d *sql.DB, n int, packrName string) error { return nil } -func checkColumn(t *testing.T, db *sql.DB, schema string, table string, column string, exists bool) (bool, error) { +func checkColumnExists(db *sql.DB, column columnMetadata) (bool, error) { const getColumn = `SELECT count(*) FROM information_schema.columns WHERE table_schema=$1 AND table_name=$2 AND column_name=$3` var result int - row := db.QueryRow(getColumn, schema, table, column) + row := db.QueryRow(getColumn, column.schema, column.tableName, column.name) err := row.Scan(&result) - if err != nil { + if errors.Is(err, pgx.ErrNoRows) { return false, nil + } else if err != nil { + return false, err } - if exists { - return (result == 1), nil - } else { - return (result == 0), nil - } + return (result == 1), nil } -func assertColumnExists(t *testing.T, db *sql.DB, schema string, table string, column string) { - exists, err := checkColumn(t, db, schema, table, column, true) +func assertColumnExists(t *testing.T, db *sql.DB, column columnMetadata) { + exists, err := checkColumnExists(db, column) assert.NoError(t, err) - assert.Equal(t, true, exists) + assert.True(t, exists) } -func assertColumnNotExists(t *testing.T, db *sql.DB, schema string, table string, column string) { - notExists, err := checkColumn(t, db, schema, table, column, false) +func assertColumnNotExists(t *testing.T, db *sql.DB, column columnMetadata) { + exists, err := checkColumnExists(db, column) assert.NoError(t, err) - assert.Equal(t, true, notExists) + assert.False(t, exists) +} + +func assertColumnsExist(t *testing.T, db *sql.DB, columns 
[]columnMetadata) { + for _, column := range columns { + assertColumnExists(t, db, column) + } } -func checkTable(t *testing.T, db *sql.DB, schema string, table string, exists bool) (bool, error) { +func assertColumnsNotExist(t *testing.T, db *sql.DB, columns []columnMetadata) { + for _, column := range columns { + assertColumnNotExists(t, db, column) + } +} + +func checkTableExists(db *sql.DB, table tableMetadata) (bool, error) { const getTable = `SELECT count(*) FROM information_schema.tables WHERE table_schema=$1 AND table_name=$2` var result int - row := db.QueryRow(getTable, schema, table) + row := db.QueryRow(getTable, table.schema, table.name) err := row.Scan(&result) - if err != nil { + if errors.Is(err, pgx.ErrNoRows) { return false, nil + } else if err != nil { + return false, err } - if exists { - return (result == 1), nil - } else { - return (result == 0), nil + return (result == 1), nil +} + +func assertTableExists(t *testing.T, db *sql.DB, table tableMetadata) { + exists, err := checkTableExists(db, table) + assert.NoError(t, err) + assert.True(t, exists) +} + +func assertTableNotExists(t *testing.T, db *sql.DB, table tableMetadata) { + exists, err := checkTableExists(db, table) + assert.NoError(t, err) + assert.False(t, exists) +} + +func assertTablesExist(t *testing.T, db *sql.DB, tables []tableMetadata) { + for _, table := range tables { + assertTableExists(t, db, table) + } +} + +func assertTablesNotExist(t *testing.T, db *sql.DB, tables []tableMetadata) { + for _, table := range tables { + assertTableNotExists(t, db, table) + } +} + +func checkIndexExists(db *sql.DB, index string) (bool, error) { + const getIndex = `SELECT count(*) FROM pg_indexes WHERE indexname = $1;` + row := db.QueryRow(getIndex, index) + + var result int + err := row.Scan(&result) + if errors.Is(err, pgx.ErrNoRows) { + return false, nil + } else if err != nil { + return false, err } + + return (result == 1), nil } -func assertTableExists(t *testing.T, db *sql.DB, schema 
string, table string) { - exists, err := checkTable(t, db, schema, table, true) +func assertIndexExists(t *testing.T, db *sql.DB, index string) { + exists, err := checkIndexExists(db, index) assert.NoError(t, err) - assert.Equal(t, true, exists) + assert.True(t, exists) } -func assertTableNotExists(t *testing.T, db *sql.DB, schema string, table string) { - notExists, err := checkTable(t, db, schema, table, false) +func assertIndexNotExists(t *testing.T, db *sql.DB, index string) { + exists, err := checkIndexExists(db, index) assert.NoError(t, err) - assert.Equal(t, true, notExists) + assert.False(t, exists) +} + +func assertIndexesExist(t *testing.T, db *sql.DB, indexes []string) { + for _, index := range indexes { + assertIndexExists(t, db, index) + } +} + +func assertIndexesNotExist(t *testing.T, db *sql.DB, indexes []string) { + for _, index := range indexes { + assertIndexNotExists(t, db, index) + } } From 1a180aa2e93eb5a23679b923ac76a348631917e5 Mon Sep 17 00:00:00 2001 From: tclemos Date: Tue, 26 Mar 2024 15:33:30 -0300 Subject: [PATCH 05/17] fix linter issues --- db/migrations/state/0013_test.go | 2 -- db/migrations/state/utils_test.go | 8 ++++++++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/db/migrations/state/0013_test.go b/db/migrations/state/0013_test.go index 56ef9db23a..28bcc587ac 100644 --- a/db/migrations/state/0013_test.go +++ b/db/migrations/state/0013_test.go @@ -8,8 +8,6 @@ import ( "github.com/stretchr/testify/assert" ) -const () - // this migration changes length of the token name type migrationTest0013 struct { blockHashValue string diff --git a/db/migrations/state/utils_test.go b/db/migrations/state/utils_test.go index df645f85e3..c3b783597b 100644 --- a/db/migrations/state/utils_test.go +++ b/db/migrations/state/utils_test.go @@ -44,13 +44,21 @@ type migrationBase struct { } func (m migrationBase) AssertNewAndRemovedItemsAfterMigrationUp(t *testing.T, db *sql.DB) { + assertIndexesNotExist(t, db, m.removedIndexes) 
assertTablesNotExist(t, db, m.removedTables) + assertColumnsNotExist(t, db, m.removedColumns) + + assertIndexesExist(t, db, m.newIndexes) assertTablesExist(t, db, m.newTables) assertColumnsExist(t, db, m.newColumns) } func (m migrationBase) AssertNewAndRemovedItemsAfterMigrationDown(t *testing.T, db *sql.DB) { + assertIndexesExist(t, db, m.removedIndexes) assertTablesExist(t, db, m.removedTables) + assertColumnsExist(t, db, m.removedColumns) + + assertIndexesNotExist(t, db, m.newIndexes) assertTablesNotExist(t, db, m.newTables) assertColumnsNotExist(t, db, m.newColumns) } From c1177b20eb6b9f0bafbf36197e57c520c5bddf68 Mon Sep 17 00:00:00 2001 From: tclemos Date: Tue, 26 Mar 2024 17:37:22 -0300 Subject: [PATCH 06/17] normalize l1 info tree recursive names; implement state l1 info tree recursive methods --- cmd/run.go | 2 +- db/migrations/state/0019.sql | 8 +- db/migrations/state/0019_test.go | 6 +- l1infotree/tree_recursive.go | 5 +- pool/pool_test.go | 2 +- state/batchV2_test.go | 4 +- state/interfaces.go | 10 +- state/l1infotree_recursive.go | 83 ++++++++++++++++ state/l1infotree_test.go | 15 ++- state/l1infotree_v2_feijoa.go | 54 ---------- state/mocks/mock_storage.go | 88 ++++++++--------- state/pgstatestorage/forkid_external_test.go | 8 +- state/pgstatestorage/forkid_test.go | 2 +- state/pgstatestorage/l1infotree_recursive.go | 42 ++++++++ state/pgstatestorage/l1infotree_v2_feijoa.go | 42 -------- state/pgstatestorage/pgstatestorage_test.go | 18 +++- state/reset.go | 3 +- state/state.go | 14 +-- state/syncinginfo_test.go | 4 +- state/test/forkid_common/common.go | 6 +- .../processor_l1_info_tree_update_test.go | 2 +- .../mocks/state_last_block_getter.go | 98 +++++++++++++++++++ test/operations/manager.go | 6 +- tools/datastreamer/main.go | 2 +- tools/state/reprocess_cmd.go | 2 +- 25 files changed, 337 insertions(+), 189 deletions(-) create mode 100644 state/l1infotree_recursive.go delete mode 100644 state/l1infotree_v2_feijoa.go create mode 100644 
state/pgstatestorage/l1infotree_recursive.go delete mode 100644 state/pgstatestorage/l1infotree_v2_feijoa.go create mode 100644 synchronizer/common/syncinterfaces/mocks/state_last_block_getter.go diff --git a/cmd/run.go b/cmd/run.go index 3d8d4d7e6d..d3a227393e 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -490,7 +490,7 @@ func newState(ctx context.Context, c *config.Config, etherman *etherman.Client, } stateDb := pgstatestorage.NewPostgresStorage(stateCfg, sqlDB) - st := state.NewState(stateCfg, stateDb, executorClient, stateTree, eventLog, nil) + st := state.NewState(stateCfg, stateDb, executorClient, stateTree, eventLog, nil, nil) // This is to force to build cache, and check that DB is ok before starting the application l1InfoRoot, err := st.GetCurrentL1InfoRoot(ctx, nil) if err != nil { diff --git a/db/migrations/state/0019.sql b/db/migrations/state/0019.sql index 4628575f9c..d2955452a5 100644 --- a/db/migrations/state/0019.sql +++ b/db/migrations/state/0019.sql @@ -2,11 +2,11 @@ -- +migrate Up ALTER TABLE state.exit_root - ADD COLUMN IF NOT EXISTS l1_info_tree_index_feijoa BIGINT DEFAULT NULL UNIQUE; -CREATE INDEX IF NOT EXISTS idx_exit_root_l1_info_tree_index_feijoa ON state.exit_root (l1_info_tree_index_feijoa); + ADD COLUMN IF NOT EXISTS l1_info_tree_recursive_index BIGINT DEFAULT NULL UNIQUE; +CREATE INDEX IF NOT EXISTS idx_exit_root_l1_info_tree_recursive_index ON state.exit_root (l1_info_tree_recursive_index); -- +migrate Down ALTER TABLE state.exit_root - DROP COLUMN IF EXISTS l1_info_tree_index_feijoa; -DROP INDEX IF EXISTS state.idx_exit_root_l1_info_tree_index_feijoa; + DROP COLUMN IF EXISTS l1_info_tree_recursive_index; +DROP INDEX IF EXISTS state.idx_exit_root_l1_info_tree_recursive_index; diff --git a/db/migrations/state/0019_test.go b/db/migrations/state/0019_test.go index 839b114c60..ddd93cd29e 100644 --- a/db/migrations/state/0019_test.go +++ b/db/migrations/state/0019_test.go @@ -38,7 +38,7 @@ func (m migrationTest0019) insertRowInOldTable(db 
*sql.DB, args ...interface{}) func (m migrationTest0019) insertRowInMigratedTable(db *sql.DB, args ...interface{}) error { sql := ` - INSERT INTO state.exit_root (block_num, "timestamp", mainnet_exit_root, rollup_exit_root, global_exit_root, prev_block_hash, l1_info_root, l1_info_tree_index, l1_info_tree_index_feijoa) + INSERT INTO state.exit_root (block_num, "timestamp", mainnet_exit_root, rollup_exit_root, global_exit_root, prev_block_hash, l1_info_root, l1_info_tree_index, l1_info_tree_recursive_index) VALUES ( $1, $2, $3, $4, $5, $6, $7, $8, $9);` _, err := db.Exec(sql, args...) @@ -88,10 +88,10 @@ func TestMigration0019(t *testing.T) { m := migrationTest0019{ migrationBase: migrationBase{ newIndexes: []string{ - "idx_exit_root_l1_info_tree_index_feijoa", + "idx_exit_root_l1_info_tree_recursive_index", }, newColumns: []columnMetadata{ - {"state", "exit_root", "l1_info_tree_index_feijoa"}, + {"state", "exit_root", "l1_info_tree_recursive_index"}, }, }, diff --git a/l1infotree/tree_recursive.go b/l1infotree/tree_recursive.go index 6d0b855658..6a2562b5d8 100644 --- a/l1infotree/tree_recursive.go +++ b/l1infotree/tree_recursive.go @@ -30,14 +30,11 @@ func NewL1InfoTreeRecursive(height uint8) (*L1InfoTreeRecursive, error) { // NewL1InfoTreeRecursiveFromLeaves creates a new L1InfoTreeRecursive from leaves func NewL1InfoTreeRecursiveFromLeaves(height uint8, leaves [][32]byte) (*L1InfoTreeRecursive, error) { - historic, err := NewL1InfoTree(height, nil) + res, err := NewL1InfoTreeRecursive(height) if err != nil { return nil, err } - res := &L1InfoTreeRecursive{ - historicL1InfoTree: historic, - } for _, leaf := range leaves { _, err := res.AddLeaf(uint32(len(res.leaves)), leaf) if err != nil { diff --git a/pool/pool_test.go b/pool/pool_test.go index a68b3bdebe..2af6f2348f 100644 --- a/pool/pool_test.go +++ b/pool/pool_test.go @@ -1114,7 +1114,7 @@ func newState(sqlDB *pgxpool.Pool, eventLog *event.EventLog) *state.State { stateDBClient, _, _ := 
merkletree.NewMTDBServiceClient(ctx, mtDBServerConfig) stateTree := merkletree.NewStateTree(stateDBClient) - st := state.NewState(stCfg, stateDb, executorClient, stateTree, eventLog, nil) + st := state.NewState(stCfg, stateDb, executorClient, stateTree, eventLog, nil, nil) return st } diff --git a/state/batchV2_test.go b/state/batchV2_test.go index 4495d7a204..78b07fa521 100644 --- a/state/batchV2_test.go +++ b/state/batchV2_test.go @@ -44,7 +44,7 @@ func TestProcessAndStoreClosedBatchV2(t *testing.T) { ctx := context.Background() mockStorage := mocks.NewStorageMock(t) mockExecutor := mocks.NewExecutorServiceClientMock(t) - testState := state.NewState(stateCfg, mockStorage, mockExecutor, nil, nil, nil) + testState := state.NewState(stateCfg, mockStorage, mockExecutor, nil, nil, nil, nil) mockStorage.EXPECT().Begin(ctx).Return(mocks.NewDbTxMock(t), nil) dbTx, err := testState.BeginStateTransaction(ctx) require.NoError(t, err) @@ -122,7 +122,7 @@ func TestProcessAndStoreClosedBatchV2ErrorOOC(t *testing.T) { ctx := context.Background() mockStorage := mocks.NewStorageMock(t) mockExecutor := mocks.NewExecutorServiceClientMock(t) - testState := state.NewState(stateCfg, mockStorage, mockExecutor, nil, nil, nil) + testState := state.NewState(stateCfg, mockStorage, mockExecutor, nil, nil, nil, nil) mockStorage.EXPECT().Begin(ctx).Return(mocks.NewDbTxMock(t), nil) dbTx, err := testState.BeginStateTransaction(ctx) require.NoError(t, err) diff --git a/state/interfaces.go b/state/interfaces.go index e561456e67..d6556d44c1 100644 --- a/state/interfaces.go +++ b/state/interfaces.go @@ -10,11 +10,6 @@ import ( "github.com/jackc/pgx/v4" ) -type storageL1InfoTreeV2 interface { - AddL1InfoRootToExitRootV2Feijoa(ctx context.Context, exitRoot *L1InfoTreeExitRootStorageEntryV2Feijoa, dbTx pgx.Tx) error - GetAllL1InfoRootEntriesV2Feijoa(ctx context.Context, dbTx pgx.Tx) ([]L1InfoTreeExitRootStorageEntryV2Feijoa, error) - GetLatestL1InfoRootV2Feijoa(ctx context.Context, maxBlockNumber 
uint64, dbTx pgx.Tx) (L1InfoTreeExitRootStorageEntryV2Feijoa, error) -} type storage interface { Exec(ctx context.Context, sql string, arguments ...interface{}) (commandTag pgconn.CommandTag, err error) Query(ctx context.Context, sql string, args ...interface{}) (pgx.Rows, error) @@ -163,6 +158,7 @@ type storage interface { UpdateBatchAsChecked(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error GetNotCheckedBatches(ctx context.Context, dbTx pgx.Tx) ([]*Batch, error) GetLastL2BlockByBatchNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (*L2Block, error) - - storageL1InfoTreeV2 + AddL1InfoTreeRecursiveRootToExitRoot(ctx context.Context, exitRoot *L1InfoTreeRecursiveExitRootStorageEntry, dbTx pgx.Tx) error + GetAllL1InfoTreeRecursiveRootEntries(ctx context.Context, dbTx pgx.Tx) ([]L1InfoTreeRecursiveExitRootStorageEntry, error) + GetLatestL1InfoTreeRecursiveRoot(ctx context.Context, maxBlockNumber uint64, dbTx pgx.Tx) (L1InfoTreeRecursiveExitRootStorageEntry, error) } diff --git a/state/l1infotree_recursive.go b/state/l1infotree_recursive.go new file mode 100644 index 0000000000..f33047bd98 --- /dev/null +++ b/state/l1infotree_recursive.go @@ -0,0 +1,83 @@ +package state + +import ( + "context" + "errors" + "fmt" + + "github.com/0xPolygonHermez/zkevm-node/l1infotree" + "github.com/0xPolygonHermez/zkevm-node/log" + "github.com/ethereum/go-ethereum/common" + "github.com/jackc/pgx/v4" +) + +// L1InfoTreeRecursiveExitRootStorageEntry leaf of the L1InfoTreeRecursive +type L1InfoTreeRecursiveExitRootStorageEntry L1InfoTreeExitRootStorageEntry + +func (s *State) buildL1InfoTreeRecursiveCacheIfNeed(ctx context.Context, dbTx pgx.Tx) error { + if s.l1InfoTreeRecursive != nil { + return nil + } + log.Debugf("Building L1InfoTree cache") + allLeaves, err := s.GetAllL1InfoTreeRecursiveRootEntries(ctx, dbTx) + if err != nil { + log.Error("error getting all leaves. Error: ", err) + return fmt.Errorf("error getting all leaves. 
Error: %w", err)
+	}
+
+	var leaves [][32]byte
+	for _, leaf := range allLeaves {
+		leaves = append(leaves, leaf.Hash())
+	}
+	mt, err := l1infotree.NewL1InfoTreeRecursiveFromLeaves(uint8(32), leaves) //nolint:gomnd
+	if err != nil {
+		log.Error("error creating L1InfoTree. Error: ", err)
+		return fmt.Errorf("error creating L1InfoTree. Error: %w", err)
+	}
+	s.l1InfoTreeRecursive = mt
+	return nil
+}
+
+// AddLeafToL1InfoTreeRecursive adds a new leaf to the L1InfoTreeRecursive and returns the storage entry and error. NOTE(review): the body calls s.GetLatestIndex, s.buildL1InfoTreeCacheIfNeed and s.AddL1InfoRootToExitRoot (the non-recursive variants) — likely should use the L1InfoTreeRecursive counterparts; confirm.
+func (s *State) AddLeafToL1InfoTreeRecursive(ctx context.Context, l1InfoTreeLeaf *L1InfoTreeLeaf, dbTx pgx.Tx) (*L1InfoTreeExitRootStorageEntry, error) {
+	var newIndex uint32
+	gerIndex, err := s.GetLatestIndex(ctx, dbTx)
+	if err != nil && !errors.Is(err, ErrNotFound) {
+		log.Error("error getting latest L1InfoTreeRecursive index. Error: ", err)
+		return nil, err
+	} else if err == nil {
+		newIndex = gerIndex + 1
+	}
+	err = s.buildL1InfoTreeCacheIfNeed(ctx, dbTx)
+	if err != nil {
+		log.Error("error building L1InfoTreeRecursive cache. Error: ", err)
+		return nil, err
+	}
+	log.Debug("latestIndex: ", gerIndex)
+	root, err := s.l1InfoTreeRecursive.AddLeaf(newIndex, l1InfoTreeLeaf.Hash())
+	if err != nil {
+		log.Error("error add new leaf to the L1InfoTreeRecursive. Error: ", err)
+		return nil, err
+	}
+	entry := L1InfoTreeExitRootStorageEntry{
+		L1InfoTreeLeaf:  *l1InfoTreeLeaf,
+		L1InfoTreeRoot:  root,
+		L1InfoTreeIndex: newIndex,
+	}
+	err = s.AddL1InfoRootToExitRoot(ctx, &entry, dbTx)
+	if err != nil {
+		log.Error("error adding L1InfoRoot to ExitRoot. Error: ", err)
+		return nil, err
+	}
+	return &entry, nil
+}
+
+// GetCurrentL1InfoTreeRecursiveRoot returns the current root of the L1InfoTreeRecursive
+func (s *State) GetCurrentL1InfoTreeRecursiveRoot(ctx context.Context, dbTx pgx.Tx) (common.Hash, error) {
+	err := s.buildL1InfoTreeRecursiveCacheIfNeed(ctx, dbTx)
+	if err != nil {
+		log.Error("error building L1InfoTree cache. 
Error: ", err) + return ZeroHash, err + } + return s.l1InfoTreeRecursive.GetRoot(), nil +} diff --git a/state/l1infotree_test.go b/state/l1infotree_test.go index ccba8d390e..c4bfbcfb4d 100644 --- a/state/l1infotree_test.go +++ b/state/l1infotree_test.go @@ -45,7 +45,11 @@ func TestFirstLeafOfL1InfoTreeIsIndex0(t *testing.T) { if err != nil { panic(err) } - testState := state.NewState(stateCfg, storage, nil, nil, nil, mt) + mtr, err := l1infotree.NewL1InfoTreeRecursive(32) + if err != nil { + panic(err) + } + testState := state.NewState(stateCfg, storage, nil, nil, nil, mt, mtr) dbTx, err := testState.BeginStateTransaction(ctx) defer func() { _ = dbTx.Rollback(ctx) @@ -82,7 +86,7 @@ func TestGetCurrentL1InfoRootBuildCacheIfNil(t *testing.T) { }}, } ctx := context.Background() - testState := state.NewState(stateCfg, mockStorage, nil, nil, nil, nil) + testState := state.NewState(stateCfg, mockStorage, nil, nil, nil, nil, nil) mockStorage.EXPECT().GetAllL1InfoRootEntries(ctx, nil).Return([]state.L1InfoTreeExitRootStorageEntry{}, nil) @@ -108,7 +112,10 @@ func TestGetCurrentL1InfoRootNoBuildCacheIfNotNil(t *testing.T) { ctx := context.Background() l1InfoTree, err := l1infotree.NewL1InfoTree(uint8(32), nil) require.NoError(t, err) - testState := state.NewState(stateCfg, mockStorage, nil, nil, nil, l1InfoTree) + + l1InfoTreeRecursive, err := l1infotree.NewL1InfoTreeRecursive(32) + require.NoError(t, err) + testState := state.NewState(stateCfg, mockStorage, nil, nil, nil, l1InfoTree, l1InfoTreeRecursive) // GetCurrentL1InfoRoot use the cache value in state.l1InfoTree l1InfoRoot, err := testState.GetCurrentL1InfoRoot(ctx, nil) @@ -131,7 +138,7 @@ func TestAddL1InfoTreeLeafIfNil(t *testing.T) { }}, } ctx := context.Background() - testState := state.NewState(stateCfg, mockStorage, nil, nil, nil, nil) + testState := state.NewState(stateCfg, mockStorage, nil, nil, nil, nil, nil) mockStorage.EXPECT().GetLatestIndex(ctx, mock.Anything).Return(uint32(0), state.ErrNotFound) 
mockStorage.EXPECT().AddL1InfoRootToExitRoot(ctx, mock.Anything, mock.Anything).Return(nil) diff --git a/state/l1infotree_v2_feijoa.go b/state/l1infotree_v2_feijoa.go deleted file mode 100644 index 31da4f90c9..0000000000 --- a/state/l1infotree_v2_feijoa.go +++ /dev/null @@ -1,54 +0,0 @@ -package state - -import ( - "context" - "fmt" - - "github.com/0xPolygonHermez/zkevm-node/l1infotree" - "github.com/0xPolygonHermez/zkevm-node/log" - "github.com/ethereum/go-ethereum/common" - "github.com/jackc/pgx/v4" -) - -// L1InfoTreeExitRootStorageEntryV2Feijoa leaf of the L1InfoTreeRecurisve -type L1InfoTreeExitRootStorageEntryV2Feijoa L1InfoTreeExitRootStorageEntry - -// StateL1InfoTreeV2 state for L1InfoTreeV2 Feijoa Recursive Tree -type StateL1InfoTreeV2 struct { - storageL1InfoTreeV2 storageL1InfoTreeV2 - l1InfoTreeV2 *l1infotree.L1InfoTreeRecursive -} - -func (s *StateL1InfoTreeV2) buildL1InfoTreeV2CacheIfNeed(ctx context.Context, dbTx pgx.Tx) error { - if s.l1InfoTreeV2 != nil { - return nil - } - log.Debugf("Building L1InfoTree cache") - allLeaves, err := s.storageL1InfoTreeV2.GetAllL1InfoRootEntriesV2Feijoa(ctx, dbTx) - if err != nil { - log.Error("error getting all leaves. Error: ", err) - return fmt.Errorf("error getting all leaves. Error: %w", err) - } - - var leaves [][32]byte - for _, leaf := range allLeaves { - leaves = append(leaves, leaf.Hash()) - } - mt, err := l1infotree.NewL1InfoTreeRecursiveFromLeaves(uint8(32), leaves) //nolint:gomnd - if err != nil { - log.Error("error creating L1InfoTree. Error: ", err) - return fmt.Errorf("error creating L1InfoTree. Error: %w", err) - } - s.l1InfoTreeV2 = mt - return nil -} - -// GetCurrentL1InfoRoot Return current L1InfoRoot -func (s *StateL1InfoTreeV2) GetCurrentL1InfoRoot(ctx context.Context, dbTx pgx.Tx) (common.Hash, error) { - err := s.buildL1InfoTreeV2CacheIfNeed(ctx, dbTx) - if err != nil { - log.Error("error building L1InfoTree cache. 
Error: ", err) - return ZeroHash, err - } - return s.l1InfoTreeV2.GetRoot(), nil -} diff --git a/state/mocks/mock_storage.go b/state/mocks/mock_storage.go index 7555009224..ccbeca5f38 100644 --- a/state/mocks/mock_storage.go +++ b/state/mocks/mock_storage.go @@ -418,16 +418,16 @@ func (_c *StorageMock_AddL1InfoRootToExitRoot_Call) RunAndReturn(run func(contex return _c } -// AddL1InfoRootToExitRootV2Feijoa provides a mock function with given fields: ctx, exitRoot, dbTx -func (_m *StorageMock) AddL1InfoRootToExitRootV2Feijoa(ctx context.Context, exitRoot *state.L1InfoTreeExitRootStorageEntryV2Feijoa, dbTx pgx.Tx) error { +// AddL1InfoTreeRecursiveRootToExitRoot provides a mock function with given fields: ctx, exitRoot, dbTx +func (_m *StorageMock) AddL1InfoTreeRecursiveRootToExitRoot(ctx context.Context, exitRoot *state.L1InfoTreeRecursiveExitRootStorageEntry, dbTx pgx.Tx) error { ret := _m.Called(ctx, exitRoot, dbTx) if len(ret) == 0 { - panic("no return value specified for AddL1InfoRootToExitRootV2Feijoa") + panic("no return value specified for AddL1InfoTreeRecursiveRootToExitRoot") } var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *state.L1InfoTreeExitRootStorageEntryV2Feijoa, pgx.Tx) error); ok { + if rf, ok := ret.Get(0).(func(context.Context, *state.L1InfoTreeRecursiveExitRootStorageEntry, pgx.Tx) error); ok { r0 = rf(ctx, exitRoot, dbTx) } else { r0 = ret.Error(0) @@ -436,32 +436,32 @@ func (_m *StorageMock) AddL1InfoRootToExitRootV2Feijoa(ctx context.Context, exit return r0 } -// StorageMock_AddL1InfoRootToExitRootV2Feijoa_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddL1InfoRootToExitRootV2Feijoa' -type StorageMock_AddL1InfoRootToExitRootV2Feijoa_Call struct { +// StorageMock_AddL1InfoTreeRecursiveRootToExitRoot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddL1InfoTreeRecursiveRootToExitRoot' +type 
StorageMock_AddL1InfoTreeRecursiveRootToExitRoot_Call struct { *mock.Call } -// AddL1InfoRootToExitRootV2Feijoa is a helper method to define mock.On call +// AddL1InfoTreeRecursiveRootToExitRoot is a helper method to define mock.On call // - ctx context.Context -// - exitRoot *state.L1InfoTreeExitRootStorageEntryV2Feijoa +// - exitRoot *state.L1InfoTreeRecursiveExitRootStorageEntry // - dbTx pgx.Tx -func (_e *StorageMock_Expecter) AddL1InfoRootToExitRootV2Feijoa(ctx interface{}, exitRoot interface{}, dbTx interface{}) *StorageMock_AddL1InfoRootToExitRootV2Feijoa_Call { - return &StorageMock_AddL1InfoRootToExitRootV2Feijoa_Call{Call: _e.mock.On("AddL1InfoRootToExitRootV2Feijoa", ctx, exitRoot, dbTx)} +func (_e *StorageMock_Expecter) AddL1InfoTreeRecursiveRootToExitRoot(ctx interface{}, exitRoot interface{}, dbTx interface{}) *StorageMock_AddL1InfoTreeRecursiveRootToExitRoot_Call { + return &StorageMock_AddL1InfoTreeRecursiveRootToExitRoot_Call{Call: _e.mock.On("AddL1InfoTreeRecursiveRootToExitRoot", ctx, exitRoot, dbTx)} } -func (_c *StorageMock_AddL1InfoRootToExitRootV2Feijoa_Call) Run(run func(ctx context.Context, exitRoot *state.L1InfoTreeExitRootStorageEntryV2Feijoa, dbTx pgx.Tx)) *StorageMock_AddL1InfoRootToExitRootV2Feijoa_Call { +func (_c *StorageMock_AddL1InfoTreeRecursiveRootToExitRoot_Call) Run(run func(ctx context.Context, exitRoot *state.L1InfoTreeRecursiveExitRootStorageEntry, dbTx pgx.Tx)) *StorageMock_AddL1InfoTreeRecursiveRootToExitRoot_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(*state.L1InfoTreeExitRootStorageEntryV2Feijoa), args[2].(pgx.Tx)) + run(args[0].(context.Context), args[1].(*state.L1InfoTreeRecursiveExitRootStorageEntry), args[2].(pgx.Tx)) }) return _c } -func (_c *StorageMock_AddL1InfoRootToExitRootV2Feijoa_Call) Return(_a0 error) *StorageMock_AddL1InfoRootToExitRootV2Feijoa_Call { +func (_c *StorageMock_AddL1InfoTreeRecursiveRootToExitRoot_Call) Return(_a0 error) 
*StorageMock_AddL1InfoTreeRecursiveRootToExitRoot_Call { _c.Call.Return(_a0) return _c } -func (_c *StorageMock_AddL1InfoRootToExitRootV2Feijoa_Call) RunAndReturn(run func(context.Context, *state.L1InfoTreeExitRootStorageEntryV2Feijoa, pgx.Tx) error) *StorageMock_AddL1InfoRootToExitRootV2Feijoa_Call { +func (_c *StorageMock_AddL1InfoTreeRecursiveRootToExitRoot_Call) RunAndReturn(run func(context.Context, *state.L1InfoTreeRecursiveExitRootStorageEntry, pgx.Tx) error) *StorageMock_AddL1InfoTreeRecursiveRootToExitRoot_Call { _c.Call.Return(run) return _c } @@ -1466,24 +1466,24 @@ func (_c *StorageMock_GetAllL1InfoRootEntries_Call) RunAndReturn(run func(contex return _c } -// GetAllL1InfoRootEntriesV2Feijoa provides a mock function with given fields: ctx, dbTx -func (_m *StorageMock) GetAllL1InfoRootEntriesV2Feijoa(ctx context.Context, dbTx pgx.Tx) ([]state.L1InfoTreeExitRootStorageEntryV2Feijoa, error) { +// GetAllL1InfoTreeRecursiveRootEntries provides a mock function with given fields: ctx, dbTx +func (_m *StorageMock) GetAllL1InfoTreeRecursiveRootEntries(ctx context.Context, dbTx pgx.Tx) ([]state.L1InfoTreeRecursiveExitRootStorageEntry, error) { ret := _m.Called(ctx, dbTx) if len(ret) == 0 { - panic("no return value specified for GetAllL1InfoRootEntriesV2Feijoa") + panic("no return value specified for GetAllL1InfoTreeRecursiveRootEntries") } - var r0 []state.L1InfoTreeExitRootStorageEntryV2Feijoa + var r0 []state.L1InfoTreeRecursiveExitRootStorageEntry var r1 error - if rf, ok := ret.Get(0).(func(context.Context, pgx.Tx) ([]state.L1InfoTreeExitRootStorageEntryV2Feijoa, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, pgx.Tx) ([]state.L1InfoTreeRecursiveExitRootStorageEntry, error)); ok { return rf(ctx, dbTx) } - if rf, ok := ret.Get(0).(func(context.Context, pgx.Tx) []state.L1InfoTreeExitRootStorageEntryV2Feijoa); ok { + if rf, ok := ret.Get(0).(func(context.Context, pgx.Tx) []state.L1InfoTreeRecursiveExitRootStorageEntry); ok { r0 = rf(ctx, dbTx) } 
else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]state.L1InfoTreeExitRootStorageEntryV2Feijoa) + r0 = ret.Get(0).([]state.L1InfoTreeRecursiveExitRootStorageEntry) } } @@ -1496,31 +1496,31 @@ func (_m *StorageMock) GetAllL1InfoRootEntriesV2Feijoa(ctx context.Context, dbTx return r0, r1 } -// StorageMock_GetAllL1InfoRootEntriesV2Feijoa_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAllL1InfoRootEntriesV2Feijoa' -type StorageMock_GetAllL1InfoRootEntriesV2Feijoa_Call struct { +// StorageMock_GetAllL1InfoTreeRecursiveRootEntries_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAllL1InfoTreeRecursiveRootEntries' +type StorageMock_GetAllL1InfoTreeRecursiveRootEntries_Call struct { *mock.Call } -// GetAllL1InfoRootEntriesV2Feijoa is a helper method to define mock.On call +// GetAllL1InfoTreeRecursiveRootEntries is a helper method to define mock.On call // - ctx context.Context // - dbTx pgx.Tx -func (_e *StorageMock_Expecter) GetAllL1InfoRootEntriesV2Feijoa(ctx interface{}, dbTx interface{}) *StorageMock_GetAllL1InfoRootEntriesV2Feijoa_Call { - return &StorageMock_GetAllL1InfoRootEntriesV2Feijoa_Call{Call: _e.mock.On("GetAllL1InfoRootEntriesV2Feijoa", ctx, dbTx)} +func (_e *StorageMock_Expecter) GetAllL1InfoTreeRecursiveRootEntries(ctx interface{}, dbTx interface{}) *StorageMock_GetAllL1InfoTreeRecursiveRootEntries_Call { + return &StorageMock_GetAllL1InfoTreeRecursiveRootEntries_Call{Call: _e.mock.On("GetAllL1InfoTreeRecursiveRootEntries", ctx, dbTx)} } -func (_c *StorageMock_GetAllL1InfoRootEntriesV2Feijoa_Call) Run(run func(ctx context.Context, dbTx pgx.Tx)) *StorageMock_GetAllL1InfoRootEntriesV2Feijoa_Call { +func (_c *StorageMock_GetAllL1InfoTreeRecursiveRootEntries_Call) Run(run func(ctx context.Context, dbTx pgx.Tx)) *StorageMock_GetAllL1InfoTreeRecursiveRootEntries_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(context.Context), args[1].(pgx.Tx)) }) return 
_c } -func (_c *StorageMock_GetAllL1InfoRootEntriesV2Feijoa_Call) Return(_a0 []state.L1InfoTreeExitRootStorageEntryV2Feijoa, _a1 error) *StorageMock_GetAllL1InfoRootEntriesV2Feijoa_Call { +func (_c *StorageMock_GetAllL1InfoTreeRecursiveRootEntries_Call) Return(_a0 []state.L1InfoTreeRecursiveExitRootStorageEntry, _a1 error) *StorageMock_GetAllL1InfoTreeRecursiveRootEntries_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *StorageMock_GetAllL1InfoRootEntriesV2Feijoa_Call) RunAndReturn(run func(context.Context, pgx.Tx) ([]state.L1InfoTreeExitRootStorageEntryV2Feijoa, error)) *StorageMock_GetAllL1InfoRootEntriesV2Feijoa_Call { +func (_c *StorageMock_GetAllL1InfoTreeRecursiveRootEntries_Call) RunAndReturn(run func(context.Context, pgx.Tx) ([]state.L1InfoTreeRecursiveExitRootStorageEntry, error)) *StorageMock_GetAllL1InfoTreeRecursiveRootEntries_Call { _c.Call.Return(run) return _c } @@ -5016,23 +5016,23 @@ func (_c *StorageMock_GetLatestL1InfoRoot_Call) RunAndReturn(run func(context.Co return _c } -// GetLatestL1InfoRootV2Feijoa provides a mock function with given fields: ctx, maxBlockNumber, dbTx -func (_m *StorageMock) GetLatestL1InfoRootV2Feijoa(ctx context.Context, maxBlockNumber uint64, dbTx pgx.Tx) (state.L1InfoTreeExitRootStorageEntryV2Feijoa, error) { +// GetLatestL1InfoTreeRecursiveRoot provides a mock function with given fields: ctx, maxBlockNumber, dbTx +func (_m *StorageMock) GetLatestL1InfoTreeRecursiveRoot(ctx context.Context, maxBlockNumber uint64, dbTx pgx.Tx) (state.L1InfoTreeRecursiveExitRootStorageEntry, error) { ret := _m.Called(ctx, maxBlockNumber, dbTx) if len(ret) == 0 { - panic("no return value specified for GetLatestL1InfoRootV2Feijoa") + panic("no return value specified for GetLatestL1InfoTreeRecursiveRoot") } - var r0 state.L1InfoTreeExitRootStorageEntryV2Feijoa + var r0 state.L1InfoTreeRecursiveExitRootStorageEntry var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) (state.L1InfoTreeExitRootStorageEntryV2Feijoa, 
error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) (state.L1InfoTreeRecursiveExitRootStorageEntry, error)); ok { return rf(ctx, maxBlockNumber, dbTx) } - if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) state.L1InfoTreeExitRootStorageEntryV2Feijoa); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) state.L1InfoTreeRecursiveExitRootStorageEntry); ok { r0 = rf(ctx, maxBlockNumber, dbTx) } else { - r0 = ret.Get(0).(state.L1InfoTreeExitRootStorageEntryV2Feijoa) + r0 = ret.Get(0).(state.L1InfoTreeRecursiveExitRootStorageEntry) } if rf, ok := ret.Get(1).(func(context.Context, uint64, pgx.Tx) error); ok { @@ -5044,32 +5044,32 @@ func (_m *StorageMock) GetLatestL1InfoRootV2Feijoa(ctx context.Context, maxBlock return r0, r1 } -// StorageMock_GetLatestL1InfoRootV2Feijoa_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLatestL1InfoRootV2Feijoa' -type StorageMock_GetLatestL1InfoRootV2Feijoa_Call struct { +// StorageMock_GetLatestL1InfoTreeRecursiveRoot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLatestL1InfoTreeRecursiveRoot' +type StorageMock_GetLatestL1InfoTreeRecursiveRoot_Call struct { *mock.Call } -// GetLatestL1InfoRootV2Feijoa is a helper method to define mock.On call +// GetLatestL1InfoTreeRecursiveRoot is a helper method to define mock.On call // - ctx context.Context // - maxBlockNumber uint64 // - dbTx pgx.Tx -func (_e *StorageMock_Expecter) GetLatestL1InfoRootV2Feijoa(ctx interface{}, maxBlockNumber interface{}, dbTx interface{}) *StorageMock_GetLatestL1InfoRootV2Feijoa_Call { - return &StorageMock_GetLatestL1InfoRootV2Feijoa_Call{Call: _e.mock.On("GetLatestL1InfoRootV2Feijoa", ctx, maxBlockNumber, dbTx)} +func (_e *StorageMock_Expecter) GetLatestL1InfoTreeRecursiveRoot(ctx interface{}, maxBlockNumber interface{}, dbTx interface{}) *StorageMock_GetLatestL1InfoTreeRecursiveRoot_Call { + return 
&StorageMock_GetLatestL1InfoTreeRecursiveRoot_Call{Call: _e.mock.On("GetLatestL1InfoTreeRecursiveRoot", ctx, maxBlockNumber, dbTx)} } -func (_c *StorageMock_GetLatestL1InfoRootV2Feijoa_Call) Run(run func(ctx context.Context, maxBlockNumber uint64, dbTx pgx.Tx)) *StorageMock_GetLatestL1InfoRootV2Feijoa_Call { +func (_c *StorageMock_GetLatestL1InfoTreeRecursiveRoot_Call) Run(run func(ctx context.Context, maxBlockNumber uint64, dbTx pgx.Tx)) *StorageMock_GetLatestL1InfoTreeRecursiveRoot_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(context.Context), args[1].(uint64), args[2].(pgx.Tx)) }) return _c } -func (_c *StorageMock_GetLatestL1InfoRootV2Feijoa_Call) Return(_a0 state.L1InfoTreeExitRootStorageEntryV2Feijoa, _a1 error) *StorageMock_GetLatestL1InfoRootV2Feijoa_Call { +func (_c *StorageMock_GetLatestL1InfoTreeRecursiveRoot_Call) Return(_a0 state.L1InfoTreeRecursiveExitRootStorageEntry, _a1 error) *StorageMock_GetLatestL1InfoTreeRecursiveRoot_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *StorageMock_GetLatestL1InfoRootV2Feijoa_Call) RunAndReturn(run func(context.Context, uint64, pgx.Tx) (state.L1InfoTreeExitRootStorageEntryV2Feijoa, error)) *StorageMock_GetLatestL1InfoRootV2Feijoa_Call { +func (_c *StorageMock_GetLatestL1InfoTreeRecursiveRoot_Call) RunAndReturn(run func(context.Context, uint64, pgx.Tx) (state.L1InfoTreeRecursiveExitRootStorageEntry, error)) *StorageMock_GetLatestL1InfoTreeRecursiveRoot_Call { _c.Call.Return(run) return _c } diff --git a/state/pgstatestorage/forkid_external_test.go b/state/pgstatestorage/forkid_external_test.go index 705d9c81da..2d562a2fd4 100644 --- a/state/pgstatestorage/forkid_external_test.go +++ b/state/pgstatestorage/forkid_external_test.go @@ -17,7 +17,7 @@ func TestAddForkIDInterval(t *testing.T) { panic(err) } pgStateStorage = pgstatestorage.NewPostgresStorage(state.Config{}, stateDb) - testState = state.NewState(stateCfg, pgStateStorage, executorClient, stateTree, nil, nil) + testState = 
state.NewState(stateCfg, pgStateStorage, executorClient, stateTree, nil, nil, nil) for i := 1; i <= 6; i++ { err = testState.AddForkID(ctx, state.ForkIDInterval{ForkId: uint64(i), BlockNumber: uint64(i * 100), FromBatchNumber: uint64(i * 10), ToBatchNumber: uint64(i*10) + 9}, nil) @@ -76,13 +76,13 @@ func TestGetForkID(t *testing.T) { panic(err) } pgStateStorage = pgstatestorage.NewPostgresStorage(stateCfg, stateDb) - testState = state.NewState(stateCfg, pgStateStorage, executorClient, stateTree, nil, nil) - st := state.NewState(stateCfg, pgstatestorage.NewPostgresStorage(stateCfg, stateDb), executorClient, stateTree, nil, nil) + testState = state.NewState(stateCfg, pgStateStorage, executorClient, stateTree, nil, nil, nil) + st := state.NewState(stateCfg, pgstatestorage.NewPostgresStorage(stateCfg, stateDb), executorClient, stateTree, nil, nil, nil) avoidMemoryStateCfg := stateCfg avoidMemoryStateCfg.AvoidForkIDInMemory = true pgStateStorageAvoidMemory := pgstatestorage.NewPostgresStorage(avoidMemoryStateCfg, stateDb) - stAvoidMemory := state.NewState(avoidMemoryStateCfg, pgStateStorageAvoidMemory, executorClient, stateTree, nil, nil) + stAvoidMemory := state.NewState(avoidMemoryStateCfg, pgStateStorageAvoidMemory, executorClient, stateTree, nil, nil, nil) // persist forkID intervals forkIdIntervals := []state.ForkIDInterval{} diff --git a/state/pgstatestorage/forkid_test.go b/state/pgstatestorage/forkid_test.go index ff698be11b..5581706111 100644 --- a/state/pgstatestorage/forkid_test.go +++ b/state/pgstatestorage/forkid_test.go @@ -93,7 +93,7 @@ func TestGetForkIDByBlockNumber(t *testing.T) { } storage := NewPostgresStorage(cfg, nil) // Create a new State instance with test data - state := state.NewState(cfg, storage, nil, nil, nil, nil) + state := state.NewState(cfg, storage, nil, nil, nil, nil, nil) // Call the function being tested actual := state.GetForkIDByBlockNumber(tc.blockNumber) diff --git a/state/pgstatestorage/l1infotree_recursive.go 
b/state/pgstatestorage/l1infotree_recursive.go new file mode 100644 index 0000000000..ee37609d6b --- /dev/null +++ b/state/pgstatestorage/l1infotree_recursive.go @@ -0,0 +1,42 @@ +package pgstatestorage + +import ( + "context" + + "github.com/0xPolygonHermez/zkevm-node/state" + "github.com/jackc/pgx/v4" +) + +const ( + l1InfoTreeRecursiveIndexFieldName = "l1_info_tree_recursive_index" +) + +// AddL1InfoRootToExitRoot adds a new entry in ExitRoot and returns index of L1InfoTree and error +func (p *PostgresStorage) AddL1InfoTreeRecursiveRootToExitRoot(ctx context.Context, exitRoot *state.L1InfoTreeRecursiveExitRootStorageEntry, dbTx pgx.Tx) error { + exitRootOld := state.L1InfoTreeExitRootStorageEntry(*exitRoot) + return p.addL1InfoRootToExitRootVx(ctx, &exitRootOld, dbTx, l1InfoTreeRecursiveIndexFieldName) +} + +func (p *PostgresStorage) GetAllL1InfoTreeRecursiveRootEntries(ctx context.Context, dbTx pgx.Tx) ([]state.L1InfoTreeRecursiveExitRootStorageEntry, error) { + res, err := p.GetAllL1InfoRootEntriesVx(ctx, dbTx, l1InfoTreeRecursiveIndexFieldName) + if err != nil { + return nil, err + } + var entries []state.L1InfoTreeRecursiveExitRootStorageEntry + for _, entry := range res { + entries = append(entries, state.L1InfoTreeRecursiveExitRootStorageEntry(entry)) + } + return entries, nil +} + +func (p *PostgresStorage) GetLatestL1InfoTreeRecursiveRoot(ctx context.Context, maxBlockNumber uint64, dbTx pgx.Tx) (state.L1InfoTreeRecursiveExitRootStorageEntry, error) { + res, err := p.GetLatestL1InfoRootVx(ctx, maxBlockNumber, dbTx, l1InfoTreeRecursiveIndexFieldName) + if err != nil { + return state.L1InfoTreeRecursiveExitRootStorageEntry{}, err + } + return state.L1InfoTreeRecursiveExitRootStorageEntry(res), nil +} + +func (p *PostgresStorage) GetLatestL1InfoTreeRecursiveIndex(ctx context.Context, dbTx pgx.Tx) (uint32, error) { + return p.GetLatestIndexVx(ctx, dbTx, l1InfoTreeRecursiveIndexFieldName) +} diff --git a/state/pgstatestorage/l1infotree_v2_feijoa.go 
b/state/pgstatestorage/l1infotree_v2_feijoa.go deleted file mode 100644 index 8851401e44..0000000000 --- a/state/pgstatestorage/l1infotree_v2_feijoa.go +++ /dev/null @@ -1,42 +0,0 @@ -package pgstatestorage - -import ( - "context" - - "github.com/0xPolygonHermez/zkevm-node/state" - "github.com/jackc/pgx/v4" -) - -const ( - l1InfoTreeIndexFieldNameV2Feijoa = "l1_info_tree_index_feijoa" -) - -// AddL1InfoRootToExitRoot adds a new entry in ExitRoot and returns index of L1InfoTree and error -func (p *PostgresStorage) AddL1InfoRootToExitRootV2Feijoa(ctx context.Context, exitRoot *state.L1InfoTreeExitRootStorageEntryV2Feijoa, dbTx pgx.Tx) error { - exitRootOld := state.L1InfoTreeExitRootStorageEntry(*exitRoot) - return p.addL1InfoRootToExitRootVx(ctx, &exitRootOld, dbTx, l1InfoTreeIndexFieldNameV2Feijoa) -} - -func (p *PostgresStorage) GetAllL1InfoRootEntriesV2Feijoa(ctx context.Context, dbTx pgx.Tx) ([]state.L1InfoTreeExitRootStorageEntryV2Feijoa, error) { - res, err := p.GetAllL1InfoRootEntriesVx(ctx, dbTx, l1InfoTreeIndexFieldNameV2Feijoa) - if err != nil { - return nil, err - } - var entriesV2Feijoa []state.L1InfoTreeExitRootStorageEntryV2Feijoa - for _, entry := range res { - entriesV2Feijoa = append(entriesV2Feijoa, state.L1InfoTreeExitRootStorageEntryV2Feijoa(entry)) - } - return entriesV2Feijoa, nil -} - -func (p *PostgresStorage) GetLatestL1InfoRootV2Feijoa(ctx context.Context, maxBlockNumber uint64, dbTx pgx.Tx) (state.L1InfoTreeExitRootStorageEntryV2Feijoa, error) { - res, err := p.GetLatestL1InfoRootVx(ctx, maxBlockNumber, dbTx, l1InfoTreeIndexFieldNameV2Feijoa) - if err != nil { - return state.L1InfoTreeExitRootStorageEntryV2Feijoa{}, err - } - return state.L1InfoTreeExitRootStorageEntryV2Feijoa(res), nil -} - -func (p *PostgresStorage) GetLatestIndexV2Feijoa(ctx context.Context, dbTx pgx.Tx) (uint32, error) { - return p.GetLatestIndexVx(ctx, dbTx, l1InfoTreeIndexFieldNameV2Feijoa) -} diff --git a/state/pgstatestorage/pgstatestorage_test.go 
b/state/pgstatestorage/pgstatestorage_test.go index 8b3c18a313..b8329d4f42 100644 --- a/state/pgstatestorage/pgstatestorage_test.go +++ b/state/pgstatestorage/pgstatestorage_test.go @@ -110,7 +110,11 @@ func TestMain(m *testing.M) { if err != nil { panic(err) } - testState = state.NewState(stateCfg, pgstatestorage.NewPostgresStorage(stateCfg, stateDb), executorClient, stateTree, eventLog, mt) + mtr, err := l1infotree.NewL1InfoTreeRecursive(32) + if err != nil { + panic(err) + } + testState = state.NewState(stateCfg, pgstatestorage.NewPostgresStorage(stateCfg, stateDb), executorClient, stateTree, eventLog, mt, mtr) result := m.Run() @@ -881,7 +885,11 @@ func TestGetLogs(t *testing.T) { if err != nil { panic(err) } - testState = state.NewState(stateCfg, pgstatestorage.NewPostgresStorage(cfg, stateDb), executorClient, stateTree, nil, mt) + mtr, err := l1infotree.NewL1InfoTreeRecursive(32) + if err != nil { + panic(err) + } + testState = state.NewState(stateCfg, pgstatestorage.NewPostgresStorage(cfg, stateDb), executorClient, stateTree, nil, mt, mtr) dbTx, err := testState.BeginStateTransaction(ctx) require.NoError(t, err) @@ -1017,7 +1025,11 @@ func TestGetNativeBlockHashesInRange(t *testing.T) { if err != nil { panic(err) } - testState = state.NewState(stateCfg, pgstatestorage.NewPostgresStorage(cfg, stateDb), executorClient, stateTree, nil, mt) + mtr, err := l1infotree.NewL1InfoTreeRecursive(32) + if err != nil { + panic(err) + } + testState = state.NewState(stateCfg, pgstatestorage.NewPostgresStorage(cfg, stateDb), executorClient, stateTree, nil, mt, mtr) dbTx, err := testState.BeginStateTransaction(ctx) require.NoError(t, err) diff --git a/state/reset.go b/state/reset.go index 62571250e0..e54926e051 100644 --- a/state/reset.go +++ b/state/reset.go @@ -15,10 +15,11 @@ func (s *State) Reset(ctx context.Context, blockNumber uint64, dbTx pgx.Tx) erro err := s.ResetToL1BlockNumber(ctx, blockNumber, dbTx) if err == nil { // Discard L1InfoTree cache - // We can't rebuild 
cache, because we are inside a transaction, so we dont known + // We can't rebuild cache, because we are inside a transaction, so we don't known // is going to be a commit or a rollback. So is going to be rebuild on the next // request that needs it. s.l1InfoTree = nil + s.l1InfoTreeRecursive = nil } return err } diff --git a/state/state.go b/state/state.go index 06e2381b7d..a1bd7e5019 100644 --- a/state/state.go +++ b/state/state.go @@ -32,18 +32,18 @@ var ( type State struct { cfg Config storage - executorClient executor.ExecutorServiceClient - tree *merkletree.StateTree - eventLog *event.EventLog - l1InfoTree *l1infotree.L1InfoTree + executorClient executor.ExecutorServiceClient + tree *merkletree.StateTree + eventLog *event.EventLog + l1InfoTree *l1infotree.L1InfoTree + l1InfoTreeRecursive *l1infotree.L1InfoTreeRecursive newL2BlockEvents chan NewL2BlockEvent newL2BlockEventHandlers []NewL2BlockEventHandler - StateL1InfoTreeV2 } // NewState creates a new State -func NewState(cfg Config, storage storage, executorClient executor.ExecutorServiceClient, stateTree *merkletree.StateTree, eventLog *event.EventLog, mt *l1infotree.L1InfoTree) *State { +func NewState(cfg Config, storage storage, executorClient executor.ExecutorServiceClient, stateTree *merkletree.StateTree, eventLog *event.EventLog, mt *l1infotree.L1InfoTree, mtr *l1infotree.L1InfoTreeRecursive) *State { var once sync.Once once.Do(func() { metrics.Register() @@ -58,7 +58,7 @@ func NewState(cfg Config, storage storage, executorClient executor.ExecutorServi newL2BlockEvents: make(chan NewL2BlockEvent, newL2BlockEventBufferSize), newL2BlockEventHandlers: []NewL2BlockEventHandler{}, l1InfoTree: mt, - StateL1InfoTreeV2: StateL1InfoTreeV2{storageL1InfoTreeV2: storage}, + l1InfoTreeRecursive: mtr, } return state diff --git a/state/syncinginfo_test.go b/state/syncinginfo_test.go index 8a09e72dcf..b15b511ba7 100644 --- a/state/syncinginfo_test.go +++ b/state/syncinginfo_test.go @@ -28,7 +28,7 @@ func 
TestGetSyncingInfoErrors(t *testing.T) { ctx := context.Background() mockStorage := mocks.NewStorageMock(t) mockExecutor := mocks.NewExecutorServiceClientMock(t) - testState := state.NewState(stateCfg, mockStorage, mockExecutor, nil, nil, nil) + testState := state.NewState(stateCfg, mockStorage, mockExecutor, nil, nil, nil, nil) mockStorage.EXPECT().Begin(ctx).Return(mocks.NewDbTxMock(t), nil) dbTx, err := testState.BeginStateTransaction(ctx) require.NoError(t, err) @@ -74,7 +74,7 @@ func TestGetSyncingInfoOk(t *testing.T) { ctx := context.Background() mockStorage := mocks.NewStorageMock(t) mockExecutor := mocks.NewExecutorServiceClientMock(t) - testState := state.NewState(stateCfg, mockStorage, mockExecutor, nil, nil, nil) + testState := state.NewState(stateCfg, mockStorage, mockExecutor, nil, nil, nil, nil) mockStorage.EXPECT().Begin(ctx).Return(mocks.NewDbTxMock(t), nil) dbTx, err := testState.BeginStateTransaction(ctx) require.NoError(t, err) diff --git a/state/test/forkid_common/common.go b/state/test/forkid_common/common.go index 478b7e0222..b5ce1da6b1 100644 --- a/state/test/forkid_common/common.go +++ b/state/test/forkid_common/common.go @@ -76,7 +76,11 @@ func InitTestState(stateCfg state.Config) *state.State { if err != nil { panic(err) } - return state.NewState(stateCfg, pgstatestorage.NewPostgresStorage(stateCfg, stateDb), ExecutorClient, stateTree, eventLog, mt) + mtr, err := l1infotree.NewL1InfoTreeRecursive(32) + if err != nil { + panic(err) + } + return state.NewState(stateCfg, pgstatestorage.NewPostgresStorage(stateCfg, stateDb), ExecutorClient, stateTree, eventLog, mt, mtr) } func InitOrResetDB(cfg db.Config) { diff --git a/synchronizer/actions/etrog/processor_l1_info_tree_update_test.go b/synchronizer/actions/etrog/processor_l1_info_tree_update_test.go index 30da5937a7..9633c6f6e4 100644 --- a/synchronizer/actions/etrog/processor_l1_info_tree_update_test.go +++ b/synchronizer/actions/etrog/processor_l1_info_tree_update_test.go @@ -45,7 +45,7 @@ 
func TestProcessorL1InfoTreeUpdate_Process(t *testing.T) { if err != nil { panic(err) } - testState := state.NewState(stateCfg, pgstatestorage.NewPostgresStorage(stateCfg, stateDb), nil, nil, nil, mt) + testState := state.NewState(stateCfg, pgstatestorage.NewPostgresStorage(stateCfg, stateDb), nil, nil, nil, mt, nil) sut := NewProcessorL1InfoTreeUpdate(testState) l1infotree := etherman.GlobalExitRoot{ diff --git a/synchronizer/common/syncinterfaces/mocks/state_last_block_getter.go b/synchronizer/common/syncinterfaces/mocks/state_last_block_getter.go new file mode 100644 index 0000000000..204f0a883d --- /dev/null +++ b/synchronizer/common/syncinterfaces/mocks/state_last_block_getter.go @@ -0,0 +1,98 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock_syncinterfaces + +import ( + context "context" + + pgx "github.com/jackc/pgx/v4" + mock "github.com/stretchr/testify/mock" + + state "github.com/0xPolygonHermez/zkevm-node/state" +) + +// StateLastBlockGetter is an autogenerated mock type for the StateLastBlockGetter type +type StateLastBlockGetter struct { + mock.Mock +} + +type StateLastBlockGetter_Expecter struct { + mock *mock.Mock +} + +func (_m *StateLastBlockGetter) EXPECT() *StateLastBlockGetter_Expecter { + return &StateLastBlockGetter_Expecter{mock: &_m.Mock} +} + +// GetLastBlock provides a mock function with given fields: ctx, dbTx +func (_m *StateLastBlockGetter) GetLastBlock(ctx context.Context, dbTx pgx.Tx) (*state.Block, error) { + ret := _m.Called(ctx, dbTx) + + if len(ret) == 0 { + panic("no return value specified for GetLastBlock") + } + + var r0 *state.Block + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, pgx.Tx) (*state.Block, error)); ok { + return rf(ctx, dbTx) + } + if rf, ok := ret.Get(0).(func(context.Context, pgx.Tx) *state.Block); ok { + r0 = rf(ctx, dbTx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*state.Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, pgx.Tx) error); ok { + r1 = 
rf(ctx, dbTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// StateLastBlockGetter_GetLastBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLastBlock' +type StateLastBlockGetter_GetLastBlock_Call struct { + *mock.Call +} + +// GetLastBlock is a helper method to define mock.On call +// - ctx context.Context +// - dbTx pgx.Tx +func (_e *StateLastBlockGetter_Expecter) GetLastBlock(ctx interface{}, dbTx interface{}) *StateLastBlockGetter_GetLastBlock_Call { + return &StateLastBlockGetter_GetLastBlock_Call{Call: _e.mock.On("GetLastBlock", ctx, dbTx)} +} + +func (_c *StateLastBlockGetter_GetLastBlock_Call) Run(run func(ctx context.Context, dbTx pgx.Tx)) *StateLastBlockGetter_GetLastBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(pgx.Tx)) + }) + return _c +} + +func (_c *StateLastBlockGetter_GetLastBlock_Call) Return(_a0 *state.Block, _a1 error) *StateLastBlockGetter_GetLastBlock_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *StateLastBlockGetter_GetLastBlock_Call) RunAndReturn(run func(context.Context, pgx.Tx) (*state.Block, error)) *StateLastBlockGetter_GetLastBlock_Call { + _c.Call.Return(run) + return _c +} + +// NewStateLastBlockGetter creates a new instance of StateLastBlockGetter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewStateLastBlockGetter(t interface { + mock.TestingT + Cleanup(func()) +}) *StateLastBlockGetter { + mock := &StateLastBlockGetter{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/test/operations/manager.go b/test/operations/manager.go index 5c2ea3b9d0..18fbe16610 100644 --- a/test/operations/manager.go +++ b/test/operations/manager.go @@ -503,7 +503,11 @@ func initState(cfg state.Config) (*state.State, error) { if err != nil { panic(err) } - st := state.NewState(stateCfg, stateDb, executorClient, stateTree, eventLog, mt) + mtr, err := l1infotree.NewL1InfoTreeRecursive(32) + if err != nil { + panic(err) + } + st := state.NewState(stateCfg, stateDb, executorClient, stateTree, eventLog, mt, mtr) return st, nil } diff --git a/tools/datastreamer/main.go b/tools/datastreamer/main.go index 5212164da3..70b18d6ce7 100644 --- a/tools/datastreamer/main.go +++ b/tools/datastreamer/main.go @@ -207,7 +207,7 @@ func generate(cliCtx *cli.Context) error { stateTree := merkletree.NewStateTree(mtDBServiceClient) log.Debug("Connected to the merkle tree") - stateDB := state.NewState(state.Config{}, stateDBStorage, nil, stateTree, nil, nil) + stateDB := state.NewState(state.Config{}, stateDBStorage, nil, stateTree, nil, nil, nil) // Calculate intermediate state roots var imStateRoots map[uint64][]byte diff --git a/tools/state/reprocess_cmd.go b/tools/state/reprocess_cmd.go index 129b9ecdd8..2b145ed42b 100644 --- a/tools/state/reprocess_cmd.go +++ b/tools/state/reprocess_cmd.go @@ -164,7 +164,7 @@ func newState(ctx context.Context, c *config.Config, l2ChainID uint64, forkIDInt stateTree = merkletree.NewStateTree(stateDBClient) } - st := state.NewState(stateCfg, stateDb, executorClient, stateTree, eventLog, nil) + st := state.NewState(stateCfg, stateDb, executorClient, stateTree, eventLog, nil, nil) return st } From 27c438a07b830b563d47131bf0f50251865dab03 Mon Sep 17 00:00:00 2001 From: tclemos Date: Wed, 27 Mar 2024 
20:57:09 -0300 Subject: [PATCH 07/17] fix l1InfoTreeRecursive vectors and tests --- l1infotree/tree_recursive_test.go | 31 +- .../l1-info-tree-recursive/input.json | 62 ++- .../smt-full-output.json | 407 ++++++++++++++---- 3 files changed, 380 insertions(+), 120 deletions(-) diff --git a/l1infotree/tree_recursive_test.go b/l1infotree/tree_recursive_test.go index f3b1e26463..ca141f1b18 100644 --- a/l1infotree/tree_recursive_test.go +++ b/l1infotree/tree_recursive_test.go @@ -3,11 +3,13 @@ package l1infotree_test import ( "encoding/json" "os" + "strconv" "testing" "github.com/0xPolygonHermez/zkevm-node/l1infotree" "github.com/0xPolygonHermez/zkevm-node/log" "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -22,9 +24,10 @@ const ( type vectorTestData struct { GlobalExitRoot common.Hash `json:"globalExitRoot"` BlockHash common.Hash `json:"blockHash"` - Timestamp uint64 `json:"timestamp"` - SmtProofPreviousIndex []common.Hash `json:"smtProofPreviousIndex"` + MinTimestamp string `json:"minTimestamp"` + SmtProof []common.Hash `json:"smtProof"` Index uint32 `json:"index"` + PreviousIndex uint32 `json:"previousIndex"` PreviousL1InfoTreeRoot common.Hash `json:"previousL1InfoTreeRoot"` L1DataHash common.Hash `json:"l1DataHash"` L1InfoTreeRoot common.Hash `json:"l1InfoTreeRoot"` @@ -42,16 +45,18 @@ func readData(t *testing.T) []vectorTestData { func TestBuildTreeVectorData(t *testing.T) { data := readData(t) - sut, err := l1infotree.NewL1InfoTreeRecursive(L1InfoRootRecursiveHeight) + mtr, err := l1infotree.NewL1InfoTreeRecursive(L1InfoRootRecursiveHeight) require.NoError(t, err) for _, testVector := range data { - // Add leaf - leafData := l1infotree.HashLeafData(testVector.GlobalExitRoot, testVector.BlockHash, testVector.Timestamp) + minTimestamp, err := strconv.ParseUint(testVector.MinTimestamp, 10, 0) + require.NoError(t, err) + leafData := l1infotree.HashLeafData(testVector.GlobalExitRoot, 
testVector.BlockHash, minTimestamp) leafDataHash := common.BytesToHash(leafData[:]) - root, err := sut.AddLeaf(testVector.Index-1, leafData) + assert.Equal(t, testVector.L1DataHash.String(), leafDataHash.String(), "leafData do not match leaf", testVector.Index) + + root, err := mtr.AddLeaf(testVector.Index-1, leafData) require.NoError(t, err) - require.Equal(t, testVector.L1InfoTreeRoot.String(), root.String(), "Roots do not match leaf", testVector.Index) - require.Equal(t, testVector.L1DataHash.String(), leafDataHash.String(), "leafData do not match leaf", testVector.Index) + assert.Equal(t, testVector.L1InfoTreeRoot.String(), root.String(), "Roots do not match leaf", testVector.Index) } } @@ -69,16 +74,18 @@ func TestProofsTreeVectorData(t *testing.T) { require.NoError(t, err) for _, testVector := range data { // Add leaf - leafData := l1infotree.HashLeafData(testVector.GlobalExitRoot, testVector.BlockHash, testVector.Timestamp) + minTimestamp, err := strconv.ParseUint(testVector.MinTimestamp, 10, 0) + require.NoError(t, err) + leafData := l1infotree.HashLeafData(testVector.GlobalExitRoot, testVector.BlockHash, minTimestamp) - _, err := sut.AddLeaf(testVector.Index-1, leafData) + _, err = sut.AddLeaf(testVector.Index-1, leafData) require.NoError(t, err) mp, _, err := sut.ComputeMerkleProof(testVector.Index) require.NoError(t, err) for i, v := range mp { c := common.Hash(v) - if c.String() != testVector.SmtProofPreviousIndex[i].String() { - log.Info("MerkleProof: index ", testVector.Index, " mk:", i, " v:", c.String(), " expected:", testVector.SmtProofPreviousIndex[i].String()) + if c.String() != testVector.SmtProof[i].String() { + log.Info("MerkleProof: index ", testVector.Index, " mk:", i, " v:", c.String(), " expected:", testVector.SmtProof[i].String()) } } } diff --git a/test/vectors/src/merkle-tree/l1-info-tree-recursive/input.json b/test/vectors/src/merkle-tree/l1-info-tree-recursive/input.json index eddd444dd9..f7b1936d44 100644 --- 
a/test/vectors/src/merkle-tree/l1-info-tree-recursive/input.json +++ b/test/vectors/src/merkle-tree/l1-info-tree-recursive/input.json @@ -1,17 +1,45 @@ -[ - { - "globalExitRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", - "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "timestamp": "0" - }, - { - "globalExitRoot": "0x16994edfddddb9480667b64174fc00d3b6da7290d37b8db3a16571b4ddf0789f", - "blockHash": "0x24a5871d68723340d9eadc674aa8ad75f3e33b61d5a9db7db92af856a19270bb", - "timestamp": "42" - }, - { - "globalExitRoot": "0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968", - "blockHash": "0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb", - "timestamp": "42" - } -] \ No newline at end of file +{ + "leafs": [ + { + "globalExitRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "minTimestamp": "0" + }, + { + "globalExitRoot": "0x16994edfddddb9480667b64174fc00d3b6da7290d37b8db3a16571b4ddf0789f", + "blockHash": "0x24a5871d68723340d9eadc674aa8ad75f3e33b61d5a9db7db92af856a19270bb", + "minTimestamp": "42" + }, + { + "globalExitRoot": "0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968", + "blockHash": "0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb", + "minTimestamp": "3" + }, + { + "globalExitRoot": "0xe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a", + "blockHash": "0x4df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652", + "minTimestamp": "56" + }, + { + "globalExitRoot": "0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af", + "blockHash": "0x2733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981f", + "minTimestamp": "4" + }, + { + "globalExitRoot": "0x838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e", + "blockHash": 
"0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85", + "minTimestamp": "11" + }, + { + "globalExitRoot": "0x662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e", + "blockHash": "0xf4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd9", + "minTimestamp": "2" + }, + { + "globalExitRoot": "0x0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618d", + "blockHash": "0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2", + "minTimestamp": "100" + } + ], + "l1InfoTreeIndexTransition": [2,5,8] +} \ No newline at end of file diff --git a/test/vectors/src/merkle-tree/l1-info-tree-recursive/smt-full-output.json b/test/vectors/src/merkle-tree/l1-info-tree-recursive/smt-full-output.json index e81cb2bc30..e9b8c8ffce 100644 --- a/test/vectors/src/merkle-tree/l1-info-tree-recursive/smt-full-output.json +++ b/test/vectors/src/merkle-tree/l1-info-tree-recursive/smt-full-output.json @@ -1,92 +1,317 @@ [ - { - "globalExitRoot": "0x16994edfddddb9480667b64174fc00d3b6da7290d37b8db3a16571b4ddf0789f", - "blockHash": "0x24a5871d68723340d9eadc674aa8ad75f3e33b61d5a9db7db92af856a19270bb", - "timestamp": 42, - "smtProofPreviousIndex": [ - "0xcc4105312818e9b7f692c9c807ea09699f4f290e5e31671a8e0c2c937f1c43f0", - "0xece5fbe7739fd48f4931ce884ee9cf5f373d0a6c63b80ce836bc0ae674e78540", - "0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30", - "0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85", - "0xe58769b32a1beaf1ea27375a44095a0d1fb664ce2dd358e7fcbfb78c26a19344", - "0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", - "0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968", - "0xffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f83", - "0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af", - "0xcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0", - "0xf9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5", - 
"0xf8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf892", - "0x3490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99c", - "0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb", - "0x5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8becc", - "0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2", - "0x2733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981f", - "0xe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a", - "0x5a2dce0a8a7f68bb74560f8f71837c2c2ebbcbf7fffb42ae1896f13f7c7479a0", - "0xb46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0", - "0xc65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2", - "0xf4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd9", - "0x5a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e377", - "0x4df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652", - "0xcdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef", - "0x0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618d", - "0xb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0", - "0x838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e", - "0x662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e", - "0x388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea322", - "0x93237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d735", - "0x8448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a9" - ], - "index": 1, - "previousIndex": 0, - "previousL1InfoTreeRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", - "l1DataHash": "0x0312659ccc1839f6cdc8db9cbaefabc1ee9a9c1f71b3a20ceb906d80575c5736", - "l1InfoTreeRoot": "0xcc4105312818e9b7f692c9c807ea09699f4f290e5e31671a8e0c2c937f1c43f0", - "historicL1InfoRoot": "0x27ae5ba08d7291c96c8cbddcc148bf48a6d68c7974b94356f53754ef6171d757" - }, - { - "globalExitRoot": 
"0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968", - "blockHash": "0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb", - "timestamp": 42, - "smtProofPreviousIndex": [ - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0xece5fbe7739fd48f4931ce884ee9cf5f373d0a6c63b80ce836bc0ae674e78540", - "0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30", - "0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85", - "0xe58769b32a1beaf1ea27375a44095a0d1fb664ce2dd358e7fcbfb78c26a19344", - "0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", - "0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968", - "0xffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f83", - "0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af", - "0xcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0", - "0xf9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5", - "0xf8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf892", - "0x3490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99c", - "0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb", - "0x5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8becc", - "0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2", - "0x2733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981f", - "0xe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a", - "0x5a2dce0a8a7f68bb74560f8f71837c2c2ebbcbf7fffb42ae1896f13f7c7479a0", - "0xb46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0", - "0xc65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2", - "0xf4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd9", - "0x5a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e377", - "0x4df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652", - 
"0xcdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef", - "0x0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618d", - "0xb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0", - "0x838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e", - "0x662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e", - "0x388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea322", - "0x93237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d735", - "0x8448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a9" - ], - "index": 2, - "previousIndex": 1, - "previousL1InfoTreeRoot": "0xcc4105312818e9b7f692c9c807ea09699f4f290e5e31671a8e0c2c937f1c43f0", - "l1DataHash": "0xf6340c5e4f2b138f56f028a5a7fd42b4976ba3fe5fae98d040dd86b9ed59f172", - "l1InfoTreeRoot": "0x264008191dde377bbc6e99914cf1f1222143542afe28d4b7d4c9ecba02dd2273", - "historicL1InfoRoot": "0x4c9ea822b94a2367aeba9ce15cc881edfda28e1763e377b54a141068b08002f4" - } - ] \ No newline at end of file + { + "globalExitRoot": "0x16994edfddddb9480667b64174fc00d3b6da7290d37b8db3a16571b4ddf0789f", + "blockHash": "0x24a5871d68723340d9eadc674aa8ad75f3e33b61d5a9db7db92af856a19270bb", + "minTimestamp": "42", + "smtProof": [ + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xedcb6d5c11463d1261739c3923fdd7d83ca946ab40cd9c82a4a640d47bf5b2b0", + "0x2be5771dfdb370788faeb6e60ead7815023d2b3a387dd48c0a9112ceba7d6786", + "0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85", + "0xe58769b32a1beaf1ea27375a44095a0d1fb664ce2dd358e7fcbfb78c26a19344", + "0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", + "0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968", + "0xffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f83", + "0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af", + "0xcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0", + 
"0xf9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5", + "0xf8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf892", + "0x3490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99c", + "0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb", + "0x5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8becc", + "0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2", + "0x2733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981f", + "0xe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a", + "0x5a2dce0a8a7f68bb74560f8f71837c2c2ebbcbf7fffb42ae1896f13f7c7479a0", + "0xb46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0", + "0xc65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2", + "0xf4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd9", + "0x5a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e377", + "0x4df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652", + "0xcdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef", + "0x0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618d", + "0xb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0", + "0x838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e", + "0x662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e", + "0x388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea322", + "0x93237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d735", + "0x8448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a9" + ], + "index": 1, + "previousIndex": 0, + "previousL1InfoTreeRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", + "l1DataHash": "0x0312659ccc1839f6cdc8db9cbaefabc1ee9a9c1f71b3a20ceb906d80575c5736", + "l1InfoTreeRoot": "0xcc4105312818e9b7f692c9c807ea09699f4f290e5e31671a8e0c2c937f1c43f0", + "historicL1InfoRoot": "0x27ae5ba08d7291c96c8cbddcc148bf48a6d68c7974b94356f53754ef6171d757" + }, + { + 
"globalExitRoot": "0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968", + "blockHash": "0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb", + "minTimestamp": "3", + "smtProof": [ + "0xe52ab75d5a441a7327f611047fd27a95a8d852d291ec5e420697f41f9c6521d6", + "0x5208554db01ad89751c5bfea8c72835636748e07c8214f8d7ea97a14953eac39", + "0x2be5771dfdb370788faeb6e60ead7815023d2b3a387dd48c0a9112ceba7d6786", + "0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85", + "0xe58769b32a1beaf1ea27375a44095a0d1fb664ce2dd358e7fcbfb78c26a19344", + "0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", + "0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968", + "0xffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f83", + "0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af", + "0xcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0", + "0xf9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5", + "0xf8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf892", + "0x3490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99c", + "0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb", + "0x5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8becc", + "0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2", + "0x2733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981f", + "0xe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a", + "0x5a2dce0a8a7f68bb74560f8f71837c2c2ebbcbf7fffb42ae1896f13f7c7479a0", + "0xb46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0", + "0xc65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2", + "0xf4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd9", + "0x5a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e377", + "0x4df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652", + 
"0xcdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef", + "0x0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618d", + "0xb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0", + "0x838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e", + "0x662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e", + "0x388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea322", + "0x93237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d735", + "0x8448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a9" + ], + "index": 2, + "previousIndex": 1, + "previousL1InfoTreeRoot": "0xcc4105312818e9b7f692c9c807ea09699f4f290e5e31671a8e0c2c937f1c43f0", + "l1DataHash": "0xc6ba931c9a5f94157bce1ddb799c2a79920278ae2f26841d298161b470379020", + "l1InfoTreeRoot": "0x07ad5a2a0fcfe97230d277f3aea3adad6e790fa5e1ddeecbaf6718d878cbc4b3", + "historicL1InfoRoot": "0x4c9ea822b94a2367aeba9ce15cc881edfda28e1763e377b54a141068b08002f4" + }, + { + "globalExitRoot": "0xe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a", + "blockHash": "0x4df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652", + "minTimestamp": "56", + "smtProof": [ + "0x07ad5a2a0fcfe97230d277f3aea3adad6e790fa5e1ddeecbaf6718d878cbc4b3", + "0x5208554db01ad89751c5bfea8c72835636748e07c8214f8d7ea97a14953eac39", + "0x2be5771dfdb370788faeb6e60ead7815023d2b3a387dd48c0a9112ceba7d6786", + "0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85", + "0xe58769b32a1beaf1ea27375a44095a0d1fb664ce2dd358e7fcbfb78c26a19344", + "0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", + "0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968", + "0xffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f83", + "0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af", + "0xcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0", + "0xf9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5", + 
"0xf8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf892", + "0x3490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99c", + "0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb", + "0x5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8becc", + "0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2", + "0x2733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981f", + "0xe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a", + "0x5a2dce0a8a7f68bb74560f8f71837c2c2ebbcbf7fffb42ae1896f13f7c7479a0", + "0xb46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0", + "0xc65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2", + "0xf4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd9", + "0x5a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e377", + "0x4df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652", + "0xcdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef", + "0x0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618d", + "0xb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0", + "0x838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e", + "0x662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e", + "0x388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea322", + "0x93237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d735", + "0x8448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a9" + ], + "index": 3, + "previousIndex": 2, + "previousL1InfoTreeRoot": "0x07ad5a2a0fcfe97230d277f3aea3adad6e790fa5e1ddeecbaf6718d878cbc4b3", + "l1DataHash": "0xd3c0cf40ab4a607f90163c37db44043c589bd70ebc778de207337b0a7681ca8b", + "l1InfoTreeRoot": "0xe52ab75d5a441a7327f611047fd27a95a8d852d291ec5e420697f41f9c6521d6", + "historicL1InfoRoot": "0x166f9fc4ebf4d76e96265c4c03d059ed2a53b581353648a74c19908089a844a2" + }, + { + "globalExitRoot": 
"0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af", + "blockHash": "0x2733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981f", + "minTimestamp": "4", + "smtProof": [ + "0x22daf907474be1e06fb93ea85f80bce20c6a088df1e82d0f038397eace22b981", + "0x7e7cbdbaca48ab4f047a277bca68965a3aa212fd8c02d8e2017f8ae4d232fa80", + "0xf270584877d3197ff6fde4a0da3ed337409045b31e8385165cb105e64ee2c64a", + "0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85", + "0xe58769b32a1beaf1ea27375a44095a0d1fb664ce2dd358e7fcbfb78c26a19344", + "0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", + "0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968", + "0xffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f83", + "0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af", + "0xcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0", + "0xf9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5", + "0xf8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf892", + "0x3490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99c", + "0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb", + "0x5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8becc", + "0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2", + "0x2733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981f", + "0xe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a", + "0x5a2dce0a8a7f68bb74560f8f71837c2c2ebbcbf7fffb42ae1896f13f7c7479a0", + "0xb46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0", + "0xc65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2", + "0xf4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd9", + "0x5a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e377", + "0x4df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652", + "0xcdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef", + 
"0x0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618d", + "0xb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0", + "0x838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e", + "0x662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e", + "0x388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea322", + "0x93237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d735", + "0x8448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a9" + ], + "index": 4, + "previousIndex": 3, + "previousL1InfoTreeRoot": "0xe52ab75d5a441a7327f611047fd27a95a8d852d291ec5e420697f41f9c6521d6", + "l1DataHash": "0x228485bea27b6b9a29d1818e1e52951197b735c8059f1006e3621cec13dc3aeb", + "l1InfoTreeRoot": "0xd752324c089a4b27c520127ee4ef36d3cc956478cb969121d2e06c321e31a2a5", + "historicL1InfoRoot": "0xbea44d1ae3d2bbd87581f13b605bcd076dd2b300feb22c5794215c693b480819" + }, + { + "globalExitRoot": "0x838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e", + "blockHash": "0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85", + "minTimestamp": "11", + "smtProof": [ + "0xd752324c089a4b27c520127ee4ef36d3cc956478cb969121d2e06c321e31a2a5", + "0x7e7cbdbaca48ab4f047a277bca68965a3aa212fd8c02d8e2017f8ae4d232fa80", + "0xf270584877d3197ff6fde4a0da3ed337409045b31e8385165cb105e64ee2c64a", + "0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85", + "0xe58769b32a1beaf1ea27375a44095a0d1fb664ce2dd358e7fcbfb78c26a19344", + "0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", + "0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968", + "0xffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f83", + "0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af", + "0xcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0", + "0xf9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5", + "0xf8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf892", + 
"0x3490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99c", + "0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb", + "0x5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8becc", + "0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2", + "0x2733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981f", + "0xe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a", + "0x5a2dce0a8a7f68bb74560f8f71837c2c2ebbcbf7fffb42ae1896f13f7c7479a0", + "0xb46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0", + "0xc65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2", + "0xf4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd9", + "0x5a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e377", + "0x4df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652", + "0xcdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef", + "0x0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618d", + "0xb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0", + "0x838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e", + "0x662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e", + "0x388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea322", + "0x93237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d735", + "0x8448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a9" + ], + "index": 5, + "previousIndex": 4, + "previousL1InfoTreeRoot": "0xd752324c089a4b27c520127ee4ef36d3cc956478cb969121d2e06c321e31a2a5", + "l1DataHash": "0x861784267fa32515144747e895a2b9282f5356cffb535ad9741fe2d5ea686d95", + "l1InfoTreeRoot": "0x22daf907474be1e06fb93ea85f80bce20c6a088df1e82d0f038397eace22b981", + "historicL1InfoRoot": "0x46023e040446edf42d5c7b60040700cb5c3c219a84cc5483c41e7ec4af4fa0a0" + }, + { + "globalExitRoot": "0x662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e", + "blockHash": 
"0xf4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd9", + "minTimestamp": "2", + "smtProof": [ + "0xe0c4fe427ee2d7f5300bfc9967cbe03cf2f3c9874765baa3e107cf6bc0c0adc0", + "0x261cc392cba4301583e80987297c896076d457ad5df620d6f5be2d5824fcd926", + "0xf270584877d3197ff6fde4a0da3ed337409045b31e8385165cb105e64ee2c64a", + "0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85", + "0xe58769b32a1beaf1ea27375a44095a0d1fb664ce2dd358e7fcbfb78c26a19344", + "0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", + "0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968", + "0xffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f83", + "0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af", + "0xcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0", + "0xf9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5", + "0xf8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf892", + "0x3490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99c", + "0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb", + "0x5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8becc", + "0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2", + "0x2733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981f", + "0xe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a", + "0x5a2dce0a8a7f68bb74560f8f71837c2c2ebbcbf7fffb42ae1896f13f7c7479a0", + "0xb46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0", + "0xc65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2", + "0xf4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd9", + "0x5a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e377", + "0x4df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652", + "0xcdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef", + "0x0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618d", + 
"0xb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0", + "0x838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e", + "0x662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e", + "0x388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea322", + "0x93237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d735", + "0x8448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a9" + ], + "index": 6, + "previousIndex": 5, + "previousL1InfoTreeRoot": "0x22daf907474be1e06fb93ea85f80bce20c6a088df1e82d0f038397eace22b981", + "l1DataHash": "0x78fef5acedef420760f56ff370fa0b23132b197729a9529d0d13220c80fc0e96", + "l1InfoTreeRoot": "0x62ea53405a7d1388e070bd7e919f1933564801ce0afb2bd4f4236bdf6cf16022", + "historicL1InfoRoot": "0x94efbe488e5947534a19dc8e7d426cc53264b707de1eac513b91a457938bbc60" + }, + { + "globalExitRoot": "0x0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618d", + "blockHash": "0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2", + "minTimestamp": "100", + "smtProof": [ + "0x62ea53405a7d1388e070bd7e919f1933564801ce0afb2bd4f4236bdf6cf16022", + "0x261cc392cba4301583e80987297c896076d457ad5df620d6f5be2d5824fcd926", + "0xf270584877d3197ff6fde4a0da3ed337409045b31e8385165cb105e64ee2c64a", + "0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85", + "0xe58769b32a1beaf1ea27375a44095a0d1fb664ce2dd358e7fcbfb78c26a19344", + "0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", + "0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968", + "0xffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f83", + "0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af", + "0xcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0", + "0xf9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5", + "0xf8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf892", + "0x3490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99c", + 
"0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb", + "0x5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8becc", + "0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2", + "0x2733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981f", + "0xe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a", + "0x5a2dce0a8a7f68bb74560f8f71837c2c2ebbcbf7fffb42ae1896f13f7c7479a0", + "0xb46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0", + "0xc65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2", + "0xf4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd9", + "0x5a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e377", + "0x4df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652", + "0xcdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef", + "0x0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618d", + "0xb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0", + "0x838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e", + "0x662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e", + "0x388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea322", + "0x93237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d735", + "0x8448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a9" + ], + "index": 7, + "previousIndex": 6, + "previousL1InfoTreeRoot": "0x62ea53405a7d1388e070bd7e919f1933564801ce0afb2bd4f4236bdf6cf16022", + "l1DataHash": "0x8b9375e1c575859b4be1a020c789dfd3a7daf19fd8d2318ca937588bef8e147e", + "l1InfoTreeRoot": "0xe0c4fe427ee2d7f5300bfc9967cbe03cf2f3c9874765baa3e107cf6bc0c0adc0", + "historicL1InfoRoot": "0xc9b848f02399959947317cff144d83536a0b87123e41a3faa13f26ca1d1e2ba5" + } +] \ No newline at end of file From 541ec06f71a89a8e45aebad560d6069a6660dc68 Mon Sep 17 00:00:00 2001 From: tclemos Date: Wed, 27 Mar 2024 20:58:04 -0300 Subject: [PATCH 08/17] fix linter issues --- 
state/l1infotree_recursive.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/state/l1infotree_recursive.go b/state/l1infotree_recursive.go index f33047bd98..f2f11b2f0e 100644 --- a/state/l1infotree_recursive.go +++ b/state/l1infotree_recursive.go @@ -38,7 +38,7 @@ func (s *State) buildL1InfoTreeRecursiveCacheIfNeed(ctx context.Context, dbTx pg return nil } -// AddL1InfoTreeLeaf adds a new leaf to the L1InfoTree and returns the entry and error +// AddLeafToL1InfoTreeRecursive adds a new leaf to the L1InfoTree and returns the entry and error func (s *State) AddLeafToL1InfoTreeRecursive(ctx context.Context, l1InfoTreeLeaf *L1InfoTreeLeaf, dbTx pgx.Tx) (*L1InfoTreeExitRootStorageEntry, error) { var newIndex uint32 gerIndex, err := s.GetLatestIndex(ctx, dbTx) @@ -72,7 +72,7 @@ func (s *State) AddLeafToL1InfoTreeRecursive(ctx context.Context, l1InfoTreeLeaf return &entry, nil } -// GetCurrentL1InfoRoot Return current L1InfoRoot +// GetCurrentL1InfoTreeRecursiveRoot Return current L1InfoRoot func (s *State) GetCurrentL1InfoTreeRecursiveRoot(ctx context.Context, dbTx pgx.Tx) (common.Hash, error) { err := s.buildL1InfoTreeRecursiveCacheIfNeed(ctx, dbTx) if err != nil { From 27583fee29506a5c3439bbbd1f9bdb802a1d790e Mon Sep 17 00:00:00 2001 From: tclemos Date: Thu, 28 Mar 2024 16:44:57 -0300 Subject: [PATCH 09/17] refactoring l1InfoTreeRecursive to fix the empty root and have snapshots --- l1infotree/tree_recursive.go | 74 ++++++++++++++++------------- l1infotree/tree_recursive_test.go | 77 +++++++++++++++++++++++-------- state/l1infotree_recursive.go | 4 +- 3 files changed, 102 insertions(+), 53 deletions(-) diff --git a/l1infotree/tree_recursive.go b/l1infotree/tree_recursive.go index 6a2562b5d8..deae8489ec 100644 --- a/l1infotree/tree_recursive.go +++ b/l1infotree/tree_recursive.go @@ -5,15 +5,15 @@ import ( "github.com/ethereum/go-ethereum/crypto" ) -const ( - firstLeafHistoricL1InfoTree = 
"0x0000000000000000000000000000000000000000000000000000000000000000" -) - // L1InfoTreeRecursive is a recursive implementation of the L1InfoTree of Feijoa type L1InfoTreeRecursive struct { historicL1InfoTree *L1InfoTree - l1InfoTreeDataHash *common.Hash - leaves [][32]byte + snapShot L1InfoTreeRecursiveSnapshot +} +type L1InfoTreeRecursiveSnapshot struct { + HistoricL1InfoTreeRoot common.Hash + L1Data common.Hash + L1InfoTreeRoot common.Hash } // NewL1InfoTreeRecursive creates a new empty L1InfoTreeRecursive @@ -23,54 +23,64 @@ func NewL1InfoTreeRecursive(height uint8) (*L1InfoTreeRecursive, error) { return nil, err } - return &L1InfoTreeRecursive{ + mtr := &L1InfoTreeRecursive{ historicL1InfoTree: historic, - }, nil + snapShot: L1InfoTreeRecursiveSnapshot{ + HistoricL1InfoTreeRoot: common.Hash{}, + L1Data: common.Hash{}, + L1InfoTreeRoot: common.Hash{}, + }, + } + + return mtr, nil } -// NewL1InfoTreeRecursiveFromLeaves creates a new L1InfoTreeRecursive from leaves +// NewL1InfoTreeRecursiveFromLeaves creates a new L1InfoTreeRecursive from leaves as they are func NewL1InfoTreeRecursiveFromLeaves(height uint8, leaves [][32]byte) (*L1InfoTreeRecursive, error) { - res, err := NewL1InfoTreeRecursive(height) + mtr, err := NewL1InfoTreeRecursive(height) if err != nil { return nil, err } - for _, leaf := range leaves { - _, err := res.AddLeaf(uint32(len(res.leaves)), leaf) + for i, leaf := range leaves { + snapShot, err := mtr.AddLeaf(uint32(i), leaf) if err != nil { return nil, err } + mtr.snapShot = snapShot } - return res, nil + return mtr, nil } -// AddLeaf adds a new leaf to the L1InfoTreeRecursive -func (mt *L1InfoTreeRecursive) AddLeaf(index uint32, leaf [32]byte) (common.Hash, error) { - previousRoot := mt.GetRoot() - _, err := mt.historicL1InfoTree.AddLeaf(index, previousRoot) +// AddLeaf hashes the current historicL1InfoRoot + leaf data into the new leaf value and then adds it to the historicL1InfoTree +func (mt *L1InfoTreeRecursive) AddLeaf(index uint32, leaf 
[32]byte) (L1InfoTreeRecursiveSnapshot, error) { + //adds the current l1InfoTreeRoot into the tree to generate the next historicL2InfoTree + _, err := mt.historicL1InfoTree.AddLeaf(index, mt.snapShot.L1InfoTreeRoot) if err != nil { - return common.Hash{}, err + return L1InfoTreeRecursiveSnapshot{}, err } - mt.leaves = append(mt.leaves, leaf) - leafHash := common.Hash(leaf) - mt.l1InfoTreeDataHash = &leafHash - return mt.GetRoot(), nil + + //creates the new snapshot + snapShot := L1InfoTreeRecursiveSnapshot{} + snapShot.HistoricL1InfoTreeRoot = mt.historicL1InfoTree.GetRoot() + snapShot.L1Data = common.BytesToHash(leaf[:]) + snapShot.L1InfoTreeRoot = crypto.Keccak256Hash(snapShot.HistoricL1InfoTreeRoot.Bytes(), snapShot.L1Data.Bytes()) + mt.snapShot = snapShot + + return snapShot, nil } // GetRoot returns the root of the L1InfoTreeRecursive func (mt *L1InfoTreeRecursive) GetRoot() common.Hash { - if mt.l1InfoTreeDataHash == nil { - return common.HexToHash(firstLeafHistoricL1InfoTree) - } - return crypto.Keccak256Hash(mt.historicL1InfoTree.GetRoot().Bytes(), mt.l1InfoTreeDataHash.Bytes()) + return mt.snapShot.L1InfoTreeRoot } -// ComputeMerkleProofFromLeaves computes the Merkle proof from the leaves -func (mt *L1InfoTreeRecursive) ComputeMerkleProofFromLeaves(gerIndex uint32, leaves [][32]byte) ([][32]byte, common.Hash, error) { - return mt.historicL1InfoTree.ComputeMerkleProof(gerIndex, leaves) +// GetHistoricRoot returns the root of the HistoricL1InfoTree +func (mt *L1InfoTreeRecursive) GetHistoricRoot() common.Hash { + return mt.historicL1InfoTree.GetRoot() } -// ComputeMerkleProof computes the Merkle proof from the current leaves -func (mt *L1InfoTreeRecursive) ComputeMerkleProof(gerIndex uint32) ([][32]byte, common.Hash, error) { - return mt.historicL1InfoTree.ComputeMerkleProof(gerIndex, mt.leaves) +// ComputeMerkleProof computes the Merkle proof from the leaves +func (mt *L1InfoTreeRecursive) ComputeMerkleProof(gerIndex uint32, leaves [][32]byte) ([][32]byte, 
common.Hash, error) { + return mt.historicL1InfoTree.ComputeMerkleProof(gerIndex, leaves) } diff --git a/l1infotree/tree_recursive_test.go b/l1infotree/tree_recursive_test.go index ca141f1b18..8597ba2ad5 100644 --- a/l1infotree/tree_recursive_test.go +++ b/l1infotree/tree_recursive_test.go @@ -14,10 +14,9 @@ import ( ) const ( - L1InfoRootRecursiveHeight = uint8(32) - EmptyL1InfoRootRecursive = "0x0000000000000000000000000000000000000000000000000000000000000000" + L1InfoRootRecursiveHeight = uint8(32) + EmptyL1InfoTreeRecursiveRoot = "0x27ae5ba08d7291c96c8cbddcc148bf48a6d68c7974b94356f53754ef6171d757" - root1 = "0xcc4105312818e9b7f692c9c807ea09699f4f290e5e31671a8e0c2c937f1c43f0" filenameTestData = "../test/vectors/src/merkle-tree/l1-info-tree-recursive/smt-full-output.json" ) @@ -43,6 +42,22 @@ func readData(t *testing.T) []vectorTestData { return mtTestVectors } +func TestEmptyL1InfoRootRecursive(t *testing.T) { + mtr, err := l1infotree.NewL1InfoTreeRecursive(L1InfoRootRecursiveHeight) + require.NoError(t, err) + require.NotNil(t, mtr) + root := mtr.GetRoot() + require.Equal(t, EmptyL1InfoTreeRecursiveRoot, root.String()) +} + +func TestEmptyHistoricL1InfoRootRecursive(t *testing.T) { + mtr, err := l1infotree.NewL1InfoTreeRecursive(L1InfoRootRecursiveHeight) + require.NoError(t, err) + require.NotNil(t, mtr) + root := mtr.GetHistoricRoot() + require.Equal(t, EmptyL1InfoTreeRecursiveRoot, root.String()) +} + func TestBuildTreeVectorData(t *testing.T) { data := readData(t) mtr, err := l1infotree.NewL1InfoTreeRecursive(L1InfoRootRecursiveHeight) @@ -50,37 +65,61 @@ func TestBuildTreeVectorData(t *testing.T) { for _, testVector := range data { minTimestamp, err := strconv.ParseUint(testVector.MinTimestamp, 10, 0) require.NoError(t, err) - leafData := l1infotree.HashLeafData(testVector.GlobalExitRoot, testVector.BlockHash, minTimestamp) - leafDataHash := common.BytesToHash(leafData[:]) - assert.Equal(t, testVector.L1DataHash.String(), leafDataHash.String(), "leafData 
do not match leaf", testVector.Index) + l1Data := l1infotree.HashLeafData(testVector.GlobalExitRoot, testVector.BlockHash, minTimestamp) + l1DataHash := common.BytesToHash(l1Data[:]) + assert.Equal(t, testVector.L1DataHash.String(), l1DataHash.String(), "l1Data doesn't match leaf", testVector.Index) - root, err := mtr.AddLeaf(testVector.Index-1, leafData) + snapShot, err := mtr.AddLeaf(testVector.Index-1, l1Data) require.NoError(t, err) - assert.Equal(t, testVector.L1InfoTreeRoot.String(), root.String(), "Roots do not match leaf", testVector.Index) + assert.Equal(t, testVector.HistoricL1InfoRoot.String(), snapShot.HistoricL1InfoTreeRoot.String(), "HistoricL1InfoTreeRoot doesn't match leaf", testVector.Index) + assert.Equal(t, testVector.L1DataHash.String(), snapShot.L1Data.String(), "l1Data doesn't match leaf", testVector.Index) + assert.Equal(t, testVector.L1InfoTreeRoot.String(), snapShot.L1InfoTreeRoot.String(), "l1InfoTreeRoot doesn't match leaf", testVector.Index) } } -func TestEmptyL1InfoRootRecursive(t *testing.T) { - // empty - sut, err := l1infotree.NewL1InfoTreeRecursive(L1InfoRootRecursiveHeight) +func TestBuildTreeFromLeaves(t *testing.T) { + data := readData(t) + mtr, err := l1infotree.NewL1InfoTreeRecursive(L1InfoRootRecursiveHeight) require.NoError(t, err) - require.NotNil(t, sut) - root := sut.GetRoot() - require.Equal(t, EmptyL1InfoRootRecursive, root.String()) + leaves := [][32]byte{} + var lastSnapshot l1infotree.L1InfoTreeRecursiveSnapshot + for _, testVector := range data { + minTimestamp, err := strconv.ParseUint(testVector.MinTimestamp, 10, 0) + require.NoError(t, err) + l1Data := l1infotree.HashLeafData(testVector.GlobalExitRoot, testVector.BlockHash, minTimestamp) + l1DataHash := common.BytesToHash(l1Data[:]) + assert.Equal(t, testVector.L1DataHash.String(), l1DataHash.String(), "l1Data doesn't match leaf", testVector.Index) + + snapShot, err := mtr.AddLeaf(testVector.Index-1, l1Data) + require.NoError(t, err) + leaves = append(leaves, 
snapShot.L1Data) + lastSnapshot = snapShot + } + + newMtr, err := l1infotree.NewL1InfoTreeRecursiveFromLeaves(L1InfoRootRecursiveHeight, leaves) + require.NoError(t, err) + assert.Equal(t, lastSnapshot.L1InfoTreeRoot.String(), newMtr.GetRoot().String(), "L1InfoTreeRoot doesn't match leaf") } + func TestProofsTreeVectorData(t *testing.T) { data := readData(t) - sut, err := l1infotree.NewL1InfoTreeRecursive(L1InfoRootRecursiveHeight) + mtr, err := l1infotree.NewL1InfoTreeRecursive(L1InfoRootRecursiveHeight) require.NoError(t, err) + + leaves := [][32]byte{} for _, testVector := range data { - // Add leaf minTimestamp, err := strconv.ParseUint(testVector.MinTimestamp, 10, 0) require.NoError(t, err) - leafData := l1infotree.HashLeafData(testVector.GlobalExitRoot, testVector.BlockHash, minTimestamp) + l1Data := l1infotree.HashLeafData(testVector.GlobalExitRoot, testVector.BlockHash, minTimestamp) + l1DataHash := common.BytesToHash(l1Data[:]) + assert.Equal(t, testVector.L1DataHash.String(), l1DataHash.String(), "l1Data doesn't match leaf", testVector.Index) - _, err = sut.AddLeaf(testVector.Index-1, leafData) + snapShot, err := mtr.AddLeaf(testVector.Index-1, l1Data) require.NoError(t, err) - mp, _, err := sut.ComputeMerkleProof(testVector.Index) + + leaves = append(leaves, snapShot.L1InfoTreeRoot) + + mp, _, err := mtr.ComputeMerkleProof(testVector.Index, leaves) require.NoError(t, err) for i, v := range mp { c := common.Hash(v) diff --git a/state/l1infotree_recursive.go b/state/l1infotree_recursive.go index f2f11b2f0e..a0a4fc6bc2 100644 --- a/state/l1infotree_recursive.go +++ b/state/l1infotree_recursive.go @@ -54,14 +54,14 @@ func (s *State) AddLeafToL1InfoTreeRecursive(ctx context.Context, l1InfoTreeLeaf return nil, err } log.Debug("latestIndex: ", gerIndex) - root, err := s.l1InfoTreeRecursive.AddLeaf(newIndex, l1InfoTreeLeaf.Hash()) + snapShot, err := s.l1InfoTreeRecursive.AddLeaf(newIndex, l1InfoTreeLeaf.Hash()) if err != nil { log.Error("error add new leaf to 
the L1InfoTreeRecursive. Error: ", err) return nil, err } entry := L1InfoTreeExitRootStorageEntry{ L1InfoTreeLeaf: *l1InfoTreeLeaf, - L1InfoTreeRoot: root, + L1InfoTreeRoot: snapShot.L1InfoTreeRoot, L1InfoTreeIndex: newIndex, } err = s.AddL1InfoRootToExitRoot(ctx, &entry, dbTx) From 51cd19e6591b08a558f172fec32d9c3556ebfddf Mon Sep 17 00:00:00 2001 From: tclemos Date: Mon, 1 Apr 2024 15:47:18 -0300 Subject: [PATCH 10/17] add feijoa l1InfoTree Processor; refactor ProcessorBase and ForkIDs --- state/l1infotree_recursive.go | 4 +- .../processor_l1_initial_sequence_batches.go | 6 +- .../processor_l1_sequence_batches.go | 6 +- .../etrog/processor_l1_info_tree_update.go | 6 +- .../etrog/processor_l1_sequence_batches.go | 6 +- .../processor_l1_update_etrog_sequence.go | 6 +- .../feijoa/processor_l1_info_tree_update.go | 53 ++++++++++++++++ synchronizer/actions/forksids.go | 23 ++++++- .../incaberry/processor_l1_forced_batches.go | 6 +- .../actions/incaberry/processor_l1_forkid.go | 7 +-- .../processor_l1_global_exit_root.go | 6 +- .../processor_l1_sequence_batches.go | 6 +- .../processor_l1_sequence_force_batches.go | 6 +- .../incaberry/processor_l1_verify_batch.go | 6 +- synchronizer/actions/processor_base.go | 21 +++++-- .../mocks/state_full_interface.go | 60 +++++++++++++++++++ synchronizer/common/syncinterfaces/state.go | 1 + synchronizer/default_l1processors.go | 2 + 18 files changed, 188 insertions(+), 43 deletions(-) diff --git a/state/l1infotree_recursive.go b/state/l1infotree_recursive.go index a0a4fc6bc2..29e0c2d794 100644 --- a/state/l1infotree_recursive.go +++ b/state/l1infotree_recursive.go @@ -38,8 +38,8 @@ func (s *State) buildL1InfoTreeRecursiveCacheIfNeed(ctx context.Context, dbTx pg return nil } -// AddLeafToL1InfoTreeRecursive adds a new leaf to the L1InfoTree and returns the entry and error -func (s *State) AddLeafToL1InfoTreeRecursive(ctx context.Context, l1InfoTreeLeaf *L1InfoTreeLeaf, dbTx pgx.Tx) (*L1InfoTreeExitRootStorageEntry, error) { +// 
AddL1InfoTreeRecursiveLeaf adds a new leaf to the L1InfoTree and returns the entry and error +func (s *State) AddL1InfoTreeRecursiveLeaf(ctx context.Context, l1InfoTreeLeaf *L1InfoTreeLeaf, dbTx pgx.Tx) (*L1InfoTreeExitRootStorageEntry, error) { var newIndex uint32 gerIndex, err := s.GetLatestIndex(ctx, dbTx) if err != nil && !errors.Is(err, ErrNotFound) { diff --git a/synchronizer/actions/elderberry/processor_l1_initial_sequence_batches.go b/synchronizer/actions/elderberry/processor_l1_initial_sequence_batches.go index 943ca5e5bf..56b6b45f76 100644 --- a/synchronizer/actions/elderberry/processor_l1_initial_sequence_batches.go +++ b/synchronizer/actions/elderberry/processor_l1_initial_sequence_batches.go @@ -19,9 +19,9 @@ type ProcessorL1InitialSequenceBatchesElderberry struct { // NewProcessorL1InitialSequenceBatchesElderberry returns instance of a processor for InitialSequenceBatchesOrder func NewProcessorL1InitialSequenceBatchesElderberry(previousProcessor actions.L1EventProcessor) *ProcessorL1InitialSequenceBatchesElderberry { return &ProcessorL1InitialSequenceBatchesElderberry{ - ProcessorBase: actions.ProcessorBase[ProcessorL1InitialSequenceBatchesElderberry]{ - SupportedEvent: []etherman.EventOrder{etherman.InitialSequenceBatchesOrder}, - SupportedForkdIds: &actions.ForksIdOnlyElderberry}, + ProcessorBase: *actions.NewProcessorBase[ProcessorL1InitialSequenceBatchesElderberry]( + []etherman.EventOrder{etherman.InitialSequenceBatchesOrder}, + actions.ForksIdOnlyElderberry), previousProcessor: previousProcessor, } } diff --git a/synchronizer/actions/elderberry/processor_l1_sequence_batches.go b/synchronizer/actions/elderberry/processor_l1_sequence_batches.go index 1432130198..f6786fd8ca 100644 --- a/synchronizer/actions/elderberry/processor_l1_sequence_batches.go +++ b/synchronizer/actions/elderberry/processor_l1_sequence_batches.go @@ -40,9 +40,9 @@ type ProcessorL1SequenceBatchesElderberry struct { // NewProcessorL1SequenceBatchesElderberry returns instance 
of a processor for SequenceBatchesOrder func NewProcessorL1SequenceBatchesElderberry(previousProcessor PreviousProcessor, state StateL1SequenceBatchesElderberry) *ProcessorL1SequenceBatchesElderberry { return &ProcessorL1SequenceBatchesElderberry{ - ProcessorBase: actions.ProcessorBase[ProcessorL1SequenceBatchesElderberry]{ - SupportedEvent: []etherman.EventOrder{etherman.SequenceBatchesOrder}, - SupportedForkdIds: &actions.ForksIdOnlyElderberry}, + ProcessorBase: *actions.NewProcessorBase[ProcessorL1SequenceBatchesElderberry]( + []etherman.EventOrder{etherman.SequenceBatchesOrder}, + actions.ForksIdOnlyElderberry), previousProcessor: previousProcessor, state: state, } diff --git a/synchronizer/actions/etrog/processor_l1_info_tree_update.go b/synchronizer/actions/etrog/processor_l1_info_tree_update.go index f82ce4f7a7..8456ced8b8 100644 --- a/synchronizer/actions/etrog/processor_l1_info_tree_update.go +++ b/synchronizer/actions/etrog/processor_l1_info_tree_update.go @@ -24,9 +24,9 @@ type ProcessorL1InfoTreeUpdate struct { // NewProcessorL1InfoTreeUpdate new processor for GlobalExitRootsOrder func NewProcessorL1InfoTreeUpdate(state stateProcessorL1InfoTreeInterface) *ProcessorL1InfoTreeUpdate { return &ProcessorL1InfoTreeUpdate{ - ProcessorBase: actions.ProcessorBase[ProcessorL1InfoTreeUpdate]{ - SupportedEvent: []etherman.EventOrder{etherman.L1InfoTreeOrder}, - SupportedForkdIds: &actions.ForksIdAll}, + ProcessorBase: *actions.NewProcessorBase[ProcessorL1InfoTreeUpdate]( + []etherman.EventOrder{etherman.L1InfoTreeOrder}, + actions.ForksIdToEtrog), state: state} } diff --git a/synchronizer/actions/etrog/processor_l1_sequence_batches.go b/synchronizer/actions/etrog/processor_l1_sequence_batches.go index aa82c9c791..5c89cbf710 100644 --- a/synchronizer/actions/etrog/processor_l1_sequence_batches.go +++ b/synchronizer/actions/etrog/processor_l1_sequence_batches.go @@ -55,9 +55,9 @@ func NewProcessorL1SequenceBatches(state stateProcessSequenceBatches, timeProvider 
syncCommon.TimeProvider, halter syncinterfaces.CriticalErrorHandler) *ProcessorL1SequenceBatchesEtrog { return &ProcessorL1SequenceBatchesEtrog{ - ProcessorBase: actions.ProcessorBase[ProcessorL1SequenceBatchesEtrog]{ - SupportedEvent: []etherman.EventOrder{etherman.SequenceBatchesOrder, etherman.InitialSequenceBatchesOrder}, - SupportedForkdIds: &actions.ForksIdOnlyEtrog}, + ProcessorBase: *actions.NewProcessorBase[ProcessorL1SequenceBatchesEtrog]( + []etherman.EventOrder{etherman.SequenceBatchesOrder, etherman.InitialSequenceBatchesOrder}, + actions.ForksIdOnlyEtrog), state: state, sync: sync, timeProvider: timeProvider, diff --git a/synchronizer/actions/etrog/processor_l1_update_etrog_sequence.go b/synchronizer/actions/etrog/processor_l1_update_etrog_sequence.go index 3bd7e03cef..564154d7e5 100644 --- a/synchronizer/actions/etrog/processor_l1_update_etrog_sequence.go +++ b/synchronizer/actions/etrog/processor_l1_update_etrog_sequence.go @@ -38,9 +38,9 @@ func NewProcessorL1UpdateEtrogSequence(state stateProcessUpdateEtrogSequence, sync syncProcessUpdateEtrogSequenceInterface, timeProvider syncCommon.TimeProvider) *ProcessorL1UpdateEtrogSequence { return &ProcessorL1UpdateEtrogSequence{ - ProcessorBase: actions.ProcessorBase[ProcessorL1UpdateEtrogSequence]{ - SupportedEvent: []etherman.EventOrder{etherman.UpdateEtrogSequenceOrder}, - SupportedForkdIds: &actions.ForksIdOnlyEtrog}, + ProcessorBase: *actions.NewProcessorBase[ProcessorL1UpdateEtrogSequence]( + []etherman.EventOrder{etherman.UpdateEtrogSequenceOrder}, + actions.ForksIdOnlyEtrog), state: state, sync: sync, timeProvider: timeProvider, diff --git a/synchronizer/actions/feijoa/processor_l1_info_tree_update.go b/synchronizer/actions/feijoa/processor_l1_info_tree_update.go index c9f60c8b7f..0d1f8dcccf 100644 --- a/synchronizer/actions/feijoa/processor_l1_info_tree_update.go +++ b/synchronizer/actions/feijoa/processor_l1_info_tree_update.go @@ -1 +1,54 @@ package feijoa + +import ( + "context" + + 
"github.com/0xPolygonHermez/zkevm-node/etherman" + "github.com/0xPolygonHermez/zkevm-node/log" + "github.com/0xPolygonHermez/zkevm-node/state" + "github.com/0xPolygonHermez/zkevm-node/synchronizer/actions" + "github.com/jackc/pgx/v4" +) + +// stateProcessorL1InfoTreeInterface interface required from state +type stateProcessorL1InfoTreeRecursiveInterface interface { + AddL1InfoTreeRecursiveLeaf(ctx context.Context, L1InfoTreeLeaf *state.L1InfoTreeLeaf, dbTx pgx.Tx) (*state.L1InfoTreeExitRootStorageEntry, error) +} + +// ProcessorL1InfoTreeUpdate implements L1EventProcessor for GlobalExitRootsOrder +type ProcessorL1InfoTreeUpdate struct { + actions.ProcessorBase[ProcessorL1InfoTreeUpdate] + state stateProcessorL1InfoTreeRecursiveInterface +} + +// NewProcessorL1InfoTreeUpdate new processor for GlobalExitRootsOrder +func NewProcessorL1InfoTreeUpdate(state stateProcessorL1InfoTreeRecursiveInterface) *ProcessorL1InfoTreeUpdate { + return &ProcessorL1InfoTreeUpdate{ + ProcessorBase: *actions.NewProcessorBase[ProcessorL1InfoTreeUpdate]( + []etherman.EventOrder{etherman.L1InfoTreeOrder}, + actions.ForksIdOnlyEtrog), + state: state} +} + +// Process process event +func (p *ProcessorL1InfoTreeUpdate) Process(ctx context.Context, order etherman.Order, l1Block *etherman.Block, dbTx pgx.Tx) error { + l1InfoTree := l1Block.L1InfoTree[order.Pos] + ger := state.GlobalExitRoot{ + BlockNumber: l1InfoTree.BlockNumber, + MainnetExitRoot: l1InfoTree.MainnetExitRoot, + RollupExitRoot: l1InfoTree.RollupExitRoot, + GlobalExitRoot: l1InfoTree.GlobalExitRoot, + Timestamp: l1InfoTree.Timestamp, + } + l1IntoTreeLeaf := state.L1InfoTreeLeaf{ + GlobalExitRoot: ger, + PreviousBlockHash: l1InfoTree.PreviousBlockHash, + } + entry, err := p.state.AddL1InfoTreeRecursiveLeaf(ctx, &l1IntoTreeLeaf, dbTx) + if err != nil { + log.Errorf("error storing the l1InfoTree(feijoa). BlockNumber: %d, error: %v", l1Block.BlockNumber, err) + return err + } + log.Infof("L1InfoTree(feijoa) stored. 
BlockNumber: %d,GER:%s L1InfoTreeIndex: %d L1InfoRoot:%s", l1Block.BlockNumber, entry.GlobalExitRoot.GlobalExitRoot, entry.L1InfoTreeIndex, entry.L1InfoTreeRoot) + return nil +} diff --git a/synchronizer/actions/forksids.go b/synchronizer/actions/forksids.go index 58b8250376..ddc35ec44c 100644 --- a/synchronizer/actions/forksids.go +++ b/synchronizer/actions/forksids.go @@ -12,19 +12,40 @@ const ( ForkIDEtrog = ForkIdType(7) //nolint:gomnd // ForkIDElderberry is the forkId for Elderberry ForkIDElderberry = ForkIdType(8) //nolint:gomnd + // ForkIDElderberry2 is the forkId for Elderberry2 + ForkIDElderberry2 = ForkIdType(9) //nolint:gomnd + // ForkIDFeijoa is the forkId for Feijoa + ForkIDFeijoa = ForkIdType(10) //nolint:gomnd ) var ( + /// ************** ALL ***************/// // ForksIdAll support all forkIds ForksIdAll = []ForkIdType{WildcardForkId} + /// ************** SINGLE ***************/// + + // ForksIdOnlyFeijoa support only etrog forkId + ForksIdOnlyFeijoa = []ForkIdType{ForkIDFeijoa} + // ForksIdOnlyElderberry support only elderberry forkId - ForksIdOnlyElderberry = []ForkIdType{ForkIDElderberry} + ForksIdOnlyElderberry = []ForkIdType{ForkIDElderberry, ForkIDElderberry2} // ForksIdOnlyEtrog support only etrog forkId ForksIdOnlyEtrog = []ForkIdType{ForkIDEtrog} + /// ************** MULTIPLE ***************/// + // ForksIdToIncaberry support all forkIds till incaberry ForksIdToIncaberry = []ForkIdType{1, 2, 3, 4, 5, ForkIDIncaberry} + + // ForksIdToEtrog support all forkIds till etrog + ForksIdToEtrog = append(ForksIdToIncaberry, ForksIdOnlyEtrog...) + + // ForksIdToElderberry support all forkIds till elderberry + ForksIdToElderberry = append(ForksIdToEtrog, ForksIdOnlyElderberry...) + + // ForksIdToFeijoa support all forkIds till feijoa + ForksIdToFeijoa = append(ForksIdToElderberry, ForksIdOnlyFeijoa...) 
) diff --git a/synchronizer/actions/incaberry/processor_l1_forced_batches.go b/synchronizer/actions/incaberry/processor_l1_forced_batches.go index f0d15094f1..4b180c006c 100644 --- a/synchronizer/actions/incaberry/processor_l1_forced_batches.go +++ b/synchronizer/actions/incaberry/processor_l1_forced_batches.go @@ -23,9 +23,9 @@ type ProcessL1ForcedBatches struct { // NewProcessL1ForcedBatches returns instance of a processor for ForcedBatchesOrder func NewProcessL1ForcedBatches(state stateProcessL1ForcedBatchesInterface) *ProcessL1ForcedBatches { return &ProcessL1ForcedBatches{ - ProcessorBase: actions.ProcessorBase[ProcessL1ForcedBatches]{ - SupportedEvent: []etherman.EventOrder{etherman.ForcedBatchesOrder}, - SupportedForkdIds: &actions.ForksIdAll}, + ProcessorBase: *actions.NewProcessorBase[ProcessL1ForcedBatches]( + []etherman.EventOrder{etherman.ForcedBatchesOrder}, + actions.ForksIdAll), state: state} } diff --git a/synchronizer/actions/incaberry/processor_l1_forkid.go b/synchronizer/actions/incaberry/processor_l1_forkid.go index baeeff6d5e..41112b8752 100644 --- a/synchronizer/actions/incaberry/processor_l1_forkid.go +++ b/synchronizer/actions/incaberry/processor_l1_forkid.go @@ -36,10 +36,9 @@ type ProcessorForkId struct { // NewProcessorForkId returns instance of a processor for ForkIDsOrder func NewProcessorForkId(state stateProcessorForkIdInterface, sync syncProcessorForkIdInterface) *ProcessorForkId { return &ProcessorForkId{ - ProcessorBase: actions.ProcessorBase[ProcessorForkId]{ - SupportedEvent: []etherman.EventOrder{etherman.ForkIDsOrder}, - SupportedForkdIds: &actions.ForksIdAll, - }, + ProcessorBase: *actions.NewProcessorBase[ProcessorForkId]( + []etherman.EventOrder{etherman.ForkIDsOrder}, + actions.ForksIdAll), state: state, sync: sync} } diff --git a/synchronizer/actions/incaberry/processor_l1_global_exit_root.go b/synchronizer/actions/incaberry/processor_l1_global_exit_root.go index 285cd95687..80091c6d1d 100644 --- 
a/synchronizer/actions/incaberry/processor_l1_global_exit_root.go +++ b/synchronizer/actions/incaberry/processor_l1_global_exit_root.go @@ -24,9 +24,9 @@ type ProcessorL1GlobalExitRoot struct { // NewProcessorL1GlobalExitRoot new processor for GlobalExitRootsOrder func NewProcessorL1GlobalExitRoot(state stateProcessorL1GlobalExitRootInterface) *ProcessorL1GlobalExitRoot { return &ProcessorL1GlobalExitRoot{ - ProcessorBase: actions.ProcessorBase[ProcessorL1GlobalExitRoot]{ - SupportedEvent: []etherman.EventOrder{etherman.GlobalExitRootsOrder}, - SupportedForkdIds: &actions.ForksIdToIncaberry}, + ProcessorBase: *actions.NewProcessorBase[ProcessorL1GlobalExitRoot]( + []etherman.EventOrder{etherman.GlobalExitRootsOrder}, + actions.ForksIdToIncaberry), state: state} } diff --git a/synchronizer/actions/incaberry/processor_l1_sequence_batches.go b/synchronizer/actions/incaberry/processor_l1_sequence_batches.go index 4b76e316a9..2d6fe9cce9 100644 --- a/synchronizer/actions/incaberry/processor_l1_sequence_batches.go +++ b/synchronizer/actions/incaberry/processor_l1_sequence_batches.go @@ -64,9 +64,9 @@ type ProcessorL1SequenceBatches struct { func NewProcessorL1SequenceBatches(state stateProcessSequenceBatches, etherMan ethermanProcessSequenceBatches, pool poolProcessSequenceBatchesInterface, eventLog syncinterfaces.EventLogInterface, sync syncProcessSequenceBatchesInterface) *ProcessorL1SequenceBatches { return &ProcessorL1SequenceBatches{ - ProcessorBase: actions.ProcessorBase[ProcessorL1SequenceBatches]{ - SupportedEvent: []etherman.EventOrder{etherman.SequenceBatchesOrder}, - SupportedForkdIds: &actions.ForksIdToIncaberry}, + ProcessorBase: *actions.NewProcessorBase[ProcessorL1SequenceBatches]( + []etherman.EventOrder{etherman.SequenceBatchesOrder}, + actions.ForksIdToIncaberry), state: state, etherMan: etherMan, pool: pool, diff --git a/synchronizer/actions/incaberry/processor_l1_sequence_force_batches.go 
b/synchronizer/actions/incaberry/processor_l1_sequence_force_batches.go index 9948af987e..782eef1b5a 100644 --- a/synchronizer/actions/incaberry/processor_l1_sequence_force_batches.go +++ b/synchronizer/actions/incaberry/processor_l1_sequence_force_batches.go @@ -39,9 +39,9 @@ type ProcessL1SequenceForcedBatches struct { func NewProcessL1SequenceForcedBatches(state stateProcessL1SequenceForcedBatchesInterface, sync syncProcessL1SequenceForcedBatchesInterface) *ProcessL1SequenceForcedBatches { return &ProcessL1SequenceForcedBatches{ - ProcessorBase: actions.ProcessorBase[ProcessL1SequenceForcedBatches]{ - SupportedEvent: []etherman.EventOrder{etherman.SequenceForceBatchesOrder}, - SupportedForkdIds: &actions.ForksIdAll}, + ProcessorBase: *actions.NewProcessorBase[ProcessL1SequenceForcedBatches]( + []etherman.EventOrder{etherman.SequenceForceBatchesOrder}, + actions.ForksIdAll), state: state, sync: sync} } diff --git a/synchronizer/actions/incaberry/processor_l1_verify_batch.go b/synchronizer/actions/incaberry/processor_l1_verify_batch.go index 6cdc39e5e7..142071e89d 100644 --- a/synchronizer/actions/incaberry/processor_l1_verify_batch.go +++ b/synchronizer/actions/incaberry/processor_l1_verify_batch.go @@ -26,9 +26,9 @@ type ProcessorL1VerifyBatch struct { // NewProcessorL1VerifyBatch returns instance of a processor for VerifyBatchOrder func NewProcessorL1VerifyBatch(state stateL1VerifyBatchInterface) *ProcessorL1VerifyBatch { return &ProcessorL1VerifyBatch{ - ProcessorBase: actions.ProcessorBase[ProcessorL1VerifyBatch]{ - SupportedEvent: []etherman.EventOrder{etherman.VerifyBatchOrder, etherman.TrustedVerifyBatchOrder}, - SupportedForkdIds: &actions.ForksIdAll}, + ProcessorBase: *actions.NewProcessorBase[ProcessorL1VerifyBatch]( + []etherman.EventOrder{etherman.VerifyBatchOrder, etherman.TrustedVerifyBatchOrder}, + actions.ForksIdAll), state: state, } } diff --git a/synchronizer/actions/processor_base.go b/synchronizer/actions/processor_base.go index 
ad9c61495b..972520b922 100644 --- a/synchronizer/actions/processor_base.go +++ b/synchronizer/actions/processor_base.go @@ -9,8 +9,17 @@ import ( // ProcessorBase is the base struct for all the processors, if reduces the boilerplate // implementing the Name, SupportedEvents and SupportedForkIds functions type ProcessorBase[T any] struct { - SupportedEvent []etherman.EventOrder - SupportedForkdIds *[]ForkIdType + supportedEvent []etherman.EventOrder + supportedForkIds []ForkIdType +} + +func NewProcessorBase[T any](supportedEvent []etherman.EventOrder, supportedForkIds []ForkIdType) *ProcessorBase[T] { + p := &ProcessorBase[T]{ + supportedEvent: supportedEvent, + supportedForkIds: supportedForkIds, + } + + return p } // Name returns the name of the struct T @@ -23,13 +32,13 @@ func (g *ProcessorBase[T]) Name() string { // SupportedEvents returns the supported events in the struct func (p *ProcessorBase[T]) SupportedEvents() []etherman.EventOrder { - return p.SupportedEvent + return p.supportedEvent } -// SupportedForkIds returns the supported forkIds in the struct or the dafault till incaberry forkId +// SupportedForkIds returns the supported forkIds in the struct or the default till incaberry forkId func (p *ProcessorBase[T]) SupportedForkIds() []ForkIdType { - if p.SupportedForkdIds != nil { - return *p.SupportedForkdIds + if len(p.supportedForkIds) != 0 { + return p.supportedForkIds } // returns none return []ForkIdType{} diff --git a/synchronizer/common/syncinterfaces/mocks/state_full_interface.go b/synchronizer/common/syncinterfaces/mocks/state_full_interface.go index f41e906728..5747b17f73 100644 --- a/synchronizer/common/syncinterfaces/mocks/state_full_interface.go +++ b/synchronizer/common/syncinterfaces/mocks/state_full_interface.go @@ -336,6 +336,66 @@ func (_c *StateFullInterface_AddL1InfoTreeLeaf_Call) RunAndReturn(run func(conte return _c } +// AddL1InfoTreeRecursiveLeaf provides a mock function with given fields: ctx, L1InfoTreeLeaf, dbTx +func (_m 
*StateFullInterface) AddL1InfoTreeRecursiveLeaf(ctx context.Context, L1InfoTreeLeaf *state.L1InfoTreeLeaf, dbTx pgx.Tx) (*state.L1InfoTreeExitRootStorageEntry, error) { + ret := _m.Called(ctx, L1InfoTreeLeaf, dbTx) + + if len(ret) == 0 { + panic("no return value specified for AddL1InfoTreeRecursiveLeaf") + } + + var r0 *state.L1InfoTreeExitRootStorageEntry + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *state.L1InfoTreeLeaf, pgx.Tx) (*state.L1InfoTreeExitRootStorageEntry, error)); ok { + return rf(ctx, L1InfoTreeLeaf, dbTx) + } + if rf, ok := ret.Get(0).(func(context.Context, *state.L1InfoTreeLeaf, pgx.Tx) *state.L1InfoTreeExitRootStorageEntry); ok { + r0 = rf(ctx, L1InfoTreeLeaf, dbTx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*state.L1InfoTreeExitRootStorageEntry) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *state.L1InfoTreeLeaf, pgx.Tx) error); ok { + r1 = rf(ctx, L1InfoTreeLeaf, dbTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// StateFullInterface_AddL1InfoTreeRecursiveLeaf_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddL1InfoTreeRecursiveLeaf' +type StateFullInterface_AddL1InfoTreeRecursiveLeaf_Call struct { + *mock.Call +} + +// AddL1InfoTreeRecursiveLeaf is a helper method to define mock.On call +// - ctx context.Context +// - L1InfoTreeLeaf *state.L1InfoTreeLeaf +// - dbTx pgx.Tx +func (_e *StateFullInterface_Expecter) AddL1InfoTreeRecursiveLeaf(ctx interface{}, L1InfoTreeLeaf interface{}, dbTx interface{}) *StateFullInterface_AddL1InfoTreeRecursiveLeaf_Call { + return &StateFullInterface_AddL1InfoTreeRecursiveLeaf_Call{Call: _e.mock.On("AddL1InfoTreeRecursiveLeaf", ctx, L1InfoTreeLeaf, dbTx)} +} + +func (_c *StateFullInterface_AddL1InfoTreeRecursiveLeaf_Call) Run(run func(ctx context.Context, L1InfoTreeLeaf *state.L1InfoTreeLeaf, dbTx pgx.Tx)) *StateFullInterface_AddL1InfoTreeRecursiveLeaf_Call { + _c.Call.Run(func(args mock.Arguments) { + 
run(args[0].(context.Context), args[1].(*state.L1InfoTreeLeaf), args[2].(pgx.Tx)) + }) + return _c +} + +func (_c *StateFullInterface_AddL1InfoTreeRecursiveLeaf_Call) Return(_a0 *state.L1InfoTreeExitRootStorageEntry, _a1 error) *StateFullInterface_AddL1InfoTreeRecursiveLeaf_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *StateFullInterface_AddL1InfoTreeRecursiveLeaf_Call) RunAndReturn(run func(context.Context, *state.L1InfoTreeLeaf, pgx.Tx) (*state.L1InfoTreeExitRootStorageEntry, error)) *StateFullInterface_AddL1InfoTreeRecursiveLeaf_Call { + _c.Call.Return(run) + return _c +} + // AddSequence provides a mock function with given fields: ctx, sequence, dbTx func (_m *StateFullInterface) AddSequence(ctx context.Context, sequence state.Sequence, dbTx pgx.Tx) error { ret := _m.Called(ctx, sequence, dbTx) diff --git a/synchronizer/common/syncinterfaces/state.go b/synchronizer/common/syncinterfaces/state.go index e92a098c6f..350cc42228 100644 --- a/synchronizer/common/syncinterfaces/state.go +++ b/synchronizer/common/syncinterfaces/state.go @@ -67,6 +67,7 @@ type StateFullInterface interface { GetForkIDByBlockNumber(blockNumber uint64) uint64 GetStoredFlushID(ctx context.Context) (uint64, string, error) AddL1InfoTreeLeaf(ctx context.Context, L1InfoTreeLeaf *state.L1InfoTreeLeaf, dbTx pgx.Tx) (*state.L1InfoTreeExitRootStorageEntry, error) + AddL1InfoTreeRecursiveLeaf(ctx context.Context, L1InfoTreeLeaf *state.L1InfoTreeLeaf, dbTx pgx.Tx) (*state.L1InfoTreeExitRootStorageEntry, error) StoreL2Block(ctx context.Context, batchNumber uint64, l2Block *state.ProcessBlockResponse, txsEGPLog []*state.EffectiveGasPriceLog, dbTx pgx.Tx) error GetL1InfoRootLeafByL1InfoRoot(ctx context.Context, l1InfoRoot common.Hash, dbTx pgx.Tx) (state.L1InfoTreeExitRootStorageEntry, error) UpdateWIPBatch(ctx context.Context, receipt state.ProcessingReceipt, dbTx pgx.Tx) error diff --git a/synchronizer/default_l1processors.go b/synchronizer/default_l1processors.go index 
671d28cea7..58e2c3ce36 100644 --- a/synchronizer/default_l1processors.go +++ b/synchronizer/default_l1processors.go @@ -4,6 +4,7 @@ import ( "github.com/0xPolygonHermez/zkevm-node/synchronizer/actions" "github.com/0xPolygonHermez/zkevm-node/synchronizer/actions/elderberry" "github.com/0xPolygonHermez/zkevm-node/synchronizer/actions/etrog" + "github.com/0xPolygonHermez/zkevm-node/synchronizer/actions/feijoa" "github.com/0xPolygonHermez/zkevm-node/synchronizer/actions/incaberry" "github.com/0xPolygonHermez/zkevm-node/synchronizer/actions/processor_manager" "github.com/0xPolygonHermez/zkevm-node/synchronizer/common" @@ -17,6 +18,7 @@ func defaultsL1EventProcessors(sync *ClientSynchronizer, l2Blockchecker *actions p.Register(actions.NewCheckL2BlockDecorator(incaberry.NewProcessL1SequenceForcedBatches(sync.state, sync), l2Blockchecker)) p.Register(incaberry.NewProcessorForkId(sync.state, sync)) p.Register(etrog.NewProcessorL1InfoTreeUpdate(sync.state)) + p.Register(feijoa.NewProcessorL1InfoTreeUpdate(sync.state)) sequenceBatchesProcessor := etrog.NewProcessorL1SequenceBatches(sync.state, sync, common.DefaultTimeProvider{}, sync.halter) p.Register(actions.NewCheckL2BlockDecorator(sequenceBatchesProcessor, l2Blockchecker)) p.Register(incaberry.NewProcessorL1VerifyBatch(sync.state)) From 19d730388af4a8b990f27eec368f3f7e17d18e5b Mon Sep 17 00:00:00 2001 From: tclemos Date: Tue, 2 Apr 2024 09:35:27 -0300 Subject: [PATCH 11/17] fix linter issues --- l1infotree/tree_recursive.go | 3 +++ synchronizer/actions/forksids.go | 1 + synchronizer/actions/processor_base.go | 1 + 3 files changed, 5 insertions(+) diff --git a/l1infotree/tree_recursive.go b/l1infotree/tree_recursive.go index deae8489ec..7a0e857730 100644 --- a/l1infotree/tree_recursive.go +++ b/l1infotree/tree_recursive.go @@ -10,6 +10,9 @@ type L1InfoTreeRecursive struct { historicL1InfoTree *L1InfoTree snapShot L1InfoTreeRecursiveSnapshot } + +// L1InfoTreeRecursiveSnapshot provides the information generated when a new 
+// leaf is added to the tree type L1InfoTreeRecursiveSnapshot struct { HistoricL1InfoTreeRoot common.Hash L1Data common.Hash diff --git a/synchronizer/actions/forksids.go b/synchronizer/actions/forksids.go index ddc35ec44c..5355bef9d8 100644 --- a/synchronizer/actions/forksids.go +++ b/synchronizer/actions/forksids.go @@ -21,6 +21,7 @@ const ( var ( /// ************** ALL ***************/// + // ForksIdAll support all forkIds ForksIdAll = []ForkIdType{WildcardForkId} diff --git a/synchronizer/actions/processor_base.go b/synchronizer/actions/processor_base.go index 972520b922..fce6a0f971 100644 --- a/synchronizer/actions/processor_base.go +++ b/synchronizer/actions/processor_base.go @@ -13,6 +13,7 @@ type ProcessorBase[T any] struct { supportedForkIds []ForkIdType } +// NewProcessorBase creates and initializes internal fields of an new instance of ProcessorBase func NewProcessorBase[T any](supportedEvent []etherman.EventOrder, supportedForkIds []ForkIdType) *ProcessorBase[T] { p := &ProcessorBase[T]{ supportedEvent: supportedEvent, From c5b8291c7024d484043e99f77c05301e876decec Mon Sep 17 00:00:00 2001 From: tclemos Date: Tue, 2 Apr 2024 20:27:02 -0300 Subject: [PATCH 12/17] revert l1InfoTreeRecursive snapshot implementation; adjust etrog and feijoa info tree processor forkid; refactoring l1InfoTreeV1 naming --- l1infotree/tree_recursive.go | 49 ++++++++-------- l1infotree/tree_recursive_test.go | 56 +++++++------------ state/l1infotree_recursive.go | 4 +- state/pgstatestorage/l1infotree.go | 16 +++--- .../etrog/processor_l1_info_tree_update.go | 2 +- .../feijoa/processor_l1_info_tree_update.go | 2 +- 6 files changed, 58 insertions(+), 71 deletions(-) diff --git a/l1infotree/tree_recursive.go b/l1infotree/tree_recursive.go index 7a0e857730..69db0d6e8a 100644 --- a/l1infotree/tree_recursive.go +++ b/l1infotree/tree_recursive.go @@ -5,10 +5,14 @@ import ( "github.com/ethereum/go-ethereum/crypto" ) +const ( + emptyHistoricL1InfoTreeRoot = 
"0x27ae5ba08d7291c96c8cbddcc148bf48a6d68c7974b94356f53754ef6171d757" +) + // L1InfoTreeRecursive is a recursive implementation of the L1InfoTree of Feijoa type L1InfoTreeRecursive struct { historicL1InfoTree *L1InfoTree - snapShot L1InfoTreeRecursiveSnapshot + currentLeaf common.Hash } // L1InfoTreeRecursiveSnapshot provides the information generated when a new @@ -28,13 +32,8 @@ func NewL1InfoTreeRecursive(height uint8) (*L1InfoTreeRecursive, error) { mtr := &L1InfoTreeRecursive{ historicL1InfoTree: historic, - snapShot: L1InfoTreeRecursiveSnapshot{ - HistoricL1InfoTreeRoot: common.Hash{}, - L1Data: common.Hash{}, - L1InfoTreeRoot: common.Hash{}, - }, + currentLeaf: common.Hash{}, } - return mtr, nil } @@ -46,36 +45,42 @@ func NewL1InfoTreeRecursiveFromLeaves(height uint8, leaves [][32]byte) (*L1InfoT } for i, leaf := range leaves { - snapShot, err := mtr.AddLeaf(uint32(i), leaf) + _, err := mtr.AddLeaf(uint32(i), leaf) if err != nil { return nil, err } - mtr.snapShot = snapShot + mtr.currentLeaf = leaf } return mtr, nil } -// AddLeaf hashes the current historicL1InfoRoot + leaf data into the new leaf value and then adds it to the historicL1InfoTree -func (mt *L1InfoTreeRecursive) AddLeaf(index uint32, leaf [32]byte) (L1InfoTreeRecursiveSnapshot, error) { - //adds the current l1InfoTreeRoot into the tree to generate the next historicL2InfoTree - _, err := mt.historicL1InfoTree.AddLeaf(index, mt.snapShot.L1InfoTreeRoot) +// AddLeaf hashes the current historicL1InfoRoot + currentLeaf data into the new historicLeaf value, +// then adds it to the historicL1InfoTree and finally stores the new leaf as the currentLeaf +func (mt *L1InfoTreeRecursive) AddLeaf(index uint32, leaf [32]byte) (common.Hash, error) { + // adds the current l1InfoTreeRoot into the historic tree to generate + // the next historicL2InfoTreeRoot + l1InfoTreeRoot := mt.GetRoot() + _, err := mt.historicL1InfoTree.AddLeaf(index, l1InfoTreeRoot) if err != nil { - return L1InfoTreeRecursiveSnapshot{}, err 
+ return common.Hash{}, err } - //creates the new snapshot - snapShot := L1InfoTreeRecursiveSnapshot{} - snapShot.HistoricL1InfoTreeRoot = mt.historicL1InfoTree.GetRoot() - snapShot.L1Data = common.BytesToHash(leaf[:]) - snapShot.L1InfoTreeRoot = crypto.Keccak256Hash(snapShot.HistoricL1InfoTreeRoot.Bytes(), snapShot.L1Data.Bytes()) - mt.snapShot = snapShot + mt.currentLeaf = leaf - return snapShot, nil + return mt.GetRoot(), nil } // GetRoot returns the root of the L1InfoTreeRecursive func (mt *L1InfoTreeRecursive) GetRoot() common.Hash { - return mt.snapShot.L1InfoTreeRoot + // if the historicL1InfoTree is empty and the the current leaf is also empty + // returns the root as all zeros 0x0000...0000 + if mt.historicL1InfoTree.GetRoot().String() == emptyHistoricL1InfoTreeRoot && + mt.currentLeaf.Cmp(common.Hash{}) == 0 { + return common.Hash{} + } + + l1InfoTreeRoot := crypto.Keccak256Hash(mt.historicL1InfoTree.GetRoot().Bytes(), mt.currentLeaf[:]) + return l1InfoTreeRoot } // GetHistoricRoot returns the root of the HistoricL1InfoTree diff --git a/l1infotree/tree_recursive_test.go b/l1infotree/tree_recursive_test.go index 8597ba2ad5..df34bbf28d 100644 --- a/l1infotree/tree_recursive_test.go +++ b/l1infotree/tree_recursive_test.go @@ -14,10 +14,9 @@ import ( ) const ( - L1InfoRootRecursiveHeight = uint8(32) - EmptyL1InfoTreeRecursiveRoot = "0x27ae5ba08d7291c96c8cbddcc148bf48a6d68c7974b94356f53754ef6171d757" - - filenameTestData = "../test/vectors/src/merkle-tree/l1-info-tree-recursive/smt-full-output.json" + l1InfoRootRecursiveHeight = uint8(32) + emptyL1InfoTreeRecursiveRoot = "0x27ae5ba08d7291c96c8cbddcc148bf48a6d68c7974b94356f53754ef6171d757" + filenameTestData = "../test/vectors/src/merkle-tree/l1-info-tree-recursive/smt-full-output.json" ) type vectorTestData struct { @@ -43,24 +42,24 @@ func readData(t *testing.T) []vectorTestData { } func TestEmptyL1InfoRootRecursive(t *testing.T) { - mtr, err := l1infotree.NewL1InfoTreeRecursive(L1InfoRootRecursiveHeight) + 
mtr, err := l1infotree.NewL1InfoTreeRecursive(l1InfoRootRecursiveHeight) require.NoError(t, err) require.NotNil(t, mtr) root := mtr.GetRoot() - require.Equal(t, EmptyL1InfoTreeRecursiveRoot, root.String()) + require.Equal(t, common.Hash{}.String(), root.String()) } func TestEmptyHistoricL1InfoRootRecursive(t *testing.T) { - mtr, err := l1infotree.NewL1InfoTreeRecursive(L1InfoRootRecursiveHeight) + mtr, err := l1infotree.NewL1InfoTreeRecursive(l1InfoRootRecursiveHeight) require.NoError(t, err) require.NotNil(t, mtr) root := mtr.GetHistoricRoot() - require.Equal(t, EmptyL1InfoTreeRecursiveRoot, root.String()) + require.Equal(t, emptyL1InfoTreeRecursiveRoot, root.String()) } func TestBuildTreeVectorData(t *testing.T) { data := readData(t) - mtr, err := l1infotree.NewL1InfoTreeRecursive(L1InfoRootRecursiveHeight) + mtr, err := l1infotree.NewL1InfoTreeRecursive(l1InfoRootRecursiveHeight) require.NoError(t, err) for _, testVector := range data { minTimestamp, err := strconv.ParseUint(testVector.MinTimestamp, 10, 0) @@ -69,55 +68,38 @@ func TestBuildTreeVectorData(t *testing.T) { l1DataHash := common.BytesToHash(l1Data[:]) assert.Equal(t, testVector.L1DataHash.String(), l1DataHash.String(), "l1Data doesn't match leaf", testVector.Index) - snapShot, err := mtr.AddLeaf(testVector.Index-1, l1Data) + l1InfoTreeRoot, err := mtr.AddLeaf(testVector.Index-1, l1Data) require.NoError(t, err) - assert.Equal(t, testVector.HistoricL1InfoRoot.String(), snapShot.HistoricL1InfoTreeRoot.String(), "HistoricL1InfoTreeRoot doesn't match leaf", testVector.Index) - assert.Equal(t, testVector.L1DataHash.String(), snapShot.L1Data.String(), "l1Data doesn't match leaf", testVector.Index) - assert.Equal(t, testVector.L1InfoTreeRoot.String(), snapShot.L1InfoTreeRoot.String(), "l1InfoTreeRoot doesn't match leaf", testVector.Index) + assert.Equal(t, testVector.L1InfoTreeRoot.String(), l1InfoTreeRoot.String(), "l1InfoTreeRoot doesn't match leaf", testVector.Index) + assert.Equal(t, 
testVector.L1InfoTreeRoot.String(), mtr.GetRoot().String(), "l1InfoTreeRoot doesn't match leaf", testVector.Index) + assert.Equal(t, testVector.HistoricL1InfoRoot.String(), mtr.GetHistoricRoot().String(), "HistoricL1InfoTreeRoot doesn't match leaf", testVector.Index) } } func TestBuildTreeFromLeaves(t *testing.T) { data := readData(t) - mtr, err := l1infotree.NewL1InfoTreeRecursive(L1InfoRootRecursiveHeight) - require.NoError(t, err) + leaves := [][32]byte{} - var lastSnapshot l1infotree.L1InfoTreeRecursiveSnapshot for _, testVector := range data { - minTimestamp, err := strconv.ParseUint(testVector.MinTimestamp, 10, 0) - require.NoError(t, err) - l1Data := l1infotree.HashLeafData(testVector.GlobalExitRoot, testVector.BlockHash, minTimestamp) - l1DataHash := common.BytesToHash(l1Data[:]) - assert.Equal(t, testVector.L1DataHash.String(), l1DataHash.String(), "l1Data doesn't match leaf", testVector.Index) - - snapShot, err := mtr.AddLeaf(testVector.Index-1, l1Data) - require.NoError(t, err) - leaves = append(leaves, snapShot.L1Data) - lastSnapshot = snapShot + leaves = append(leaves, testVector.L1DataHash) } - newMtr, err := l1infotree.NewL1InfoTreeRecursiveFromLeaves(L1InfoRootRecursiveHeight, leaves) + newMtr, err := l1infotree.NewL1InfoTreeRecursiveFromLeaves(l1InfoRootRecursiveHeight, leaves) require.NoError(t, err) - assert.Equal(t, lastSnapshot.L1InfoTreeRoot.String(), newMtr.GetRoot().String(), "L1InfoTreeRoot doesn't match leaf") + assert.Equal(t, data[len(data)-1].L1InfoTreeRoot.String(), newMtr.GetRoot().String(), "L1InfoTreeRoot doesn't match leaf") } func TestProofsTreeVectorData(t *testing.T) { data := readData(t) - mtr, err := l1infotree.NewL1InfoTreeRecursive(L1InfoRootRecursiveHeight) + mtr, err := l1infotree.NewL1InfoTreeRecursive(l1InfoRootRecursiveHeight) require.NoError(t, err) leaves := [][32]byte{} for _, testVector := range data { - minTimestamp, err := strconv.ParseUint(testVector.MinTimestamp, 10, 0) - require.NoError(t, err) - l1Data := 
l1infotree.HashLeafData(testVector.GlobalExitRoot, testVector.BlockHash, minTimestamp) - l1DataHash := common.BytesToHash(l1Data[:]) - assert.Equal(t, testVector.L1DataHash.String(), l1DataHash.String(), "l1Data doesn't match leaf", testVector.Index) - - snapShot, err := mtr.AddLeaf(testVector.Index-1, l1Data) + l1InfoTreeRoot, err := mtr.AddLeaf(testVector.Index-1, testVector.L1DataHash) require.NoError(t, err) - leaves = append(leaves, snapShot.L1InfoTreeRoot) + leaves = append(leaves, l1InfoTreeRoot) mp, _, err := mtr.ComputeMerkleProof(testVector.Index, leaves) require.NoError(t, err) diff --git a/state/l1infotree_recursive.go b/state/l1infotree_recursive.go index 29e0c2d794..94329ad82a 100644 --- a/state/l1infotree_recursive.go +++ b/state/l1infotree_recursive.go @@ -54,14 +54,14 @@ func (s *State) AddL1InfoTreeRecursiveLeaf(ctx context.Context, l1InfoTreeLeaf * return nil, err } log.Debug("latestIndex: ", gerIndex) - snapShot, err := s.l1InfoTreeRecursive.AddLeaf(newIndex, l1InfoTreeLeaf.Hash()) + l1InfoTreeRoot, err := s.l1InfoTreeRecursive.AddLeaf(newIndex, l1InfoTreeLeaf.Hash()) if err != nil { log.Error("error add new leaf to the L1InfoTreeRecursive. 
Error: ", err) return nil, err } entry := L1InfoTreeExitRootStorageEntry{ L1InfoTreeLeaf: *l1InfoTreeLeaf, - L1InfoTreeRoot: snapShot.L1InfoTreeRoot, + L1InfoTreeRoot: l1InfoTreeRoot, L1InfoTreeIndex: newIndex, } err = s.AddL1InfoRootToExitRoot(ctx, &entry, dbTx) diff --git a/state/pgstatestorage/l1infotree.go b/state/pgstatestorage/l1infotree.go index 07a892f14e..d2081f5cfc 100644 --- a/state/pgstatestorage/l1infotree.go +++ b/state/pgstatestorage/l1infotree.go @@ -11,12 +11,12 @@ import ( ) const ( - l1InfoTreeIndexFieldNameV1 = "l1_info_tree_index" + l1InfoTreeIndexFieldName = "l1_info_tree_index" ) // AddL1InfoRootToExitRoot adds a new entry in ExitRoot and returns index of L1InfoTree and error func (p *PostgresStorage) AddL1InfoRootToExitRoot(ctx context.Context, exitRoot *state.L1InfoTreeExitRootStorageEntry, dbTx pgx.Tx) error { - return p.addL1InfoRootToExitRootVx(ctx, exitRoot, dbTx, l1InfoTreeIndexFieldNameV1) + return p.addL1InfoRootToExitRootVx(ctx, exitRoot, dbTx, l1InfoTreeIndexFieldName) } func (p *PostgresStorage) addL1InfoRootToExitRootVx(ctx context.Context, exitRoot *state.L1InfoTreeExitRootStorageEntry, dbTx pgx.Tx, indexFieldName string) error { @@ -33,7 +33,7 @@ func (p *PostgresStorage) addL1InfoRootToExitRootVx(ctx context.Context, exitRoo } func (p *PostgresStorage) GetAllL1InfoRootEntries(ctx context.Context, dbTx pgx.Tx) ([]state.L1InfoTreeExitRootStorageEntry, error) { - return p.GetAllL1InfoRootEntriesVx(ctx, dbTx, l1InfoTreeIndexFieldNameV1) + return p.GetAllL1InfoRootEntriesVx(ctx, dbTx, l1InfoTreeIndexFieldName) } func (p *PostgresStorage) GetAllL1InfoRootEntriesVx(ctx context.Context, dbTx pgx.Tx, indexFieldName string) ([]state.L1InfoTreeExitRootStorageEntry, error) { @@ -65,7 +65,7 @@ func (p *PostgresStorage) GetAllL1InfoRootEntriesVx(ctx context.Context, dbTx pg // GetLatestL1InfoRoot is used to get the latest L1InfoRoot func (p *PostgresStorage) GetLatestL1InfoRoot(ctx context.Context, maxBlockNumber uint64) 
(state.L1InfoTreeExitRootStorageEntry, error) { - return p.GetLatestL1InfoRootVx(ctx, maxBlockNumber, nil, l1InfoTreeIndexFieldNameV1) + return p.GetLatestL1InfoRootVx(ctx, maxBlockNumber, nil, l1InfoTreeIndexFieldName) } // GetLatestL1InfoRoot is used to get the latest L1InfoRoot @@ -90,7 +90,7 @@ func (p *PostgresStorage) GetLatestL1InfoRootVx(ctx context.Context, maxBlockNum return entry, nil } func (p *PostgresStorage) GetLatestIndex(ctx context.Context, dbTx pgx.Tx) (uint32, error) { - return p.GetLatestIndexVx(ctx, dbTx, l1InfoTreeIndexFieldNameV1) + return p.GetLatestIndexVx(ctx, dbTx, l1InfoTreeIndexFieldName) } func (p *PostgresStorage) GetLatestIndexVx(ctx context.Context, dbTx pgx.Tx, indexFieldName string) (uint32, error) { const getLatestIndexSQL = `SELECT max(%s) as %s FROM state.exit_root @@ -110,7 +110,7 @@ func (p *PostgresStorage) GetLatestIndexVx(ctx context.Context, dbTx pgx.Tx, ind } func (p *PostgresStorage) GetL1InfoRootLeafByL1InfoRoot(ctx context.Context, l1InfoRoot common.Hash, dbTx pgx.Tx) (state.L1InfoTreeExitRootStorageEntry, error) { - return p.GetL1InfoRootLeafByL1InfoRootVx(ctx, l1InfoRoot, dbTx, l1InfoTreeIndexFieldNameV1) + return p.GetL1InfoRootLeafByL1InfoRootVx(ctx, l1InfoRoot, dbTx, l1InfoTreeIndexFieldName) } func (p *PostgresStorage) GetL1InfoRootLeafByL1InfoRootVx(ctx context.Context, l1InfoRoot common.Hash, dbTx pgx.Tx, indexFieldName string) (state.L1InfoTreeExitRootStorageEntry, error) { @@ -129,7 +129,7 @@ func (p *PostgresStorage) GetL1InfoRootLeafByL1InfoRootVx(ctx context.Context, l } func (p *PostgresStorage) GetL1InfoRootLeafByIndex(ctx context.Context, l1InfoTreeIndex uint32, dbTx pgx.Tx) (state.L1InfoTreeExitRootStorageEntry, error) { - return p.GetL1InfoRootLeafByIndexVx(ctx, l1InfoTreeIndex, dbTx, l1InfoTreeIndexFieldNameV1) + return p.GetL1InfoRootLeafByIndexVx(ctx, l1InfoTreeIndex, dbTx, l1InfoTreeIndexFieldName) } func (p *PostgresStorage) GetL1InfoRootLeafByIndexVx(ctx context.Context, l1InfoTreeIndex 
uint32, dbTx pgx.Tx, indexFieldName string) (state.L1InfoTreeExitRootStorageEntry, error) { @@ -147,7 +147,7 @@ func (p *PostgresStorage) GetL1InfoRootLeafByIndexVx(ctx context.Context, l1Info return entry, nil } func (p *PostgresStorage) GetLeafsByL1InfoRoot(ctx context.Context, l1InfoRoot common.Hash, dbTx pgx.Tx) ([]state.L1InfoTreeExitRootStorageEntry, error) { - return p.GetLeafsByL1InfoRootVx(ctx, l1InfoRoot, dbTx, l1InfoTreeIndexFieldNameV1) + return p.GetLeafsByL1InfoRootVx(ctx, l1InfoRoot, dbTx, l1InfoTreeIndexFieldName) } func (p *PostgresStorage) GetLeafsByL1InfoRootVx(ctx context.Context, l1InfoRoot common.Hash, dbTx pgx.Tx, indexFieldName string) ([]state.L1InfoTreeExitRootStorageEntry, error) { diff --git a/synchronizer/actions/etrog/processor_l1_info_tree_update.go b/synchronizer/actions/etrog/processor_l1_info_tree_update.go index 8456ced8b8..3d3a27311b 100644 --- a/synchronizer/actions/etrog/processor_l1_info_tree_update.go +++ b/synchronizer/actions/etrog/processor_l1_info_tree_update.go @@ -26,7 +26,7 @@ func NewProcessorL1InfoTreeUpdate(state stateProcessorL1InfoTreeInterface) *Proc return &ProcessorL1InfoTreeUpdate{ ProcessorBase: *actions.NewProcessorBase[ProcessorL1InfoTreeUpdate]( []etherman.EventOrder{etherman.L1InfoTreeOrder}, - actions.ForksIdToEtrog), + actions.ForksIdToElderberry), state: state} } diff --git a/synchronizer/actions/feijoa/processor_l1_info_tree_update.go b/synchronizer/actions/feijoa/processor_l1_info_tree_update.go index 0d1f8dcccf..818f54fc20 100644 --- a/synchronizer/actions/feijoa/processor_l1_info_tree_update.go +++ b/synchronizer/actions/feijoa/processor_l1_info_tree_update.go @@ -26,7 +26,7 @@ func NewProcessorL1InfoTreeUpdate(state stateProcessorL1InfoTreeRecursiveInterfa return &ProcessorL1InfoTreeUpdate{ ProcessorBase: *actions.NewProcessorBase[ProcessorL1InfoTreeUpdate]( []etherman.EventOrder{etherman.L1InfoTreeOrder}, - actions.ForksIdOnlyEtrog), + actions.ForksIdOnlyFeijoa), state: state} } From 
3ce462fb2b8c69bd22fa64f2cb7642e1f603a5e6 Mon Sep 17 00:00:00 2001 From: tclemos Date: Tue, 2 Apr 2024 20:32:04 -0300 Subject: [PATCH 13/17] force initialize l1InfoTreeRecursive --- cmd/run.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/cmd/run.go b/cmd/run.go index d3a227393e..62c536e95b 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -498,6 +498,12 @@ func newState(ctx context.Context, c *config.Config, etherman *etherman.Client, } log.Infof("Starting L1InfoRoot: %v", l1InfoRoot.String()) + l1InfoTreeRecursiveRoot, err := st.GetCurrentL1InfoTreeRecursiveRoot(ctx, nil) + if err != nil { + log.Fatal("error getting current l1InfoTreeRecursiveRoot. Error: ", err) + } + log.Infof("Starting l1InfoTreeRecursiveRoot: %v", l1InfoTreeRecursiveRoot.String()) + forkIDIntervals, err := forkIDIntervals(ctx, st, etherman, c.NetworkConfig.Genesis.BlockNumber) if err != nil { log.Fatal("error getting forkIDs. Error: ", err) From 1988705b47b12e37951564b63b7c471bcfdc58ff Mon Sep 17 00:00:00 2001 From: tclemos Date: Wed, 3 Apr 2024 13:10:40 -0300 Subject: [PATCH 14/17] docker compose fix --- docs/snap_restore.md | 4 ++-- test/Makefile | 2 +- tools/executor/README.md | 2 +- tools/executor/main.go | 6 +++--- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/snap_restore.md b/docs/snap_restore.md index cd59a13b85..2e56c2945f 100644 --- a/docs/snap_restore.md +++ b/docs/snap_restore.md @@ -79,7 +79,7 @@ You could use `test/docker-compose.yml` to interact with `zkevm-node`: * Run the containers: `make run` * Launch an interactive container: ``` -docker-compose up -d zkevm-sh -docker-compose exec zkevm-sh /bin/sh +docker compose up -d zkevm-sh +docker compose exec zkevm-sh /bin/sh ``` * Inside this shell you can execute the examples of invocation diff --git a/test/Makefile b/test/Makefile index 7b6df67f6d..306cb71c98 100644 --- a/test/Makefile +++ b/test/Makefile @@ -1,4 +1,4 @@ -DOCKERCOMPOSE := docker-compose -f docker-compose.yml +DOCKERCOMPOSE := docker compose 
-f docker-compose.yml DOCKERCOMPOSEAPPSEQ := zkevm-sequencer DOCKERCOMPOSEAPPSEQV1TOV2 := zkevm-sequencer-v1tov2 DOCKERCOMPOSEAPPSEQSENDER := zkevm-sequence-sender diff --git a/tools/executor/README.md b/tools/executor/README.md index 990bf2cd9e..2f9e25a583 100644 --- a/tools/executor/README.md +++ b/tools/executor/README.md @@ -70,7 +70,7 @@ In case some vector doesn't use the default genesis: ```bash make run-db make run-zkprover -docker-compose up -d zkevm-sync +docker compose up -d zkevm-sync ``` 2. Get the entries of the merkletree in JSON format: `PGPASSWORD=prover_pass psql -h 127.0.0.1 -p 5432 -U prover_user -d prover_db -c "select row_to_json(t) from (select encode(hash, 'hex') as hash, encode(data, 'hex') as data from state.merkletree) t" > newGenesis.json` diff --git a/tools/executor/main.go b/tools/executor/main.go index c9e3b3aa86..1c1fc41a2d 100644 --- a/tools/executor/main.go +++ b/tools/executor/main.go @@ -28,20 +28,20 @@ const ( func main() { // Start containers defer func() { - cmd := exec.Command("docker-compose", "down", "--remove-orphans") + cmd := exec.Command("docker compose", "down", "--remove-orphans") if err := cmd.Run(); err != nil { log.Errorf("Failed stop containers: %v", err) return } }() log.Info("Starting DB and prover") - cmd := exec.Command("docker-compose", "up", "-d", "executor-tool-db") + cmd := exec.Command("docker compose", "up", "-d", "executor-tool-db") if out, err := cmd.CombinedOutput(); err != nil { log.Errorf("Failed to star DB: %w. %v", err, out) return } time.Sleep(time.Second * waitForDBSeconds) - cmd = exec.Command("docker-compose", "up", "-d", "executor-tool-prover") + cmd = exec.Command("docker compose", "up", "-d", "executor-tool-prover") if out, err := cmd.CombinedOutput(); err != nil { log.Errorf("Failed to star prover: %v. 
%v", err, out) return From 5466d889a156e146842233e5fd33ea76a58e3753 Mon Sep 17 00:00:00 2001 From: tclemos Date: Wed, 3 Apr 2024 13:41:07 -0300 Subject: [PATCH 15/17] fix linter issues --- tools/executor/main.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/executor/main.go b/tools/executor/main.go index 1c1fc41a2d..97162d0922 100644 --- a/tools/executor/main.go +++ b/tools/executor/main.go @@ -28,20 +28,20 @@ const ( func main() { // Start containers defer func() { - cmd := exec.Command("docker compose", "down", "--remove-orphans") + cmd := exec.Command("docker", "compose", "down", "--remove-orphans") if err := cmd.Run(); err != nil { log.Errorf("Failed stop containers: %v", err) return } }() log.Info("Starting DB and prover") - cmd := exec.Command("docker compose", "up", "-d", "executor-tool-db") + cmd := exec.Command("docker", "compose", "up", "-d", "executor-tool-db") if out, err := cmd.CombinedOutput(); err != nil { log.Errorf("Failed to star DB: %w. %v", err, out) return } time.Sleep(time.Second * waitForDBSeconds) - cmd = exec.Command("docker compose", "up", "-d", "executor-tool-prover") + cmd = exec.Command("docker", "compose", "up", "-d", "executor-tool-prover") if out, err := cmd.CombinedOutput(); err != nil { log.Errorf("Failed to star prover: %v. 
%v", err, out) return From 9d106a5bd31163e671d237175c91013a8710d097 Mon Sep 17 00:00:00 2001 From: tclemos Date: Thu, 4 Apr 2024 10:50:46 -0300 Subject: [PATCH 16/17] update docker compose check --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 83d4fed5fe..18972f2847 100644 --- a/Makefile +++ b/Makefile @@ -43,7 +43,7 @@ check-docker: # Check for Docker-compose .PHONY: check-docker-compose check-docker-compose: - @which docker-compose > /dev/null || (echo "Error: docker-compose is not installed" && exit 1) + @docker compose version > /dev/null || (echo "Error: docker compose is not installed" && exit 1) # Check for Protoc .PHONY: check-protoc From ea4de0e85cc48f1046d9c1457f5512e22297feb6 Mon Sep 17 00:00:00 2001 From: tclemos Date: Thu, 4 Apr 2024 10:56:12 -0300 Subject: [PATCH 17/17] Revert "update docker compose check" This reverts commit 9d106a5bd31163e671d237175c91013a8710d097. --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 18972f2847..83d4fed5fe 100644 --- a/Makefile +++ b/Makefile @@ -43,7 +43,7 @@ check-docker: # Check for Docker-compose .PHONY: check-docker-compose check-docker-compose: - @docker compose version > /dev/null || (echo "Error: docker compose is not installed" && exit 1) + @which docker-compose > /dev/null || (echo "Error: docker-compose is not installed" && exit 1) # Check for Protoc .PHONY: check-protoc