From ecc39d462ca2647ff835d712411971b9a3a044d3 Mon Sep 17 00:00:00 2001 From: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Date: Tue, 20 Feb 2024 09:52:39 +0100 Subject: [PATCH] add sanityChecks --- .../processor_l1_sequence_batches.go | 68 ++++++++++++++++++- .../processor_l1_sequence_batches_test.go | 6 ++ synchronizer/common/syncinterfaces/state.go | 1 + synchronizer/default_l1processors.go | 2 +- 4 files changed, 73 insertions(+), 4 deletions(-) create mode 100644 synchronizer/actions/elderberry/processor_l1_sequence_batches_test.go diff --git a/synchronizer/actions/elderberry/processor_l1_sequence_batches.go b/synchronizer/actions/elderberry/processor_l1_sequence_batches.go index 06414bf11f..2e075121f0 100644 --- a/synchronizer/actions/elderberry/processor_l1_sequence_batches.go +++ b/synchronizer/actions/elderberry/processor_l1_sequence_batches.go @@ -2,33 +2,46 @@ package elderberry import ( "context" + "errors" "fmt" "time" "github.com/0xPolygonHermez/zkevm-node/etherman" "github.com/0xPolygonHermez/zkevm-node/log" + "github.com/0xPolygonHermez/zkevm-node/state" "github.com/0xPolygonHermez/zkevm-node/synchronizer/actions" "github.com/jackc/pgx/v4" ) +var ( + ErrInvalidInitialBatchNumber = errors.New("invalid initial batch number") +) + // PreviousProcessor is the interface that the previous processor (Etrog) type PreviousProcessor interface { ProcessSequenceBatches(ctx context.Context, sequencedBatches []etherman.SequencedBatch, blockNumber uint64, l1BlockTimestamp time.Time, dbTx pgx.Tx) error } +type stateL1SequenceBatchesElderberry interface { + GetLastVirtualBatchNum(ctx context.Context, dbTx pgx.Tx) (uint64, error) + GetL2BlocksByBatchNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) ([]state.L2Block, error) +} + // ProcessorL1SequenceBatchesElderberry is the processor for SequenceBatches for Elderberry type ProcessorL1SequenceBatchesElderberry struct { actions.ProcessorBase[ProcessorL1SequenceBatchesElderberry] 
previousProcessor PreviousProcessor + state stateL1SequenceBatchesElderberry } // NewProcessorL1SequenceBatchesElderberry returns instance of a processor for SequenceBatchesOrder -func NewProcessorL1SequenceBatchesElderberry(previousProcessor PreviousProcessor) *ProcessorL1SequenceBatchesElderberry { +func NewProcessorL1SequenceBatchesElderberry(previousProcessor PreviousProcessor, state stateL1SequenceBatchesElderberry) *ProcessorL1SequenceBatchesElderberry { return &ProcessorL1SequenceBatchesElderberry{ ProcessorBase: actions.ProcessorBase[ProcessorL1SequenceBatchesElderberry]{ SupportedEvent: []etherman.EventOrder{etherman.SequenceBatchesOrder}, SupportedForkdIds: &ForksIdOnlyElderberry}, previousProcessor: previousProcessor, + state: state, } } @@ -46,8 +59,57 @@ func (g *ProcessorL1SequenceBatchesElderberry) Process(ctx context.Context, orde log.Errorf("No elderberry sequenced batch data for batch %d", sbatch.BatchNumber) return fmt.Errorf("no elderberry sequenced batch data for batch %d", sbatch.BatchNumber) } - // We known that the MaxSequenceTimestamp is the same for all the batches + // We need to check that the sequence matches + err := g.sanityCheckExpectedSequence(sbatch.SequencedBatchElderberryData.InitSequencedBatchNumber, dbTx) + if err != nil { + return err + } + // We know that the MaxSequenceTimestamp is the same for all the batches so we can use the first one timeLimit := time.Unix(int64(sbatch.SequencedBatchElderberryData.MaxSequenceTimestamp), 0) - err := g.previousProcessor.ProcessSequenceBatches(ctx, l1Block.SequencedBatches[order.Pos], l1Block.BlockNumber, timeLimit, dbTx) + err = g.previousProcessor.ProcessSequenceBatches(ctx, l1Block.SequencedBatches[order.Pos], l1Block.BlockNumber, timeLimit, dbTx) + // The last L2 block timestamp must match MaxSequenceTimestamp + if err != nil { + return err + } + err = g.sanityCheckTstampLastL2Block(timeLimit, dbTx) + if err != nil { + return err + } return err } + +func (g
*ProcessorL1SequenceBatchesElderberry) sanityCheckExpectedSequence(initialBatchNumber uint64, dbTx pgx.Tx) error { + // We need to check that the sequence matches + lastVirtualBatchNum, err := g.state.GetLastVirtualBatchNum(context.Background(), dbTx) + if err != nil { + log.Errorf("Error getting last virtual batch number: %s", err) + return err + } + if lastVirtualBatchNum+1 != initialBatchNumber { + log.Errorf("The last virtual batch number is not the expected one. Expected: %d (last on DB), got: %d (L1 event)", lastVirtualBatchNum+1, initialBatchNumber) + return fmt.Errorf("the last virtual batch number is not the expected one. Expected: %d (last on DB), got: %d (L1 event) err:%w", lastVirtualBatchNum+1, initialBatchNumber, ErrInvalidInitialBatchNumber) + } + return nil +} + +func (g *ProcessorL1SequenceBatchesElderberry) sanityCheckTstampLastL2Block(timeLimit time.Time, dbTx pgx.Tx) error { + lastVirtualBatchNum, err := g.state.GetLastVirtualBatchNum(context.Background(), dbTx) + if err != nil { + log.Errorf("Error getting last virtual batch number: %s", err) + return err + } + l2blocks, err := g.state.GetL2BlocksByBatchNumber(context.Background(), lastVirtualBatchNum, dbTx) + if err != nil { + log.Errorf("Error getting L2 blocks by batch number: %s", err) + return err + } + if len(l2blocks) == 0 { + return nil + } + lastL2Block := l2blocks[len(l2blocks)-1] + if !lastL2Block.ReceivedAt.Equal(timeLimit) { + log.Errorf("The last L2 block timestamp is not the expected one. 
Expected: %s (L1 event), got: %s (last L2Block)", timeLimit, lastL2Block.ReceivedAt) + return fmt.Errorf("last L2 block timestamp does not match the L1 event timestamp") + } + return nil +} diff --git a/synchronizer/actions/elderberry/processor_l1_sequence_batches_test.go b/synchronizer/actions/elderberry/processor_l1_sequence_batches_test.go new file mode 100644 index 0000000000..9d99dee1a2 --- /dev/null +++ b/synchronizer/actions/elderberry/processor_l1_sequence_batches_test.go @@ -0,0 +1,6 @@ +package elderberry_test + +import "testing" + +func TestProcessorL1SequenceBatches_Process(t *testing.T) { +} diff --git a/synchronizer/common/syncinterfaces/state.go b/synchronizer/common/syncinterfaces/state.go index ef39017869..a35ac56039 100644 --- a/synchronizer/common/syncinterfaces/state.go +++ b/synchronizer/common/syncinterfaces/state.go @@ -69,4 +69,5 @@ type StateFullInterface interface { GetL1InfoTreeDataFromBatchL2Data(ctx context.Context, batchL2Data []byte, dbTx pgx.Tx) (map[uint32]state.L1DataV2, common.Hash, common.Hash, error) GetExitRootByGlobalExitRoot(ctx context.Context, ger common.Hash, dbTx pgx.Tx) (*state.GlobalExitRoot, error) GetForkIDInMemory(forkId uint64) *state.ForkIDInterval + GetL2BlocksByBatchNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) ([]state.L2Block, error) } diff --git a/synchronizer/default_l1processors.go b/synchronizer/default_l1processors.go index 7debbb4b80..29429caac8 100644 --- a/synchronizer/default_l1processors.go +++ b/synchronizer/default_l1processors.go @@ -20,6 +20,6 @@ func defaultsL1EventProcessors(sync *ClientSynchronizer) *processor_manager.L1Ev p.Register(sequenceBatchesProcessor) p.Register(incaberry.NewProcessorL1VerifyBatch(sync.state)) p.Register(etrog.NewProcessorL1UpdateEtrogSequence(sync.state, sync, common.DefaultTimeProvider{})) - p.Register(elderberry.NewProcessorL1SequenceBatchesElderberry(sequenceBatchesProcessor)) + 
p.Register(elderberry.NewProcessorL1SequenceBatchesElderberry(sequenceBatchesProcessor, sync.state)) return p.Build() }