diff --git a/op-node/cmd/batch_decoder/reassemble/reassemble.go b/op-node/cmd/batch_decoder/reassemble/reassemble.go index 18ebba79f15e..8472ba8c03ca 100644 --- a/op-node/cmd/batch_decoder/reassemble/reassemble.go +++ b/op-node/cmd/batch_decoder/reassemble/reassemble.go @@ -107,14 +107,18 @@ func processFrames(cfg *rollup.Config, id derive.ChannelID, frames []FrameWithMe var batches []derive.SingularBatch invalidBatches := false if ch.IsReady() { - br, err := derive.BatchReader(cfg, ch.Reader(), eth.L1BlockRef{}) + br, err := derive.BatchReader(ch.Reader()) if err == nil { for batch, err := br(); err != io.EOF; batch, err = br() { if err != nil { fmt.Printf("Error reading batch for channel %v. Err: %v\n", id.String(), err) invalidBatches = true } else { - batches = append(batches, batch.Batch.SingularBatch) + if batch.BatchType == derive.SingularBatchType { + batches = append(batches, batch.SingularBatch) + } else { + fmt.Printf("batch-type %d is not supported\n", batch.BatchType) + } } } } else { diff --git a/op-node/rollup/derive/attributes_queue.go b/op-node/rollup/derive/attributes_queue.go index 016c39d696b8..023fbb3dccda 100644 --- a/op-node/rollup/derive/attributes_queue.go +++ b/op-node/rollup/derive/attributes_queue.go @@ -32,7 +32,7 @@ type AttributesQueue struct { config *rollup.Config builder AttributesBuilder prev *BatchQueue - batch *BatchData + batch *SingularBatch } func NewAttributesQueue(log log.Logger, cfg *rollup.Config, builder AttributesBuilder, prev *BatchQueue) *AttributesQueue { @@ -71,7 +71,7 @@ func (aq *AttributesQueue) NextAttributes(ctx context.Context, l2SafeHead eth.L2 // createNextAttributes transforms a batch into a payload attributes. This sets `NoTxPool` and appends the batched transactions // to the attributes transaction list -func (aq *AttributesQueue) createNextAttributes(ctx context.Context, batch *BatchData, l2SafeHead eth.L2BlockRef) (*eth.PayloadAttributes, error) { +func (aq *AttributesQueue) createNextAttributes(ctx context.Context, batch *SingularBatch, l2SafeHead eth.L2BlockRef) (*eth.PayloadAttributes, error) { // sanity check parent hash if batch.ParentHash != l2SafeHead.Hash { return nil, NewResetError(fmt.Errorf("valid batch has bad parent hash %s, expected %s", batch.ParentHash, l2SafeHead.Hash)) diff --git a/op-node/rollup/derive/attributes_queue_test.go b/op-node/rollup/derive/attributes_queue_test.go index fef240c576bc..c5b2cde5eca6 100644 --- a/op-node/rollup/derive/attributes_queue_test.go +++ b/op-node/rollup/derive/attributes_queue_test.go @@ -42,13 +42,13 @@ func TestAttributesQueue(t *testing.T) { safeHead.L1Origin = l1Info.ID() safeHead.Time = l1Info.InfoTime - batch := NewSingularBatchData(SingularBatch{ + batch := SingularBatch{ ParentHash: safeHead.Hash, EpochNum: rollup.Epoch(l1Info.InfoNum), EpochHash: l1Info.InfoHash, Timestamp: safeHead.Time + cfg.BlockTime, Transactions: []eth.Data{eth.Data("foobar"), eth.Data("example")}, - }) + } parentL1Cfg := eth.SystemConfig{ BatcherAddr: common.Address{42}, @@ -80,7 +80,7 @@ func TestAttributesQueue(t *testing.T) { aq := NewAttributesQueue(testlog.Logger(t, log.LvlError), cfg, attrBuilder, nil) - actual, err := aq.createNextAttributes(context.Background(), batch, safeHead) + actual, err := aq.createNextAttributes(context.Background(), &batch, safeHead) require.NoError(t, err) require.Equal(t, attrs, *actual) diff --git a/op-node/rollup/derive/batch_queue.go b/op-node/rollup/derive/batch_queue.go index 5f57299879cb..b12f08c54fbd 100644 --- a/op-node/rollup/derive/batch_queue.go +++
b/op-node/rollup/derive/batch_queue.go @@ -29,7 +29,12 @@ import ( type NextBatchProvider interface { Origin() eth.L1BlockRef - NextBatch(ctx context.Context) (*BatchData, error) + NextBatch(ctx context.Context) (Batch, error) +} + +type SafeBlockFetcher interface { + L2BlockRefByNumber(context.Context, uint64) (eth.L2BlockRef, error) + PayloadByNumber(context.Context, uint64) (*eth.ExecutionPayload, error) } // BatchQueue contains a set of batches for every L1 block. @@ -42,16 +47,22 @@ type BatchQueue struct { l1Blocks []eth.L1BlockRef - // batches in order of when we've first seen them, grouped by L2 timestamp - batches map[uint64][]*BatchWithL1InclusionBlock + // batches in order of when we've first seen them + batches []*BatchWithL1InclusionBlock + + // nextSpan is cached SingularBatches derived from SpanBatch + nextSpan []*SingularBatch + + l2 SafeBlockFetcher } // NewBatchQueue creates a BatchQueue, which should be Reset(origin) before use. -func NewBatchQueue(log log.Logger, cfg *rollup.Config, prev NextBatchProvider) *BatchQueue { +func NewBatchQueue(log log.Logger, cfg *rollup.Config, prev NextBatchProvider, l2 SafeBlockFetcher) *BatchQueue { return &BatchQueue{ log: log, config: cfg, prev: prev, + l2: l2, } } @@ -59,7 +70,37 @@ func (bq *BatchQueue) Origin() eth.L1BlockRef { return bq.prev.Origin() } -func (bq *BatchQueue) NextBatch(ctx context.Context, safeL2Head eth.L2BlockRef) (*BatchData, error) { +// popNextBatch pops the next batch from the current queued up span-batch nextSpan. +// The queue must be non-empty, or the function will panic. +func (bq *BatchQueue) popNextBatch(safeL2Head eth.L2BlockRef) *SingularBatch { + if len(bq.nextSpan) == 0 { + panic("popping non-existent span-batch, invalid state") + } + nextBatch := bq.nextSpan[0] + bq.nextSpan = bq.nextSpan[1:] + // Must set ParentHash before return. we can use safeL2Head because the parentCheck is verified in CheckBatch(). + nextBatch.ParentHash = safeL2Head.Hash + return nextBatch +} + +func (bq *BatchQueue) maybeAdvanceEpoch(nextBatch *SingularBatch) { + if len(bq.l1Blocks) == 0 { + return + } + if nextBatch.GetEpochNum() == rollup.Epoch(bq.l1Blocks[0].Number)+1 { + // Advance epoch if necessary + bq.l1Blocks = bq.l1Blocks[1:] + } +} + +func (bq *BatchQueue) NextBatch(ctx context.Context, safeL2Head eth.L2BlockRef) (*SingularBatch, error) { + if len(bq.nextSpan) > 0 { + // If there are cached singular batches, pop first one and return. + nextBatch := bq.popNextBatch(safeL2Head) + bq.maybeAdvanceEpoch(nextBatch) + return nextBatch, nil + } + // Note: We use the origin that we will have to determine if it's behind. This is important // because it's the future origin that gets saved into the l1Blocks array. 
// We always update the origin of this stage if it is not the same so after the update code @@ -89,7 +130,7 @@ func (bq *BatchQueue) NextBatch(ctx context.Context, safeL2Head eth.L2BlockRef) } else if err != nil { return nil, err } else if !originBehind { - bq.AddBatch(batch, safeL2Head) + bq.AddBatch(ctx, batch, safeL2Head) } // Skip adding data unless we are up to date with the origin, but do fully @@ -111,43 +152,71 @@ func (bq *BatchQueue) NextBatch(ctx context.Context, safeL2Head eth.L2BlockRef) } else if err != nil { return nil, err } - return batch, nil + + var nextBatch *SingularBatch + switch batch.GetBatchType() { + case SingularBatchType: + singularBatch, ok := batch.(*SingularBatch) + if !ok { + return nil, NewCriticalError(errors.New("failed type assertion to SingularBatch")) + } + nextBatch = singularBatch + case SpanBatchType: + spanBatch, ok := batch.(*SpanBatch) + if !ok { + return nil, NewCriticalError(errors.New("failed type assertion to SpanBatch")) + } + // If next batch is SpanBatch, convert it to SingularBatches. + singularBatches, err := spanBatch.GetSingularBatches(bq.l1Blocks, safeL2Head) + if err != nil { + return nil, NewCriticalError(err) + } + bq.nextSpan = singularBatches + // span-batches are non-empty, so the below pop is safe. + nextBatch = bq.popNextBatch(safeL2Head) + default: + return nil, NewCriticalError(fmt.Errorf("unrecognized batch type: %d", batch.GetBatchType())) + } + + bq.maybeAdvanceEpoch(nextBatch) + return nextBatch, nil } func (bq *BatchQueue) Reset(ctx context.Context, base eth.L1BlockRef, _ eth.SystemConfig) error { // Copy over the Origin from the next stage // It is set in the engine queue (two stages away) such that the L2 Safe Head origin is the progress bq.origin = base - bq.batches = make(map[uint64][]*BatchWithL1InclusionBlock) + bq.batches = []*BatchWithL1InclusionBlock{} // Include the new origin as an origin to build on // Note: This is only for the initialization case. During normal resets we will later // throw out this block. bq.l1Blocks = bq.l1Blocks[:0] bq.l1Blocks = append(bq.l1Blocks, base) + bq.nextSpan = bq.nextSpan[:0] return io.EOF } -func (bq *BatchQueue) AddBatch(batch *BatchData, l2SafeHead eth.L2BlockRef) { +func (bq *BatchQueue) AddBatch(ctx context.Context, batch Batch, l2SafeHead eth.L2BlockRef) { if len(bq.l1Blocks) == 0 { - panic(fmt.Errorf("cannot add batch with timestamp %d, no origin was prepared", batch.Timestamp)) + panic(fmt.Errorf("cannot add batch with timestamp %d, no origin was prepared", batch.GetTimestamp())) } data := BatchWithL1InclusionBlock{ L1InclusionBlock: bq.origin, Batch: batch, } - validity := CheckBatch(bq.config, bq.log, bq.l1Blocks, l2SafeHead, &data) + validity := CheckBatch(ctx, bq.config, bq.log, bq.l1Blocks, l2SafeHead, &data, bq.l2) if validity == BatchDrop { return // if we do drop the batch, CheckBatch will log the drop reason with WARN level. } - bq.log.Debug("Adding batch", "batch_timestamp", batch.Timestamp, "parent_hash", batch.ParentHash, "batch_epoch", batch.Epoch(), "txs", len(batch.Transactions)) - bq.batches[batch.Timestamp] = append(bq.batches[batch.Timestamp], &data) + batch.LogContext(bq.log).Debug("Adding batch") + bq.batches = append(bq.batches, &data) } // deriveNextBatch derives the next batch to apply on top of the current L2 safe head, // following the validity rules imposed on consecutive batches, // based on currently available buffered batch and L1 origin information. // If no batch can be derived yet, then (nil, io.EOF) is returned. 
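Before deriveNextBatch below, it is worth pinning down the span-flattening flow that NextBatch above implements: a SpanBatch is converted to SingularBatches once via GetSingularBatches, cached in bq.nextSpan, and then served one block at a time through popNextBatch, which stitches each popped batch onto the current safe head. A minimal, self-contained sketch of that pop-and-stitch step, using trimmed-down stand-in types rather than the real derive package:

package main

import "fmt"

// SingularBatch is a trimmed stand-in for derive.SingularBatch.
type SingularBatch struct {
	ParentHash string
	Timestamp  uint64
}

// BatchQueue keeps only the nextSpan cache from the real type.
type BatchQueue struct {
	nextSpan []*SingularBatch // flattened remainder of the last span batch
}

// popNextBatch mirrors the real method: pop one cached singular batch and set
// its ParentHash from the safe head. This is sound because the span batch's
// parent hash was already verified against the chain in CheckBatch.
func (bq *BatchQueue) popNextBatch(safeHeadHash string) *SingularBatch {
	if len(bq.nextSpan) == 0 {
		panic("popping non-existent span-batch, invalid state")
	}
	next := bq.nextSpan[0]
	bq.nextSpan = bq.nextSpan[1:]
	next.ParentHash = safeHeadHash
	return next
}

func main() {
	bq := &BatchQueue{nextSpan: []*SingularBatch{{Timestamp: 12}, {Timestamp: 14}}}
	fmt.Println(bq.popNextBatch("0xaaa")) // &{0xaaa 12}
	fmt.Println(bq.popNextBatch("0xbbb")) // &{0xbbb 14}
}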
-func (bq *BatchQueue) deriveNextBatch(ctx context.Context, outOfData bool, l2SafeHead eth.L2BlockRef) (*BatchData, error) { +func (bq *BatchQueue) deriveNextBatch(ctx context.Context, outOfData bool, l2SafeHead eth.L2BlockRef) (Batch, error) { if len(bq.l1Blocks) == 0 { return nil, NewCriticalError(errors.New("cannot derive next batch, no origin was prepared")) } @@ -170,19 +239,15 @@ func (bq *BatchQueue) deriveNextBatch(ctx context.Context, outOfData bool, l2Saf // Go over all batches, in order of inclusion, and find the first batch we can accept. // We filter in-place by only remembering the batches that may be processed in the future, or those we are undecided on. var remaining []*BatchWithL1InclusionBlock - candidates := bq.batches[nextTimestamp] batchLoop: - for i, batch := range candidates { - validity := CheckBatch(bq.config, bq.log.New("batch_index", i), bq.l1Blocks, l2SafeHead, batch) + for i, batch := range bq.batches { + validity := CheckBatch(ctx, bq.config, bq.log.New("batch_index", i), bq.l1Blocks, l2SafeHead, batch, bq.l2) switch validity { case BatchFuture: - return nil, NewCriticalError(fmt.Errorf("found batch with timestamp %d marked as future batch, but expected timestamp %d", batch.Batch.Timestamp, nextTimestamp)) + remaining = append(remaining, batch) + continue case BatchDrop: - bq.log.Warn("dropping batch", - "batch_timestamp", batch.Batch.Timestamp, - "parent_hash", batch.Batch.ParentHash, - "batch_epoch", batch.Batch.Epoch(), - "txs", len(batch.Batch.Transactions), + batch.Batch.LogContext(bq.log).Warn("Dropping batch", "l2_safe_head", l2SafeHead.ID(), "l2_safe_head_time", l2SafeHead.Time, ) @@ -191,29 +256,20 @@ batchLoop: nextBatch = batch // don't keep the current batch in the remaining items since we are processing it now, // but retain every batch we didn't get to yet. - remaining = append(remaining, candidates[i+1:]...) + remaining = append(remaining, bq.batches[i+1:]...) break batchLoop case BatchUndecided: - remaining = append(remaining, batch) - bq.batches[nextTimestamp] = remaining + remaining = append(remaining, bq.batches[i:]...) + bq.batches = remaining return nil, io.EOF default: return nil, NewCriticalError(fmt.Errorf("unknown batch validity type: %d", validity)) } } - // clean up if we remove the final batch for this timestamp - if len(remaining) == 0 { - delete(bq.batches, nextTimestamp) - } else { - bq.batches[nextTimestamp] = remaining - } + bq.batches = remaining if nextBatch != nil { - // advance epoch if necessary - if nextBatch.Batch.EpochNum == rollup.Epoch(epoch.Number)+1 { - bq.l1Blocks = bq.l1Blocks[1:] - } - bq.log.Info("Found next batch", "epoch", epoch, "batch_epoch", nextBatch.Batch.EpochNum, "batch_timestamp", nextBatch.Batch.Timestamp) + nextBatch.Batch.LogContext(bq.log).Info("Found next batch") return nextBatch.Batch, nil } @@ -243,15 +299,13 @@ batchLoop: // batch to ensure that we at least have one batch per epoch. 
if nextTimestamp < nextEpoch.Time || firstOfEpoch { bq.log.Info("Generating next batch", "epoch", epoch, "timestamp", nextTimestamp) - return NewSingularBatchData( - SingularBatch{ - ParentHash: l2SafeHead.Hash, - EpochNum: rollup.Epoch(epoch.Number), - EpochHash: epoch.Hash, - Timestamp: nextTimestamp, - Transactions: nil, - }, - ), nil + return &SingularBatch{ + ParentHash: l2SafeHead.Hash, + EpochNum: rollup.Epoch(epoch.Number), + EpochHash: epoch.Hash, + Timestamp: nextTimestamp, + Transactions: nil, + }, nil } // At this point we have auto generated every batch for the current epoch diff --git a/op-node/rollup/derive/batch_queue_test.go b/op-node/rollup/derive/batch_queue_test.go index 8bad5f22a6a6..3580f78c40d6 100644 --- a/op-node/rollup/derive/batch_queue_test.go +++ b/op-node/rollup/derive/batch_queue_test.go @@ -3,12 +3,15 @@ package derive import ( "context" "encoding/binary" + "errors" "io" + "math/big" "math/rand" "testing" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" "github.com/stretchr/testify/require" @@ -20,7 +23,7 @@ import ( type fakeBatchQueueInput struct { i int - batches []*BatchData + batches []Batch errors []error origin eth.L1BlockRef } @@ -29,7 +32,7 @@ func (f *fakeBatchQueueInput) Origin() eth.L1BlockRef { return f.origin } -func (f *fakeBatchQueueInput) NextBatch(ctx context.Context) (*BatchData, error) { +func (f *fakeBatchQueueInput) NextBatch(ctx context.Context) (Batch, error) { if f.i >= len(f.batches) { return nil, io.EOF } @@ -45,16 +48,74 @@ func mockHash(time uint64, layer uint8) common.Hash { return hash } -func b(timestamp uint64, epoch eth.L1BlockRef) *BatchData { +func b(chainId *big.Int, timestamp uint64, epoch eth.L1BlockRef) *SingularBatch { rng := rand.New(rand.NewSource(int64(timestamp))) - data := testutils.RandomData(rng, 20) - return NewSingularBatchData(SingularBatch{ + signer := types.NewLondonSigner(chainId) + tx := testutils.RandomTx(rng, new(big.Int).SetUint64(rng.Uint64()), signer) + txData, _ := tx.MarshalBinary() + return &SingularBatch{ ParentHash: mockHash(timestamp-2, 2), Timestamp: timestamp, EpochNum: rollup.Epoch(epoch.Number), EpochHash: epoch.Hash, - Transactions: []hexutil.Bytes{data}, - }) + Transactions: []hexutil.Bytes{txData}, + } +} + +func buildSpanBatches(t *testing.T, parent *eth.L2BlockRef, singularBatches []*SingularBatch, blockCounts []int, chainId *big.Int) []Batch { + var spanBatches []Batch + idx := 0 + for _, count := range blockCounts { + span := NewSpanBatch(singularBatches[idx : idx+count]) + spanBatches = append(spanBatches, span) + idx += count + } + return spanBatches +} + +func getSpanBatchTime(batchType int) *uint64 { + minTs := uint64(0) + if batchType == SpanBatchType { + return &minTs + } + return nil +} + +func l1InfoDepositTx(t *testing.T, l1BlockNum uint64) hexutil.Bytes { + l1Info := L1BlockInfo{ + Number: l1BlockNum, + BaseFee: big.NewInt(0), + } + infoData, err := l1Info.MarshalBinary() + require.NoError(t, err) + depositTx := &types.DepositTx{ + Data: infoData, + } + txData, err := types.NewTx(depositTx).MarshalBinary() + require.NoError(t, err) + return txData +} + +func singularBatchToPayload(t *testing.T, batch *SingularBatch, blockNumber uint64) eth.ExecutionPayload { + txs := []hexutil.Bytes{l1InfoDepositTx(t, uint64(batch.EpochNum))} + txs = append(txs, batch.Transactions...) 
+ return eth.ExecutionPayload{ + BlockHash: mockHash(batch.Timestamp, 2), + ParentHash: batch.ParentHash, + BlockNumber: hexutil.Uint64(blockNumber), + Timestamp: hexutil.Uint64(batch.Timestamp), + Transactions: txs, + } +} + +func singularBatchToBlockRef(t *testing.T, batch *SingularBatch, blockNumber uint64) eth.L2BlockRef { + return eth.L2BlockRef{ + Hash: mockHash(batch.Timestamp, 2), + Number: blockNumber, + ParentHash: batch.ParentHash, + Time: batch.Timestamp, + L1Origin: eth.BlockID{Hash: batch.EpochHash, Number: uint64(batch.EpochNum)}, + } } func L1Chain(l1Times []uint64) []eth.L1BlockRef { @@ -73,10 +134,37 @@ func L1Chain(l1Times []uint64) []eth.L1BlockRef { return out } -// TestBatchQueueNewOrigin tests that the batch queue properly saves the new origin +func TestBatchQueue(t *testing.T) { + tests := []struct { + name string + f func(t *testing.T, batchType int) + }{ + {"BatchQueueNewOrigin", BatchQueueNewOrigin}, + {"BatchQueueEager", BatchQueueEager}, + {"BatchQueueInvalidInternalAdvance", BatchQueueInvalidInternalAdvance}, + {"BatchQueueMissing", BatchQueueMissing}, + {"BatchQueueAdvancedEpoch", BatchQueueAdvancedEpoch}, + {"BatchQueueShuffle", BatchQueueShuffle}, + } + for _, test := range tests { + test := test + t.Run(test.name+"_SingularBatch", func(t *testing.T) { + test.f(t, SingularBatchType) + }) + } + + for _, test := range tests { + test := test + t.Run(test.name+"_SpanBatch", func(t *testing.T) { + test.f(t, SpanBatchType) + }) + } +} + +// BatchQueueNewOrigin tests that the batch queue properly saves the new origin // when the safehead's origin is ahead of the pipeline's origin (as is after a reset). // This issue was fixed in https://github.com/ethereum-optimism/optimism/pull/3694 -func TestBatchQueueNewOrigin(t *testing.T) { +func BatchQueueNewOrigin(t *testing.T, batchType int) { log := testlog.Logger(t, log.LvlCrit) l1 := L1Chain([]uint64{10, 15, 20, 25}) safeHead := eth.L2BlockRef{ @@ -94,15 +182,16 @@ func TestBatchQueueNewOrigin(t *testing.T) { BlockTime: 2, MaxSequencerDrift: 600, SeqWindowSize: 2, + SpanBatchTime: getSpanBatchTime(batchType), } input := &fakeBatchQueueInput{ - batches: []*BatchData{nil}, + batches: []Batch{nil}, errors: []error{io.EOF}, origin: l1[0], } - bq := NewBatchQueue(log, cfg, input) + bq := NewBatchQueue(log, cfg, input, nil) _ = bq.Reset(context.Background(), l1[0], eth.SystemConfig{}) require.Equal(t, []eth.L1BlockRef{l1[0]}, bq.l1Blocks) @@ -133,11 +222,12 @@ func TestBatchQueueNewOrigin(t *testing.T) { require.Equal(t, l1[2], bq.origin) } -// TestBatchQueueEager adds a bunch of contiguous batches and asserts that +// BatchQueueEager adds a bunch of contiguous batches and asserts that // enough calls to `NextBatch` return all of those batches. 
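Before the individual scenarios, note that the TestBatchQueue wrapper above is a plain table-driven fan-out: each scenario is written once as a func(t *testing.T, batchType int) and registered twice, once per batch type. A minimal sketch of the same pattern, with illustrative scenario names (the batch-type constants are assumed to be 0 and 1, mirroring the derive package):

package example

import "testing"

const (
	SingularBatchType = 0 // assumed values, mirroring the derive package constants
	SpanBatchType     = 1
)

func TestAllScenarios(t *testing.T) {
	scenarios := []struct {
		name string
		f    func(t *testing.T, batchType int)
	}{
		{"Eager", func(t *testing.T, batchType int) { /* scenario body */ }},
		{"Missing", func(t *testing.T, batchType int) { /* scenario body */ }},
	}
	suffixes := map[int]string{SingularBatchType: "_SingularBatch", SpanBatchType: "_SpanBatch"}
	for _, s := range scenarios {
		s := s // capture the range variable (pre-Go 1.22 loop semantics)
		for bt, suffix := range suffixes {
			bt := bt
			t.Run(s.name+suffix, func(t *testing.T) { s.f(t, bt) })
		}
	}
}

Writing each scenario against the batchType parameter keeps every assertion batch-type-agnostic, so the test bodies only branch on batchType when constructing their inputs.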
-func TestBatchQueueEager(t *testing.T) { +func BatchQueueEager(t *testing.T, batchType int) { log := testlog.Logger(t, log.LvlCrit) l1 := L1Chain([]uint64{10, 20, 30}) + chainId := big.NewInt(1234) safeHead := eth.L2BlockRef{ Hash: mockHash(10, 2), Number: 0, @@ -153,41 +243,69 @@ func TestBatchQueueEager(t *testing.T) { BlockTime: 2, MaxSequencerDrift: 600, SeqWindowSize: 30, + SpanBatchTime: getSpanBatchTime(batchType), + L2ChainID: chainId, } - batches := []*BatchData{b(12, l1[0]), b(14, l1[0]), b(16, l1[0]), b(18, l1[0]), b(20, l1[0]), b(22, l1[0]), b(24, l1[1]), nil} - errors := []error{nil, nil, nil, nil, nil, nil, nil, io.EOF} + // expected output of BatchQueue.NextBatch() + expectedOutputBatches := []*SingularBatch{ + b(cfg.L2ChainID, 12, l1[0]), + b(cfg.L2ChainID, 14, l1[0]), + b(cfg.L2ChainID, 16, l1[0]), + b(cfg.L2ChainID, 18, l1[0]), + b(cfg.L2ChainID, 20, l1[0]), + b(cfg.L2ChainID, 22, l1[0]), + nil, + } + // expected error of BatchQueue.NextBatch() + expectedOutputErrors := []error{nil, nil, nil, nil, nil, nil, io.EOF} + // errors will be returned by fakeBatchQueueInput.NextBatch() + inputErrors := expectedOutputErrors + // batches will be returned by fakeBatchQueueInput + var inputBatches []Batch + if batchType == SpanBatchType { + spanBlockCounts := []int{1, 2, 3} + inputErrors = []error{nil, nil, nil, io.EOF} + inputBatches = buildSpanBatches(t, &safeHead, expectedOutputBatches, spanBlockCounts, chainId) + inputBatches = append(inputBatches, nil) + } else { + for _, singularBatch := range expectedOutputBatches { + inputBatches = append(inputBatches, singularBatch) + } + } input := &fakeBatchQueueInput{ - batches: batches, - errors: errors, + batches: inputBatches, + errors: inputErrors, origin: l1[0], } - bq := NewBatchQueue(log, cfg, input) + bq := NewBatchQueue(log, cfg, input, nil) _ = bq.Reset(context.Background(), l1[0], eth.SystemConfig{}) // Advance the origin input.origin = l1[1] - for i := 0; i < len(batches); i++ { + for i := 0; i < len(expectedOutputBatches); i++ { b, e := bq.NextBatch(context.Background(), safeHead) - require.ErrorIs(t, e, errors[i]) - require.Equal(t, batches[i], b) - - if b != nil { + require.ErrorIs(t, e, expectedOutputErrors[i]) + if b == nil { + require.Nil(t, expectedOutputBatches[i]) + } else { + require.Equal(t, expectedOutputBatches[i], b) safeHead.Number += 1 - safeHead.Time += 2 + safeHead.Time += cfg.BlockTime safeHead.Hash = mockHash(b.Timestamp, 2) safeHead.L1Origin = b.Epoch() } } } -// TestBatchQueueInvalidInternalAdvance asserts that we do not miss an epoch when generating batches. +// BatchQueueInvalidInternalAdvance asserts that we do not miss an epoch when generating batches. // This is a regression test for CLI-3378. 
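BatchQueueEager above derives its span-batch inputs from the expected singular outputs by regrouping them according to spanBlockCounts, so counts {1, 2, 3} turn the six batches at t=12..22 into spans [12], [14 16], [18 20 22]. The grouping itself is just consecutive slicing; a generic sketch under that reading (partitionByCounts is a hypothetical helper, not part of the diff):

package example

// partitionByCounts splits items into consecutive groups of the given sizes,
// mirroring how buildSpanBatches regroups singular batches into span batches.
// The counts are assumed to sum to len(items).
func partitionByCounts[T any](items []T, counts []int) [][]T {
	var groups [][]T
	idx := 0
	for _, count := range counts {
		groups = append(groups, items[idx:idx+count])
		idx += count
	}
	return groups
}

With this shape, the SpanBatch path of each test only has to wrap every group in NewSpanBatch and shorten the expected input-error list, since one span batch now stands in for several singular ones.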
-func TestBatchQueueInvalidInternalAdvance(t *testing.T) { +func BatchQueueInvalidInternalAdvance(t *testing.T, batchType int) { log := testlog.Logger(t, log.LvlTrace) l1 := L1Chain([]uint64{10, 15, 20, 25, 30}) + chainId := big.NewInt(1234) safeHead := eth.L2BlockRef{ Hash: mockHash(10, 2), Number: 0, @@ -203,27 +321,54 @@ func TestBatchQueueInvalidInternalAdvance(t *testing.T) { BlockTime: 2, MaxSequencerDrift: 600, SeqWindowSize: 2, + SpanBatchTime: getSpanBatchTime(batchType), + L2ChainID: chainId, } - batches := []*BatchData{b(12, l1[0]), b(14, l1[0]), b(16, l1[0]), b(18, l1[0]), b(20, l1[0]), b(22, l1[0]), nil} - errors := []error{nil, nil, nil, nil, nil, nil, io.EOF} + // expected output of BatchQueue.NextBatch() + expectedOutputBatches := []*SingularBatch{ + b(cfg.L2ChainID, 12, l1[0]), + b(cfg.L2ChainID, 14, l1[0]), + b(cfg.L2ChainID, 16, l1[0]), + b(cfg.L2ChainID, 18, l1[0]), + b(cfg.L2ChainID, 20, l1[0]), + b(cfg.L2ChainID, 22, l1[0]), + nil, + } + // expected error of BatchQueue.NextBatch() + expectedOutputErrors := []error{nil, nil, nil, nil, nil, nil, io.EOF} + // errors will be returned by fakeBatchQueueInput.NextBatch() + inputErrors := expectedOutputErrors + // batches will be returned by fakeBatchQueueInput + var inputBatches []Batch + if batchType == SpanBatchType { + spanBlockCounts := []int{1, 2, 3} + inputErrors = []error{nil, nil, nil, io.EOF} + inputBatches = buildSpanBatches(t, &safeHead, expectedOutputBatches, spanBlockCounts, chainId) + inputBatches = append(inputBatches, nil) + } else { + for _, singularBatch := range expectedOutputBatches { + inputBatches = append(inputBatches, singularBatch) + } + } input := &fakeBatchQueueInput{ - batches: batches, - errors: errors, + batches: inputBatches, + errors: inputErrors, origin: l1[0], } - bq := NewBatchQueue(log, cfg, input) + bq := NewBatchQueue(log, cfg, input, nil) _ = bq.Reset(context.Background(), l1[0], eth.SystemConfig{}) // Load continuous batches for epoch 0 - for i := 0; i < len(batches); i++ { + for i := 0; i < len(expectedOutputBatches); i++ { b, e := bq.NextBatch(context.Background(), safeHead) - require.ErrorIs(t, e, errors[i]) - require.Equal(t, batches[i], b) - - if b != nil { + require.ErrorIs(t, e, expectedOutputErrors[i]) + if b == nil { + require.Nil(t, expectedOutputBatches[i]) + } else { + require.Equal(t, expectedOutputBatches[i], b) safeHead.Number += 1 safeHead.Time += 2 safeHead.Hash = mockHash(b.Timestamp, 2) @@ -276,9 +421,10 @@ func TestBatchQueueInvalidInternalAdvance(t *testing.T) { } -func TestBatchQueueMissing(t *testing.T) { +func BatchQueueMissing(t *testing.T, batchType int) { log := testlog.Logger(t, log.LvlCrit) l1 := L1Chain([]uint64{10, 15, 20, 25}) + chainId := big.NewInt(1234) safeHead := eth.L2BlockRef{ Hash: mockHash(10, 2), Number: 0, @@ -294,30 +440,49 @@ func TestBatchQueueMissing(t *testing.T) { BlockTime: 2, MaxSequencerDrift: 600, SeqWindowSize: 2, + SpanBatchTime: getSpanBatchTime(batchType), + L2ChainID: chainId, } - // The batches at 18 and 20 are skipped to stop 22 from being eagerly processed. + // The inputBatches at 18 and 20 are skipped to stop 22 from being eagerly processed. // This test checks that batch timestamp 12 & 14 are created, 16 is used, and 18 is advancing the epoch. - // Due to the large sequencer time drift 16 is perfectly valid to have epoch 0 as origin. 
- batches := []*BatchData{b(16, l1[0]), b(22, l1[1])} - errors := []error{nil, nil} + // Due to the large sequencer time drift 16 is perfectly valid to have epoch 0 as origin. + + // expected output of BatchQueue.NextBatch() + expectedOutputBatches := []*SingularBatch{ + b(cfg.L2ChainID, 16, l1[0]), + b(cfg.L2ChainID, 22, l1[1]), + } + // errors will be returned by fakeBatchQueueInput.NextBatch() + inputErrors := []error{nil, nil} + // batches will be returned by fakeBatchQueueInput + var inputBatches []Batch + if batchType == SpanBatchType { + spanBlockCounts := []int{1, 1} + inputErrors = []error{nil, nil, nil, io.EOF} + inputBatches = buildSpanBatches(t, &safeHead, expectedOutputBatches, spanBlockCounts, chainId) + } else { + for _, singularBatch := range expectedOutputBatches { + inputBatches = append(inputBatches, singularBatch) + } + } input := &fakeBatchQueueInput{ - batches: batches, - errors: errors, + batches: inputBatches, + errors: inputErrors, origin: l1[0], } - bq := NewBatchQueue(log, cfg, input) + bq := NewBatchQueue(log, cfg, input, nil) _ = bq.Reset(context.Background(), l1[0], eth.SystemConfig{}) - for i := 0; i < len(batches); i++ { + for i := 0; i < len(expectedOutputBatches); i++ { b, e := bq.NextBatch(context.Background(), safeHead) require.ErrorIs(t, e, NotEnoughData) require.Nil(t, b) } - // advance origin. Underlying stage still has no more batches + // advance origin. Underlying stage still has no more inputBatches // This is not enough to auto advance yet input.origin = l1[1] b, e := bq.NextBatch(context.Background(), safeHead) @@ -331,7 +496,7 @@ func TestBatchQueueMissing(t *testing.T) { b, e = bq.NextBatch(context.Background(), safeHead) require.Nil(t, e) require.Equal(t, b.Timestamp, uint64(12)) - require.Empty(t, b.SingularBatch.Transactions) + require.Empty(t, b.Transactions) require.Equal(t, rollup.Epoch(0), b.EpochNum) safeHead.Number += 1 safeHead.Time += 2 @@ -341,7 +506,7 @@ func TestBatchQueueMissing(t *testing.T) { b, e = bq.NextBatch(context.Background(), safeHead) require.Nil(t, e) require.Equal(t, b.Timestamp, uint64(14)) - require.Empty(t, b.SingularBatch.Transactions) + require.Empty(t, b.Transactions) require.Equal(t, rollup.Epoch(0), b.EpochNum) safeHead.Number += 1 safeHead.Time += 2 @@ -350,7 +515,7 @@ func TestBatchQueueMissing(t *testing.T) { // Check for the inputted batch at t = 16 b, e = bq.NextBatch(context.Background(), safeHead) require.Nil(t, e) - require.Equal(t, b, batches[0]) + require.Equal(t, b, expectedOutputBatches[0]) require.Equal(t, rollup.Epoch(0), b.EpochNum) safeHead.Number += 1 safeHead.Time += 2 @@ -367,6 +532,419 @@ func TestBatchQueueMissing(t *testing.T) { b, e = bq.NextBatch(context.Background(), safeHead) require.Nil(t, e) require.Equal(t, b.Timestamp, uint64(18)) - require.Empty(t, b.SingularBatch.Transactions) + require.Empty(t, b.Transactions) require.Equal(t, rollup.Epoch(1), b.EpochNum) } + +// BatchQueueAdvancedEpoch tests that batch queue derives consecutive valid batches with advancing epochs. +// Batch queue's l1blocks list should be updated along epochs.
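BatchQueueAdvancedEpoch, next, pins down the bookkeeping that maybeAdvanceEpoch (added in batch_queue.go above) performs while singular batches are popped from a cached span: whenever the popped batch's epoch number is exactly one ahead of l1Blocks[0], the queue drops that oldest retained origin, so l1Blocks[0] always matches the epoch of the batch being returned. A standalone sketch of the rule, with bare uint64 epoch numbers standing in for eth.L1BlockRef entries:

package example

// maybeAdvanceEpoch mirrors BatchQueue.maybeAdvanceEpoch: drop the oldest
// retained L1 origin once the next batch has moved one epoch past it.
func maybeAdvanceEpoch(l1Epochs []uint64, batchEpoch uint64) []uint64 {
	if len(l1Epochs) == 0 {
		return l1Epochs
	}
	if batchEpoch == l1Epochs[0]+1 {
		// the batch no longer builds on l1Epochs[0]; advance to the next origin
		return l1Epochs[1:]
	}
	return l1Epochs
}

For example, with retained origins {1, 2, 3}, a batch at epoch 2 shrinks the list to {2, 3}, while a batch still at epoch 1 leaves it untouched; this is exactly what the test's require.Equal(t, bq.l1Blocks[0].Number, uint64(b.EpochNum)) assertion checks.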
+func BatchQueueAdvancedEpoch(t *testing.T, batchType int) { + log := testlog.Logger(t, log.LvlCrit) + l1 := L1Chain([]uint64{0, 6, 12, 18, 24}) // L1 block time: 6s + chainId := big.NewInt(1234) + safeHead := eth.L2BlockRef{ + Hash: mockHash(4, 2), + Number: 0, + ParentHash: common.Hash{}, + Time: 4, + L1Origin: l1[0].ID(), + SequenceNumber: 0, + } + cfg := &rollup.Config{ + Genesis: rollup.Genesis{ + L2Time: 10, + }, + BlockTime: 2, + MaxSequencerDrift: 600, + SeqWindowSize: 30, + SpanBatchTime: getSpanBatchTime(batchType), + L2ChainID: chainId, + } + + // expected output of BatchQueue.NextBatch() + expectedOutputBatches := []*SingularBatch{ + // 3 L2 blocks per L1 block + b(cfg.L2ChainID, 6, l1[1]), + b(cfg.L2ChainID, 8, l1[1]), + b(cfg.L2ChainID, 10, l1[1]), + b(cfg.L2ChainID, 12, l1[2]), + b(cfg.L2ChainID, 14, l1[2]), + b(cfg.L2ChainID, 16, l1[2]), + b(cfg.L2ChainID, 18, l1[3]), + b(cfg.L2ChainID, 20, l1[3]), + b(cfg.L2ChainID, 22, l1[3]), + nil, + } + // expected error of BatchQueue.NextBatch() + expectedOutputErrors := []error{nil, nil, nil, nil, nil, nil, nil, nil, nil, io.EOF} + // errors will be returned by fakeBatchQueueInput.NextBatch() + inputErrors := expectedOutputErrors + // batches will be returned by fakeBatchQueueInput + var inputBatches []Batch + if batchType == SpanBatchType { + spanBlockCounts := []int{2, 2, 2, 3} + inputErrors = []error{nil, nil, nil, nil, io.EOF} + inputBatches = buildSpanBatches(t, &safeHead, expectedOutputBatches, spanBlockCounts, chainId) + inputBatches = append(inputBatches, nil) + } else { + for _, singularBatch := range expectedOutputBatches { + inputBatches = append(inputBatches, singularBatch) + } + } + + // ChannelInReader origin number + inputOriginNumber := 2 + input := &fakeBatchQueueInput{ + batches: inputBatches, + errors: inputErrors, + origin: l1[inputOriginNumber], + } + + bq := NewBatchQueue(log, cfg, input, nil) + _ = bq.Reset(context.Background(), l1[1], eth.SystemConfig{}) + + for i := 0; i < len(expectedOutputBatches); i++ { + expectedOutput := expectedOutputBatches[i] + if expectedOutput != nil && uint64(expectedOutput.EpochNum) == l1[inputOriginNumber].Number { + // Advance ChannelInReader origin if needed + inputOriginNumber += 1 + input.origin = l1[inputOriginNumber] + } + b, e := bq.NextBatch(context.Background(), safeHead) + require.ErrorIs(t, e, expectedOutputErrors[i]) + if b == nil { + require.Nil(t, expectedOutput) + } else { + require.Equal(t, expectedOutput, b) + require.Equal(t, bq.l1Blocks[0].Number, uint64(b.EpochNum)) + safeHead.Number += 1 + safeHead.Time += cfg.BlockTime + safeHead.Hash = mockHash(b.Timestamp, 2) + safeHead.L1Origin = b.Epoch() + } + } +} + +// BatchQueueShuffle tests batch queue can reorder shuffled valid batches +func BatchQueueShuffle(t *testing.T, batchType int) { + log := testlog.Logger(t, log.LvlCrit) + l1 := L1Chain([]uint64{0, 6, 12, 18, 24}) // L1 block time: 6s + chainId := big.NewInt(1234) + safeHead := eth.L2BlockRef{ + Hash: mockHash(4, 2), + Number: 0, + ParentHash: common.Hash{}, + Time: 4, + L1Origin: l1[0].ID(), + SequenceNumber: 0, + } + cfg := &rollup.Config{ + Genesis: rollup.Genesis{ + L2Time: 10, + }, + BlockTime: 2, + MaxSequencerDrift: 600, + SeqWindowSize: 30, + SpanBatchTime: getSpanBatchTime(batchType), + L2ChainID: chainId, + } + + // expected output of BatchQueue.NextBatch() + expectedOutputBatches := []*SingularBatch{ + // 3 L2 blocks per L1 block + b(cfg.L2ChainID, 6, l1[1]), + b(cfg.L2ChainID, 8, l1[1]), + b(cfg.L2ChainID, 10, l1[1]), + b(cfg.L2ChainID, 12, 
l1[2]), + b(cfg.L2ChainID, 14, l1[2]), + b(cfg.L2ChainID, 16, l1[2]), + b(cfg.L2ChainID, 18, l1[3]), + b(cfg.L2ChainID, 20, l1[3]), + b(cfg.L2ChainID, 22, l1[3]), + } + // expected error of BatchQueue.NextBatch() + expectedOutputErrors := []error{nil, nil, nil, nil, nil, nil, nil, nil, nil, io.EOF} + // errors will be returned by fakeBatchQueueInput.NextBatch() + inputErrors := expectedOutputErrors + // batches will be returned by fakeBatchQueueInput + var inputBatches []Batch + if batchType == SpanBatchType { + spanBlockCounts := []int{2, 2, 2, 3} + inputErrors = []error{nil, nil, nil, nil, io.EOF} + inputBatches = buildSpanBatches(t, &safeHead, expectedOutputBatches, spanBlockCounts, chainId) + } else { + for _, singularBatch := range expectedOutputBatches { + inputBatches = append(inputBatches, singularBatch) + } + } + + // Shuffle the order of input batches + rand.Shuffle(len(inputBatches), func(i, j int) { + inputBatches[i], inputBatches[j] = inputBatches[j], inputBatches[i] + }) + inputBatches = append(inputBatches, nil) + + // ChannelInReader origin number + inputOriginNumber := 2 + input := &fakeBatchQueueInput{ + batches: inputBatches, + errors: inputErrors, + origin: l1[inputOriginNumber], + } + + bq := NewBatchQueue(log, cfg, input, nil) + _ = bq.Reset(context.Background(), l1[1], eth.SystemConfig{}) + + for i := 0; i < len(expectedOutputBatches); i++ { + expectedOutput := expectedOutputBatches[i] + if expectedOutput != nil && uint64(expectedOutput.EpochNum) == l1[inputOriginNumber].Number { + // Advance ChannelInReader origin if needed + inputOriginNumber += 1 + input.origin = l1[inputOriginNumber] + } + var b *SingularBatch + var e error + for j := 0; j < len(expectedOutputBatches); j++ { + // Multiple NextBatch() executions may be required because the order of input is shuffled + b, e = bq.NextBatch(context.Background(), safeHead) + if !errors.Is(e, NotEnoughData) { + break + } + } + require.ErrorIs(t, e, expectedOutputErrors[i]) + if b == nil { + require.Nil(t, expectedOutput) + } else { + require.Equal(t, expectedOutput, b) + require.Equal(t, bq.l1Blocks[0].Number, uint64(b.EpochNum)) + safeHead.Number += 1 + safeHead.Time += cfg.BlockTime + safeHead.Hash = mockHash(b.Timestamp, 2) + safeHead.L1Origin = b.Epoch() + } + } +} + +func TestBatchQueueOverlappingSpanBatch(t *testing.T) { + log := testlog.Logger(t, log.LvlCrit) + l1 := L1Chain([]uint64{10, 20, 30}) + chainId := big.NewInt(1234) + safeHead := eth.L2BlockRef{ + Hash: mockHash(10, 2), + Number: 0, + ParentHash: common.Hash{}, + Time: 10, + L1Origin: l1[0].ID(), + SequenceNumber: 0, + } + cfg := &rollup.Config{ + Genesis: rollup.Genesis{ + L2Time: 10, + }, + BlockTime: 2, + MaxSequencerDrift: 600, + SeqWindowSize: 30, + SpanBatchTime: getSpanBatchTime(SpanBatchType), + L2ChainID: chainId, + } + + // expected output of BatchQueue.NextBatch() + expectedOutputBatches := []*SingularBatch{ + b(cfg.L2ChainID, 12, l1[0]), + b(cfg.L2ChainID, 14, l1[0]), + b(cfg.L2ChainID, 16, l1[0]), + b(cfg.L2ChainID, 18, l1[0]), + b(cfg.L2ChainID, 20, l1[0]), + b(cfg.L2ChainID, 22, l1[0]), + nil, + } + // expected error of BatchQueue.NextBatch() + expectedOutputErrors := []error{nil, nil, nil, nil, nil, nil, io.EOF} + // errors will be returned by fakeBatchQueueInput.NextBatch() + inputErrors := []error{nil, nil, nil, nil, io.EOF} + + // batches will be returned by fakeBatchQueueInput + var inputBatches []Batch + batchSize := 3 + for i := 0; i < len(expectedOutputBatches)-batchSize; i++ { + inputBatches = append(inputBatches, 
NewSpanBatch(expectedOutputBatches[i:i+batchSize])) + } + inputBatches = append(inputBatches, nil) + // inputBatches: + // [ + // [12, 14, 16], // No overlap + // [14, 16, 18], // overlapped blocks: 14, 16 + // [16, 18, 20], // overlapped blocks: 16, 18 + // [18, 20, 22], // overlapped blocks: 18, 20 + // ] + + input := &fakeBatchQueueInput{ + batches: inputBatches, + errors: inputErrors, + origin: l1[0], + } + + l2Client := testutils.MockL2Client{} + var nilErr error + for i, batch := range expectedOutputBatches { + if batch != nil { + blockRef := singularBatchToBlockRef(t, batch, uint64(i+1)) + payload := singularBatchToPayload(t, batch, uint64(i+1)) + if i < 3 { + // In CheckBatch(), "L2BlockRefByNumber" is called when fetching the parent block of overlapped span batch + // so blocks at 12, 14, 16 should be called. + // CheckBatch() is called twice for a batch - before pushing to the queue, after popping from the queue + l2Client.Mock.On("L2BlockRefByNumber", uint64(i+1)).Times(2).Return(blockRef, &nilErr) + } + if i == 1 || i == 4 { + // In CheckBatch(), "PayloadByNumber" is called when fetching the overlapped blocks. + // blocks at 14, 20 are included in overlapped blocks once. + // CheckBatch() is called twice for a batch - before adding to the queue, after getting from the queue + l2Client.Mock.On("PayloadByNumber", uint64(i+1)).Times(2).Return(&payload, &nilErr) + } else if i == 2 || i == 3 { + // blocks at 16, 18 are included in overlapped blocks twice. + l2Client.Mock.On("PayloadByNumber", uint64(i+1)).Times(4).Return(&payload, &nilErr) + } + } + } + + bq := NewBatchQueue(log, cfg, input, &l2Client) + _ = bq.Reset(context.Background(), l1[0], eth.SystemConfig{}) + // Advance the origin + input.origin = l1[1] + + for i := 0; i < len(expectedOutputBatches); i++ { + b, e := bq.NextBatch(context.Background(), safeHead) + require.ErrorIs(t, e, expectedOutputErrors[i]) + if b == nil { + require.Nil(t, expectedOutputBatches[i]) + } else { + require.Equal(t, expectedOutputBatches[i], b) + safeHead.Number += 1 + safeHead.Time += cfg.BlockTime + safeHead.Hash = mockHash(b.Timestamp, 2) + safeHead.L1Origin = b.Epoch() + } + } + + l2Client.Mock.AssertExpectations(t) +} + +func TestBatchQueueComplex(t *testing.T) { + log := testlog.Logger(t, log.LvlCrit) + l1 := L1Chain([]uint64{0, 6, 12, 18, 24}) // L1 block time: 6s + chainId := big.NewInt(1234) + safeHead := eth.L2BlockRef{ + Hash: mockHash(4, 2), + Number: 0, + ParentHash: common.Hash{}, + Time: 4, + L1Origin: l1[0].ID(), + SequenceNumber: 0, + } + cfg := &rollup.Config{ + Genesis: rollup.Genesis{ + L2Time: 10, + }, + BlockTime: 2, + MaxSequencerDrift: 600, + SeqWindowSize: 30, + SpanBatchTime: getSpanBatchTime(SpanBatchType), + L2ChainID: chainId, + } + + // expected output of BatchQueue.NextBatch() + expectedOutputBatches := []*SingularBatch{ + // 3 L2 blocks per L1 block + b(cfg.L2ChainID, 6, l1[1]), + b(cfg.L2ChainID, 8, l1[1]), + b(cfg.L2ChainID, 10, l1[1]), + b(cfg.L2ChainID, 12, l1[2]), + b(cfg.L2ChainID, 14, l1[2]), + b(cfg.L2ChainID, 16, l1[2]), + b(cfg.L2ChainID, 18, l1[3]), + b(cfg.L2ChainID, 20, l1[3]), + b(cfg.L2ChainID, 22, l1[3]), + } + // expected error of BatchQueue.NextBatch() + expectedOutputErrors := []error{nil, nil, nil, nil, nil, nil, nil, nil, nil, io.EOF} + // errors will be returned by fakeBatchQueueInput.NextBatch() + inputErrors := []error{nil, nil, nil, nil, nil, nil, io.EOF} + // batches will be returned by fakeBatchQueueInput + inputBatches := []Batch{ + NewSpanBatch(expectedOutputBatches[0:2]), // [6, 8] - 
no overlap + expectedOutputBatches[2], // [10] - no overlap + NewSpanBatch(expectedOutputBatches[1:4]), // [8, 10, 12] - overlapped blocks: 8 or 8, 10 + expectedOutputBatches[4], // [14] - no overlap + NewSpanBatch(expectedOutputBatches[4:6]), // [14, 16] - overlapped blocks: nothing or 14 + NewSpanBatch(expectedOutputBatches[6:9]), // [18, 20, 22] - no overlap + } + + // Shuffle the order of input batches + rand.Shuffle(len(inputBatches), func(i, j int) { + inputBatches[i], inputBatches[j] = inputBatches[j], inputBatches[i] + }) + + inputBatches = append(inputBatches, nil) + + // ChannelInReader origin number + inputOriginNumber := 2 + input := &fakeBatchQueueInput{ + batches: inputBatches, + errors: inputErrors, + origin: l1[inputOriginNumber], + } + + l2Client := testutils.MockL2Client{} + var nilErr error + for i, batch := range expectedOutputBatches { + if batch != nil { + blockRef := singularBatchToBlockRef(t, batch, uint64(i+1)) + payload := singularBatchToPayload(t, batch, uint64(i+1)) + if i == 0 || i == 3 { + // In CheckBatch(), "L2BlockRefByNumber" is called when fetching the parent block of overlapped span batch + // so blocks at 6, 8 could be called, depends on the order of batches + l2Client.Mock.On("L2BlockRefByNumber", uint64(i+1)).Return(blockRef, &nilErr).Maybe() + } + if i == 1 || i == 2 || i == 4 { + // In CheckBatch(), "PayloadByNumber" is called when fetching the overlapped blocks. + // so blocks at 14, 20 could be called, depends on the order of batches + l2Client.Mock.On("PayloadByNumber", uint64(i+1)).Return(&payload, &nilErr).Maybe() + } + } + } + + bq := NewBatchQueue(log, cfg, input, &l2Client) + _ = bq.Reset(context.Background(), l1[1], eth.SystemConfig{}) + + for i := 0; i < len(expectedOutputBatches); i++ { + expectedOutput := expectedOutputBatches[i] + if expectedOutput != nil && uint64(expectedOutput.EpochNum) == l1[inputOriginNumber].Number { + // Advance ChannelInReader origin if needed + inputOriginNumber += 1 + input.origin = l1[inputOriginNumber] + } + var b *SingularBatch + var e error + for j := 0; j < len(expectedOutputBatches); j++ { + // Multiple NextBatch() executions may be required because the order of input is shuffled + b, e = bq.NextBatch(context.Background(), safeHead) + if !errors.Is(e, NotEnoughData) { + break + } + } + require.ErrorIs(t, e, expectedOutputErrors[i]) + if b == nil { + require.Nil(t, expectedOutput) + } else { + require.Equal(t, expectedOutput, b) + require.Equal(t, bq.l1Blocks[0].Number, uint64(b.EpochNum)) + safeHead.Number += 1 + safeHead.Time += cfg.BlockTime + safeHead.Hash = mockHash(b.Timestamp, 2) + safeHead.L1Origin = b.Epoch() + } + } + + l2Client.Mock.AssertExpectations(t) +} diff --git a/op-node/rollup/derive/batches.go b/op-node/rollup/derive/batches.go index 7790e471d9dd..e618fcc7433a 100644 --- a/op-node/rollup/derive/batches.go +++ b/op-node/rollup/derive/batches.go @@ -1,6 +1,9 @@ package derive import ( + "bytes" + "context" + "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum/go-ethereum/core/types" @@ -9,7 +12,7 @@ import ( type BatchWithL1InclusionBlock struct { L1InclusionBlock eth.L1BlockRef - Batch *BatchData + Batch Batch } type BatchValidity uint8 @@ -28,14 +31,37 @@ const ( // CheckBatch checks if the given batch can be applied on top of the given l2SafeHead, given the contextual L1 blocks the batch was included in. // The first entry of the l1Blocks should match the origin of the l2SafeHead. 
One or more consecutive l1Blocks should be provided. // In case of only a single L1 block, the decision whether a batch is valid may have to stay undecided. -func CheckBatch(cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1BlockRef, l2SafeHead eth.L2BlockRef, batch *BatchWithL1InclusionBlock) BatchValidity { +func CheckBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1BlockRef, + l2SafeHead eth.L2BlockRef, batch *BatchWithL1InclusionBlock, l2Fetcher SafeBlockFetcher) BatchValidity { + switch batch.Batch.GetBatchType() { + case SingularBatchType: + singularBatch, ok := batch.Batch.(*SingularBatch) + if !ok { + log.Error("failed type assertion to SingularBatch") + return BatchDrop + } + return checkSingularBatch(cfg, log, l1Blocks, l2SafeHead, singularBatch, batch.L1InclusionBlock) + case SpanBatchType: + spanBatch, ok := batch.Batch.(*SpanBatch) + if !ok { + log.Error("failed type assertion to SpanBatch") + return BatchDrop + } + if !cfg.IsSpanBatch(batch.Batch.GetTimestamp()) { + log.Warn("received SpanBatch before SpanBatch hard fork") + return BatchDrop + } + return checkSpanBatch(ctx, cfg, log, l1Blocks, l2SafeHead, spanBatch, batch.L1InclusionBlock, l2Fetcher) + default: + log.Warn("unrecognized batch type", "batch_type", batch.Batch.GetBatchType()) + return BatchDrop + } +} + +// checkSingularBatch implements SingularBatch validation rule. +func checkSingularBatch(cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1BlockRef, l2SafeHead eth.L2BlockRef, batch *SingularBatch, l1InclusionBlock eth.L1BlockRef) BatchValidity { // add details to the log - log = log.New( - "batch_timestamp", batch.Batch.Timestamp, - "parent_hash", batch.Batch.ParentHash, - "batch_epoch", batch.Batch.Epoch(), - "txs", len(batch.Batch.Transactions), - ) + log = batch.LogContext(log) // sanity check we have consistent inputs if len(l1Blocks) == 0 { @@ -45,36 +71,36 @@ func CheckBatch(cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1BlockRef, l epoch := l1Blocks[0] nextTimestamp := l2SafeHead.Time + cfg.BlockTime - if batch.Batch.Timestamp > nextTimestamp { + if batch.Timestamp > nextTimestamp { log.Trace("received out-of-order batch for future processing after next batch", "next_timestamp", nextTimestamp) return BatchFuture } - if batch.Batch.Timestamp < nextTimestamp { + if batch.Timestamp < nextTimestamp { log.Warn("dropping batch with old timestamp", "min_timestamp", nextTimestamp) return BatchDrop } // dependent on above timestamp check. If the timestamp is correct, then it must build on top of the safe head. - if batch.Batch.ParentHash != l2SafeHead.Hash { + if batch.ParentHash != l2SafeHead.Hash { log.Warn("ignoring batch with mismatching parent hash", "current_safe_head", l2SafeHead.Hash) return BatchDrop } // Filter out batches that were included too late. - if uint64(batch.Batch.EpochNum)+cfg.SeqWindowSize < batch.L1InclusionBlock.Number { + if uint64(batch.EpochNum)+cfg.SeqWindowSize < l1InclusionBlock.Number { log.Warn("batch was included too late, sequence window expired") return BatchDrop } // Check the L1 origin of the batch batchOrigin := epoch - if uint64(batch.Batch.EpochNum) < epoch.Number { + if uint64(batch.EpochNum) < epoch.Number { log.Warn("dropped batch, epoch is too old", "minimum", epoch.ID()) // batch epoch too old return BatchDrop - } else if uint64(batch.Batch.EpochNum) == epoch.Number { + } else if uint64(batch.EpochNum) == epoch.Number { // Batch is sticking to the current epoch, continue.
- } else if uint64(batch.Batch.EpochNum) == epoch.Number+1 { + } else if uint64(batch.EpochNum) == epoch.Number+1 { // With only 1 l1Block we cannot look at the next L1 Origin. // Note: This means that we are unable to determine validity of a batch // without more information. In this case we should bail out until we have @@ -90,19 +116,19 @@ func CheckBatch(cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1BlockRef, l return BatchDrop } - if batch.Batch.EpochHash != batchOrigin.Hash { + if batch.EpochHash != batchOrigin.Hash { log.Warn("batch is for different L1 chain, epoch hash does not match", "expected", batchOrigin.ID()) return BatchDrop } - if batch.Batch.Timestamp < batchOrigin.Time { - log.Warn("batch timestamp is less than L1 origin timestamp", "l2_timestamp", batch.Batch.Timestamp, "l1_timestamp", batchOrigin.Time, "origin", batchOrigin.ID()) + if batch.Timestamp < batchOrigin.Time { + log.Warn("batch timestamp is less than L1 origin timestamp", "l2_timestamp", batch.Timestamp, "l1_timestamp", batchOrigin.Time, "origin", batchOrigin.ID()) return BatchDrop } // Check if we ran out of sequencer time drift - if max := batchOrigin.Time + cfg.MaxSequencerDrift; batch.Batch.Timestamp > max { - if len(batch.Batch.Transactions) == 0 { + if max := batchOrigin.Time + cfg.MaxSequencerDrift; batch.Timestamp > max { + if len(batch.Transactions) == 0 { // If the sequencer is co-operating by producing an empty batch, // then allow the batch if it was the right thing to do to maintain the L2 time >= L1 time invariant. // We only check batches that do not advance the epoch, to ensure epoch advancement regardless of time drift is allowed. @@ -112,7 +138,7 @@ func CheckBatch(cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1BlockRef, l return BatchUndecided } nextOrigin := l1Blocks[1] - if batch.Batch.Timestamp >= nextOrigin.Time { // check if the next L1 origin could have been adopted + if batch.Timestamp >= nextOrigin.Time { // check if the next L1 origin could have been adopted log.Info("batch exceeded sequencer time drift without adopting next origin, and next L1 origin would have been valid") return BatchDrop } else { @@ -128,7 +154,7 @@ func CheckBatch(cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1BlockRef, l } // We can do this check earlier, but it's a more intensive one, so we do this last. - for i, txBytes := range batch.Batch.Transactions { + for i, txBytes := range batch.Transactions { if len(txBytes) == 0 { log.Warn("transaction data must not be empty, but found empty tx", "tx_index", i) return BatchDrop @@ -141,3 +167,204 @@ func CheckBatch(cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1BlockRef, l return BatchAccept } + +// checkSpanBatch implements SpanBatch validation rule. 
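checkSpanBatch, defined next, first has to find the L2 block a (possibly overlapping) span batch attaches to: when the span starts at or before the safe head, the parent is computed backwards from the safe head as parentNum = l2SafeHead.Number - (l2SafeHead.Time - batch.GetTimestamp())/cfg.BlockTime - 1, after checking that the offset is block-time aligned. A small sketch of just that arithmetic (overlapParentNumber is a hypothetical helper; callers must ensure the span does not start before genesis):

package main

import "fmt"

// overlapParentNumber mirrors the parent lookup in checkSpanBatch: given a
// span batch whose first block is at or before the safe head, return the L2
// block number the span must attach to.
func overlapParentNumber(safeHeadNum, safeHeadTime, batchTime, blockTime uint64) (uint64, error) {
	if batchTime > safeHeadTime {
		return 0, fmt.Errorf("batch timestamp %d is past the safe head", batchTime)
	}
	if (safeHeadTime-batchTime)%blockTime != 0 {
		return 0, fmt.Errorf("misaligned timestamp")
	}
	return safeHeadNum - (safeHeadTime-batchTime)/blockTime - 1, nil
}

func main() {
	// Safe head: block 5 at t=20, block time 2. A span starting at t=16
	// replays blocks 3..5, so it must attach to block 2.
	num, err := overlapParentNumber(5, 20, 16, 2)
	fmt.Println(num, err) // 2 <nil>
}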
+func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1BlockRef, l2SafeHead eth.L2BlockRef, + batch *SpanBatch, l1InclusionBlock eth.L1BlockRef, l2Fetcher SafeBlockFetcher) BatchValidity { + // add details to the log + log = batch.LogContext(log) + + // sanity check we have consistent inputs + if len(l1Blocks) == 0 { + log.Warn("missing L1 block input, cannot proceed with batch checking") + return BatchUndecided + } + epoch := l1Blocks[0] + + nextTimestamp := l2SafeHead.Time + cfg.BlockTime + + if batch.GetTimestamp() > nextTimestamp { + log.Trace("received out-of-order batch for future processing after next batch", "next_timestamp", nextTimestamp) + return BatchFuture + } + if batch.GetBlockTimestamp(batch.GetBlockCount()-1) < nextTimestamp { + log.Warn("span batch has no new blocks after safe head") + return BatchDrop + } + + // finding parent block of the span batch. + // if the span batch does not overlap the current safe chain, parentBlock should be l2SafeHead. + parentNum := l2SafeHead.Number + parentBlock := l2SafeHead + if batch.GetTimestamp() < nextTimestamp { + if batch.GetTimestamp() > l2SafeHead.Time { + // batch timestamp cannot be between safe head and next timestamp + log.Warn("batch has misaligned timestamp") + return BatchDrop + } + if (l2SafeHead.Time-batch.GetTimestamp())%cfg.BlockTime != 0 { + log.Warn("batch has misaligned timestamp") + return BatchDrop + } + parentNum = l2SafeHead.Number - (l2SafeHead.Time-batch.GetTimestamp())/cfg.BlockTime - 1 + var err error + parentBlock, err = l2Fetcher.L2BlockRefByNumber(ctx, parentNum) + if err != nil { + log.Error("failed to fetch L2 block", "number", parentNum, "err", err) + // unable to validate the batch for now. retry later. + return BatchUndecided + } + } + if !batch.CheckParentHash(parentBlock.Hash) { + log.Warn("ignoring batch with mismatching parent hash", "parent_block", parentBlock.Hash) + return BatchDrop + } + + startEpochNum := uint64(batch.GetStartEpochNum()) + + // Filter out batches that were included too late.
+ if startEpochNum+cfg.SeqWindowSize < l1InclusionBlock.Number { + log.Warn("batch was included too late, sequence window expired") + return BatchDrop + } + + // Check the L1 origin of the batch + if startEpochNum > parentBlock.L1Origin.Number+1 { + log.Warn("batch is for future epoch too far ahead, while it has the next timestamp, so it must be invalid", "current_epoch", epoch.ID()) + return BatchDrop + } + + endEpochNum := batch.GetBlockEpochNum(batch.GetBlockCount() - 1) + originChecked := false + for _, l1Block := range l1Blocks { + if l1Block.Number == endEpochNum { + if !batch.CheckOriginHash(l1Block.Hash) { + log.Warn("batch is for different L1 chain, epoch hash does not match", "expected", l1Block.Hash) + return BatchDrop + } + originChecked = true + break + } + } + if !originChecked { + log.Info("need more l1 blocks to check entire origins of span batch") + return BatchUndecided + } + + if startEpochNum < parentBlock.L1Origin.Number { + log.Warn("dropped batch, epoch is too old", "minimum", parentBlock.ID()) + return BatchDrop + } + + originIdx := 0 + originAdvanced := false + if startEpochNum == parentBlock.L1Origin.Number+1 { + originAdvanced = true + } + + for i := 0; i < batch.GetBlockCount(); i++ { + if batch.GetBlockTimestamp(i) <= l2SafeHead.Time { + continue + } + var l1Origin eth.L1BlockRef + for j := originIdx; j < len(l1Blocks); j++ { + if batch.GetBlockEpochNum(i) == l1Blocks[j].Number { + l1Origin = l1Blocks[j] + originIdx = j + break + } + + } + if i > 0 { + originAdvanced = false + if batch.GetBlockEpochNum(i) > batch.GetBlockEpochNum(i-1) { + originAdvanced = true + } + } + blockTimestamp := batch.GetBlockTimestamp(i) + if blockTimestamp < l1Origin.Time { + log.Warn("block timestamp is less than L1 origin timestamp", "l2_timestamp", blockTimestamp, "l1_timestamp", l1Origin.Time, "origin", l1Origin.ID()) + return BatchDrop + } + + // Check if we ran out of sequencer time drift + if max := l1Origin.Time + cfg.MaxSequencerDrift; blockTimestamp > max { + if len(batch.GetBlockTransactions(i)) == 0 { + // If the sequencer is co-operating by producing an empty batch, + // then allow the batch if it was the right thing to do to maintain the L2 time >= L1 time invariant. + // We only check batches that do not advance the epoch, to ensure epoch advancement regardless of time drift is allowed. + if !originAdvanced { + if originIdx+1 >= len(l1Blocks) { + log.Info("without the next L1 origin we cannot determine yet if this empty batch that exceeds the time drift is still valid") + return BatchUndecided + } + if blockTimestamp >= l1Blocks[originIdx+1].Time { // check if the next L1 origin could have been adopted + log.Info("batch exceeded sequencer time drift without adopting next origin, and next L1 origin would have been valid") + return BatchDrop + } else { + log.Info("continuing with empty batch before late L1 block to preserve L2 time invariant") + } + } + } else { + // If the sequencer is ignoring the time drift rule, then drop the batch and force an empty batch instead, + // as the sequencer is not allowed to include anything past this point without moving to the next epoch. 
+ log.Warn("batch exceeded sequencer time drift, sequencer must adopt new L1 origin to include transactions again", "max_time", max) + return BatchDrop + } + } + + for i, txBytes := range batch.GetBlockTransactions(i) { + if len(txBytes) == 0 { + log.Warn("transaction data must not be empty, but found empty tx", "tx_index", i) + return BatchDrop + } + if txBytes[0] == types.DepositTxType { + log.Warn("sequencers may not embed any deposits into batch data, but found tx that has one", "tx_index", i) + return BatchDrop + } + } + } + + // Check overlapped blocks + if batch.GetTimestamp() < nextTimestamp { + for i := uint64(0); i < l2SafeHead.Number-parentNum; i++ { + safeBlockNum := parentNum + i + 1 + safeBlockPayload, err := l2Fetcher.PayloadByNumber(ctx, safeBlockNum) + if err != nil { + log.Error("failed to fetch L2 block payload", "number", parentNum, "err", err) + // unable to validate the batch for now. retry later. + return BatchUndecided + } + safeBlockTxs := safeBlockPayload.Transactions + batchTxs := batch.GetBlockTransactions(int(i)) + // execution payload has deposit TXs, but batch does not. + depositCount := 0 + for _, tx := range safeBlockTxs { + if tx[0] == types.DepositTxType { + depositCount++ + } + } + if len(safeBlockTxs)-depositCount != len(batchTxs) { + log.Warn("overlapped block's tx count does not match", "safeBlockTxs", len(safeBlockTxs), "batchTxs", len(batchTxs)) + return BatchDrop + } + for j := 0; j < len(batchTxs); j++ { + if !bytes.Equal(safeBlockTxs[j+depositCount], batchTxs[j]) { + log.Warn("overlapped block's transaction does not match") + return BatchDrop + } + } + safeBlockRef, err := PayloadToBlockRef(safeBlockPayload, &cfg.Genesis) + if err != nil { + log.Error("failed to extract L2BlockRef from execution payload", "hash", safeBlockPayload.BlockHash, "err", err) + return BatchDrop + } + if safeBlockRef.L1Origin.Number != batch.GetBlockEpochNum(int(i)) { + log.Warn("overlapped block's L1 origin number does not match") + return BatchDrop + } + } + } + + return BatchAccept +} diff --git a/op-node/rollup/derive/batches_test.go b/op-node/rollup/derive/batches_test.go index cabcb6e4e147..a3948fa1a27a 100644 --- a/op-node/rollup/derive/batches_test.go +++ b/op-node/rollup/derive/batches_test.go @@ -1,6 +1,9 @@ package derive import ( + "context" + "errors" + "math/big" "math/rand" "testing" @@ -24,10 +27,19 @@ type ValidBatchTestCase struct { Expected BatchValidity } +type SpanBatchHardForkTestCase struct { + Name string + L1Blocks []eth.L1BlockRef + L2SafeHead eth.L2BlockRef + Batch BatchWithL1InclusionBlock + Expected BatchValidity + SpanBatchTime uint64 +} + var HashA = common.Hash{0x0a} var HashB = common.Hash{0x0b} -func TestValidBatch(t *testing.T) { +func TestValidSingularBatch(t *testing.T) { conf := rollup.Config{ Genesis: rollup.Genesis{ L2Time: 31, // a genesis time that itself does not align to make it more interesting @@ -174,13 +186,13 @@ func TestValidBatch(t *testing.T) { L2SafeHead: l2A0, Batch: BatchWithL1InclusionBlock{ L1InclusionBlock: l1B, - Batch: NewSingularBatchData(SingularBatch{ + Batch: &SingularBatch{ ParentHash: l2A1.ParentHash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, Timestamp: l2A1.Time, Transactions: nil, - }), + }, }, Expected: BatchUndecided, }, @@ -190,13 +202,13 @@ func TestValidBatch(t *testing.T) { L2SafeHead: l2A0, Batch: BatchWithL1InclusionBlock{ L1InclusionBlock: l1B, - Batch: NewSingularBatchData(SingularBatch{ + Batch: &SingularBatch{ ParentHash: l2A1.ParentHash, EpochNum: 
diff --git a/op-node/rollup/derive/batches_test.go b/op-node/rollup/derive/batches_test.go
index cabcb6e4e147..a3948fa1a27a 100644
--- a/op-node/rollup/derive/batches_test.go
+++ b/op-node/rollup/derive/batches_test.go
@@ -1,6 +1,9 @@
 package derive
 
 import (
+	"context"
+	"errors"
+	"math/big"
 	"math/rand"
 	"testing"
 
@@ -24,10 +27,19 @@ type ValidBatchTestCase struct {
 	Expected BatchValidity
 }
 
+type SpanBatchHardForkTestCase struct {
+	Name          string
+	L1Blocks      []eth.L1BlockRef
+	L2SafeHead    eth.L2BlockRef
+	Batch         BatchWithL1InclusionBlock
+	Expected      BatchValidity
+	SpanBatchTime uint64
+}
+
 var HashA = common.Hash{0x0a}
 var HashB = common.Hash{0x0b}
 
-func TestValidBatch(t *testing.T) {
+func TestValidSingularBatch(t *testing.T) {
 	conf := rollup.Config{
 		Genesis: rollup.Genesis{
 			L2Time: 31, // a genesis time that itself does not align to make it more interesting
@@ -174,13 +186,13 @@ func TestValidBatch(t *testing.T) {
 			L2SafeHead: l2A0,
 			Batch: BatchWithL1InclusionBlock{
 				L1InclusionBlock: l1B,
-				Batch: NewSingularBatchData(SingularBatch{
+				Batch: &SingularBatch{
 					ParentHash:   l2A1.ParentHash,
 					EpochNum:     rollup.Epoch(l2A1.L1Origin.Number),
 					EpochHash:    l2A1.L1Origin.Hash,
 					Timestamp:    l2A1.Time,
 					Transactions: nil,
-				}),
+				},
 			},
 			Expected: BatchUndecided,
 		},
@@ -190,13 +202,13 @@ func TestValidBatch(t *testing.T) {
 			L2SafeHead: l2A0,
 			Batch: BatchWithL1InclusionBlock{
 				L1InclusionBlock: l1B,
-				Batch: NewSingularBatchData(SingularBatch{
+				Batch: &SingularBatch{
 					ParentHash:   l2A1.ParentHash,
 					EpochNum:     rollup.Epoch(l2A1.L1Origin.Number),
 					EpochHash:    l2A1.L1Origin.Hash,
 					Timestamp:    l2A1.Time + 1, // 1 too high
 					Transactions: nil,
-				}),
+				},
 			},
 			Expected: BatchFuture,
 		},
@@ -206,13 +218,13 @@ func TestValidBatch(t *testing.T) {
 			L2SafeHead: l2A0,
 			Batch: BatchWithL1InclusionBlock{
 				L1InclusionBlock: l1B,
-				Batch: NewSingularBatchData(SingularBatch{
+				Batch: &SingularBatch{
 					ParentHash:   l2A1.ParentHash,
 					EpochNum:     rollup.Epoch(l2A1.L1Origin.Number),
 					EpochHash:    l2A1.L1Origin.Hash,
 					Timestamp:    l2A0.Time, // repeating the same time
 					Transactions: nil,
-				}),
+				},
 			},
 			Expected: BatchDrop,
 		},
@@ -222,13 +234,13 @@ func TestValidBatch(t *testing.T) {
 			L2SafeHead: l2A0,
 			Batch: BatchWithL1InclusionBlock{
 				L1InclusionBlock: l1B,
-				Batch: NewSingularBatchData(SingularBatch{
+				Batch: &SingularBatch{
 					ParentHash:   l2A1.ParentHash,
 					EpochNum:     rollup.Epoch(l2A1.L1Origin.Number),
 					EpochHash:    l2A1.L1Origin.Hash,
 					Timestamp:    l2A1.Time - 1, // block time is 2, so this is 1 too low
 					Transactions: nil,
-				}),
+				},
 			},
 			Expected: BatchDrop,
 		},
@@ -238,13 +250,13 @@ func TestValidBatch(t *testing.T) {
 			L2SafeHead: l2A0,
 			Batch: BatchWithL1InclusionBlock{
 				L1InclusionBlock: l1B,
-				Batch: NewSingularBatchData(SingularBatch{
+				Batch: &SingularBatch{
 					ParentHash:   testutils.RandomHash(rng),
 					EpochNum:     rollup.Epoch(l2A1.L1Origin.Number),
 					EpochHash:    l2A1.L1Origin.Hash,
 					Timestamp:    l2A1.Time,
 					Transactions: nil,
-				}),
+				},
 			},
 			Expected: BatchDrop,
 		},
@@ -254,13 +266,13 @@ func TestValidBatch(t *testing.T) {
 			L2SafeHead: l2A0,
 			Batch: BatchWithL1InclusionBlock{
 				L1InclusionBlock: l1F, // included in 5th block after epoch of batch, while seq window is 4
-				Batch: NewSingularBatchData(SingularBatch{
+				Batch: &SingularBatch{
 					ParentHash:   l2A1.ParentHash,
 					EpochNum:     rollup.Epoch(l2A1.L1Origin.Number),
 					EpochHash:    l2A1.L1Origin.Hash,
 					Timestamp:    l2A1.Time,
 					Transactions: nil,
-				}),
+				},
 			},
 			Expected: BatchDrop,
 		},
@@ -270,13 +282,13 @@ func TestValidBatch(t *testing.T) {
 			L2SafeHead: l2B0, // we already moved on to B
 			Batch: BatchWithL1InclusionBlock{
 				L1InclusionBlock: l1C,
-				Batch: NewSingularBatchData(SingularBatch{
+				Batch: &SingularBatch{
 					ParentHash:   l2B0.Hash, // build on top of safe head to continue
 					EpochNum:     rollup.Epoch(l2A3.L1Origin.Number), // epoch A is no longer valid
 					EpochHash:    l2A3.L1Origin.Hash,
 					Timestamp:    l2B0.Time + conf.BlockTime, // pass the timestamp check to get to the epoch check
 					Transactions: nil,
-				}),
+				},
 			},
 			Expected: BatchDrop,
 		},
@@ -286,13 +298,13 @@ func TestValidBatch(t *testing.T) {
 			L2SafeHead: l2A3,
 			Batch: BatchWithL1InclusionBlock{
 				L1InclusionBlock: l1C,
-				Batch: NewSingularBatchData(SingularBatch{
+				Batch: &SingularBatch{
 					ParentHash:   l2B0.ParentHash,
 					EpochNum:     rollup.Epoch(l2B0.L1Origin.Number),
 					EpochHash:    l2B0.L1Origin.Hash,
 					Timestamp:    l2B0.Time,
 					Transactions: nil,
-				}),
+				},
 			},
 			Expected: BatchUndecided,
 		},
@@ -302,13 +314,13 @@ func TestValidBatch(t *testing.T) {
 			L2SafeHead: l2A3,
 			Batch: BatchWithL1InclusionBlock{
 				L1InclusionBlock: l1D,
-				Batch: NewSingularBatchData(SingularBatch{
+				Batch: &SingularBatch{
 					ParentHash:   l2B0.ParentHash,
 					EpochNum:     rollup.Epoch(l1C.Number), // invalid, we need to adopt epoch B before C
 					EpochHash:    l1C.Hash,
 					Timestamp:    l2B0.Time,
 					Transactions: nil,
-				}),
+				},
 			},
 			Expected: BatchDrop,
 		},
@@ -318,13 +330,13 @@ func TestValidBatch(t *testing.T) {
 			L2SafeHead: l2A3,
 			Batch: BatchWithL1InclusionBlock{
 				L1InclusionBlock: l1C,
-				Batch: NewSingularBatchData(SingularBatch{
+				Batch: &SingularBatch{
 					ParentHash:   l2B0.ParentHash,
 					EpochNum:     rollup.Epoch(l2B0.L1Origin.Number),
 					EpochHash:    l1A.Hash, // invalid, epoch hash should
be l1B Timestamp: l2B0.Time, Transactions: nil, - }), + }, }, Expected: BatchDrop, }, @@ -334,13 +346,13 @@ func TestValidBatch(t *testing.T) { L2SafeHead: l2A3, Batch: BatchWithL1InclusionBlock{ L1InclusionBlock: l1B, - Batch: NewSingularBatchData(SingularBatch{ // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0 + Batch: &SingularBatch{ // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0 ParentHash: l2A4.ParentHash, EpochNum: rollup.Epoch(l2A4.L1Origin.Number), EpochHash: l2A4.L1Origin.Hash, Timestamp: l2A4.Time, Transactions: []hexutil.Bytes{[]byte("sequencer should not include this tx")}, - }), + }, }, Expected: BatchDrop, }, @@ -350,13 +362,13 @@ func TestValidBatch(t *testing.T) { L2SafeHead: l2X0, Batch: BatchWithL1InclusionBlock{ L1InclusionBlock: l1Z, - Batch: NewSingularBatchData(SingularBatch{ + Batch: &SingularBatch{ ParentHash: l2Y0.ParentHash, EpochNum: rollup.Epoch(l2Y0.L1Origin.Number), EpochHash: l2Y0.L1Origin.Hash, Timestamp: l2Y0.Time, // valid, but more than 6 ahead of l1Y.Time Transactions: []hexutil.Bytes{[]byte("sequencer should not include this tx")}, - }), + }, }, Expected: BatchDrop, }, @@ -366,13 +378,13 @@ func TestValidBatch(t *testing.T) { L2SafeHead: l2A3, Batch: BatchWithL1InclusionBlock{ L1InclusionBlock: l1BLate, - Batch: NewSingularBatchData(SingularBatch{ // l2A4 time < l1BLate time, so we cannot adopt origin B yet + Batch: &SingularBatch{ // l2A4 time < l1BLate time, so we cannot adopt origin B yet ParentHash: l2A4.ParentHash, EpochNum: rollup.Epoch(l2A4.L1Origin.Number), EpochHash: l2A4.L1Origin.Hash, Timestamp: l2A4.Time, Transactions: nil, - }), + }, }, Expected: BatchAccept, // accepted because empty & preserving L2 time invariant }, @@ -382,13 +394,13 @@ func TestValidBatch(t *testing.T) { L2SafeHead: l2X0, Batch: BatchWithL1InclusionBlock{ L1InclusionBlock: l1Z, - Batch: NewSingularBatchData(SingularBatch{ + Batch: &SingularBatch{ ParentHash: l2Y0.ParentHash, EpochNum: rollup.Epoch(l2Y0.L1Origin.Number), EpochHash: l2Y0.L1Origin.Hash, Timestamp: l2Y0.Time, // valid, but more than 6 ahead of l1Y.Time Transactions: nil, - }), + }, }, Expected: BatchAccept, // accepted because empty & still advancing epoch }, @@ -398,13 +410,13 @@ func TestValidBatch(t *testing.T) { L2SafeHead: l2A3, Batch: BatchWithL1InclusionBlock{ L1InclusionBlock: l1B, - Batch: NewSingularBatchData(SingularBatch{ // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0 + Batch: &SingularBatch{ // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0 ParentHash: l2A4.ParentHash, EpochNum: rollup.Epoch(l2A4.L1Origin.Number), EpochHash: l2A4.L1Origin.Hash, Timestamp: l2A4.Time, Transactions: nil, - }), + }, }, Expected: BatchUndecided, // we have to wait till the next epoch is in sight to check the time }, @@ -414,13 +426,13 @@ func TestValidBatch(t *testing.T) { L2SafeHead: l2A3, Batch: BatchWithL1InclusionBlock{ L1InclusionBlock: l1C, - Batch: NewSingularBatchData(SingularBatch{ // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0 + Batch: &SingularBatch{ // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0 ParentHash: l2A4.ParentHash, EpochNum: rollup.Epoch(l2A4.L1Origin.Number), EpochHash: l2A4.L1Origin.Hash, Timestamp: l2A4.Time, Transactions: nil, - }), + }, }, Expected: BatchDrop, // dropped because it could have advanced the epoch to B }, @@ -430,7 +442,7 @@ func TestValidBatch(t *testing.T) { L2SafeHead: l2A0, Batch: BatchWithL1InclusionBlock{ L1InclusionBlock: l1B, - Batch: 
NewSingularBatchData(SingularBatch{ + Batch: &SingularBatch{ ParentHash: l2A1.ParentHash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, @@ -438,7 +450,7 @@ func TestValidBatch(t *testing.T) { Transactions: []hexutil.Bytes{ []byte{}, // empty tx data }, - }), + }, }, Expected: BatchDrop, }, @@ -448,7 +460,7 @@ func TestValidBatch(t *testing.T) { L2SafeHead: l2A0, Batch: BatchWithL1InclusionBlock{ L1InclusionBlock: l1B, - Batch: NewSingularBatchData(SingularBatch{ + Batch: &SingularBatch{ ParentHash: l2A1.ParentHash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, @@ -456,7 +468,7 @@ func TestValidBatch(t *testing.T) { Transactions: []hexutil.Bytes{ []byte{types.DepositTxType, 0}, // piece of data alike to a deposit }, - }), + }, }, Expected: BatchDrop, }, @@ -466,7 +478,7 @@ func TestValidBatch(t *testing.T) { L2SafeHead: l2A0, Batch: BatchWithL1InclusionBlock{ L1InclusionBlock: l1B, - Batch: NewSingularBatchData(SingularBatch{ + Batch: &SingularBatch{ ParentHash: l2A1.ParentHash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, @@ -475,7 +487,7 @@ func TestValidBatch(t *testing.T) { []byte{0x02, 0x42, 0x13, 0x37}, []byte{0x02, 0xde, 0xad, 0xbe, 0xef}, }, - }), + }, }, Expected: BatchAccept, }, @@ -485,7 +497,7 @@ func TestValidBatch(t *testing.T) { L2SafeHead: l2A3, Batch: BatchWithL1InclusionBlock{ L1InclusionBlock: l1C, - Batch: NewSingularBatchData(SingularBatch{ + Batch: &SingularBatch{ ParentHash: l2B0.ParentHash, EpochNum: rollup.Epoch(l2B0.L1Origin.Number), EpochHash: l2B0.L1Origin.Hash, @@ -494,7 +506,7 @@ func TestValidBatch(t *testing.T) { []byte{0x02, 0x42, 0x13, 0x37}, []byte{0x02, 0xde, 0xad, 0xbe, 0xef}, }, - }), + }, }, Expected: BatchAccept, }, @@ -504,13 +516,13 @@ func TestValidBatch(t *testing.T) { L2SafeHead: l2A2, Batch: BatchWithL1InclusionBlock{ L1InclusionBlock: l1B, - Batch: NewSingularBatchData(SingularBatch{ // we build l2B0', which starts a new epoch too early + Batch: &SingularBatch{ // we build l2B0', which starts a new epoch too early ParentHash: l2A2.Hash, EpochNum: rollup.Epoch(l2B0.L1Origin.Number), EpochHash: l2B0.L1Origin.Hash, Timestamp: l2A2.Time + conf.BlockTime, Transactions: nil, - }), + }, }, Expected: BatchDrop, }, @@ -521,7 +533,1026 @@ func TestValidBatch(t *testing.T) { for _, testCase := range testCases { t.Run(testCase.Name, func(t *testing.T) { - validity := CheckBatch(&conf, logger, testCase.L1Blocks, testCase.L2SafeHead, &testCase.Batch) + ctx := context.Background() + validity := CheckBatch(ctx, &conf, logger, testCase.L1Blocks, testCase.L2SafeHead, &testCase.Batch, nil) + require.Equal(t, testCase.Expected, validity, "batch check must return expected validity level") + }) + } +} + +func TestValidSpanBatch(t *testing.T) { + minTs := uint64(0) + conf := rollup.Config{ + Genesis: rollup.Genesis{ + L2Time: 31, // a genesis time that itself does not align to make it more interesting + }, + BlockTime: 2, + SeqWindowSize: 4, + MaxSequencerDrift: 6, + SpanBatchTime: &minTs, + // other config fields are ignored and can be left empty. 
+ } + + rng := rand.New(rand.NewSource(1234)) + chainId := new(big.Int).SetUint64(rng.Uint64()) + signer := types.NewLondonSigner(chainId) + randTx := testutils.RandomTx(rng, new(big.Int).SetUint64(rng.Uint64()), signer) + randTxData, _ := randTx.MarshalBinary() + l1A := testutils.RandomBlockRef(rng) + l1B := eth.L1BlockRef{ + Hash: testutils.RandomHash(rng), + Number: l1A.Number + 1, + ParentHash: l1A.Hash, + Time: l1A.Time + 7, + } + l1C := eth.L1BlockRef{ + Hash: testutils.RandomHash(rng), + Number: l1B.Number + 1, + ParentHash: l1B.Hash, + Time: l1B.Time + 7, + } + l1D := eth.L1BlockRef{ + Hash: testutils.RandomHash(rng), + Number: l1C.Number + 1, + ParentHash: l1C.Hash, + Time: l1C.Time + 7, + } + l1E := eth.L1BlockRef{ + Hash: testutils.RandomHash(rng), + Number: l1D.Number + 1, + ParentHash: l1D.Hash, + Time: l1D.Time + 7, + } + l1F := eth.L1BlockRef{ + Hash: testutils.RandomHash(rng), + Number: l1E.Number + 1, + ParentHash: l1E.Hash, + Time: l1E.Time + 7, + } + + l2A0 := eth.L2BlockRef{ + Hash: testutils.RandomHash(rng), + Number: 100, + ParentHash: testutils.RandomHash(rng), + Time: l1A.Time, + L1Origin: l1A.ID(), + SequenceNumber: 0, + } + + l2A1 := eth.L2BlockRef{ + Hash: testutils.RandomHash(rng), + Number: l2A0.Number + 1, + ParentHash: l2A0.Hash, + Time: l2A0.Time + conf.BlockTime, + L1Origin: l1A.ID(), + SequenceNumber: 1, + } + + l2A2 := eth.L2BlockRef{ + Hash: testutils.RandomHash(rng), + Number: l2A1.Number + 1, + ParentHash: l2A1.Hash, + Time: l2A1.Time + conf.BlockTime, + L1Origin: l1A.ID(), + SequenceNumber: 2, + } + + l2A3 := eth.L2BlockRef{ + Hash: testutils.RandomHash(rng), + Number: l2A2.Number + 1, + ParentHash: l2A2.Hash, + Time: l2A2.Time + conf.BlockTime, + L1Origin: l1A.ID(), + SequenceNumber: 3, + } + + l2B0 := eth.L2BlockRef{ + Hash: testutils.RandomHash(rng), + Number: l2A3.Number + 1, + ParentHash: l2A3.Hash, + Time: l2A3.Time + conf.BlockTime, // 8 seconds larger than l1A0, 1 larger than origin + L1Origin: l1B.ID(), + SequenceNumber: 0, + } + + l1X := eth.L1BlockRef{ + Hash: testutils.RandomHash(rng), + Number: 42, + ParentHash: testutils.RandomHash(rng), + Time: 10_000, + } + l1Y := eth.L1BlockRef{ + Hash: testutils.RandomHash(rng), + Number: l1X.Number + 1, + ParentHash: l1X.Hash, + Time: l1X.Time + 12, + } + l1Z := eth.L1BlockRef{ + Hash: testutils.RandomHash(rng), + Number: l1Y.Number + 1, + ParentHash: l1Y.Hash, + Time: l1Y.Time + 12, + } + l2X0 := eth.L2BlockRef{ + Hash: testutils.RandomHash(rng), + Number: 1000, + ParentHash: testutils.RandomHash(rng), + Time: 10_000 + 12 + 6 - 1, // add one block, and you get ahead of next l1 block by more than the drift + L1Origin: l1X.ID(), + SequenceNumber: 0, + } + l2Y0 := eth.L2BlockRef{ + Hash: testutils.RandomHash(rng), + Number: l2X0.Number + 1, + ParentHash: l2X0.Hash, + Time: l2X0.Time + conf.BlockTime, // exceeds sequencer time drift, forced to be empty block + L1Origin: l1Y.ID(), + SequenceNumber: 0, + } + + l2A4 := eth.L2BlockRef{ + Hash: testutils.RandomHash(rng), + Number: l2A3.Number + 1, + ParentHash: l2A3.Hash, + Time: l2A3.Time + conf.BlockTime, // 4*2 = 8, higher than seq time drift + L1Origin: l1A.ID(), + SequenceNumber: 4, + } + + l1BLate := eth.L1BlockRef{ + Hash: testutils.RandomHash(rng), + Number: l1A.Number + 1, + ParentHash: l1A.Hash, + Time: l2A4.Time + 1, // too late for l2A4 to adopt yet + } + + testCases := []ValidBatchTestCase{ + { + Name: "missing L1 info", + L1Blocks: []eth.L1BlockRef{}, + L2SafeHead: l2A0, + Batch: BatchWithL1InclusionBlock{ + L1InclusionBlock: l1B, + Batch: 
NewSpanBatch([]*SingularBatch{ + { + ParentHash: l2A1.ParentHash, + EpochNum: rollup.Epoch(l2A1.L1Origin.Number), + EpochHash: l2A1.L1Origin.Hash, + Timestamp: l2A1.Time, + Transactions: nil, + }, + }), + }, + Expected: BatchUndecided, + }, + { + Name: "future timestamp", + L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C}, + L2SafeHead: l2A0, + Batch: BatchWithL1InclusionBlock{ + L1InclusionBlock: l1B, + Batch: NewSpanBatch([]*SingularBatch{ + { + ParentHash: l2A1.ParentHash, + EpochNum: rollup.Epoch(l2A1.L1Origin.Number), + EpochHash: l2A1.L1Origin.Hash, + Timestamp: l2A1.Time + 1, // 1 too high + Transactions: nil, + }, + }), + }, + Expected: BatchFuture, + }, + { + Name: "old timestamp", + L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C}, + L2SafeHead: l2A0, + Batch: BatchWithL1InclusionBlock{ + L1InclusionBlock: l1B, + Batch: NewSpanBatch([]*SingularBatch{ + { + ParentHash: l2A1.ParentHash, + EpochNum: rollup.Epoch(l2A1.L1Origin.Number), + EpochHash: l2A1.L1Origin.Hash, + Timestamp: l2A0.Time, // repeating the same time + Transactions: nil, + }, + }), + }, + Expected: BatchDrop, + }, + { + Name: "misaligned timestamp", + L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C}, + L2SafeHead: l2A0, + Batch: BatchWithL1InclusionBlock{ + L1InclusionBlock: l1B, + Batch: NewSpanBatch([]*SingularBatch{ + { + ParentHash: l2A1.ParentHash, + EpochNum: rollup.Epoch(l2A1.L1Origin.Number), + EpochHash: l2A1.L1Origin.Hash, + Timestamp: l2A1.Time - 1, // block time is 2, so this is 1 too low + Transactions: nil, + }, + }), + }, + Expected: BatchDrop, + }, + { + Name: "invalid parent block hash", + L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C}, + L2SafeHead: l2A0, + Batch: BatchWithL1InclusionBlock{ + L1InclusionBlock: l1B, + Batch: NewSpanBatch([]*SingularBatch{ + { + ParentHash: testutils.RandomHash(rng), + EpochNum: rollup.Epoch(l2A1.L1Origin.Number), + EpochHash: l2A1.L1Origin.Hash, + Timestamp: l2A1.Time, + Transactions: nil, + }, + }), + }, + Expected: BatchDrop, + }, + { + Name: "sequence window expired", + L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C, l1D, l1E, l1F}, + L2SafeHead: l2A0, + Batch: BatchWithL1InclusionBlock{ + L1InclusionBlock: l1F, + Batch: NewSpanBatch([]*SingularBatch{ + { + ParentHash: l2A1.ParentHash, + EpochNum: rollup.Epoch(l2A1.L1Origin.Number), + EpochHash: l2A1.L1Origin.Hash, + Timestamp: l2A1.Time, + Transactions: nil, + }, + }), + }, + Expected: BatchDrop, + }, + { + Name: "epoch too old, but good parent hash and timestamp", // repeat of now outdated l2A3 data + L1Blocks: []eth.L1BlockRef{l1B, l1C, l1D}, + L2SafeHead: l2B0, // we already moved on to B + Batch: BatchWithL1InclusionBlock{ + L1InclusionBlock: l1C, + Batch: NewSpanBatch([]*SingularBatch{ + { + ParentHash: l2B0.Hash, // build on top of safe head to continue + EpochNum: rollup.Epoch(l2A3.L1Origin.Number), // epoch A is no longer valid + EpochHash: l2A3.L1Origin.Hash, + Timestamp: l2B0.Time + conf.BlockTime, // pass the timestamp check to get too epoch check + Transactions: nil, + }, + { + EpochNum: rollup.Epoch(l1B.Number), + EpochHash: l1B.Hash, // pass the l1 origin check + Timestamp: l2B0.Time + conf.BlockTime*2, + Transactions: nil, + }, + }), + }, + Expected: BatchDrop, + }, + { + Name: "insufficient L1 info for eager derivation", + L1Blocks: []eth.L1BlockRef{l1A}, // don't know about l1B yet + L2SafeHead: l2A3, + Batch: BatchWithL1InclusionBlock{ + L1InclusionBlock: l1C, + Batch: NewSpanBatch([]*SingularBatch{ + { + ParentHash: l2B0.ParentHash, + EpochNum: rollup.Epoch(l2B0.L1Origin.Number), + EpochHash: l2B0.L1Origin.Hash, + Timestamp: 
l2B0.Time, + Transactions: nil, + }, + }), + }, + Expected: BatchUndecided, + }, + { + Name: "epoch too new", + L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C, l1D}, + L2SafeHead: l2A3, + Batch: BatchWithL1InclusionBlock{ + L1InclusionBlock: l1D, + Batch: NewSpanBatch([]*SingularBatch{ + { + ParentHash: l2B0.ParentHash, + EpochNum: rollup.Epoch(l1C.Number), // invalid, we need to adopt epoch B before C + EpochHash: l1C.Hash, + Timestamp: l2B0.Time, + Transactions: nil, + }, + }), + }, + Expected: BatchDrop, + }, + { + Name: "epoch hash wrong", + L1Blocks: []eth.L1BlockRef{l1A, l1B}, + L2SafeHead: l2A3, + Batch: BatchWithL1InclusionBlock{ + L1InclusionBlock: l1C, + Batch: NewSpanBatch([]*SingularBatch{ + { + ParentHash: l2B0.ParentHash, + EpochNum: rollup.Epoch(l2B0.L1Origin.Number), + EpochHash: l1A.Hash, // invalid, epoch hash should be l1B + Timestamp: l2B0.Time, + Transactions: nil, + }, + }), + }, + Expected: BatchDrop, + }, + { + Name: "epoch hash wrong - long span", + L1Blocks: []eth.L1BlockRef{l1A, l1B}, + L2SafeHead: l2A2, + Batch: BatchWithL1InclusionBlock{ + L1InclusionBlock: l1C, + Batch: NewSpanBatch([]*SingularBatch{ + { // valid batch + ParentHash: l2A3.ParentHash, + EpochNum: rollup.Epoch(l2A3.L1Origin.Number), + EpochHash: l1A.Hash, + Timestamp: l2A3.Time, + Transactions: nil, + }, + { + ParentHash: l2B0.ParentHash, + EpochNum: rollup.Epoch(l2B0.L1Origin.Number), + EpochHash: l1A.Hash, // invalid, epoch hash should be l1B + Timestamp: l2B0.Time, + Transactions: nil, + }, + }), + }, + Expected: BatchDrop, + }, + { + Name: "sequencer time drift on same epoch with non-empty txs", + L1Blocks: []eth.L1BlockRef{l1A, l1B}, + L2SafeHead: l2A3, + Batch: BatchWithL1InclusionBlock{ + L1InclusionBlock: l1B, + Batch: NewSpanBatch([]*SingularBatch{ + { // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0 + ParentHash: l2A4.ParentHash, + EpochNum: rollup.Epoch(l2A4.L1Origin.Number), + EpochHash: l2A4.L1Origin.Hash, + Timestamp: l2A4.Time, + Transactions: []hexutil.Bytes{randTxData}, + }, + }), + }, + Expected: BatchDrop, + }, + { + Name: "sequencer time drift on same epoch with non-empty txs - long span", + L1Blocks: []eth.L1BlockRef{l1A, l1B}, + L2SafeHead: l2A2, + Batch: BatchWithL1InclusionBlock{ + L1InclusionBlock: l1B, + Batch: NewSpanBatch([]*SingularBatch{ + { // valid batch + ParentHash: l2A3.ParentHash, + EpochNum: rollup.Epoch(l2A3.L1Origin.Number), + EpochHash: l2A3.L1Origin.Hash, + Timestamp: l2A3.Time, + Transactions: []hexutil.Bytes{randTxData}, + }, + { // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0 + ParentHash: l2A4.ParentHash, + EpochNum: rollup.Epoch(l2A4.L1Origin.Number), + EpochHash: l2A4.L1Origin.Hash, + Timestamp: l2A4.Time, + Transactions: []hexutil.Bytes{randTxData}, + }, + }), + }, + Expected: BatchDrop, + }, + { + Name: "sequencer time drift on changing epoch with non-empty txs", + L1Blocks: []eth.L1BlockRef{l1X, l1Y, l1Z}, + L2SafeHead: l2X0, + Batch: BatchWithL1InclusionBlock{ + L1InclusionBlock: l1Z, + Batch: NewSpanBatch([]*SingularBatch{ + { + ParentHash: l2Y0.ParentHash, + EpochNum: rollup.Epoch(l2Y0.L1Origin.Number), + EpochHash: l2Y0.L1Origin.Hash, + Timestamp: l2Y0.Time, // valid, but more than 6 ahead of l1Y.Time + Transactions: []hexutil.Bytes{randTxData}, + }, + }), + }, + Expected: BatchDrop, + }, + { + Name: "sequencer time drift on same epoch with empty txs and late next epoch", + L1Blocks: []eth.L1BlockRef{l1A, l1BLate}, + L2SafeHead: l2A3, + Batch: BatchWithL1InclusionBlock{ + L1InclusionBlock: l1BLate, + Batch: 
NewSpanBatch([]*SingularBatch{ + { // l2A4 time < l1BLate time, so we cannot adopt origin B yet + ParentHash: l2A4.ParentHash, + EpochNum: rollup.Epoch(l2A4.L1Origin.Number), + EpochHash: l2A4.L1Origin.Hash, + Timestamp: l2A4.Time, + Transactions: nil, + }, + }), + }, + Expected: BatchAccept, // accepted because empty & preserving L2 time invariant + }, + { + Name: "sequencer time drift on changing epoch with empty txs", + L1Blocks: []eth.L1BlockRef{l1X, l1Y, l1Z}, + L2SafeHead: l2X0, + Batch: BatchWithL1InclusionBlock{ + L1InclusionBlock: l1Z, + Batch: NewSpanBatch([]*SingularBatch{ + { + ParentHash: l2Y0.ParentHash, + EpochNum: rollup.Epoch(l2Y0.L1Origin.Number), + EpochHash: l2Y0.L1Origin.Hash, + Timestamp: l2Y0.Time, // valid, but more than 6 ahead of l1Y.Time + Transactions: nil, + }, + }), + }, + Expected: BatchAccept, // accepted because empty & still advancing epoch + }, + { + Name: "sequencer time drift on same epoch with empty txs and no next epoch in sight yet", + L1Blocks: []eth.L1BlockRef{l1A}, + L2SafeHead: l2A3, + Batch: BatchWithL1InclusionBlock{ + L1InclusionBlock: l1B, + Batch: NewSpanBatch([]*SingularBatch{ + { // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0 + ParentHash: l2A4.ParentHash, + EpochNum: rollup.Epoch(l2A4.L1Origin.Number), + EpochHash: l2A4.L1Origin.Hash, + Timestamp: l2A4.Time, + Transactions: nil, + }, + }), + }, + Expected: BatchUndecided, // we have to wait till the next epoch is in sight to check the time + }, + { + Name: "sequencer time drift on same epoch with empty txs and no next epoch in sight yet - long span", + L1Blocks: []eth.L1BlockRef{l1A}, + L2SafeHead: l2A2, + Batch: BatchWithL1InclusionBlock{ + L1InclusionBlock: l1B, + Batch: NewSpanBatch([]*SingularBatch{ + { // valid batch + ParentHash: l2A3.ParentHash, + EpochNum: rollup.Epoch(l2A3.L1Origin.Number), + EpochHash: l2A3.L1Origin.Hash, + Timestamp: l2A3.Time, + Transactions: nil, + }, + { // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0 + ParentHash: l2A4.ParentHash, + EpochNum: rollup.Epoch(l2A4.L1Origin.Number), + EpochHash: l2A4.L1Origin.Hash, + Timestamp: l2A4.Time, + Transactions: nil, + }, + }), + }, + Expected: BatchUndecided, // we have to wait till the next epoch is in sight to check the time + }, + { + Name: "sequencer time drift on same epoch with empty txs and but in-sight epoch that invalidates it", + L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C}, + L2SafeHead: l2A3, + Batch: BatchWithL1InclusionBlock{ + L1InclusionBlock: l1C, + Batch: NewSpanBatch([]*SingularBatch{ + { // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0 + ParentHash: l2A4.ParentHash, + EpochNum: rollup.Epoch(l2A4.L1Origin.Number), + EpochHash: l2A4.L1Origin.Hash, + Timestamp: l2A4.Time, + Transactions: nil, + }, + }), + }, + Expected: BatchDrop, // dropped because it could have advanced the epoch to B + }, + { + Name: "sequencer time drift on same epoch with empty txs and but in-sight epoch that invalidates it - long span", + L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C}, + L2SafeHead: l2A2, + Batch: BatchWithL1InclusionBlock{ + L1InclusionBlock: l1C, + Batch: NewSpanBatch([]*SingularBatch{ + { // valid batch + ParentHash: l2A3.ParentHash, + EpochNum: rollup.Epoch(l2A3.L1Origin.Number), + EpochHash: l2A3.L1Origin.Hash, + Timestamp: l2A3.Time, + Transactions: nil, + }, + { // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0 + ParentHash: l2A4.ParentHash, + EpochNum: rollup.Epoch(l2A4.L1Origin.Number), + EpochHash: l2A4.L1Origin.Hash, + Timestamp: 
l2A4.Time, + Transactions: nil, + }, + }), + }, + Expected: BatchDrop, // dropped because it could have advanced the epoch to B + }, + { + Name: "empty tx included", + L1Blocks: []eth.L1BlockRef{l1A, l1B}, + L2SafeHead: l2A0, + Batch: BatchWithL1InclusionBlock{ + L1InclusionBlock: l1B, + Batch: NewSpanBatch([]*SingularBatch{ + { + ParentHash: l2A1.ParentHash, + EpochNum: rollup.Epoch(l2A1.L1Origin.Number), + EpochHash: l2A1.L1Origin.Hash, + Timestamp: l2A1.Time, + Transactions: []hexutil.Bytes{ + []byte{}, // empty tx data + }, + }, + }), + }, + Expected: BatchDrop, + }, + { + Name: "deposit tx included", + L1Blocks: []eth.L1BlockRef{l1A, l1B}, + L2SafeHead: l2A0, + Batch: BatchWithL1InclusionBlock{ + L1InclusionBlock: l1B, + Batch: NewSpanBatch([]*SingularBatch{ + { + ParentHash: l2A1.ParentHash, + EpochNum: rollup.Epoch(l2A1.L1Origin.Number), + EpochHash: l2A1.L1Origin.Hash, + Timestamp: l2A1.Time, + Transactions: []hexutil.Bytes{ + []byte{types.DepositTxType, 0}, // piece of data alike to a deposit + }, + }, + }), + }, + Expected: BatchDrop, + }, + { + Name: "valid batch same epoch", + L1Blocks: []eth.L1BlockRef{l1A, l1B}, + L2SafeHead: l2A0, + Batch: BatchWithL1InclusionBlock{ + L1InclusionBlock: l1B, + Batch: NewSpanBatch([]*SingularBatch{ + { + ParentHash: l2A1.ParentHash, + EpochNum: rollup.Epoch(l2A1.L1Origin.Number), + EpochHash: l2A1.L1Origin.Hash, + Timestamp: l2A1.Time, + Transactions: []hexutil.Bytes{randTxData}, + }, + }), + }, + Expected: BatchAccept, + }, + { + Name: "valid batch changing epoch", + L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C}, + L2SafeHead: l2A3, + Batch: BatchWithL1InclusionBlock{ + L1InclusionBlock: l1C, + Batch: NewSpanBatch([]*SingularBatch{ + { + ParentHash: l2B0.ParentHash, + EpochNum: rollup.Epoch(l2B0.L1Origin.Number), + EpochHash: l2B0.L1Origin.Hash, + Timestamp: l2B0.Time, + Transactions: []hexutil.Bytes{randTxData}, + }, + }), + }, + Expected: BatchAccept, + }, + { + Name: "batch with L2 time before L1 time", + L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C}, + L2SafeHead: l2A2, + Batch: BatchWithL1InclusionBlock{ + L1InclusionBlock: l1B, + Batch: NewSpanBatch([]*SingularBatch{ + { // we build l2B0, which starts a new epoch too early + ParentHash: l2A2.Hash, + EpochNum: rollup.Epoch(l2B0.L1Origin.Number), + EpochHash: l2B0.L1Origin.Hash, + Timestamp: l2A2.Time + conf.BlockTime, + Transactions: nil, + }, + }), + }, + Expected: BatchDrop, + }, + { + Name: "batch with L2 time before L1 time - long span", + L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C}, + L2SafeHead: l2A1, + Batch: BatchWithL1InclusionBlock{ + L1InclusionBlock: l1B, + Batch: NewSpanBatch([]*SingularBatch{ + { // valid batch + ParentHash: l2A1.Hash, + EpochNum: rollup.Epoch(l2A2.L1Origin.Number), + EpochHash: l2A2.L1Origin.Hash, + Timestamp: l2A2.Time, + Transactions: nil, + }, + { // we build l2B0, which starts a new epoch too early + ParentHash: l2A2.Hash, + EpochNum: rollup.Epoch(l2B0.L1Origin.Number), + EpochHash: l2B0.L1Origin.Hash, + Timestamp: l2A2.Time + conf.BlockTime, + Transactions: nil, + }, + }), + }, + Expected: BatchDrop, + }, + { + Name: "valid overlapping batch", + L1Blocks: []eth.L1BlockRef{l1A, l1B}, + L2SafeHead: l2A2, + Batch: BatchWithL1InclusionBlock{ + L1InclusionBlock: l1B, + Batch: NewSpanBatch([]*SingularBatch{ + { + ParentHash: l2A1.Hash, + EpochNum: rollup.Epoch(l2A2.L1Origin.Number), + EpochHash: l2A2.L1Origin.Hash, + Timestamp: l2A2.Time, + Transactions: nil, + }, + { + ParentHash: l2A2.Hash, + EpochNum: rollup.Epoch(l2A3.L1Origin.Number), + EpochHash: 
l2A3.L1Origin.Hash, + Timestamp: l2A3.Time, + Transactions: nil, + }, + }), + }, + Expected: BatchAccept, + }, + { + Name: "longer overlapping batch", + L1Blocks: []eth.L1BlockRef{l1A, l1B}, + L2SafeHead: l2A2, + Batch: BatchWithL1InclusionBlock{ + L1InclusionBlock: l1B, + Batch: NewSpanBatch([]*SingularBatch{ + { + ParentHash: l2A0.Hash, + EpochNum: rollup.Epoch(l2A1.L1Origin.Number), + EpochHash: l2A1.L1Origin.Hash, + Timestamp: l2A1.Time, + Transactions: nil, + }, + { + ParentHash: l2A1.Hash, + EpochNum: rollup.Epoch(l2A2.L1Origin.Number), + EpochHash: l2A2.L1Origin.Hash, + Timestamp: l2A2.Time, + Transactions: nil, + }, + { + ParentHash: l2A2.Hash, + EpochNum: rollup.Epoch(l2A3.L1Origin.Number), + EpochHash: l2A3.L1Origin.Hash, + Timestamp: l2A3.Time, + Transactions: nil, + }, + }), + }, + Expected: BatchAccept, + }, + { + Name: "fully overlapping batch", + L1Blocks: []eth.L1BlockRef{l1A, l1B}, + L2SafeHead: l2A2, + Batch: BatchWithL1InclusionBlock{ + L1InclusionBlock: l1B, + Batch: NewSpanBatch([]*SingularBatch{ + { + ParentHash: l2A0.Hash, + EpochNum: rollup.Epoch(l2A1.L1Origin.Number), + EpochHash: l2A1.L1Origin.Hash, + Timestamp: l2A1.Time, + Transactions: nil, + }, + { + ParentHash: l2A1.Hash, + EpochNum: rollup.Epoch(l2A2.L1Origin.Number), + EpochHash: l2A2.L1Origin.Hash, + Timestamp: l2A2.Time, + Transactions: nil, + }, + }), + }, + Expected: BatchDrop, + }, + { + Name: "overlapping batch with invalid parent hash", + L1Blocks: []eth.L1BlockRef{l1A, l1B}, + L2SafeHead: l2A2, + Batch: BatchWithL1InclusionBlock{ + L1InclusionBlock: l1B, + Batch: NewSpanBatch([]*SingularBatch{ + { + ParentHash: l2A0.Hash, + EpochNum: rollup.Epoch(l2A2.L1Origin.Number), + EpochHash: l2A2.L1Origin.Hash, + Timestamp: l2A2.Time, + Transactions: nil, + }, + { + ParentHash: l2A2.Hash, + EpochNum: rollup.Epoch(l2A3.L1Origin.Number), + EpochHash: l2A3.L1Origin.Hash, + Timestamp: l2A3.Time, + Transactions: nil, + }, + }), + }, + Expected: BatchDrop, + }, + { + Name: "overlapping batch with invalid origin number", + L1Blocks: []eth.L1BlockRef{l1A, l1B}, + L2SafeHead: l2A2, + Batch: BatchWithL1InclusionBlock{ + L1InclusionBlock: l1B, + Batch: NewSpanBatch([]*SingularBatch{ + { + ParentHash: l2A1.Hash, + EpochNum: rollup.Epoch(l2A2.L1Origin.Number) + 1, + EpochHash: l2A2.L1Origin.Hash, + Timestamp: l2A2.Time, + Transactions: nil, + }, + { + ParentHash: l2A2.Hash, + EpochNum: rollup.Epoch(l2A3.L1Origin.Number), + EpochHash: l2A3.L1Origin.Hash, + Timestamp: l2A3.Time, + Transactions: nil, + }, + }), + }, + Expected: BatchDrop, + }, + { + Name: "overlapping batch with invalid tx", + L1Blocks: []eth.L1BlockRef{l1A, l1B}, + L2SafeHead: l2A2, + Batch: BatchWithL1InclusionBlock{ + L1InclusionBlock: l1B, + Batch: NewSpanBatch([]*SingularBatch{ + { + ParentHash: l2A1.Hash, + EpochNum: rollup.Epoch(l2A2.L1Origin.Number), + EpochHash: l2A2.L1Origin.Hash, + Timestamp: l2A2.Time, + Transactions: []hexutil.Bytes{randTxData}, + }, + { + ParentHash: l2A2.Hash, + EpochNum: rollup.Epoch(l2A3.L1Origin.Number), + EpochHash: l2A3.L1Origin.Hash, + Timestamp: l2A3.Time, + Transactions: nil, + }, + }), + }, + Expected: BatchDrop, + }, + { + Name: "overlapping batch l2 fetcher error", + L1Blocks: []eth.L1BlockRef{l1A, l1B}, + L2SafeHead: l2A1, + Batch: BatchWithL1InclusionBlock{ + L1InclusionBlock: l1B, + Batch: NewSpanBatch([]*SingularBatch{ + { + ParentHash: l2A0.ParentHash, + EpochNum: rollup.Epoch(l2A0.L1Origin.Number), + EpochHash: l2A0.L1Origin.Hash, + Timestamp: l2A0.Time, + Transactions: nil, + }, + { + ParentHash: l2A0.Hash, + 
EpochNum:     rollup.Epoch(l2A1.L1Origin.Number),
+					EpochHash:    l2A1.L1Origin.Hash,
+					Timestamp:    l2A1.Time,
+					Transactions: nil,
+				},
+				{
+					ParentHash:   l2A1.Hash,
+					EpochNum:     rollup.Epoch(l2A2.L1Origin.Number),
+					EpochHash:    l2A2.L1Origin.Hash,
+					Timestamp:    l2A2.Time,
+					Transactions: nil,
+				},
+			}),
+		},
+		Expected: BatchUndecided,
+	},
+	}
+
+	// Log level can be increased for debugging purposes
+	logger := testlog.Logger(t, log.LvlError)
+
+	l2Client := testutils.MockL2Client{}
+	var nilErr error
+	// will return an error for block #99 (parent of l2A0)
+	tempErr := errors.New("temp error")
+	l2Client.Mock.On("L2BlockRefByNumber", l2A0.Number-1).Times(9999).Return(eth.L2BlockRef{}, &tempErr)
+	l2Client.Mock.On("PayloadByNumber", l2A0.Number-1).Times(9999).Return(nil, &tempErr)
+
+	// make payloads for the L2 blocks and set them as the expected return values of MockL2Client
+	for _, l2Block := range []eth.L2BlockRef{l2A0, l2A1, l2A2, l2A3, l2A4, l2B0} {
+		l2Client.ExpectL2BlockRefByNumber(l2Block.Number, l2Block, nil)
+		txData := l1InfoDepositTx(t, l2Block.L1Origin.Number)
+		payload := eth.ExecutionPayload{
+			ParentHash:   l2Block.ParentHash,
+			BlockNumber:  hexutil.Uint64(l2Block.Number),
+			Timestamp:    hexutil.Uint64(l2Block.Time),
+			BlockHash:    l2Block.Hash,
+			Transactions: []hexutil.Bytes{txData},
+		}
+		l2Client.Mock.On("L2BlockRefByNumber", l2Block.Number).Times(9999).Return(l2Block, &nilErr)
+		l2Client.Mock.On("PayloadByNumber", l2Block.Number).Times(9999).Return(&payload, &nilErr)
+	}
+
+	for _, testCase := range testCases {
+		t.Run(testCase.Name, func(t *testing.T) {
+			ctx := context.Background()
+			validity := CheckBatch(ctx, &conf, logger, testCase.L1Blocks, testCase.L2SafeHead, &testCase.Batch, &l2Client)
+			require.Equal(t, testCase.Expected, validity, "batch check must return expected validity level")
+		})
+	}
+}
+
+func TestSpanBatchHardFork(t *testing.T) {
+	minTs := uint64(0)
+	conf := rollup.Config{
+		Genesis: rollup.Genesis{
+			L2Time: 31, // a genesis time that itself does not align to make it more interesting
+		},
+		BlockTime:         2,
+		SeqWindowSize:     4,
+		MaxSequencerDrift: 6,
+		SpanBatchTime:     &minTs,
+		// other config fields are ignored and can be left empty.
+	}
+
+	rng := rand.New(rand.NewSource(1234))
+	chainId := new(big.Int).SetUint64(rng.Uint64())
+	signer := types.NewLondonSigner(chainId)
+	randTx := testutils.RandomTx(rng, new(big.Int).SetUint64(rng.Uint64()), signer)
+	randTxData, _ := randTx.MarshalBinary()
+	l1A := testutils.RandomBlockRef(rng)
+	l1B := eth.L1BlockRef{
+		Hash:       testutils.RandomHash(rng),
+		Number:     l1A.Number + 1,
+		ParentHash: l1A.Hash,
+		Time:       l1A.Time + 7,
+	}
+
+	l2A0 := eth.L2BlockRef{
+		Hash:           testutils.RandomHash(rng),
+		Number:         100,
+		ParentHash:     testutils.RandomHash(rng),
+		Time:           l1A.Time,
+		L1Origin:       l1A.ID(),
+		SequenceNumber: 0,
+	}
+
+	l2A1 := eth.L2BlockRef{
+		Hash:           testutils.RandomHash(rng),
+		Number:         l2A0.Number + 1,
+		ParentHash:     l2A0.Hash,
+		Time:           l2A0.Time + conf.BlockTime,
+		L1Origin:       l1A.ID(),
+		SequenceNumber: 1,
+	}
+
+	testCases := []SpanBatchHardForkTestCase{
+		{
+			Name:       "singular batch before hard fork",
+			L1Blocks:   []eth.L1BlockRef{l1A, l1B},
+			L2SafeHead: l2A0,
+			Batch: BatchWithL1InclusionBlock{
+				L1InclusionBlock: l1B,
+				Batch: &SingularBatch{
+					ParentHash:   l2A1.ParentHash,
+					EpochNum:     rollup.Epoch(l2A1.L1Origin.Number),
+					EpochHash:    l2A1.L1Origin.Hash,
+					Timestamp:    l2A1.Time,
+					Transactions: []hexutil.Bytes{randTxData},
+				},
+			},
+			SpanBatchTime: l2A1.Time + 2,
+			Expected:      BatchAccept,
+		},
+		{
+			Name:       "span batch before hard fork",
+			L1Blocks:   []eth.L1BlockRef{l1A, l1B},
+			L2SafeHead: l2A0,
+			Batch: BatchWithL1InclusionBlock{
+				L1InclusionBlock: l1B,
+				Batch: NewSpanBatch([]*SingularBatch{
+					{
+						ParentHash:   l2A1.ParentHash,
+						EpochNum:     rollup.Epoch(l2A1.L1Origin.Number),
+						EpochHash:    l2A1.L1Origin.Hash,
+						Timestamp:    l2A1.Time,
+						Transactions: []hexutil.Bytes{randTxData},
+					},
+				}),
+			},
+			SpanBatchTime: l2A1.Time + 2,
+			Expected:      BatchDrop,
+		},
+		{
+			Name:       "singular batch after hard fork",
+			L1Blocks:   []eth.L1BlockRef{l1A, l1B},
+			L2SafeHead: l2A0,
+			Batch: BatchWithL1InclusionBlock{
+				L1InclusionBlock: l1B,
+				Batch: &SingularBatch{
+					ParentHash:   l2A1.ParentHash,
+					EpochNum:     rollup.Epoch(l2A1.L1Origin.Number),
+					EpochHash:    l2A1.L1Origin.Hash,
+					Timestamp:    l2A1.Time,
+					Transactions: []hexutil.Bytes{randTxData},
+				},
+			},
+			SpanBatchTime: l2A1.Time - 2,
+			Expected:      BatchAccept,
+		},
+		{
+			Name:       "span batch after hard fork",
+			L1Blocks:   []eth.L1BlockRef{l1A, l1B},
+			L2SafeHead: l2A0,
+			Batch: BatchWithL1InclusionBlock{
+				L1InclusionBlock: l1B,
+				Batch: NewSpanBatch([]*SingularBatch{
+					{
+						ParentHash:   l2A1.ParentHash,
+						EpochNum:     rollup.Epoch(l2A1.L1Origin.Number),
+						EpochHash:    l2A1.L1Origin.Hash,
+						Timestamp:    l2A1.Time,
+						Transactions: []hexutil.Bytes{randTxData},
+					},
+				}),
+			},
+			SpanBatchTime: l2A1.Time - 2,
+			Expected:      BatchAccept,
+		},
+	}
+
+	// Log level can be increased for debugging purposes
+	logger := testlog.Logger(t, log.LvlInfo)
+
+	for _, testCase := range testCases {
+		t.Run(testCase.Name, func(t *testing.T) {
+			rcfg := conf
+			rcfg.SpanBatchTime = &testCase.SpanBatchTime
+			ctx := context.Background()
+			validity := CheckBatch(ctx, &rcfg, logger, testCase.L1Blocks, testCase.L2SafeHead, &testCase.Batch, nil)
 			require.Equal(t, testCase.Expected, validity, "batch check must return expected validity level")
 		})
 	}
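The four hard-fork cases above pin down the activation gate: before SpanBatchTime only singular batches are valid, and from SpanBatchTime onward both forms are accepted. The sketch below restates that rule, assuming cfg.IsSpanBatch is the usual optional-timestamp fork check (its body is not shown in this diff); illustrative only.

package derive

// isSpanBatchActive mirrors the behavior TestSpanBatchHardFork exercises:
// a span batch is only acceptable once the L1 origin timestamp has reached
// the configured (optional) SpanBatchTime.
func isSpanBatchActive(spanBatchTime *uint64, l1OriginTime uint64) bool {
	return spanBatchTime != nil && *spanBatchTime <= l1OriginTime
}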
"github.com/ethereum/go-ethereum/rlp" ) @@ -146,7 +144,9 @@ func (ch *Channel) Reader() io.Reader { // BatchReader provides a function that iteratively consumes batches from the reader. // The L1Inclusion block is also provided at creation time. -func BatchReader(cfg *rollup.Config, r io.Reader, l1InclusionBlock eth.L1BlockRef) (func() (BatchWithL1InclusionBlock, error), error) { +// Warning: the batch reader can read every batch-type. +// The caller of the batch-reader should filter the results. +func BatchReader(r io.Reader) (func() (*BatchData, error), error) { // Setup decompressor stage + RLP reader zr, err := zlib.NewReader(r) if err != nil { @@ -154,17 +154,11 @@ func BatchReader(cfg *rollup.Config, r io.Reader, l1InclusionBlock eth.L1BlockRe } rlpReader := rlp.NewStream(zr, MaxRLPBytesPerChannel) // Read each batch iteratively - return func() (BatchWithL1InclusionBlock, error) { - ret := BatchWithL1InclusionBlock{ - L1InclusionBlock: l1InclusionBlock, - } - err := rlpReader.Decode(&ret.Batch) - if err != nil { - return ret, err - } - if ret.Batch.BatchType == SpanBatchType && !cfg.IsSpanBatch(ret.L1InclusionBlock.Time) { - return ret, fmt.Errorf("cannot accept span-batch in L1 block with time %d", ret.L1InclusionBlock.Time) + return func() (*BatchData, error) { + var batchData BatchData + if err = rlpReader.Decode(&batchData); err != nil { + return nil, err } - return ret, nil + return &batchData, nil }, nil } diff --git a/op-node/rollup/derive/channel_in_reader.go b/op-node/rollup/derive/channel_in_reader.go index 7487326d99bf..1553bd9a6656 100644 --- a/op-node/rollup/derive/channel_in_reader.go +++ b/op-node/rollup/derive/channel_in_reader.go @@ -3,12 +3,12 @@ package derive import ( "bytes" "context" + "fmt" "io" - "github.com/ethereum-optimism/optimism/op-node/rollup" - "github.com/ethereum/go-ethereum/log" + "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-service/eth" ) @@ -21,7 +21,7 @@ type ChannelInReader struct { cfg *rollup.Config - nextBatchFn func() (BatchWithL1InclusionBlock, error) + nextBatchFn func() (*BatchData, error) prev *ChannelBank @@ -46,7 +46,7 @@ func (cr *ChannelInReader) Origin() eth.L1BlockRef { // TODO: Take full channel for better logging func (cr *ChannelInReader) WriteChannel(data []byte) error { - if f, err := BatchReader(cr.cfg, bytes.NewBuffer(data), cr.Origin()); err == nil { + if f, err := BatchReader(bytes.NewBuffer(data)); err == nil { cr.nextBatchFn = f cr.metrics.RecordChannelInputBytes(len(data)) return nil @@ -65,7 +65,7 @@ func (cr *ChannelInReader) NextChannel() { // NextBatch pulls out the next batch from the channel if it has it. // It returns io.EOF when it cannot make any more progress. // It will return a temporary error if it needs to be called again to advance some internal state. 
diff --git a/op-node/rollup/derive/channel_in_reader.go b/op-node/rollup/derive/channel_in_reader.go
index 7487326d99bf..1553bd9a6656 100644
--- a/op-node/rollup/derive/channel_in_reader.go
+++ b/op-node/rollup/derive/channel_in_reader.go
@@ -3,12 +3,12 @@ package derive
 import (
 	"bytes"
 	"context"
+	"fmt"
 	"io"
 
-	"github.com/ethereum-optimism/optimism/op-node/rollup"
-
 	"github.com/ethereum/go-ethereum/log"
 
+	"github.com/ethereum-optimism/optimism/op-node/rollup"
 	"github.com/ethereum-optimism/optimism/op-service/eth"
 )
@@ -21,7 +21,7 @@ type ChannelInReader struct {
 
 	cfg *rollup.Config
 
-	nextBatchFn func() (BatchWithL1InclusionBlock, error)
+	nextBatchFn func() (*BatchData, error)
 
 	prev *ChannelBank
 
@@ -46,7 +46,7 @@ func (cr *ChannelInReader) Origin() eth.L1BlockRef {
 // TODO: Take full channel for better logging
 func (cr *ChannelInReader) WriteChannel(data []byte) error {
-	if f, err := BatchReader(cr.cfg, bytes.NewBuffer(data), cr.Origin()); err == nil {
+	if f, err := BatchReader(bytes.NewBuffer(data)); err == nil {
 		cr.nextBatchFn = f
 		cr.metrics.RecordChannelInputBytes(len(data))
 		return nil
@@ -65,7 +65,7 @@ func (cr *ChannelInReader) NextChannel() {
 // NextBatch pulls out the next batch from the channel if it has it.
 // It returns io.EOF when it cannot make any more progress.
 // It will return a temporary error if it needs to be called again to advance some internal state.
-func (cr *ChannelInReader) NextBatch(ctx context.Context) (*BatchData, error) {
+func (cr *ChannelInReader) NextBatch(ctx context.Context) (Batch, error) {
 	if cr.nextBatchFn == nil {
 		if data, err := cr.prev.NextData(ctx); err == io.EOF {
 			return nil, io.EOF
@@ -80,7 +80,7 @@ func (cr *ChannelInReader) NextBatch(ctx context.Context) (*BatchData, error) {
 
 	// TODO: can batch be non nil while err == io.EOF
 	// This depends on the behavior of rlp.Stream
-	batch, err := cr.nextBatchFn()
+	batchData, err := cr.nextBatchFn()
 	if err == io.EOF {
 		cr.NextChannel()
 		return nil, NotEnoughData
@@ -89,7 +89,23 @@ func (cr *ChannelInReader) NextBatch(ctx context.Context) (*BatchData, error) {
 		cr.NextChannel()
 		return nil, NotEnoughData
 	}
-	return batch.Batch, nil
+	switch batchData.BatchType {
+	case SingularBatchType:
+		return &batchData.SingularBatch, nil
+	case SpanBatchType:
+		if origin := cr.Origin(); !cr.cfg.IsSpanBatch(origin.Time) {
+			return nil, NewTemporaryError(fmt.Errorf("cannot accept span batch in L1 block %s at time %d", origin, origin.Time))
+		}
+		// If the batch type is Span batch, derive block inputs from RawSpanBatch.
+		spanBatch, err := batchData.RawSpanBatch.derive(cr.cfg.BlockTime, cr.cfg.Genesis.L2Time, cr.cfg.L2ChainID)
+		if err != nil {
+			return nil, err
+		}
+		return spanBatch, nil
+	default:
+		// error is bubbled up to user, but pipeline can skip the batch and continue after.
+		return nil, NewTemporaryError(fmt.Errorf("unrecognized batch type: %d", batchData.BatchType))
+	}
 }
 
 func (cr *ChannelInReader) Reset(ctx context.Context, _ eth.L1BlockRef, _ eth.SystemConfig) error {
diff --git a/op-node/rollup/derive/engine_queue.go b/op-node/rollup/derive/engine_queue.go
index 775ab64a8959..b8cd3ae5917f 100644
--- a/op-node/rollup/derive/engine_queue.go
+++ b/op-node/rollup/derive/engine_queue.go
@@ -35,6 +35,7 @@ type Engine interface {
 	PayloadByNumber(context.Context, uint64) (*eth.ExecutionPayload, error)
 	L2BlockRefByLabel(ctx context.Context, label eth.BlockLabel) (eth.L2BlockRef, error)
 	L2BlockRefByHash(ctx context.Context, l2Hash common.Hash) (eth.L2BlockRef, error)
+	L2BlockRefByNumber(ctx context.Context, num uint64) (eth.L2BlockRef, error)
 	SystemConfigL2Fetcher
 }
diff --git a/op-node/rollup/derive/pipeline.go b/op-node/rollup/derive/pipeline.go
index 7b017515b763..d054939f151d 100644
--- a/op-node/rollup/derive/pipeline.go
+++ b/op-node/rollup/derive/pipeline.go
@@ -90,7 +90,7 @@ func NewDerivationPipeline(log log.Logger, cfg *rollup.Config, l1Fetcher L1Fetch
 	frameQueue := NewFrameQueue(log, l1Src)
 	bank := NewChannelBank(log, cfg, frameQueue, l1Fetcher, metrics)
 	chInReader := NewChannelInReader(cfg, log, bank, metrics)
-	batchQueue := NewBatchQueue(log, cfg, chInReader)
+	batchQueue := NewBatchQueue(log, cfg, chInReader, engine)
 	attrBuilder := NewFetchingAttributesBuilder(cfg, l1Fetcher, engine)
 	attributesQueue := NewAttributesQueue(log, cfg, attrBuilder, batchQueue)
diff --git a/op-node/rollup/derive/span_batch.go b/op-node/rollup/derive/span_batch.go
index 81baf158bfee..d68d542ff957 100644
--- a/op-node/rollup/derive/span_batch.go
+++ b/op-node/rollup/derive/span_batch.go
@@ -27,6 +27,8 @@ import (
 
 var ErrTooBigSpanBatchFieldSize = errors.New("batch would cause field bytes to go over limit")
 
+var ErrEmptySpanBatch = errors.New("span-batch must not be empty")
+
 type spanBatchPrefix struct {
 	relTimestamp uint64 // Relative timestamp of the first block
 	l1OriginNum  uint64 // L1 origin number
@@ -139,10 +141,13 @@ func (bp *spanBatchPrefix) decodePrefix(r *bytes.Reader) error {
 
 // decodeBlockCount
parses data into bp.blockCount func (bp *spanBatchPayload) decodeBlockCount(r *bytes.Reader) error { blockCount, err := binary.ReadUvarint(r) - bp.blockCount = blockCount if err != nil { return fmt.Errorf("failed to read block count: %w", err) } + bp.blockCount = blockCount + if blockCount == 0 { + return ErrEmptySpanBatch + } return nil } @@ -362,6 +367,9 @@ func (b *RawSpanBatch) encodeBytes() ([]byte, error) { // derive converts RawSpanBatch into SpanBatch, which has a list of spanBatchElement. // We need chain config constants to derive values for making payload attributes. func (b *RawSpanBatch) derive(blockTime, genesisTimestamp uint64, chainID *big.Int) (*SpanBatch, error) { + if b.blockCount == 0 { + return nil, ErrEmptySpanBatch + } blockOriginNums := make([]uint64, b.blockCount) l1OriginBlockNumber := b.l1OriginNum for i := int(b.blockCount) - 1; i >= 0; i-- { diff --git a/op-node/rollup/derive/span_batch_test.go b/op-node/rollup/derive/span_batch_test.go index 3348b358a1cf..d2a0ac3f8d54 100644 --- a/op-node/rollup/derive/span_batch_test.go +++ b/op-node/rollup/derive/span_batch_test.go @@ -6,12 +6,15 @@ import ( "math/rand" "testing" - "github.com/ethereum-optimism/optimism/op-node/rollup" - "github.com/ethereum-optimism/optimism/op-service/testutils" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/rlp" - "github.com/stretchr/testify/assert" + + "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-service/testutils" ) func TestSpanBatchForBatchInterface(t *testing.T) { @@ -33,6 +36,39 @@ func TestSpanBatchForBatchInterface(t *testing.T) { assert.True(t, spanBatch.CheckParentHash(singularBatches[0].ParentHash)) } +func TestEmptySpanBatch(t *testing.T) { + rng := rand.New(rand.NewSource(0x77556691)) + chainID := big.NewInt(rng.Int63n(1000)) + spanTxs, err := newSpanBatchTxs(nil, chainID) + require.NoError(t, err) + + rawSpanBatch := RawSpanBatch{ + spanBatchPrefix: spanBatchPrefix{ + relTimestamp: uint64(rng.Uint32()), + l1OriginNum: rng.Uint64(), + parentCheck: testutils.RandomData(rng, 20), + l1OriginCheck: testutils.RandomData(rng, 20), + }, + spanBatchPayload: spanBatchPayload{ + blockCount: 0, + originBits: big.NewInt(0), + blockTxCounts: []uint64{}, + txs: spanTxs, + }, + } + + var buf bytes.Buffer + err = rawSpanBatch.encodeBlockCount(&buf) + assert.NoError(t, err) + + result := buf.Bytes() + r := bytes.NewReader(result) + var sb RawSpanBatch + + err = sb.decodeBlockCount(r) + require.ErrorIs(t, err, ErrEmptySpanBatch) +} + func TestSpanBatchOriginBits(t *testing.T) { rng := rand.New(rand.NewSource(0x77665544)) chainID := big.NewInt(rng.Int63n(1000)) diff --git a/op-program/client/l2/engine.go b/op-program/client/l2/engine.go index 3ed4476b71ad..e6ffadebf470 100644 --- a/op-program/client/l2/engine.go +++ b/op-program/client/l2/engine.go @@ -109,6 +109,14 @@ func (o *OracleEngine) L2BlockRefByHash(ctx context.Context, l2Hash common.Hash) return derive.L2BlockToBlockRef(block, &o.rollupCfg.Genesis) } +func (o *OracleEngine) L2BlockRefByNumber(ctx context.Context, n uint64) (eth.L2BlockRef, error) { + hash := o.backend.GetCanonicalHash(n) + if hash == (common.Hash{}) { + return eth.L2BlockRef{}, ErrNotFound + } + return o.L2BlockRefByHash(ctx, hash) +} + func (o *OracleEngine) SystemConfigByL2Hash(ctx context.Context, hash common.Hash) (eth.SystemConfig, error) { payload, err := 
o.PayloadByHash(ctx, hash) if err != nil { diff --git a/op-service/testutils/mock_l2.go b/op-service/testutils/mock_l2.go index 18f5f4c82b1c..6b01b401a402 100644 --- a/op-service/testutils/mock_l2.go +++ b/op-service/testutils/mock_l2.go @@ -3,9 +3,8 @@ package testutils import ( "context" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum/go-ethereum/common" ) type MockL2Client struct { @@ -13,7 +12,8 @@ type MockL2Client struct { } func (c *MockL2Client) L2BlockRefByLabel(ctx context.Context, label eth.BlockLabel) (eth.L2BlockRef, error) { - return c.Mock.MethodCalled("L2BlockRefByLabel", label).Get(0).(eth.L2BlockRef), nil + out := c.Mock.MethodCalled("L2BlockRefByLabel", label) + return out[0].(eth.L2BlockRef), *out[1].(*error) } func (m *MockL2Client) ExpectL2BlockRefByLabel(label eth.BlockLabel, ref eth.L2BlockRef, err error) { @@ -21,7 +21,8 @@ func (m *MockL2Client) ExpectL2BlockRefByLabel(label eth.BlockLabel, ref eth.L2B } func (c *MockL2Client) L2BlockRefByNumber(ctx context.Context, num uint64) (eth.L2BlockRef, error) { - return c.Mock.MethodCalled("L2BlockRefByNumber", num).Get(0).(eth.L2BlockRef), nil + out := c.Mock.MethodCalled("L2BlockRefByNumber", num) + return out[0].(eth.L2BlockRef), *out[1].(*error) } func (m *MockL2Client) ExpectL2BlockRefByNumber(num uint64, ref eth.L2BlockRef, err error) { @@ -29,7 +30,8 @@ func (m *MockL2Client) ExpectL2BlockRefByNumber(num uint64, ref eth.L2BlockRef, } func (c *MockL2Client) L2BlockRefByHash(ctx context.Context, hash common.Hash) (eth.L2BlockRef, error) { - return c.Mock.MethodCalled("L2BlockRefByHash", hash).Get(0).(eth.L2BlockRef), nil + out := c.Mock.MethodCalled("L2BlockRefByHash", hash) + return out[0].(eth.L2BlockRef), *out[1].(*error) } func (m *MockL2Client) ExpectL2BlockRefByHash(hash common.Hash, ref eth.L2BlockRef, err error) { @@ -37,7 +39,8 @@ func (m *MockL2Client) ExpectL2BlockRefByHash(hash common.Hash, ref eth.L2BlockR } func (m *MockL2Client) SystemConfigByL2Hash(ctx context.Context, hash common.Hash) (eth.SystemConfig, error) { - return m.Mock.MethodCalled("SystemConfigByL2Hash", hash).Get(0).(eth.SystemConfig), nil + out := m.Mock.MethodCalled("SystemConfigByL2Hash", hash) + return out[0].(eth.SystemConfig), *out[1].(*error) } func (m *MockL2Client) ExpectSystemConfigByL2Hash(hash common.Hash, cfg eth.SystemConfig, err error) { @@ -45,7 +48,8 @@ func (m *MockL2Client) ExpectSystemConfigByL2Hash(hash common.Hash, cfg eth.Syst } func (m *MockL2Client) OutputV0AtBlock(ctx context.Context, blockHash common.Hash) (*eth.OutputV0, error) { - return m.Mock.MethodCalled("OutputV0AtBlock", blockHash).Get(0).(*eth.OutputV0), nil + out := m.Mock.MethodCalled("OutputV0AtBlock", blockHash) + return out[0].(*eth.OutputV0), *out[1].(*error) } func (m *MockL2Client) ExpectOutputV0AtBlock(blockHash common.Hash, output *eth.OutputV0, err error) {
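Since ChannelInReader.NextBatch now returns the Batch interface rather than *BatchData, downstream code distinguishes batch kinds with a type switch. An illustrative, hypothetical helper (it assumes only the types and methods visible in this diff):

package derive

import "fmt"

// describeBatch branches on the concrete type behind the Batch interface.
func describeBatch(b Batch) string {
	switch b := b.(type) {
	case *SingularBatch:
		return fmt.Sprintf("singular batch with timestamp %d", b.Timestamp)
	case *SpanBatch:
		return fmt.Sprintf("span batch spanning %d blocks", b.GetBlockCount())
	default:
		return "unknown batch type"
	}
}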
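With the mocks now dereferencing their error argument instead of hard-coding nil, tests can drive failure paths such as the "overlapping batch l2 fetcher error" case above. A minimal usage sketch (the test name is invented; it assumes Expect* wires Mock.On(...).Return(ref, &err) as the signatures above suggest):

package testutils_test

import (
	"context"
	"errors"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/ethereum-optimism/optimism/op-service/eth"
	"github.com/ethereum-optimism/optimism/op-service/testutils"
)

// TestMockL2ClientError checks that a mocked lookup can be made to fail.
func TestMockL2ClientError(t *testing.T) {
	m := &testutils.MockL2Client{}
	m.ExpectL2BlockRefByNumber(99, eth.L2BlockRef{}, errors.New("not found"))

	_, err := m.L2BlockRefByNumber(context.Background(), 99)
	require.Error(t, err)
}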