diff --git a/config/config_test.go b/config/config_test.go
index cf918c4afe..f93735eeb9 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -101,6 +101,10 @@ func Test_Defaults(t *testing.T) {
			path:          "Sequencer.Finalizer.ResourceExhaustedMarginPct",
			expectedValue: uint32(10),
		},
+		{
+			path:          "Sequencer.Finalizer.StateRootSyncInterval",
+			expectedValue: types.NewDuration(3600 * time.Second),
+		},
		{
			path:          "Sequencer.Finalizer.ForcedBatchesL1BlockConfirmations",
			expectedValue: uint64(64),
@@ -127,7 +131,7 @@ func Test_Defaults(t *testing.T) {
		},
		{
			path:          "Sequencer.Finalizer.BatchMaxDeltaTimestamp",
-			expectedValue: types.NewDuration(10 * time.Second),
+			expectedValue: types.NewDuration(1800 * time.Second),
		},
		{
			path:          "Sequencer.Finalizer.Metrics.Interval",
diff --git a/config/default.go b/config/default.go
index 061a04982f..f55feed513 100644
--- a/config/default.go
+++ b/config/default.go
@@ -146,12 +146,13 @@ StateConsistencyCheckInterval = "5s"
	ForcedBatchesCheckInterval = "10s"
	L1InfoTreeL1BlockConfirmations = 64
	L1InfoTreeCheckInterval = "10s"
-	BatchMaxDeltaTimestamp = "10s"
+	BatchMaxDeltaTimestamp = "1800s"
	L2BlockMaxDeltaTimestamp = "3s"
	ResourceExhaustedMarginPct = 10
+	StateRootSyncInterval = "3600s"
	HaltOnBatchNumber = 0
	SequentialBatchSanityCheck = false
-	SequentialProcessL2Block = true
+	SequentialProcessL2Block = false
	[Sequencer.Finalizer.Metrics]
		Interval = "60m"
		EnableLog = true
diff --git a/config/environments/local/local.node.config.toml b/config/environments/local/local.node.config.toml
index 7fdad2a456..436a0d84fa 100644
--- a/config/environments/local/local.node.config.toml
+++ b/config/environments/local/local.node.config.toml
@@ -101,9 +101,10 @@ StateConsistencyCheckInterval = "5s"
	BatchMaxDeltaTimestamp = "120s"
	L2BlockMaxDeltaTimestamp = "3s"
	ResourceExhaustedMarginPct = 10
+	StateRootSyncInterval = "360s"
	HaltOnBatchNumber = 0
	SequentialBatchSanityCheck = false
-	SequentialProcessL2Block = true
+	SequentialProcessL2Block = false
	[Sequencer.Finalizer.Metrics]
		Interval = "60m"
		EnableLog = true
diff --git a/db/migrations/state/0021.sql b/db/migrations/state/0021.sql
new file mode 100644
index 0000000000..846cda1fab
--- /dev/null
+++ b/db/migrations/state/0021.sql
@@ -0,0 +1,7 @@
+-- +migrate Up
+ALTER TABLE state.batch
+    ADD COLUMN high_reserved_counters JSONB;
+
+-- +migrate Down
+ALTER TABLE state.batch
+    DROP COLUMN high_reserved_counters;
diff --git a/db/migrations/state/0021_test.go b/db/migrations/state/0021_test.go
new file mode 100644
index 0000000000..512ba55191
--- /dev/null
+++ b/db/migrations/state/0021_test.go
@@ -0,0 +1,64 @@
+package migrations_test
+
+import (
+	"database/sql"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+type migrationTest0021 struct{}
+
+func (m migrationTest0021) InsertData(db *sql.DB) error {
+	const insertBatch0 = `
+		INSERT INTO state.batch (batch_num, global_exit_root, local_exit_root, acc_input_hash, state_root, timestamp, coinbase, raw_txs_data, forced_batch_num, wip)
+		VALUES (0,'0x0000', '0x0000', '0x0000', '0x0000', now(), '0x0000', null, null, true)`
+
+	// insert batch
+	_, err := db.Exec(insertBatch0)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m migrationTest0021) RunAssertsAfterMigrationUp(t *testing.T, db *sql.DB) {
+	var result int
+
+	// Check column high_reserved_counters exists in state.batch table
+	const getColumn = `SELECT count(*) FROM information_schema.columns WHERE table_name='batch' and column_name='high_reserved_counters'`
+	row := db.QueryRow(getColumn)
+	assert.NoError(t, row.Scan(&result))
+	assert.Equal(t, 1, result)
+
+	const insertBatch0 = `
+		INSERT INTO state.batch (batch_num, global_exit_root, local_exit_root, acc_input_hash, state_root, timestamp, coinbase, raw_txs_data, forced_batch_num, wip, high_reserved_counters)
+		VALUES (1,'0x0001', '0x0001', '0x0001', '0x0001', now(), '0x0001', null, null, true, '{"Steps": 1890125}')`
+
+	// insert batch 1
+	_, err := db.Exec(insertBatch0)
+	assert.NoError(t, err)
+
+	const insertBatch1 = `
+		INSERT INTO state.batch (batch_num, global_exit_root, local_exit_root, acc_input_hash, state_root, timestamp, coinbase, raw_txs_data, forced_batch_num, wip, high_reserved_counters)
+		VALUES (2,'0x0002', '0x0002', '0x0002', '0x0002', now(), '0x0002', null, null, false, '{"Steps": 1890125}')`
+
+	// insert batch 2
+	_, err = db.Exec(insertBatch1)
+	assert.NoError(t, err)
+}
+
+func (m migrationTest0021) RunAssertsAfterMigrationDown(t *testing.T, db *sql.DB) {
+	var result int
+
+	// Check column high_reserved_counters doesn't exists in state.batch table
+	const getCheckedColumn = `SELECT count(*) FROM information_schema.columns WHERE table_name='batch' and column_name='high_reserved_counters'`
+	row := db.QueryRow(getCheckedColumn)
+	assert.NoError(t, row.Scan(&result))
+	assert.Equal(t, 0, result)
+}
+
+func TestMigration0021(t *testing.T) {
+	runMigrationTest(t, 21, migrationTest0021{})
+}
diff --git a/docs/config-file/node-config-doc.html b/docs/config-file/node-config-doc.html
index 16cfba8d7d..9a82ded83c 100644
--- a/docs/config-file/node-config-doc.html
+++ b/docs/config-file/node-config-doc.html
"300ms"
ResourceExhaustedMarginPct is the percentage window of the resource left out for the batch to be closed
ForcedBatchesL1BlockConfirmations is number of blocks to consider GER final
L1InfoTreeL1BlockConfirmations is number of blocks to consider L1InfoRoot final
ForcedBatchesCheckInterval is used by the closing signals manager to wait for its operation
"1m"
"300ms"
-
L1InfoTreeCheckInterval is the wait time to check if the L1InfoRoot has been updated
"1m"
+
L1InfoTreeCheckInterval is the time interval to check if the L1InfoRoot has been updated
"1m"
"300ms"
-
BatchMaxDeltaTimestamp is the resolution of the timestamp used to close a batch
"1m"
+
BatchMaxDeltaTimestamp is the resolution of the timestamp used to close a batch
"1m"
"300ms"
L2BlockMaxDeltaTimestamp is the resolution of the timestamp used to close a L2 block
"1m"
"300ms"
-
HaltOnBatchNumber specifies the batch number where the Sequencer will stop to process more transactions and generate new batches.
The Sequencer will halt after it closes the batch equal to this number
SequentialBatchSanityCheck indicates if the reprocess of a closed batch (sanity check) must be done in a
sequential way (instead than in parallel)
SequentialProcessL2Block indicates if the processing of a L2 Block must be done in the same finalizer go func instead
in the processPendingL2Blocks go func
Interval is the interval of time to calculate sequencer metrics
"1m"
+
StateRootSyncInterval indicates how often the stateroot generated by the L2 block process will be synchronized with
the stateroot used in the tx-by-tx execution
"1m"
+
"300ms"
+
HaltOnBatchNumber specifies the batch number where the Sequencer will stop to process more transactions and generate new batches.
The Sequencer will halt after it closes the batch equal to this number
SequentialBatchSanityCheck indicates if the reprocess of a closed batch (sanity check) must be done in a
sequential way (instead than in parallel)
SequentialProcessL2Block indicates if the processing of a L2 Block must be done in the same finalizer go func instead
in the processPendingL2Blocks go func
Interval is the interval of time to calculate sequencer metrics
"1m"
"300ms"
EnableLog is a flag to enable/disable metrics logs
Port to listen on
Filename of the binary data file
Version of the binary data file
ChainID is the chain ID
Enabled is a flag to enable/disable the data streamer
UpgradeEtrogBatchNumber is the batch number of the upgrade etrog
WaitPeriodSendSequence is the time the sequencer waits until
trying to send a sequence to L1
"1m"
"300ms"
diff --git a/docs/config-file/node-config-doc.md b/docs/config-file/node-config-doc.md
index 144dfb3324..ea312a6929 100644
--- a/docs/config-file/node-config-doc.md
+++ b/docs/config-file/node-config-doc.md
@@ -2083,6 +2083,7 @@ StateConsistencyCheckInterval="5s"
| - [L1InfoTreeCheckInterval](#Sequencer_Finalizer_L1InfoTreeCheckInterval ) | No | string | No | - | Duration |
| - [BatchMaxDeltaTimestamp](#Sequencer_Finalizer_BatchMaxDeltaTimestamp ) | No | string | No | - | Duration |
| - [L2BlockMaxDeltaTimestamp](#Sequencer_Finalizer_L2BlockMaxDeltaTimestamp ) | No | string | No | - | Duration |
+| - [StateRootSyncInterval](#Sequencer_Finalizer_StateRootSyncInterval ) | No | string | No | - | Duration |
| - [HaltOnBatchNumber](#Sequencer_Finalizer_HaltOnBatchNumber ) | No | integer | No | - | HaltOnBatchNumber specifies the batch number where the Sequencer will stop to process more transactions and generate new batches.
The Sequencer will halt after it closes the batch equal to this number |
| - [SequentialBatchSanityCheck](#Sequencer_Finalizer_SequentialBatchSanityCheck ) | No | boolean | No | - | SequentialBatchSanityCheck indicates if the reprocess of a closed batch (sanity check) must be done in a
sequential way (instead than in parallel) |
| - [SequentialProcessL2Block](#Sequencer_Finalizer_SequentialProcessL2Block ) | No | boolean | No | - | SequentialProcessL2Block indicates if the processing of a L2 Block must be done in the same finalizer go func instead
in the processPendingL2Blocks go func |
@@ -2216,7 +2217,7 @@ ForcedBatchesCheckInterval="10s"
**Default:** `"10s"`
-**Description:** L1InfoTreeCheckInterval is the wait time to check if the L1InfoRoot has been updated
+**Description:** L1InfoTreeCheckInterval is the time interval to check if the L1InfoRoot has been updated
**Examples:**
@@ -2240,7 +2241,7 @@ L1InfoTreeCheckInterval="10s"
**Type:** : `string`
-**Default:** `"10s"`
+**Default:** `"30m0s"`
**Description:** BatchMaxDeltaTimestamp is the resolution of the timestamp used to close a batch
@@ -2254,10 +2255,10 @@ L1InfoTreeCheckInterval="10s"
"300ms"
```
-**Example setting the default value** ("10s"):
+**Example setting the default value** ("30m0s"):
```
[Sequencer.Finalizer]
-BatchMaxDeltaTimestamp="10s"
+BatchMaxDeltaTimestamp="30m0s"
```
#### 10.7.9. `Sequencer.Finalizer.L2BlockMaxDeltaTimestamp`
@@ -2286,7 +2287,34 @@ BatchMaxDeltaTimestamp="10s"
L2BlockMaxDeltaTimestamp="3s"
```
-#### 10.7.10. `Sequencer.Finalizer.HaltOnBatchNumber`
+#### 10.7.10. `Sequencer.Finalizer.StateRootSyncInterval`
+
+**Title:** Duration
+
+**Type:** : `string`
+
+**Default:** `"1h0m0s"`
+
+**Description:** StateRootSyncInterval indicates how often the stateroot generated by the L2 block process will be synchronized with
+the stateroot used in the tx-by-tx execution
+
+**Examples:**
+
+```json
+"1m"
+```
+
+```json
+"300ms"
+```
+
+**Example setting the default value** ("1h0m0s"):
+```
+[Sequencer.Finalizer]
+StateRootSyncInterval="1h0m0s"
+```
+
+#### 10.7.11. `Sequencer.Finalizer.HaltOnBatchNumber`
**Type:** : `integer`
@@ -2301,7 +2329,7 @@ The Sequencer will halt after it closes the batch equal to this number
HaltOnBatchNumber=0
```
-#### 10.7.11. `Sequencer.Finalizer.SequentialBatchSanityCheck`
+#### 10.7.12. `Sequencer.Finalizer.SequentialBatchSanityCheck`
**Type:** : `boolean`
@@ -2316,22 +2344,22 @@ sequential way (instead than in parallel)
SequentialBatchSanityCheck=false
```
-#### 10.7.12. `Sequencer.Finalizer.SequentialProcessL2Block`
+#### 10.7.13. `Sequencer.Finalizer.SequentialProcessL2Block`
**Type:** : `boolean`
-**Default:** `true`
+**Default:** `false`
**Description:** SequentialProcessL2Block indicates if the processing of a L2 Block must be done in the same finalizer go func instead
in the processPendingL2Blocks go func
-**Example setting the default value** (true):
+**Example setting the default value** (false):
```
[Sequencer.Finalizer]
-SequentialProcessL2Block=true
+SequentialProcessL2Block=false
```
-#### 10.7.13. `[Sequencer.Finalizer.Metrics]`
+#### 10.7.14. `[Sequencer.Finalizer.Metrics]`
**Type:** : `object`
**Description:** Metrics is the config for the sequencer metrics
@@ -2341,7 +2369,7 @@ SequentialProcessL2Block=true
| - [Interval](#Sequencer_Finalizer_Metrics_Interval ) | No | string | No | - | Duration |
| - [EnableLog](#Sequencer_Finalizer_Metrics_EnableLog ) | No | boolean | No | - | EnableLog is a flag to enable/disable metrics logs |
-##### 10.7.13.1. `Sequencer.Finalizer.Metrics.Interval`
+##### 10.7.14.1. `Sequencer.Finalizer.Metrics.Interval`
**Title:** Duration
@@ -2367,7 +2395,7 @@ SequentialProcessL2Block=true
Interval="1h0m0s"
```
-##### 10.7.13.2. `Sequencer.Finalizer.Metrics.EnableLog`
+##### 10.7.14.2. `Sequencer.Finalizer.Metrics.EnableLog`
**Type:** : `boolean`
diff --git a/docs/config-file/node-config-schema.json b/docs/config-file/node-config-schema.json
index 021f2859bb..dffb74f672 100644
--- a/docs/config-file/node-config-schema.json
+++ b/docs/config-file/node-config-schema.json
@@ -825,7 +825,7 @@
"L1InfoTreeCheckInterval": {
"type": "string",
"title": "Duration",
- "description": "L1InfoTreeCheckInterval is the wait time to check if the L1InfoRoot has been updated",
+ "description": "L1InfoTreeCheckInterval is the time interval to check if the L1InfoRoot has been updated",
"default": "10s",
"examples": [
"1m",
@@ -836,7 +836,7 @@
"type": "string",
"title": "Duration",
"description": "BatchMaxDeltaTimestamp is the resolution of the timestamp used to close a batch",
- "default": "10s",
+ "default": "30m0s",
"examples": [
"1m",
"300ms"
@@ -852,6 +852,16 @@
"300ms"
]
},
+ "StateRootSyncInterval": {
+ "type": "string",
+ "title": "Duration",
+ "description": "StateRootSyncInterval indicates how often the stateroot generated by the L2 block process will be synchronized with\nthe stateroot used in the tx-by-tx execution",
+ "default": "1h0m0s",
+ "examples": [
+ "1m",
+ "300ms"
+ ]
+ },
"HaltOnBatchNumber": {
"type": "integer",
"description": "HaltOnBatchNumber specifies the batch number where the Sequencer will stop to process more transactions and generate new batches.\nThe Sequencer will halt after it closes the batch equal to this number",
@@ -865,7 +875,7 @@
"SequentialProcessL2Block": {
"type": "boolean",
"description": "SequentialProcessL2Block indicates if the processing of a L2 Block must be done in the same finalizer go func instead\nin the processPendingL2Blocks go func",
- "default": true
+ "default": false
},
"Metrics": {
"properties": {
diff --git a/event/event.go b/event/event.go
index e6a72799ce..6e486e21ad 100644
--- a/event/event.go
+++ b/event/event.go
@@ -50,6 +50,8 @@ const (
EventID_ReservedZKCountersOverflow EventID = "RESERVED ZKCOUNTERS OVERFLOW"
// EventID_InvalidInfoRoot is triggered when an invalid l1InfoRoot was synced
EventID_InvalidInfoRoot EventID = "INVALID INFOROOT"
+ // EventID_L2BlockReorg is triggered when a L2 block reorg has happened in the sequencer
+ EventID_L2BlockReorg EventID = "L2 BLOCK REORG"
// Source_Node is the source of the event
Source_Node Source = "node"
diff --git a/go.mod b/go.mod
index 5205922dcd..db2f719558 100644
--- a/go.mod
+++ b/go.mod
@@ -3,7 +3,7 @@ module github.com/0xPolygonHermez/zkevm-node
go 1.21
require (
- github.com/0xPolygonHermez/zkevm-data-streamer v0.2.3-0.20240422135400-0df0d27226b3
+ github.com/0xPolygonHermez/zkevm-data-streamer v0.2.3-0.20240426122934-6f47d2485fc1
github.com/didip/tollbooth/v6 v6.1.2
github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127
github.com/ethereum/go-ethereum v1.13.11
diff --git a/go.sum b/go.sum
index 3bb8e0138d..a9de27ba91 100644
--- a/go.sum
+++ b/go.sum
@@ -39,8 +39,8 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/0xPolygonHermez/zkevm-data-streamer v0.2.3-0.20240422135400-0df0d27226b3 h1:g5IMJalQxVRNfnXrzQG7bx2COktaFBf1mNuF4SLuQss=
-github.com/0xPolygonHermez/zkevm-data-streamer v0.2.3-0.20240422135400-0df0d27226b3/go.mod h1:0QkAXcFa92mFJrCbN3UPUJGJYes851yEgYHLONnaosE=
+github.com/0xPolygonHermez/zkevm-data-streamer v0.2.3-0.20240426122934-6f47d2485fc1 h1:4wbCJOGcZ8BTuOfNFrcZ1cAVfTWaX1W9EYHaDx3imLc=
+github.com/0xPolygonHermez/zkevm-data-streamer v0.2.3-0.20240426122934-6f47d2485fc1/go.mod h1:0QkAXcFa92mFJrCbN3UPUJGJYes851yEgYHLONnaosE=
github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
diff --git a/sequencer/addrqueue.go b/sequencer/addrqueue.go
index 9c0d8d996e..3b2d4847c9 100644
--- a/sequencer/addrqueue.go
+++ b/sequencer/addrqueue.go
@@ -121,22 +121,25 @@ func (a *addrQueue) IsEmpty() bool {
}
// deleteTx deletes the tx from the addrQueue
-func (a *addrQueue) deleteTx(txHash common.Hash) (deletedReadyTx *TxTracker) {
+func (a *addrQueue) deleteTx(txHash common.Hash) (deletedTx *TxTracker, isReady bool) {
txHashStr := txHash.String()
if (a.readyTx != nil) && (a.readyTx.HashStr == txHashStr) {
log.Infof("deleting readyTx %s from addrQueue %s", txHashStr, a.fromStr)
prevReadyTx := a.readyTx
a.readyTx = nil
- return prevReadyTx
+ return prevReadyTx, true
} else {
+ var deletedTx *TxTracker
for _, txTracker := range a.notReadyTxs {
if txTracker.HashStr == txHashStr {
+ deletedTx = txTracker
log.Infof("deleting notReadyTx %s from addrQueue %s", txHashStr, a.fromStr)
delete(a.notReadyTxs, txTracker.Nonce)
+ break
}
}
- return nil
+ return deletedTx, false
}
}
@@ -158,6 +161,22 @@ func (a *addrQueue) deletePendingTxToStore(txHash common.Hash) {
}
}
+func (a *addrQueue) getTransactions() []*TxTracker {
+ // TODO: Add test for this function
+
+ txsList := []*TxTracker{}
+
+ if a.readyTx != nil {
+ txsList = append(txsList, a.readyTx)
+ }
+
+ for _, tx := range a.notReadyTxs {
+ txsList = append(txsList, tx)
+ }
+
+ return txsList
+}
+
// updateCurrentNonceBalance updates the nonce and balance of the addrQueue and updates the ready and notReady txs
func (a *addrQueue) updateCurrentNonceBalance(nonce *uint64, balance *big.Int) (newReadyTx, prevReadyTx *TxTracker, toDelete []*TxTracker) {
var oldReadyTx *TxTracker = nil
@@ -179,7 +198,7 @@ func (a *addrQueue) updateCurrentNonceBalance(nonce *uint64, balance *big.Int) (
}
}
for _, txTracker := range txsToDelete {
- log.Infof("deleting notReadyTx with nonce %d from addrQueue %s", txTracker.Nonce, a.fromStr)
+ log.Infof("deleting notReadyTx with nonce %d from addrQueue %s, reason: %s", txTracker.Nonce, a.fromStr, *txTracker.FailedReason)
delete(a.notReadyTxs, txTracker.Nonce)
}
}
diff --git a/sequencer/addrqueue_test.go b/sequencer/addrqueue_test.go
index d39ce5a356..a04e0ee793 100644
--- a/sequencer/addrqueue_test.go
+++ b/sequencer/addrqueue_test.go
@@ -164,11 +164,11 @@ func TestAddrQueue(t *testing.T) {
t.Run("Delete readyTx 0x01", func(t *testing.T) {
tc := addTxTestCases[2]
tx := newTestTxTracker(tc.hash, tc.nonce, tc.gasPrice, tc.cost)
- deltx := addr.deleteTx(tx.Hash)
+ deltx, isReady := addr.deleteTx(tx.Hash)
if !(addr.readyTx == nil) {
t.Fatalf("Error readyTx not nil. Expected=%s, Actual=%s", "", addr.readyTx.HashStr)
}
- if !(deltx.HashStr == tx.HashStr) {
+ if !isReady || !(deltx.HashStr == tx.HashStr) {
t.Fatalf("Error returning deletedReadyTx. Expected=%s, Actual=%s", tx.HashStr, deltx.HashStr)
}
})
diff --git a/sequencer/batch.go b/sequencer/batch.go
index 1d644b22c5..ecaa93aca7 100644
--- a/sequencer/batch.go
+++ b/sequencer/batch.go
@@ -11,25 +11,28 @@ import (
"github.com/0xPolygonHermez/zkevm-node/state"
stateMetrics "github.com/0xPolygonHermez/zkevm-node/state/metrics"
"github.com/ethereum/go-ethereum/common"
+ "github.com/jackc/pgx/v4"
)
// Batch represents a wip or processed batch.
type Batch struct {
- batchNumber uint64
- coinbase common.Address
- timestamp time.Time
- initialStateRoot common.Hash // initial stateRoot of the batch
- imStateRoot common.Hash // intermediate stateRoot when processing tx-by-tx
- finalStateRoot common.Hash // final stateroot of the batch when a L2 block is processed
- countOfTxs int
- countOfL2Blocks int
- imRemainingResources state.BatchResources // remaining batch resources when processing tx-by-tx
- finalRemainingResources state.BatchResources // remaining batch resources when a L2 block is processed
- closingReason state.ClosingReason
+ batchNumber uint64
+ coinbase common.Address
+ timestamp time.Time
+ initialStateRoot common.Hash // initial stateRoot of the batch
+ imStateRoot common.Hash // intermediate stateRoot when processing tx-by-tx
+ finalStateRoot common.Hash // final stateroot of the batch when a L2 block is processed
+ countOfTxs int
+ countOfL2Blocks int
+ imRemainingResources state.BatchResources // remaining batch resources when processing tx-by-tx
+ imHighReservedZKCounters state.ZKCounters
+ finalRemainingResources state.BatchResources // remaining batch resources when a L2 block is processed
+ finalHighReservedZKCounters state.ZKCounters
+ closingReason state.ClosingReason
}
-func (w *Batch) isEmpty() bool {
- return w.countOfL2Blocks == 0
+func (b *Batch) isEmpty() bool {
+ return b.countOfL2Blocks == 0
}
// processBatchesPendingtoCheck performs a sanity check for batches closed but pending to be checked
@@ -77,23 +80,25 @@ func (f *finalizer) setWIPBatch(ctx context.Context, wipStateBatch *state.Batch)
wipStateBatchCountOfTxs = wipStateBatchCountOfTxs + len(rawBlock.Transactions)
}
- remainingResources := getMaxRemainingResources(f.batchConstraints)
+ remainingResources := getMaxBatchResources(f.batchConstraints)
overflow, overflowResource := remainingResources.Sub(wipStateBatch.Resources)
if overflow {
- return nil, fmt.Errorf("failed to subtract used resources when setting the WIP batch to the state batch %d, overflow resource: %s", wipStateBatch.BatchNumber, overflowResource)
+ return nil, fmt.Errorf("failed to subtract used resources when setting the wip batch to the state batch %d, overflow resource: %s", wipStateBatch.BatchNumber, overflowResource)
}
wipBatch := &Batch{
- batchNumber: wipStateBatch.BatchNumber,
- coinbase: wipStateBatch.Coinbase,
- imStateRoot: wipStateBatch.StateRoot,
- initialStateRoot: prevStateBatch.StateRoot,
- finalStateRoot: wipStateBatch.StateRoot,
- timestamp: wipStateBatch.Timestamp,
- countOfL2Blocks: len(wipStateBatchBlocks.Blocks),
- countOfTxs: wipStateBatchCountOfTxs,
- imRemainingResources: remainingResources,
- finalRemainingResources: remainingResources,
+ batchNumber: wipStateBatch.BatchNumber,
+ coinbase: wipStateBatch.Coinbase,
+ imStateRoot: wipStateBatch.StateRoot,
+ initialStateRoot: prevStateBatch.StateRoot,
+ finalStateRoot: wipStateBatch.StateRoot,
+ timestamp: wipStateBatch.Timestamp,
+ countOfL2Blocks: len(wipStateBatchBlocks.Blocks),
+ countOfTxs: wipStateBatchCountOfTxs,
+ imRemainingResources: remainingResources,
+ finalRemainingResources: remainingResources,
+ imHighReservedZKCounters: wipStateBatch.HighReservedZKCounters,
+ finalHighReservedZKCounters: wipStateBatch.HighReservedZKCounters,
}
return wipBatch, nil
@@ -125,22 +130,52 @@ func (f *finalizer) initWIPBatch(ctx context.Context) {
if lastStateBatch.BatchNumber+1 == f.cfg.HaltOnBatchNumber {
f.Halt(ctx, fmt.Errorf("finalizer reached stop sequencer on batch number: %d", f.cfg.HaltOnBatchNumber), false)
}
-
- f.wipBatch, err = f.openNewWIPBatch(ctx, lastStateBatch.BatchNumber+1, lastStateBatch.StateRoot)
- if err != nil {
- log.Fatalf("failed to open new wip batch, error: %v", err)
- }
- } else { /// if it's not closed, it is the wip state batch, set it as wip batch in the finalizer
+ f.wipBatch = f.openNewWIPBatch(lastStateBatch.BatchNumber+1, lastStateBatch.StateRoot)
+ f.pipBatch = nil
+ f.sipBatch = nil
+ } else { /// if it's not closed, it is the wip/pip/sip batch
f.wipBatch, err = f.setWIPBatch(ctx, lastStateBatch)
if err != nil {
log.Fatalf("failed to set wip batch, error: %v", err)
}
+ f.pipBatch = f.wipBatch
+ f.sipBatch = f.wipBatch
}
log.Infof("initial batch: %d, initialStateRoot: %s, stateRoot: %s, coinbase: %s",
f.wipBatch.batchNumber, f.wipBatch.initialStateRoot, f.wipBatch.finalStateRoot, f.wipBatch.coinbase)
}
+func (f *finalizer) processL2BlockReorg(ctx context.Context) error {
+ f.waitPendingL2Blocks()
+
+ if f.sipBatch != nil && f.sipBatch.batchNumber != f.wipBatch.batchNumber {
+ // If the sip batch is the one previous to the current wip batch and it's still open, this means that the L2 block that caused
+ // the reorg is the first L2 block of the wip batch, therefore we need to close the sip batch before continuing.
+ // If we don't close the sip batch, the initWIPBatch function will load the sip batch as the initial one and, when trying to reprocess
+ // the first reorged tx, we can get a batch resource overflow (if we closed the sip batch for that reason) and we would return
+ // the reorged tx to the worker (calling UpdateTxZKCounters), losing the order in which we need to reprocess the reorged txs
+
+ err := f.finalizeSIPBatch(ctx)
+ if err != nil {
+ return fmt.Errorf("error finalizing sip batch, error: %v", err)
+ }
+ }
+
+ f.workerIntf.RestoreTxsPendingToStore(ctx)
+
+ f.initWIPBatch(ctx)
+
+ f.initWIPL2Block(ctx)
+
+ // Since we sync the state root when processing the L2 block reorg, we can reset the next state root sync
+ f.scheduleNextStateRootSync()
+
+ f.l2BlockReorg.Store(false)
+
+ return nil
+}
+
// finalizeWIPBatch closes the current batch and opens a new one, potentially processing forced batches between the batch is closed and the resulting new empty batch
func (f *finalizer) finalizeWIPBatch(ctx context.Context, closeReason state.ClosingReason) {
prevTimestamp := f.wipL2Block.timestamp
@@ -153,7 +188,7 @@ func (f *finalizer) finalizeWIPBatch(ctx context.Context, closeReason state.Clos
err := f.closeAndOpenNewWIPBatch(ctx, closeReason)
if err != nil {
- f.Halt(ctx, fmt.Errorf("failed to create new WIP batch, error: %v", err), true)
+ f.Halt(ctx, fmt.Errorf("failed to create new wip batch, error: %v", err), true)
}
// If we have closed the wipL2Block then we open a new one
@@ -162,88 +197,126 @@ func (f *finalizer) finalizeWIPBatch(ctx context.Context, closeReason state.Clos
}
}
+// finalizeSIPBatch closes the current store-in-progress batch
+func (f *finalizer) finalizeSIPBatch(ctx context.Context) error {
+ dbTx, err := f.stateIntf.BeginStateTransaction(ctx)
+ if err != nil {
+ return fmt.Errorf("error creating db transaction to close sip batch %d, error: %v", f.sipBatch.batchNumber, err)
+ }
+
+ // Close sip batch (close in statedb)
+ err = f.closeSIPBatch(ctx, dbTx)
+ if err != nil {
+ return fmt.Errorf("failed to close sip batch %d, error: %v", f.sipBatch.batchNumber, err)
+ }
+
+ if err != nil {
+ rollbackErr := dbTx.Rollback(ctx)
+ if rollbackErr != nil {
+ return fmt.Errorf("error when rollback db transaction to close sip batch %d, error: %v", f.sipBatch.batchNumber, rollbackErr)
+ }
+ return err
+ }
+
+ err = dbTx.Commit(ctx)
+ if err != nil {
+ return fmt.Errorf("error when commit db transaction to close sip batch %d, error: %v", f.sipBatch.batchNumber, err)
+ }
+
+ return nil
+}
+
// closeAndOpenNewWIPBatch closes the current batch and opens a new one, potentially processing forced batches between the batch is closed and the resulting new wip batch
func (f *finalizer) closeAndOpenNewWIPBatch(ctx context.Context, closeReason state.ClosingReason) error {
f.nextForcedBatchesMux.Lock()
processForcedBatches := len(f.nextForcedBatches) > 0
f.nextForcedBatchesMux.Unlock()
- // If we will process forced batches after we close the wip batch then we must close the current wip L2 block,
- // since the processForcedBatches function needs to create new L2 blocks (cannot "reuse" the current wip L2 block if it's empty)
+ f.wipBatch.closingReason = closeReason
+
+ var lastStateRoot common.Hash
+
+ //TODO: review the forced batches implementation, since it's not a good idea to check for forced batches here; it may be better to do it in the finalizeBatches loop
if processForcedBatches {
+ // If we have reached the time to sync the stateroot or we are going to process forced batches, we must close the current wip L2 block and wip batch
f.closeWIPL2Block(ctx)
- }
+ // We need to wait until all pending L2 blocks are processed and stored
+ f.waitPendingL2Blocks()
- // Wait until all L2 blocks are processed by the executor
- startWait := time.Now()
- f.pendingL2BlocksToProcessWG.Wait()
- elapsed := time.Since(startWait)
- log.Debugf("waiting for pending L2 blocks to be processed took: %v", elapsed)
+ lastStateRoot = f.sipBatch.finalStateRoot
- // Wait until all L2 blocks are store
- startWait = time.Now()
- f.pendingL2BlocksToStoreWG.Wait()
- log.Debugf("waiting for pending L2 blocks to be stored took: %v", time.Since(startWait))
+ err := f.finalizeSIPBatch(ctx)
+ if err != nil {
+ return fmt.Errorf("error finalizing sip batch %d when processing forced batches, error: %v", f.sipBatch.batchNumber, err)
+ }
+ } else {
+ lastStateRoot = f.wipBatch.imStateRoot
+ }
- f.wipBatch.closingReason = closeReason
+ // Close the wip batch. After closing it, f.wipBatch will be nil, therefore we store in local variables the info we need from f.wipBatch
+ lastBatchNumber := f.wipBatch.batchNumber
- // Close the wip batch
- var err error
- err = f.closeWIPBatch(ctx)
- if err != nil {
- return fmt.Errorf("failed to close batch, error: %v", err)
- }
+ f.closeWIPBatch(ctx)
- log.Infof("batch %d closed, closing reason: %s", f.wipBatch.batchNumber, closeReason)
+ if lastBatchNumber+1 == f.cfg.HaltOnBatchNumber {
+ f.waitPendingL2Blocks()
- // Reprocess full batch as sanity check
- if f.cfg.SequentialBatchSanityCheck {
- // Do the full batch reprocess now
- _, _ = f.batchSanityCheck(ctx, f.wipBatch.batchNumber, f.wipBatch.initialStateRoot, f.wipBatch.finalStateRoot)
- } else {
- // Do the full batch reprocess in parallel
- go func() {
- _, _ = f.batchSanityCheck(ctx, f.wipBatch.batchNumber, f.wipBatch.initialStateRoot, f.wipBatch.finalStateRoot)
- }()
- }
+ // We finalize the current sip batch
+ err := f.finalizeSIPBatch(ctx)
+ if err != nil {
+ return fmt.Errorf("error finalizing sip batch %d when halting on batch %d", f.sipBatch.batchNumber, f.cfg.HaltOnBatchNumber)
+ }
- if f.wipBatch.batchNumber+1 == f.cfg.HaltOnBatchNumber {
f.Halt(ctx, fmt.Errorf("finalizer reached stop sequencer on batch number: %d", f.cfg.HaltOnBatchNumber), false)
}
- // Metadata for the next batch
- stateRoot := f.wipBatch.finalStateRoot
- lastBatchNumber := f.wipBatch.batchNumber
-
// Process forced batches
if processForcedBatches {
- lastBatchNumber, stateRoot = f.processForcedBatches(ctx, lastBatchNumber, stateRoot)
- // We must init/reset the wip L2 block from the state since processForcedBatches can created new L2 blocks
- f.initWIPL2Block(ctx)
+ lastBatchNumber, lastStateRoot = f.processForcedBatches(ctx, lastBatchNumber, lastStateRoot)
}
- f.wipBatch, err = f.openNewWIPBatch(ctx, lastBatchNumber+1, stateRoot)
- if err != nil {
- return fmt.Errorf("failed to open new wip batch, error: %v", err)
- }
+ f.wipBatch = f.openNewWIPBatch(lastBatchNumber+1, lastStateRoot)
- if f.wipL2Block != nil {
+ if processForcedBatches {
+ // We need to init/reset the wip L2 block in case we have processed forced batches
+ f.initWIPL2Block(ctx)
+ } else if f.wipL2Block != nil {
+ // If we are "reusing" the wip L2 block because it's empty we assign it to the new wip batch
f.wipBatch.imStateRoot = f.wipL2Block.imStateRoot
- // Subtract the WIP L2 block used resources to batch
- overflow, overflowResource := f.wipBatch.imRemainingResources.Sub(state.BatchResources{ZKCounters: f.wipL2Block.usedZKCounters, Bytes: f.wipL2Block.bytes})
+ f.wipL2Block.batch = f.wipBatch
+
+ // We subtract the wip L2 block used resources to the new wip batch
+ overflow, overflowResource := f.wipBatch.imRemainingResources.Sub(state.BatchResources{ZKCounters: f.wipL2Block.usedZKCountersOnNew, Bytes: f.wipL2Block.bytes})
if overflow {
return fmt.Errorf("failed to subtract L2 block [%d] used resources to new wip batch %d, overflow resource: %s",
f.wipL2Block.trackingNum, f.wipBatch.batchNumber, overflowResource)
}
}
- log.Infof("new WIP batch %d", f.wipBatch.batchNumber)
+ log.Infof("new wip batch %d", f.wipBatch.batchNumber)
return nil
}
// openNewWIPBatch opens a new batch in the state and returns it as WipBatch
-func (f *finalizer) openNewWIPBatch(ctx context.Context, batchNumber uint64, stateRoot common.Hash) (*Batch, error) {
+func (f *finalizer) openNewWIPBatch(batchNumber uint64, stateRoot common.Hash) *Batch {
+ maxRemainingResources := getMaxBatchResources(f.batchConstraints)
+
+ return &Batch{
+ batchNumber: batchNumber,
+ coinbase: f.sequencerAddress,
+ initialStateRoot: stateRoot,
+ imStateRoot: stateRoot,
+ finalStateRoot: stateRoot,
+ timestamp: now(),
+ imRemainingResources: maxRemainingResources,
+ finalRemainingResources: maxRemainingResources,
+ closingReason: state.EmptyClosingReason,
+ }
+}
+
+// insertSIPBatch inserts a new store-in-progress (sip) batch in the state db
+func (f *finalizer) insertSIPBatch(ctx context.Context, batchNumber uint64, stateRoot common.Hash, dbTx pgx.Tx) error {
// open next batch
newStateBatch := state.Batch{
BatchNumber: batchNumber,
@@ -254,82 +327,83 @@ func (f *finalizer) openNewWIPBatch(ctx context.Context, batchNumber uint64, sta
LocalExitRoot: state.ZeroHash,
}
- dbTx, err := f.stateIntf.BeginStateTransaction(ctx)
- if err != nil {
- return nil, fmt.Errorf("failed to begin state transaction to open batch, error: %v", err)
- }
-
// OpenBatch opens a new wip batch in the state
- err = f.stateIntf.OpenWIPBatch(ctx, newStateBatch, dbTx)
+ //TODO: rename OpenWipBatch to InsertBatch
+ err := f.stateIntf.OpenWIPBatch(ctx, newStateBatch, dbTx)
if err != nil {
- if rollbackErr := dbTx.Rollback(ctx); rollbackErr != nil {
- return nil, fmt.Errorf("failed to rollback due to error when open a new wip batch, rollback error: %v, error: %v", rollbackErr, err)
- }
- return nil, fmt.Errorf("failed to open new wip batch, error: %v", err)
- }
-
- if err := dbTx.Commit(ctx); err != nil {
- return nil, fmt.Errorf("failed to commit database transaction for opening a wip batch, error: %v", err)
+ return fmt.Errorf("failed to insert new batch in state db, error: %v", err)
}
// Send batch bookmark to the datastream
f.DSSendBatchBookmark(batchNumber)
// Check if synchronizer is up-to-date
+ //TODO: review if this is needed
for !f.isSynced(ctx) {
log.Info("wait for synchronizer to sync last batch")
time.Sleep(time.Second)
}
- maxRemainingResources := getMaxRemainingResources(f.batchConstraints)
-
- return &Batch{
- batchNumber: newStateBatch.BatchNumber,
- coinbase: newStateBatch.Coinbase,
- initialStateRoot: newStateBatch.StateRoot,
- imStateRoot: newStateBatch.StateRoot,
- finalStateRoot: newStateBatch.StateRoot,
- timestamp: newStateBatch.Timestamp,
- imRemainingResources: maxRemainingResources,
- finalRemainingResources: maxRemainingResources,
- closingReason: state.EmptyClosingReason,
- }, err
+ return nil
}
-// closeWIPBatch closes the current batch in the state
-func (f *finalizer) closeWIPBatch(ctx context.Context) error {
+// closeWIPBatch closes the current wip batch
+func (f *finalizer) closeWIPBatch(ctx context.Context) {
// Sanity check: batch must not be empty (should have L2 blocks)
if f.wipBatch.isEmpty() {
- f.Halt(ctx, fmt.Errorf("closing WIP batch %d without L2 blocks and should have at least 1", f.wipBatch.batchNumber), false)
+ f.Halt(ctx, fmt.Errorf("closing wip batch %d without L2 blocks and should have at least 1", f.wipBatch.batchNumber), false)
+ }
+
+ log.Infof("wip batch %d closed, closing reason: %s", f.wipBatch.batchNumber, f.wipBatch.closingReason)
+
+ f.wipBatch = nil
+}
+
+// closeSIPBatch closes the current sip batch in the state
+func (f *finalizer) closeSIPBatch(ctx context.Context, dbTx pgx.Tx) error {
+ // Sanity check: this can't happen
+ if f.sipBatch == nil {
+ f.Halt(ctx, fmt.Errorf("closing sip batch that is nil"), false)
}
- usedResources := getUsedBatchResources(f.batchConstraints, f.wipBatch.imRemainingResources)
+ // Sanity check: batch must not be empty (should have L2 blocks)
+ if f.sipBatch.isEmpty() {
+ f.Halt(ctx, fmt.Errorf("closing sip batch %d without L2 blocks and should have at least 1", f.sipBatch.batchNumber), false)
+ }
+
+ usedResources := getUsedBatchResources(f.batchConstraints, f.sipBatch.imRemainingResources)
receipt := state.ProcessingReceipt{
- BatchNumber: f.wipBatch.batchNumber,
+ BatchNumber: f.sipBatch.batchNumber,
BatchResources: usedResources,
- ClosingReason: f.wipBatch.closingReason,
+ ClosingReason: f.sipBatch.closingReason,
}
- dbTx, err := f.stateIntf.BeginStateTransaction(ctx)
+ err := f.stateIntf.CloseWIPBatch(ctx, receipt, dbTx)
+
if err != nil {
return err
}
- err = f.stateIntf.CloseWIPBatch(ctx, receipt, dbTx)
- if err != nil {
- rollbackErr := dbTx.Rollback(ctx)
- if rollbackErr != nil {
- log.Errorf("error rolling back due to error when closing wip batch, rollback error: %v, error: %v", rollbackErr, err)
- }
- return err
+ // We store values needed for the batch sanity check in local variables, as we can execute the sanity check in a go func (in parallel) and in that case f.sipBatch will be nil for some time
+ batchNumber := f.sipBatch.batchNumber
+ initialStateRoot := f.sipBatch.initialStateRoot
+ finalStateRoot := f.sipBatch.finalStateRoot
+
+ // Reprocess full batch as sanity check
+ if f.cfg.SequentialBatchSanityCheck {
+ // Do the full batch reprocess now
+ _, _ = f.batchSanityCheck(ctx, batchNumber, initialStateRoot, finalStateRoot)
} else {
- err := dbTx.Commit(ctx)
- if err != nil {
- log.Errorf("error committing close wip batch, error: %v", err)
- return err
- }
+ // Do the full batch reprocess in parallel
+ go func() {
+ _, _ = f.batchSanityCheck(ctx, batchNumber, initialStateRoot, finalStateRoot)
+ }()
}
+ log.Infof("sip batch %d closed in statedb, closing reason: %s", f.sipBatch.batchNumber, f.sipBatch.closingReason)
+
+ f.sipBatch = nil
+
return nil
}
@@ -353,7 +427,7 @@ func (f *finalizer) batchSanityCheck(ctx context.Context, batchNum uint64, initi
batchLog += fmt.Sprintf(" tx[%d]: %s, egpPct: %d\n", txIdx, rawTx.Tx.Hash(), rawTx.EfficiencyPercentage)
}
}
- log.Infof("DUMP batch %d, blocks: %d, txs: %d\n%s", batch.BatchNumber, len(rawL2Blocks.Blocks), totalTxs, batchLog)
+ log.Infof("dump batch %d, blocks: %d, txs: %d\n%s", batch.BatchNumber, len(rawL2Blocks.Blocks), totalTxs, batchLog)
f.Halt(ctx, fmt.Errorf("batch sanity check error. Check previous errors in logs to know which was the cause"), false)
}
@@ -384,10 +458,8 @@ func (f *finalizer) batchSanityCheck(ctx context.Context, batchNum uint64, initi
return nil, ErrGetBatchByNumber
}
- var batchResponse *state.ProcessBatchResponse
-
startProcessing := time.Now()
- batchResponse, err = f.stateIntf.ProcessBatchV2(ctx, batchRequest, false)
+ batchResponse, contextid, err := f.stateIntf.ProcessBatchV2(ctx, batchRequest, false)
endProcessing := time.Now()
if err != nil {
@@ -429,9 +501,9 @@ func (f *finalizer) batchSanityCheck(ctx context.Context, batchNum uint64, initi
return nil, ErrUpdateBatchAsChecked
}
- log.Infof("successful sanity check for batch %d, initialStateRoot: %s, stateRoot: %s, l2Blocks: %d, time: %v, used counters: %s",
+ log.Infof("successful sanity check for batch %d, initialStateRoot: %s, stateRoot: %s, l2Blocks: %d, time: %v, used counters: %s, contextId: %s",
batch.BatchNumber, initialStateRoot, batchResponse.NewStateRoot.String(), len(batchResponse.BlockResponses),
- endProcessing.Sub(startProcessing), f.logZKCounters(batchResponse.UsedZkCounters))
+ endProcessing.Sub(startProcessing), f.logZKCounters(batchResponse.UsedZkCounters), contextid)
return batchResponse, nil
}
@@ -509,8 +581,8 @@ func getUsedBatchResources(constraints state.BatchConstraintsCfg, remainingResou
}
}
-// getMaxRemainingResources returns the max resources that can be used in a batch
-func getMaxRemainingResources(constraints state.BatchConstraintsCfg) state.BatchResources {
+// getMaxBatchResources returns the max resources that can be used in a batch
+func getMaxBatchResources(constraints state.BatchConstraintsCfg) state.BatchResources {
return state.BatchResources{
ZKCounters: state.ZKCounters{
GasUsed: constraints.MaxCumulativeGasUsed,
@@ -527,6 +599,51 @@ func getMaxRemainingResources(constraints state.BatchConstraintsCfg) state.Batch
}
}
+// getNeededZKCounters returns the needed counters to fit a tx in the wip batch. The needed counters are the counters used by the tx plus the high reserved counters.
+// It takes into account the current high reserved counter reached with previous txs, but also checks the reserved counters diff needed by this tx, since it could be greater.
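+// Illustrative example (hypothetical numbers): if the batch high reserved Steps counter is 500 and the tx
+// used 300 Steps but reserved 900, the reserved-used diff (600) is greater than the current high (500), so
+// needed.Steps = 300 + 600 = 900 and the new high Steps becomes 600. If instead the tx had reserved 700 Steps
+// (diff 400 <= 500), then needed.Steps = 300 + 500 = 800 and the high Steps would stay at 500.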
+func getNeededZKCounters(highReservedCounters state.ZKCounters, usedCounters state.ZKCounters, reservedCounters state.ZKCounters) (state.ZKCounters, state.ZKCounters) {
+ neededCounter := func(counterName string, highCounter uint32, usedCounter uint32, reservedCounter uint32) (uint32, uint32) {
+ if reservedCounter < usedCounter {
+ log.Warnf("%s reserved counter %d is less than used counter %d, this shouldn't be possible", counterName, reservedCounter, usedCounter)
+ return usedCounter + highCounter, highCounter
+ }
+ diffReserved := reservedCounter - usedCounter
+ if diffReserved > highCounter { // the reserved counter diff for this tx is greater than the high reserved counter reached in previous txs
+ return usedCounter + diffReserved, diffReserved
+ } else {
+ return usedCounter + highCounter, highCounter
+ }
+ }
+
+ needed := state.ZKCounters{}
+ newHigh := state.ZKCounters{}
+
+ needed.Arithmetics, newHigh.Arithmetics = neededCounter("Arithmetics", highReservedCounters.Arithmetics, usedCounters.Arithmetics, reservedCounters.Arithmetics)
+ needed.Binaries, newHigh.Binaries = neededCounter("Binaries", highReservedCounters.Binaries, usedCounters.Binaries, reservedCounters.Binaries)
+ needed.KeccakHashes, newHigh.KeccakHashes = neededCounter("KeccakHashes", highReservedCounters.KeccakHashes, usedCounters.KeccakHashes, reservedCounters.KeccakHashes)
+ needed.MemAligns, newHigh.MemAligns = neededCounter("MemAligns", highReservedCounters.MemAligns, usedCounters.MemAligns, reservedCounters.MemAligns)
+ needed.PoseidonHashes, newHigh.PoseidonHashes = neededCounter("PoseidonHashes", highReservedCounters.PoseidonHashes, usedCounters.PoseidonHashes, reservedCounters.PoseidonHashes)
+ needed.PoseidonPaddings, newHigh.PoseidonPaddings = neededCounter("PoseidonPaddings", highReservedCounters.PoseidonPaddings, usedCounters.PoseidonPaddings, reservedCounters.PoseidonPaddings)
+ needed.Sha256Hashes_V2, newHigh.Sha256Hashes_V2 = neededCounter("Sha256Hashes_V2", highReservedCounters.Sha256Hashes_V2, usedCounters.Sha256Hashes_V2, reservedCounters.Sha256Hashes_V2)
+ needed.Steps, newHigh.Steps = neededCounter("Steps", highReservedCounters.Steps, usedCounters.Steps, reservedCounters.Steps)
+
+ if reservedCounters.GasUsed < usedCounters.GasUsed {
+ log.Warnf("gasUsed reserved counter %d is less than used counter %d, this shouldn't be possible", reservedCounters.GasUsed, usedCounters.GasUsed)
+ needed.GasUsed = usedCounters.GasUsed + highReservedCounters.GasUsed
+ } else {
+ diffReserved := reservedCounters.GasUsed - usedCounters.GasUsed
+ if diffReserved > highReservedCounters.GasUsed {
+ needed.GasUsed = usedCounters.GasUsed + diffReserved
+ newHigh.GasUsed = diffReserved
+ } else {
+ needed.GasUsed = usedCounters.GasUsed + highReservedCounters.GasUsed
+ newHigh.GasUsed = highReservedCounters.GasUsed
+ }
+ }
+
+ return needed, newHigh
+}
+
// checkIfFinalizeBatch returns true if the batch must be closed due to a closing reason, also it returns the description of the close reason
func (f *finalizer) checkIfFinalizeBatch() (bool, state.ClosingReason) {
// Max txs per batch
diff --git a/sequencer/config.go b/sequencer/config.go
index 45210c4840..8b813c52db 100644
--- a/sequencer/config.go
+++ b/sequencer/config.go
@@ -70,7 +70,7 @@ type FinalizerCfg struct {
// ForcedBatchesCheckInterval is used by the closing signals manager to wait for its operation
ForcedBatchesCheckInterval types.Duration `mapstructure:"ForcedBatchesCheckInterval"`
- // L1InfoTreeCheckInterval is the wait time to check if the L1InfoRoot has been updated
+ // L1InfoTreeCheckInterval is the time interval to check if the L1InfoRoot has been updated
L1InfoTreeCheckInterval types.Duration `mapstructure:"L1InfoTreeCheckInterval"`
// BatchMaxDeltaTimestamp is the resolution of the timestamp used to close a batch
@@ -79,6 +79,10 @@ type FinalizerCfg struct {
// L2BlockMaxDeltaTimestamp is the resolution of the timestamp used to close a L2 block
L2BlockMaxDeltaTimestamp types.Duration `mapstructure:"L2BlockMaxDeltaTimestamp"`
+ // StateRootSyncInterval indicates how often the stateroot generated by the L2 block process will be synchronized with
+ // the stateroot used in the tx-by-tx execution
+ StateRootSyncInterval types.Duration `mapstructure:"StateRootSyncInterval"`
+
// HaltOnBatchNumber specifies the batch number where the Sequencer will stop to process more transactions and generate new batches.
// The Sequencer will halt after it closes the batch equal to this number
HaltOnBatchNumber uint64 `mapstructure:"HaltOnBatchNumber"`
diff --git a/sequencer/datastreamer.go b/sequencer/datastreamer.go
index bbbfe14496..7f5e7e763a 100644
--- a/sequencer/datastreamer.go
+++ b/sequencer/datastreamer.go
@@ -43,7 +43,7 @@ func (f *finalizer) DSSendL2Block(batchNumber uint64, blockResponse *state.Proce
l2Transactions = append(l2Transactions, l2Transaction)
}
- log.Infof("sending l2block %d to datastream channel", blockResponse.BlockNumber)
+ log.Infof("[ds-debug] sending l2block %d to datastream channel", blockResponse.BlockNumber)
f.dataToStream <- state.DSL2FullBlock{
DSL2Block: l2Block,
Txs: l2Transactions,
diff --git a/sequencer/finalizer.go b/sequencer/finalizer.go
index 64c28604ce..a06f2341fa 100644
--- a/sequencer/finalizer.go
+++ b/sequencer/finalizer.go
@@ -41,9 +41,13 @@ type finalizer struct {
stateIntf stateInterface
etherman ethermanInterface
wipBatch *Batch
+ pipBatch *Batch // processing-in-progress batch is the batch that is being processed (L2 block process)
+ sipBatch *Batch // storing-in-progress batch is the batch that is being stored/updated in the state db
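+ // Batch lifecycle note: a batch starts as the wipBatch (txs are added to it tx-by-tx), its L2 blocks are then
+ // processed by the executor while it is referenced as the pipBatch, and finally it is stored/closed in the
+ // state db while referenced as the sipBatch. The same *Batch can be pointed to by more than one of these
+ // fields at a time (e.g. initWIPBatch sets pipBatch and sipBatch to the wip batch when the last state batch is still open)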
wipL2Block *L2Block
batchConstraints state.BatchConstraintsCfg
haltFinalizer atomic.Bool
+ // stateroot sync
+ nextStateRootSync time.Time
// forced batches
nextForcedBatches []state.ForcedBatch
nextForcedBatchDeadline int64
@@ -60,10 +64,12 @@ type finalizer struct {
effectiveGasPrice *pool.EffectiveGasPrice
// pending L2 blocks to process (executor)
pendingL2BlocksToProcess chan *L2Block
- pendingL2BlocksToProcessWG *sync.WaitGroup
+ pendingL2BlocksToProcessWG *WaitGroupCount
+ l2BlockReorg atomic.Bool
+ lastL2BlockWasReorg bool
// pending L2 blocks to store in the state
pendingL2BlocksToStore chan *L2Block
- pendingL2BlocksToStoreWG *sync.WaitGroup
+ pendingL2BlocksToStoreWG *WaitGroupCount
// L2 block counter for tracking purposes
l2BlockCounter uint64
// executor flushid control
@@ -106,6 +112,8 @@ func newFinalizer(
stateIntf: stateIntf,
etherman: etherman,
batchConstraints: batchConstraints,
+ // stateroot sync
+ nextStateRootSync: time.Now().Add(cfg.StateRootSyncInterval.Duration),
// forced batches
nextForcedBatches: make([]state.ForcedBatch, 0),
nextForcedBatchDeadline: 0,
@@ -120,10 +128,10 @@ func newFinalizer(
effectiveGasPrice: pool.NewEffectiveGasPrice(poolCfg.EffectiveGasPrice),
// pending L2 blocks to process (executor)
pendingL2BlocksToProcess: make(chan *L2Block, pendingL2BlocksBufferSize),
- pendingL2BlocksToProcessWG: new(sync.WaitGroup),
+ pendingL2BlocksToProcessWG: new(WaitGroupCount),
// pending L2 blocks to store in the state
pendingL2BlocksToStore: make(chan *L2Block, pendingL2BlocksBufferSize),
- pendingL2BlocksToStoreWG: new(sync.WaitGroup),
+ pendingL2BlocksToStoreWG: new(WaitGroupCount),
storedFlushID: 0,
// executor flushid control
proverID: "",
@@ -139,6 +147,7 @@ func newFinalizer(
dataToStream: dataToStream,
}
+ f.l2BlockReorg.Store(false)
f.haltFinalizer.Store(false)
return &f
@@ -375,12 +384,19 @@ func (f *finalizer) finalizeBatches(ctx context.Context) {
log.Debug("finalizer init loop")
showNotFoundTxLog := true // used to log debug only the first message when there is no txs to process
for {
+ if f.l2BlockReorg.Load() {
+ err := f.processL2BlockReorg(ctx)
+ if err != nil {
+ log.Errorf("error processing L2 block reorg, error: %v", err)
+ }
+ }
+
// We have reached the L2 block time, we need to close the current L2 block and open a new one
- if f.wipL2Block.timestamp+uint64(f.cfg.L2BlockMaxDeltaTimestamp.Seconds()) <= uint64(time.Now().Unix()) {
+ if f.wipL2Block.createdAt.Add(f.cfg.L2BlockMaxDeltaTimestamp.Duration).Before(time.Now()) {
f.finalizeWIPL2Block(ctx)
}
- tx, err := f.workerIntf.GetBestFittingTx(f.wipBatch.imRemainingResources)
+ tx, err := f.workerIntf.GetBestFittingTx(f.wipBatch.imRemainingResources, f.wipBatch.imHighReservedZKCounters)
// If we have txs pending to process but none of them fits into the wip batch, we close the wip batch and open a new one
if err == ErrNoFittingTransaction {
@@ -394,8 +410,7 @@ func (f *finalizer) finalizeBatches(ctx context.Context) {
firstTxProcess := true
for {
- var err error
- _, err = f.processTransaction(ctx, tx, firstTxProcess)
+ _, err := f.processTransaction(ctx, tx, firstTxProcess)
if err != nil {
if err == ErrEffectiveGasPriceReprocess {
firstTxProcess = false
@@ -504,7 +519,7 @@ func (f *finalizer) processTransaction(ctx context.Context, tx *TxTracker, first
loss := new(big.Int).Sub(tx.EffectiveGasPrice, txGasPrice)
// If loss > 0 the warning message indicating we loss fee for thix tx
if loss.Cmp(new(big.Int).SetUint64(0)) == 1 {
- log.Warnf("egp-loss: gasPrice: %d, effectiveGasPrice1: %d, loss: %d, tx: %s", txGasPrice, tx.EffectiveGasPrice, loss, tx.HashStr)
+ log.Infof("egp-loss: gasPrice: %d, effectiveGasPrice1: %d, loss: %d, tx: %s", txGasPrice, tx.EffectiveGasPrice, loss, tx.HashStr)
}
tx.EffectiveGasPrice.Set(txGasPrice)
@@ -542,7 +557,7 @@ func (f *finalizer) processTransaction(ctx context.Context, tx *TxTracker, first
batchRequest.Transactions = append(batchRequest.Transactions, effectivePercentageAsDecodedHex...)
executionStart := time.Now()
- batchResponse, err := f.stateIntf.ProcessBatchV2(ctx, batchRequest, false)
+ batchResponse, contextId, err := f.stateIntf.ProcessBatchV2(ctx, batchRequest, false)
executionTime := time.Since(executionStart)
f.wipL2Block.metrics.transactionsTimes.executor += executionTime
@@ -569,24 +584,27 @@ func (f *finalizer) processTransaction(ctx context.Context, tx *TxTracker, first
oldStateRoot := f.wipBatch.imStateRoot
if len(batchResponse.BlockResponses) > 0 {
- errWg, err = f.handleProcessTransactionResponse(ctx, tx, batchResponse, oldStateRoot)
+ var neededZKCounters state.ZKCounters
+ errWg, err, neededZKCounters = f.handleProcessTransactionResponse(ctx, tx, batchResponse, oldStateRoot)
if err != nil {
return errWg, err
}
- }
- // Update imStateRoot
- f.wipBatch.imStateRoot = batchResponse.NewStateRoot
+ // Update imStateRoot
+ f.wipBatch.imStateRoot = batchResponse.NewStateRoot
- log.Infof("processed tx %s, batchNumber: %d, l2Block: [%d], newStateRoot: %s, oldStateRoot: %s, time: {process: %v, executor: %v}, used counters: %s, reserved counters: %s",
- tx.HashStr, batchRequest.BatchNumber, f.wipL2Block.trackingNum, batchResponse.NewStateRoot.String(), batchRequest.OldStateRoot.String(),
- time.Since(start), executionTime, f.logZKCounters(batchResponse.UsedZkCounters), f.logZKCounters(batchResponse.ReservedZkCounters))
+ log.Infof("processed tx %s, batchNumber: %d, l2Block: [%d], newStateRoot: %s, oldStateRoot: %s, time: {process: %v, executor: %v}, counters: {used: %s, reserved: %s, needed: %s}, contextId: %s",
+ tx.HashStr, batchRequest.BatchNumber, f.wipL2Block.trackingNum, batchResponse.NewStateRoot.String(), batchRequest.OldStateRoot.String(),
+ time.Since(start), executionTime, f.logZKCounters(batchResponse.UsedZkCounters), f.logZKCounters(batchResponse.ReservedZkCounters), f.logZKCounters(neededZKCounters), contextId)
- return nil, nil
+ return nil, nil
+ } else {
+ return nil, fmt.Errorf("error executirn batch %d, batchResponse has returned 0 blockResponses and should return 1", f.wipBatch.batchNumber)
+ }
}
// handleProcessTransactionResponse handles the response of transaction processing.
-func (f *finalizer) handleProcessTransactionResponse(ctx context.Context, tx *TxTracker, result *state.ProcessBatchResponse, oldStateRoot common.Hash) (errWg *sync.WaitGroup, err error) {
+func (f *finalizer) handleProcessTransactionResponse(ctx context.Context, tx *TxTracker, result *state.ProcessBatchResponse, oldStateRoot common.Hash) (errWg *sync.WaitGroup, err error, neededZKCounters state.ZKCounters) {
txResponse := result.BlockResponses[0].TransactionResponses[0]
// Update metrics
@@ -597,7 +615,7 @@ func (f *finalizer) handleProcessTransactionResponse(ctx context.Context, tx *Tx
if !state.IsStateRootChanged(errorCode) {
// If intrinsic error or OOC error, we skip adding the transaction to the batch
errWg = f.handleProcessTransactionError(ctx, result, tx)
- return errWg, txResponse.RomError
+ return errWg, txResponse.RomError, state.ZKCounters{}
}
egpEnabled := f.effectiveGasPrice.IsEnabled()
@@ -612,7 +630,7 @@ func (f *finalizer) handleProcessTransactionResponse(ctx context.Context, tx *Tx
if err != nil {
if egpEnabled {
log.Errorf("failed to calculate effective gas price with new gasUsed for tx %s, error: %v", tx.HashStr, err.Error())
- return nil, err
+ return nil, err, state.ZKCounters{}
} else {
log.Warnf("effectiveGasPrice is disabled, but failed to calculate effective gas price with new gasUsed for tx %s, error: %v", tx.HashStr, err.Error())
tx.EGPLog.Error = fmt.Sprintf("%s; CalculateEffectiveGasPrice#2: %s", tx.EGPLog.Error, err)
@@ -637,28 +655,33 @@ func (f *finalizer) handleProcessTransactionResponse(ctx context.Context, tx *Tx
}
if errCompare != nil && egpEnabled {
- return nil, errCompare
+ return nil, errCompare, state.ZKCounters{}
}
}
}
- // Check if reserved resources of the tx fits in the remaining batch resources
+ // Check if the needed resources of the tx fit in the remaining batch resources
+ // Needed resources are the used resources plus the max difference between used and reserved of all the txs (including this one) in the batch
+ neededZKCounters, newHighZKCounters := getNeededZKCounters(f.wipBatch.imHighReservedZKCounters, result.UsedZkCounters, result.ReservedZkCounters)
subOverflow := false
- fits, overflowResource := f.wipBatch.imRemainingResources.Fits(state.BatchResources{ZKCounters: result.ReservedZkCounters, Bytes: uint64(len(tx.RawTx))})
+ fits, overflowResource := f.wipBatch.imRemainingResources.Fits(state.BatchResources{ZKCounters: neededZKCounters, Bytes: uint64(len(tx.RawTx))})
if fits {
// Subtract the used resources from the batch
subOverflow, overflowResource = f.wipBatch.imRemainingResources.Sub(state.BatchResources{ZKCounters: result.UsedZkCounters, Bytes: uint64(len(tx.RawTx))})
- if subOverflow { // Sanity check, this cannot happen as reservedZKCounters should be >= that usedZKCounters
- sLog := fmt.Sprintf("tx %s used resources exceeds the remaining batch resources, overflow resource: %s, updating metadata for tx in worker and continuing. Batch counters: %s, tx used counters: %s",
- tx.HashStr, overflowResource, f.logZKCounters(f.wipBatch.imRemainingResources.ZKCounters), f.logZKCounters(result.UsedZkCounters))
+ if subOverflow { // Sanity check, this cannot happen as neededZKCounters should be >= usedZKCounters
+ sLog := fmt.Sprintf("tx %s used resources exceeds the remaining batch resources, overflow resource: %s, updating metadata for tx in worker and continuing. counters: {batch: %s, used: %s, reserved: %s, needed: %s, high: %s}",
+ tx.HashStr, overflowResource, f.logZKCounters(f.wipBatch.imRemainingResources.ZKCounters), f.logZKCounters(result.UsedZkCounters), f.logZKCounters(result.ReservedZkCounters), f.logZKCounters(neededZKCounters), f.logZKCounters(f.wipBatch.imHighReservedZKCounters))
log.Errorf(sLog)
f.LogEvent(ctx, event.Level_Error, event.EventID_UsedZKCountersOverflow, sLog, nil)
}
+
+ // Update highReservedZKCounters
+ f.wipBatch.imHighReservedZKCounters = newHighZKCounters
} else {
- log.Infof("current tx %s reserved resources exceeds the remaining batch resources, overflow resource: %s, updating metadata for tx in worker and continuing. Batch counters: %s, tx reserved counters: %s",
- tx.HashStr, overflowResource, f.logZKCounters(f.wipBatch.imRemainingResources.ZKCounters), f.logZKCounters(result.ReservedZkCounters))
+ log.Infof("current tx %s needed resources exceeds the remaining batch resources, overflow resource: %s, updating metadata for tx in worker and continuing. counters: {batch: %s, used: %s, reserved: %s, needed: %s, high: %s}",
+ tx.HashStr, overflowResource, f.logZKCounters(f.wipBatch.imRemainingResources.ZKCounters), f.logZKCounters(result.UsedZkCounters), f.logZKCounters(result.ReservedZkCounters), f.logZKCounters(neededZKCounters), f.logZKCounters(f.wipBatch.imHighReservedZKCounters))
if !f.batchConstraints.IsWithinConstraints(result.ReservedZkCounters) {
log.Infof("current tx %s reserved resources exceeds the max limit for batch resources (node OOC), setting tx as invalid in the pool", tx.HashStr)
@@ -674,15 +697,15 @@ func (f *finalizer) handleProcessTransactionResponse(ctx context.Context, tx *Tx
log.Errorf("failed to update status to invalid in the pool for tx %s, error: %v", tx.Hash.String(), err)
}
- return nil, ErrBatchResourceOverFlow
+ return nil, ErrBatchResourceOverFlow, state.ZKCounters{}
}
}
- // If reserved tx resources don't fit in the remaining batch resources (or we got an overflow when trying to subtract the used resources)
+ // If needed tx resources don't fit in the remaining batch resources (or we got an overflow when trying to subtract the used resources)
// we update the ZKCounters of the tx and returns ErrBatchResourceOverFlow error
if !fits || subOverflow {
f.workerIntf.UpdateTxZKCounters(txResponse.TxHash, tx.From, result.UsedZkCounters, result.ReservedZkCounters)
- return nil, ErrBatchResourceOverFlow
+ return nil, ErrBatchResourceOverFlow, state.ZKCounters{}
}
// Save Enabled, GasPriceOC, BalanceOC and final effective gas price for later logging
@@ -705,7 +728,7 @@ func (f *finalizer) handleProcessTransactionResponse(ctx context.Context, tx *Tx
// Update metrics
f.wipL2Block.metrics.gas += txResponse.GasUsed
- return nil, nil
+ return nil, nil, neededZKCounters
}
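The helper getNeededZKCounters referenced above is introduced elsewhere in this PR and is not shown in this excerpt. Going by the comment, the batch keeps a running "high" watermark (the largest reserved-minus-used gap seen so far), and the counters a tx needs are its used counters plus that, possibly updated, watermark. A minimal sketch of that idea, using a simplified two-field counters struct instead of the real state.ZKCounters, so it is an interpretation rather than the actual implementation:

```go
package main

import "fmt"

// Counters is a simplified stand-in for state.ZKCounters (the real struct has more fields).
type Counters struct {
	GasUsed uint64
	Steps   uint64
}

// getNeededZKCountersSketch illustrates the idea per counter:
// newHigh = max(high, reserved-used) and needed = used + newHigh.
func getNeededZKCountersSketch(high, used, reserved Counters) (needed, newHigh Counters) {
	maxU64 := func(a, b uint64) uint64 {
		if a > b {
			return a
		}
		return b
	}
	newHigh = Counters{
		GasUsed: maxU64(high.GasUsed, reserved.GasUsed-used.GasUsed),
		Steps:   maxU64(high.Steps, reserved.Steps-used.Steps),
	}
	needed = Counters{
		GasUsed: used.GasUsed + newHigh.GasUsed,
		Steps:   used.Steps + newHigh.Steps,
	}
	return needed, newHigh
}

func main() {
	high := Counters{Steps: 100}
	used := Counters{GasUsed: 21000, Steps: 500}
	reserved := Counters{GasUsed: 30000, Steps: 550}
	needed, newHigh := getNeededZKCountersSketch(high, used, reserved)
	fmt.Println(needed, newHigh) // {30000 600} {9000 100}
}
```

Under this reading, needed is always at least used, which is why subtracting the used counters after a successful Fits check on the needed counters should never overflow (the sanity check above).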
// compareTxEffectiveGasPrice compares newEffectiveGasPrice with tx.EffectiveGasPrice.
@@ -753,14 +776,14 @@ func (f *finalizer) compareTxEffectiveGasPrice(ctx context.Context, tx *TxTracke
}
func (f *finalizer) updateWorkerAfterSuccessfulProcessing(ctx context.Context, txHash common.Hash, txFrom common.Address, isForced bool, result *state.ProcessBatchResponse) {
- // Delete the transaction from the worker
+ // Delete the transaction from the worker pool
if isForced {
f.workerIntf.DeleteForcedTx(txHash, txFrom)
- log.Debugf("forced tx %s deleted from address %s", txHash.String(), txFrom.Hex())
+ log.Debugf("forced tx %s deleted from worker, address: %s", txHash.String(), txFrom.Hex())
return
} else {
- f.workerIntf.DeleteTx(txHash, txFrom)
- log.Debugf("tx %s deleted from address %s", txHash.String(), txFrom.Hex())
+ f.workerIntf.MoveTxPendingToStore(txHash, txFrom)
+ log.Debugf("tx %s moved to pending to store in worker, address: %s", txHash.String(), txFrom.Hex())
}
txsToDelete := f.workerIntf.UpdateAfterSingleSuccessfulTxExecution(txFrom, result.ReadWriteAddresses)
@@ -819,7 +842,7 @@ func (f *finalizer) handleProcessTransactionError(ctx context.Context, result *s
} else {
// Delete the transaction from the txSorted list
f.workerIntf.DeleteTx(tx.Hash, tx.From)
- log.Debugf("tx %s deleted from txSorted list", tx.HashStr)
+ log.Debugf("tx %s deleted from worker pool, address: %s", tx.HashStr, tx.From)
wg.Add(1)
go func() {
@@ -859,7 +882,7 @@ func (f *finalizer) logZKCounters(counters state.ZKCounters) string {
func (f *finalizer) Halt(ctx context.Context, err error, isFatal bool) {
f.haltFinalizer.Store(true)
- f.LogEvent(ctx, event.Level_Critical, event.EventID_FinalizerHalt, fmt.Sprintf("finalizer halted due to error, error: %s", err), nil)
+ f.LogEvent(ctx, event.Level_Critical, event.EventID_FinalizerHalt, fmt.Sprintf("finalizer halted due to error: %s", err), nil)
if isFatal {
log.Fatalf("fatal error on finalizer, error: %v", err)
diff --git a/sequencer/finalizer_test.go b/sequencer/finalizer_test.go
index 8e7b5fa9d9..6e54c342f8 100644
--- a/sequencer/finalizer_test.go
+++ b/sequencer/finalizer_test.go
@@ -941,21 +941,8 @@ func TestNewFinalizer(t *testing.T) {
}
}*/
-// TestFinalizer_closeBatch tests the closeBatch method.
-func TestFinalizer_closeWIPBatch(t *testing.T) {
- // arrange
- f = setupFinalizer(true)
- // set wip batch has at least one L2 block as it can not be closed empty
- f.wipBatch.countOfL2Blocks++
-
- usedResources := getUsedBatchResources(f.batchConstraints, f.wipBatch.imRemainingResources)
-
- receipt := state.ProcessingReceipt{
- BatchNumber: f.wipBatch.batchNumber,
- BatchResources: usedResources,
- ClosingReason: f.wipBatch.closingReason,
- }
-
+// TestFinalizer_finalizeSIPBatch tests the finalizeSIPBatch method.
+func TestFinalizer_finalizeSIPBatch(t *testing.T) {
managerErr := fmt.Errorf("some err")
testCases := []struct {
@@ -979,22 +966,39 @@ func TestFinalizer_closeWIPBatch(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
// arrange
- stateMock.Mock.On("CloseWIPBatch", ctx, receipt, mock.Anything).Return(tc.managerErr).Once()
+ f = setupFinalizer(true)
+ // set sip batch so it has at least one L2 block, as it cannot be closed empty
+ f.sipBatch.countOfL2Blocks++
+
+ usedResources := getUsedBatchResources(f.batchConstraints, f.wipBatch.imRemainingResources)
+
+ receipt := state.ProcessingReceipt{
+ BatchNumber: f.wipBatch.batchNumber,
+ BatchResources: usedResources,
+ ClosingReason: f.wipBatch.closingReason,
+ }
+
+ // arrange
stateMock.On("BeginStateTransaction", ctx).Return(dbTxMock, nilErr).Once()
+ stateMock.On("CloseWIPBatch", ctx, receipt, mock.Anything).Return(tc.managerErr).Once()
+
if tc.managerErr == nil {
+ stateMock.On("GetBatchByNumber", ctx, f.sipBatch.batchNumber, nil).Return(&state.Batch{BatchNumber: f.sipBatch.batchNumber}, nilErr).Once()
+ stateMock.On("GetForkIDByBatchNumber", f.wipBatch.batchNumber).Return(uint64(9)).Once()
+ stateMock.On("GetL1InfoTreeDataFromBatchL2Data", ctx, mock.Anything, nil).Return(map[uint32]state.L1DataV2{}, state.ZeroHash, state.ZeroHash, nil)
+ stateMock.On("ProcessBatchV2", ctx, mock.Anything, false).Return(&state.ProcessBatchResponse{}, "", nil)
+ stateMock.On("UpdateBatchAsChecked", ctx, f.sipBatch.batchNumber, nil).Return(nil)
dbTxMock.On("Commit", ctx).Return(nilErr).Once()
} else {
dbTxMock.On("Rollback", ctx).Return(nilErr).Once()
}
// act
- err := f.closeWIPBatch(ctx)
+ err := f.finalizeSIPBatch(ctx)
// assert
if tc.expectedErr != nil {
- assert.Error(t, err)
- assert.EqualError(t, err, tc.expectedErr.Error())
- assert.ErrorIs(t, err, tc.managerErr)
+ assert.ErrorContains(t, err, tc.expectedErr.Error())
} else {
assert.NoError(t, err)
}
@@ -1745,7 +1749,7 @@ func TestFinalizer_updateWorkerAfterSuccessfulProcessing(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
// arrange
finalizerInstance := setupFinalizer(false)
- workerMock.On("DeleteTx", tc.txTracker.Hash, tc.txTracker.From).Times(tc.expectedDeleteTxCount)
+ workerMock.On("MoveTxPendingToStore", tc.txTracker.Hash, tc.txTracker.From).Times(tc.expectedDeleteTxCount)
txsToDelete := make([]*TxTracker, 0, len(tc.processBatchResponse.ReadWriteAddresses))
for _, infoReadWrite := range tc.processBatchResponse.ReadWriteAddresses {
txsToDelete = append(txsToDelete, &TxTracker{
@@ -2037,7 +2041,7 @@ func TestFinalizer_isBatchAlmostFull(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
// arrange
f = setupFinalizer(true)
- maxRemainingResource := getMaxRemainingResources(bc)
+ maxRemainingResource := getMaxBatchResources(bc)
f.wipBatch.imRemainingResources = tc.modifyResourceFunc(maxRemainingResource)
// act
@@ -2098,7 +2102,7 @@ func TestFinalizer_getConstraintThresholdUint32(t *testing.T) {
func TestFinalizer_getRemainingResources(t *testing.T) {
// act
- remainingResources := getMaxRemainingResources(bc)
+ remainingResources := getMaxBatchResources(bc)
// assert
assert.Equal(t, remainingResources.ZKCounters.GasUsed, bc.MaxCumulativeGasUsed)
@@ -2196,7 +2200,7 @@ func setupFinalizer(withWipBatch bool) *finalizer {
initialStateRoot: oldHash,
imStateRoot: newHash,
timestamp: now(),
- imRemainingResources: getMaxRemainingResources(bc),
+ imRemainingResources: getMaxBatchResources(bc),
closingReason: state.EmptyClosingReason,
}
}
@@ -2213,6 +2217,7 @@ func setupFinalizer(withWipBatch bool) *finalizer {
poolIntf: poolMock,
stateIntf: stateMock,
wipBatch: wipBatch,
+ sipBatch: wipBatch,
batchConstraints: bc,
nextForcedBatches: make([]state.ForcedBatch, 0),
nextForcedBatchDeadline: 0,
@@ -2220,9 +2225,9 @@ func setupFinalizer(withWipBatch bool) *finalizer {
effectiveGasPrice: pool.NewEffectiveGasPrice(poolCfg.EffectiveGasPrice),
eventLog: eventLog,
pendingL2BlocksToProcess: make(chan *L2Block, pendingL2BlocksBufferSize),
- pendingL2BlocksToProcessWG: new(sync.WaitGroup),
+ pendingL2BlocksToProcessWG: new(WaitGroupCount),
pendingL2BlocksToStore: make(chan *L2Block, pendingL2BlocksBufferSize),
- pendingL2BlocksToStoreWG: new(sync.WaitGroup),
+ pendingL2BlocksToStoreWG: new(WaitGroupCount),
storedFlushID: 0,
storedFlushIDCond: sync.NewCond(new(sync.Mutex)),
proverID: "",
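setupFinalizer now builds the pending-L2-block wait groups as WaitGroupCount instead of sync.WaitGroup, and the finalizer later calls Count() on them (see the L2 block processing changes below). The type itself is not shown in this excerpt; a plausible minimal implementation is a WaitGroup paired with an atomic counter:

```go
package sequencer

import (
	"sync"
	"sync/atomic"
)

// WaitGroupCount is a sketch of a sync.WaitGroup that also exposes its current counter.
// The actual implementation in this PR may differ.
type WaitGroupCount struct {
	sync.WaitGroup
	count atomic.Int64
}

// Add increments both the WaitGroup and the observable counter.
func (wg *WaitGroupCount) Add(delta int) {
	wg.count.Add(int64(delta))
	wg.WaitGroup.Add(delta)
}

// Done decrements both.
func (wg *WaitGroupCount) Done() {
	wg.count.Add(-1)
	wg.WaitGroup.Done()
}

// Count returns the number of outstanding Done calls.
func (wg *WaitGroupCount) Count() int {
	return int(wg.count.Load())
}
```

Count() is what lets processL2Block and closeWIPL2Block measure the time spent waiting for previous blocks instead of blocking blindly.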
diff --git a/sequencer/forcedbatch.go b/sequencer/forcedbatch.go
index ebe078c1b8..85f74abee1 100644
--- a/sequencer/forcedbatch.go
+++ b/sequencer/forcedbatch.go
@@ -40,15 +40,16 @@ func (f *finalizer) processForcedBatches(ctx context.Context, lastBatchNumber ui
forcedBatchToProcess = *missingForcedBatch
}
+ var contextId string
log.Infof("processing forced batch %d, lastBatchNumber: %d, stateRoot: %s", forcedBatchToProcess.ForcedBatchNumber, lastBatchNumber, stateRoot.String())
- lastBatchNumber, stateRoot, err = f.processForcedBatch(ctx, forcedBatchToProcess, lastBatchNumber, stateRoot)
+ lastBatchNumber, stateRoot, contextId, err = f.processForcedBatch(ctx, forcedBatchToProcess, lastBatchNumber, stateRoot)
if err != nil {
log.Errorf("error when processing forced batch %d, error: %v", forcedBatchToProcess.ForcedBatchNumber, err)
return lastBatchNumber, stateRoot
}
- log.Infof("processed forced batch %d, batchNumber: %d, newStateRoot: %s", forcedBatchToProcess.ForcedBatchNumber, lastBatchNumber, stateRoot.String())
+ log.Infof("processed forced batch %d, batchNumber: %d, newStateRoot: %s, contextId: %s", forcedBatchToProcess.ForcedBatchNumber, lastBatchNumber, stateRoot.String(), contextId)
nextForcedBatchNumber += 1
}
@@ -57,26 +58,26 @@ func (f *finalizer) processForcedBatches(ctx context.Context, lastBatchNumber ui
return lastBatchNumber, stateRoot
}
-func (f *finalizer) processForcedBatch(ctx context.Context, forcedBatch state.ForcedBatch, lastBatchNumber uint64, stateRoot common.Hash) (newLastBatchNumber uint64, newStateRoot common.Hash, retErr error) {
+func (f *finalizer) processForcedBatch(ctx context.Context, forcedBatch state.ForcedBatch, lastBatchNumber uint64, stateRoot common.Hash) (newLastBatchNumber uint64, newStateRoot common.Hash, ctxId string, retErr error) {
dbTx, err := f.stateIntf.BeginStateTransaction(ctx)
if err != nil {
log.Errorf("failed to begin state transaction for process forced batch %d, error: %v", forcedBatch.ForcedBatchNumber, err)
- return lastBatchNumber, stateRoot, err
+ return lastBatchNumber, stateRoot, "", err
}
// Helper function in case we get an error when processing the forced batch
- rollbackOnError := func(retError error) (newLastBatchNumber uint64, newStateRoot common.Hash, retErr error) {
+ rollbackOnError := func(retError error) (newLastBatchNumber uint64, newStateRoot common.Hash, ctxId string, retErr error) {
err := dbTx.Rollback(ctx)
if err != nil {
- return lastBatchNumber, stateRoot, fmt.Errorf("rollback error due to error %v, error: %v", retError, err)
+ return lastBatchNumber, stateRoot, "", fmt.Errorf("rollback error due to error %v, error: %v", retError, err)
}
- return lastBatchNumber, stateRoot, retError
+ return lastBatchNumber, stateRoot, "", retError
}
// Get L1 block for the forced batch
fbL1Block, err := f.stateIntf.GetBlockByNumber(ctx, forcedBatch.BlockNumber, dbTx)
if err != nil {
- return lastBatchNumber, stateRoot, fmt.Errorf("error getting L1 block number %d for forced batch %d, error: %v", forcedBatch.ForcedBatchNumber, forcedBatch.ForcedBatchNumber, err)
+ return lastBatchNumber, stateRoot, "", fmt.Errorf("error getting L1 block number %d for forced batch %d, error: %v", forcedBatch.ForcedBatchNumber, forcedBatch.ForcedBatchNumber, err)
}
newBatchNumber := lastBatchNumber + 1
@@ -107,7 +108,7 @@ func (f *finalizer) processForcedBatch(ctx context.Context, forcedBatch state.Fo
Caller: stateMetrics.DiscardCallerLabel,
}
- batchResponse, err := f.stateIntf.ProcessBatchV2(ctx, batchRequest, true)
+ batchResponse, contextId, err := f.stateIntf.ProcessBatchV2(ctx, batchRequest, true)
if err != nil {
return rollbackOnError(fmt.Errorf("failed to process/execute forced batch %d, error: %v", forcedBatch.ForcedBatchNumber, err))
}
@@ -141,7 +142,7 @@ func (f *finalizer) processForcedBatch(ctx context.Context, forcedBatch state.Fo
return rollbackOnError(fmt.Errorf("error when commit dbTx when processing forced batch %d, error: %v", forcedBatch.ForcedBatchNumber, err))
}
- return newBatchNumber, batchResponse.NewStateRoot, nil
+ return newBatchNumber, batchResponse.NewStateRoot, contextId, nil
}
// addForcedTxToWorker adds the txs of the forced batch to the worker
diff --git a/sequencer/interfaces.go b/sequencer/interfaces.go
index 10c58980ac..c92f502e10 100644
--- a/sequencer/interfaces.go
+++ b/sequencer/interfaces.go
@@ -50,7 +50,7 @@ type stateInterface interface {
GetBalanceByStateRoot(ctx context.Context, address common.Address, root common.Hash) (*big.Int, error)
GetNonceByStateRoot(ctx context.Context, address common.Address, root common.Hash) (*big.Int, error)
GetLastStateRoot(ctx context.Context, dbTx pgx.Tx) (common.Hash, error)
- ProcessBatchV2(ctx context.Context, request state.ProcessRequest, updateMerkleTree bool) (*state.ProcessBatchResponse, error)
+ ProcessBatchV2(ctx context.Context, request state.ProcessRequest, updateMerkleTree bool) (*state.ProcessBatchResponse, string, error)
CloseBatch(ctx context.Context, receipt state.ProcessingReceipt, dbTx pgx.Tx) error
CloseWIPBatch(ctx context.Context, receipt state.ProcessingReceipt, dbTx pgx.Tx) error
GetForcedBatch(ctx context.Context, forcedBatchNumber uint64, dbTx pgx.Tx) (*state.ForcedBatch, error)
@@ -84,15 +84,16 @@ type stateInterface interface {
}
type workerInterface interface {
- GetBestFittingTx(resources state.BatchResources) (*TxTracker, error)
+ GetBestFittingTx(remainingResources state.BatchResources, highReservedCounters state.ZKCounters) (*TxTracker, error)
UpdateAfterSingleSuccessfulTxExecution(from common.Address, touchedAddresses map[common.Address]*state.InfoReadWrite) []*TxTracker
UpdateTxZKCounters(txHash common.Hash, from common.Address, usedZKCounters state.ZKCounters, reservedZKCounters state.ZKCounters)
AddTxTracker(ctx context.Context, txTracker *TxTracker) (replacedTx *TxTracker, dropReason error)
MoveTxToNotReady(txHash common.Hash, from common.Address, actualNonce *uint64, actualBalance *big.Int) []*TxTracker
DeleteTx(txHash common.Hash, from common.Address)
- AddPendingTxToStore(txHash common.Hash, addr common.Address)
- DeletePendingTxToStore(txHash common.Hash, addr common.Address)
+ MoveTxPendingToStore(txHash common.Hash, addr common.Address)
+ DeleteTxPendingToStore(txHash common.Hash, addr common.Address)
NewTxTracker(tx types.Transaction, usedZKcounters state.ZKCounters, reservedZKCouners state.ZKCounters, ip string) (*TxTracker, error)
AddForcedTx(txHash common.Hash, addr common.Address)
DeleteForcedTx(txHash common.Hash, addr common.Address)
+ RestoreTxsPendingToStore(ctx context.Context) ([]*TxTracker, []*TxTracker)
}
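The AddPendingTxToStore/DeletePendingTxToStore pair becomes MoveTxPendingToStore/DeleteTxPendingToStore, reflecting a lifecycle change: a successfully executed tx is no longer deleted from the worker right away, it is moved to a pending-to-store state and only removed once its L2 block has been stored (or the block is discarded). A trimmed sketch of that flow as the finalizer drives it in this diff, with the interface reduced to the two relevant methods and error handling simplified:

```go
package sequencer

import "github.com/ethereum/go-ethereum/common"

// pendingStoreWorker is a trimmed-down view of workerInterface, for illustration only.
type pendingStoreWorker interface {
	MoveTxPendingToStore(txHash common.Hash, addr common.Address)
	DeleteTxPendingToStore(txHash common.Hash, addr common.Address)
}

// txPendingToStoreLifecycle sketches the two moments the finalizer touches the worker
// for a successfully executed tx: right after execution and once its L2 block is stored.
func txPendingToStoreLifecycle(w pendingStoreWorker, txHash common.Hash, from common.Address, storeL2Block func() error) error {
	// After the tx executes successfully, park it as pending-to-store
	// (updateWorkerAfterSuccessfulProcessing in this diff).
	w.MoveTxPendingToStore(txHash, from)

	// Once the L2 block containing the tx is persisted, or the block is discarded,
	// drop it from the pending-to-store set (storeL2Block / addPendingL2BlockToStore).
	err := storeL2Block()
	w.DeleteTxPendingToStore(txHash, from)
	return err
}
```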
diff --git a/sequencer/l2block.go b/sequencer/l2block.go
index ed7ae314f6..b64a6db902 100644
--- a/sequencer/l2block.go
+++ b/sequencer/l2block.go
@@ -24,9 +24,11 @@ type L2Block struct {
l1InfoTreeExitRoot state.L1InfoTreeExitRootStorageEntry
l1InfoTreeExitRootChanged bool
bytes uint64
- usedZKCounters state.ZKCounters
- reservedZKCounters state.ZKCounters
+ usedZKCountersOnNew state.ZKCounters
+ reservedZKCountersOnNew state.ZKCounters
+ highReservedZKCounters state.ZKCounters
transactions []*TxTracker
+ batch *Batch
batchResponse *state.ProcessBatchResponse
metrics metrics
}
@@ -85,10 +87,6 @@ func (f *finalizer) addPendingL2BlockToProcess(ctx context.Context, l2Block *L2B
func (f *finalizer) addPendingL2BlockToStore(ctx context.Context, l2Block *L2Block) {
f.pendingL2BlocksToStoreWG.Add(1)
- for _, tx := range l2Block.transactions {
- f.workerIntf.AddPendingTxToStore(tx.Hash, tx.From)
- }
-
select {
case f.pendingL2BlocksToStore <- l2Block:
case <-ctx.Done():
@@ -96,13 +94,15 @@ func (f *finalizer) addPendingL2BlockToStore(ctx context.Context, l2Block *L2Blo
// delete the pending TxToStore added in the worker
f.pendingL2BlocksToStoreWG.Done()
for _, tx := range l2Block.transactions {
- f.workerIntf.DeletePendingTxToStore(tx.Hash, tx.From)
+ f.workerIntf.DeleteTxPendingToStore(tx.Hash, tx.From)
}
}
}
// processPendingL2Blocks processes (executor) the pending to process L2 blocks
func (f *finalizer) processPendingL2Blocks(ctx context.Context) {
+ //rand.Seed(time.Now().UnixNano())
+
for {
select {
case l2Block, ok := <-f.pendingL2BlocksToProcess:
@@ -111,12 +111,36 @@ func (f *finalizer) processPendingL2Blocks(ctx context.Context) {
return
}
+ // if l2BlockReorg is set, we need to "flush" the channel to discard the pending L2 blocks
+ if f.l2BlockReorg.Load() {
+ f.pendingL2BlocksToProcessWG.Done()
+ continue
+ }
+
err := f.processL2Block(ctx, l2Block)
if err != nil {
+ halt := false
+ if f.lastL2BlockWasReorg {
+ // We had 2 consecutive reorgs in the same L2 block, so we halt after logging/dumping the info
+ halt = true
+ } else {
+ f.l2BlockReorg.Store(true)
+ f.lastL2BlockWasReorg = true
+ }
+
+ warnmsg := fmt.Sprintf("sequencer L2 block [%d] reorg detected, batch: %d, processing it...", l2Block.trackingNum, l2Block.batch.batchNumber)
+ log.Warnf(warnmsg)
+ f.LogEvent(ctx, event.Level_Critical, event.EventID_L2BlockReorg, warnmsg, nil)
+
// Dump L2Block info
f.dumpL2Block(l2Block)
- f.Halt(ctx, fmt.Errorf("error processing L2 block [%d], error: %v", l2Block.trackingNum, err), false)
+
+ if halt {
+ f.Halt(ctx, fmt.Errorf("consecutives L2 block reorgs in the same L2 block [%d]", l2Block.trackingNum), false)
+ }
+ } else {
+ f.lastL2BlockWasReorg = false
}
f.pendingL2BlocksToProcessWG.Done()
@@ -164,13 +188,23 @@ func (f *finalizer) storePendingL2Blocks(ctx context.Context) {
func (f *finalizer) processL2Block(ctx context.Context, l2Block *L2Block) error {
processStart := time.Now()
- initialStateRoot := f.wipBatch.finalStateRoot
+ if f.pipBatch == nil {
+ f.pipBatch = l2Block.batch
+ } else if f.pipBatch.batchNumber != l2Block.batch.batchNumber {
+ // We have received the first L2 block of the next batch to process
+ // We need to "propagate" finalStateRoot to the new batch as initalStateRoot/finalStateRoot and set it as the current pipBatch
+ l2Block.batch.initialStateRoot = f.pipBatch.finalStateRoot
+ l2Block.batch.finalStateRoot = f.pipBatch.finalStateRoot
+ f.pipBatch = l2Block.batch
+ }
+
+ initialStateRoot := f.pipBatch.finalStateRoot
log.Infof("processing L2 block [%d], batch: %d, deltaTimestamp: %d, timestamp: %d, l1InfoTreeIndex: %d, l1InfoTreeIndexChanged: %v, initialStateRoot: %s txs: %d",
- l2Block.trackingNum, f.wipBatch.batchNumber, l2Block.deltaTimestamp, l2Block.timestamp, l2Block.l1InfoTreeExitRoot.L1InfoTreeIndex,
+ l2Block.trackingNum, l2Block.batch.batchNumber, l2Block.deltaTimestamp, l2Block.timestamp, l2Block.l1InfoTreeExitRoot.L1InfoTreeIndex,
l2Block.l1InfoTreeExitRootChanged, initialStateRoot, len(l2Block.transactions))
- batchResponse, batchL2DataSize, err := f.executeL2Block(ctx, initialStateRoot, l2Block)
+ batchResponse, batchL2DataSize, contextId, err := f.executeL2Block(ctx, initialStateRoot, l2Block)
if err != nil {
return fmt.Errorf("failed to execute L2 block [%d], error: %v", l2Block.trackingNum, err)
@@ -199,39 +233,55 @@ func (f *finalizer) processL2Block(ctx context.Context, l2Block *L2Block) error
l2Block.batchResponse = batchResponse
+ // Check if the needed resources of the L2 block fit in the remaining batch resources
+ // Needed resources are the used resources plus the max difference between used and reserved of all the L2 blocks (including this one) in the batch
+ neededZKCounters, newHighZKCounters := getNeededZKCounters(l2Block.batch.finalHighReservedZKCounters, batchResponse.UsedZkCounters, batchResponse.ReservedZkCounters)
+
// Update finalRemainingResources of the batch
- fits, overflowResource := f.wipBatch.finalRemainingResources.Fits(state.BatchResources{ZKCounters: batchResponse.ReservedZkCounters, Bytes: batchL2DataSize})
+ fits, overflowResource := l2Block.batch.finalRemainingResources.Fits(state.BatchResources{ZKCounters: neededZKCounters, Bytes: batchL2DataSize})
if fits {
- subOverflow, overflowResource := f.wipBatch.finalRemainingResources.Sub(state.BatchResources{ZKCounters: batchResponse.UsedZkCounters, Bytes: batchL2DataSize})
+ subOverflow, overflowResource := l2Block.batch.finalRemainingResources.Sub(state.BatchResources{ZKCounters: batchResponse.UsedZkCounters, Bytes: batchL2DataSize})
if subOverflow { // Sanity check, this cannot happen as reservedZKCounters should be >= that usedZKCounters
- return fmt.Errorf("error subtracting L2 block %d [%d] used resources from the batch %d, overflow resource: %s, batch counters: %s, L2 block used counters: %s, batch bytes: %d, L2 block bytes: %d",
- blockResponse.BlockNumber, l2Block.trackingNum, f.wipBatch.batchNumber, overflowResource, f.logZKCounters(f.wipBatch.finalRemainingResources.ZKCounters), f.logZKCounters(batchResponse.UsedZkCounters), f.wipBatch.finalRemainingResources.Bytes, batchL2DataSize)
+ return fmt.Errorf("error subtracting L2 block %d [%d] needed resources from the batch %d, overflow resource: %s, batch bytes: %d, L2 block bytes: %d, counters: {batch: %s, used: %s, reserved: %s, needed: %s, high: %s}",
+ blockResponse.BlockNumber, l2Block.trackingNum, l2Block.batch.batchNumber, overflowResource, l2Block.batch.finalRemainingResources.Bytes, batchL2DataSize,
+ f.logZKCounters(l2Block.batch.finalRemainingResources.ZKCounters), f.logZKCounters(batchResponse.UsedZkCounters), f.logZKCounters(batchResponse.ReservedZkCounters), f.logZKCounters(neededZKCounters), f.logZKCounters(l2Block.batch.imHighReservedZKCounters))
}
- } else {
- overflowLog := fmt.Sprintf("L2 block %d [%d] reserved resources exceeds the remaining batch %d resources, overflow resource: %s, batch counters: %s, L2 block reserved counters: %s, batch bytes: %d, L2 block bytes: %d",
- blockResponse.BlockNumber, l2Block.trackingNum, f.wipBatch.batchNumber, overflowResource, f.logZKCounters(f.wipBatch.finalRemainingResources.ZKCounters), f.logZKCounters(batchResponse.ReservedZkCounters), f.wipBatch.finalRemainingResources.Bytes, batchL2DataSize)
- log.Warnf(overflowLog)
+ l2Block.batch.finalHighReservedZKCounters = newHighZKCounters
+ l2Block.highReservedZKCounters = l2Block.batch.finalHighReservedZKCounters
+ } else {
+ overflowLog := fmt.Sprintf("L2 block %d [%d] needed resources exceeds the remaining batch %d resources, overflow resource: %s, batch bytes: %d, L2 block bytes: %d, counters: {batch: %s, used: %s, reserved: %s, needed: %s, high: %s}",
+ blockResponse.BlockNumber, l2Block.trackingNum, l2Block.batch.batchNumber, overflowResource, l2Block.batch.finalRemainingResources.Bytes, batchL2DataSize,
+ f.logZKCounters(l2Block.batch.finalRemainingResources.ZKCounters), f.logZKCounters(batchResponse.UsedZkCounters), f.logZKCounters(batchResponse.ReservedZkCounters), f.logZKCounters(neededZKCounters), f.logZKCounters(l2Block.batch.imHighReservedZKCounters))
f.LogEvent(ctx, event.Level_Warning, event.EventID_ReservedZKCountersOverflow, overflowLog, nil)
+
+ return fmt.Errorf("%s", overflowLog)
}
// Update finalStateRoot of the batch to the newStateRoot for the L2 block
- f.wipBatch.finalStateRoot = l2Block.batchResponse.NewStateRoot
+ l2Block.batch.finalStateRoot = l2Block.batchResponse.NewStateRoot
f.updateFlushIDs(batchResponse.FlushID, batchResponse.StoredFlushID)
+ if f.pendingL2BlocksToStoreWG.Count() > 0 {
+ startWait := time.Now()
+ f.pendingL2BlocksToStoreWG.Wait()
+ log.Debugf("waiting for previous L2 block to be stored took: %v", time.Since(startWait))
+ }
f.addPendingL2BlockToStore(ctx, l2Block)
// metrics
l2Block.metrics.l2BlockTimes.sequencer = time.Since(processStart) - l2Block.metrics.l2BlockTimes.executor
- l2Block.metrics.close(l2Block.createdAt, int64(len(l2Block.transactions)))
+ if f.cfg.SequentialProcessL2Block {
+ l2Block.metrics.close(l2Block.createdAt, int64(len(l2Block.transactions)), f.cfg.SequentialProcessL2Block)
+ }
f.metrics.addL2BlockMetrics(l2Block.metrics)
- log.Infof("processed L2 block %d [%d], batch: %d, deltaTimestamp: %d, timestamp: %d, l1InfoTreeIndex: %d, l1InfoTreeIndexChanged: %v, initialStateRoot: %s, newStateRoot: %s, txs: %d/%d, blockHash: %s, infoRoot: %s, used counters: %s, reserved counters: %s",
- blockResponse.BlockNumber, l2Block.trackingNum, f.wipBatch.batchNumber, l2Block.deltaTimestamp, l2Block.timestamp, l2Block.l1InfoTreeExitRoot.L1InfoTreeIndex, l2Block.l1InfoTreeExitRootChanged, initialStateRoot, l2Block.batchResponse.NewStateRoot,
+ log.Infof("processed L2 block %d [%d], batch: %d, deltaTimestamp: %d, timestamp: %d, l1InfoTreeIndex: %d, l1InfoTreeIndexChanged: %v, initialStateRoot: %s, newStateRoot: %s, txs: %d/%d, blockHash: %s, infoRoot: %s, counters: {used: %s, reserved: %s, needed: %s, high: %s}, contextId: %s",
+ blockResponse.BlockNumber, l2Block.trackingNum, l2Block.batch.batchNumber, l2Block.deltaTimestamp, l2Block.timestamp, l2Block.l1InfoTreeExitRoot.L1InfoTreeIndex, l2Block.l1InfoTreeExitRootChanged, initialStateRoot, l2Block.batchResponse.NewStateRoot,
len(l2Block.transactions), len(blockResponse.TransactionResponses), blockResponse.BlockHash, blockResponse.BlockInfoRoot,
- f.logZKCounters(batchResponse.UsedZkCounters), f.logZKCounters(batchResponse.ReservedZkCounters))
+ f.logZKCounters(batchResponse.UsedZkCounters), f.logZKCounters(batchResponse.ReservedZkCounters), f.logZKCounters(neededZKCounters), f.logZKCounters(l2Block.batch.finalHighReservedZKCounters), contextId)
if f.cfg.Metrics.EnableLog {
log.Infof("metrics-log: {l2block: {num: %d, trackingNum: %d, metrics: {%s}}, interval: {startAt: %d, metrics: {%s}}}",
@@ -242,12 +292,12 @@ func (f *finalizer) processL2Block(ctx context.Context, l2Block *L2Block) error
}
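After this change the finalizer can be tracking up to three batches at once: wipBatch is still the one being filled tx by tx, pipBatch is the batch of the L2 block currently going through processL2Block, and sipBatch is the one whose L2 blocks are being written to the state in storeL2Block. A schematic sketch of those roles; the field comments are an interpretation of this diff, not doc comments taken from the code:

```go
// Schematic only: the real fields live on the finalizer struct defined elsewhere in this PR.
type batchPipelineView struct {
	wipBatch *Batch // work in progress: receives new txs and wip L2 blocks
	pipBatch *Batch // processing in progress: batch of the L2 block inside processL2Block
	sipBatch *Batch // storing in progress: batch currently persisted/closed by storeL2Block
}
```

When the first L2 block of a new batch reaches a stage, that stage's pointer advances: processL2Block propagates the previous batch's finalStateRoot into the new pipBatch, and storeL2Block closes the previous sipBatch and inserts the new one in the statedb.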
// executeL2Block executes a L2 Block in the executor and returns the batch response from the executor and the batchL2Data size
-func (f *finalizer) executeL2Block(ctx context.Context, initialStateRoot common.Hash, l2Block *L2Block) (*state.ProcessBatchResponse, uint64, error) {
+func (f *finalizer) executeL2Block(ctx context.Context, initialStateRoot common.Hash, l2Block *L2Block) (*state.ProcessBatchResponse, uint64, string, error) {
executeL2BLockError := func(err error) {
- log.Errorf("execute L2 block [%d] error %v, batch: %d, initialStateRoot: %s", l2Block.trackingNum, err, f.wipBatch.batchNumber, initialStateRoot)
+ log.Errorf("execute L2 block [%d] error %v, batch: %d, initialStateRoot: %s", l2Block.trackingNum, err, l2Block.batch.batchNumber, initialStateRoot)
// Log batch detailed info
for i, tx := range l2Block.transactions {
- log.Infof("batch: %d, block: [%d], tx position: %d, tx hash: %s", f.wipBatch.batchNumber, l2Block.trackingNum, i, tx.HashStr)
+ log.Infof("batch: %d, block: [%d], tx position: %d, tx hash: %s", l2Block.batch.batchNumber, l2Block.trackingNum, i, tx.HashStr)
}
}
@@ -262,7 +312,7 @@ func (f *finalizer) executeL2Block(ctx context.Context, initialStateRoot common.
epHex, err := hex.DecodeHex(fmt.Sprintf("%x", tx.EGPPercentage))
if err != nil {
log.Errorf("error decoding hex value for effective gas price percentage for tx %s, error: %v", tx.HashStr, err)
- return nil, 0, err
+ return nil, 0, "", err
}
txData := append(tx.RawTx, epHex...)
@@ -271,16 +321,16 @@ func (f *finalizer) executeL2Block(ctx context.Context, initialStateRoot common.
}
batchRequest := state.ProcessRequest{
- BatchNumber: f.wipBatch.batchNumber,
+ BatchNumber: l2Block.batch.batchNumber,
OldStateRoot: initialStateRoot,
- Coinbase: f.wipBatch.coinbase,
+ Coinbase: l2Block.batch.coinbase,
L1InfoRoot_V2: state.GetMockL1InfoRoot(),
TimestampLimit_V2: l2Block.timestamp,
Transactions: batchL2Data,
SkipFirstChangeL2Block_V2: false,
SkipWriteBlockInfoRoot_V2: false,
Caller: stateMetrics.DiscardCallerLabel,
- ForkID: f.stateIntf.GetForkIDByBatchNumber(f.wipBatch.batchNumber),
+ ForkID: f.stateIntf.GetForkIDByBatchNumber(l2Block.batch.batchNumber),
SkipVerifyL1InfoRoot_V2: true,
L1InfoTreeData_V2: map[uint32]state.L1DataV2{},
}
@@ -290,31 +340,26 @@ func (f *finalizer) executeL2Block(ctx context.Context, initialStateRoot common.
MinTimestamp: uint64(l2Block.l1InfoTreeExitRoot.GlobalExitRoot.Timestamp.Unix()),
}
- var (
- err error
- batchResponse *state.ProcessBatchResponse
- )
-
executionStart := time.Now()
- batchResponse, err = f.stateIntf.ProcessBatchV2(ctx, batchRequest, true)
+ batchResponse, contextId, err := f.stateIntf.ProcessBatchV2(ctx, batchRequest, true)
l2Block.metrics.l2BlockTimes.executor = time.Since(executionStart)
if err != nil {
executeL2BLockError(err)
- return nil, 0, err
+ return nil, 0, contextId, err
}
if batchResponse.ExecutorError != nil {
executeL2BLockError(batchResponse.ExecutorError)
- return nil, 0, ErrExecutorError
+ return nil, 0, contextId, ErrExecutorError
}
if batchResponse.IsRomOOCError {
executeL2BLockError(batchResponse.RomError_V2)
- return nil, 0, ErrProcessBatchOOC
+ return nil, 0, contextId, ErrProcessBatchOOC
}
- return batchResponse, uint64(len(batchL2Data)), nil
+ return batchResponse, uint64(len(batchL2Data)), contextId, nil
}
// storeL2Block stores the L2 block in the state and updates the related batch and transactions
@@ -331,7 +376,7 @@ func (f *finalizer) storeL2Block(ctx context.Context, l2Block *L2Block) error {
// If the L2 block has txs now f.storedFlushID >= l2BlockToStore.flushId, we can store tx
blockResponse := l2Block.batchResponse.BlockResponses[0]
log.Infof("storing L2 block %d [%d], batch: %d, deltaTimestamp: %d, timestamp: %d, l1InfoTreeIndex: %d, l1InfoTreeIndexChanged: %v, txs: %d/%d, blockHash: %s, infoRoot: %s",
- blockResponse.BlockNumber, l2Block.trackingNum, f.wipBatch.batchNumber, l2Block.deltaTimestamp, l2Block.timestamp, l2Block.l1InfoTreeExitRoot.L1InfoTreeIndex,
+ blockResponse.BlockNumber, l2Block.trackingNum, l2Block.batch.batchNumber, l2Block.deltaTimestamp, l2Block.timestamp, l2Block.l1InfoTreeExitRoot.L1InfoTreeIndex,
l2Block.l1InfoTreeExitRootChanged, len(l2Block.transactions), len(blockResponse.TransactionResponses), blockResponse.BlockHash, blockResponse.BlockInfoRoot.String())
dbTx, err := f.stateIntf.BeginStateTransaction(ctx)
@@ -347,7 +392,24 @@ func (f *finalizer) storeL2Block(ctx context.Context, l2Block *L2Block) error {
return retError
}
- forkID := f.stateIntf.GetForkIDByBatchNumber(f.wipBatch.batchNumber)
+ if (f.sipBatch == nil) || (f.sipBatch.batchNumber != l2Block.batch.batchNumber) {
+ // We have l2 blocks to store from a new batch, therefore we insert this new batch in the statedb
+ // First we need to close the current sipBatch
+ if f.sipBatch != nil {
+ err := f.closeSIPBatch(ctx, dbTx)
+ if err != nil {
+ return rollbackOnError(fmt.Errorf("error when closing sip batch %d, initialStateRoot: %s, error: %v", f.sipBatch.batchNumber, f.sipBatch.initialStateRoot, err))
+ }
+ }
+ // We insert the new SIP batch in the statedb
+ err := f.insertSIPBatch(ctx, l2Block.batch.batchNumber, l2Block.batch.initialStateRoot, dbTx)
+ if err != nil {
+ return rollbackOnError(fmt.Errorf("error when inserting new sip batch %d, initialStateRoot: %s, error: %v", l2Block.batch.batchNumber, l2Block.batch.initialStateRoot, err))
+ }
+ f.sipBatch = l2Block.batch
+ }
+
+ forkID := f.stateIntf.GetForkIDByBatchNumber(l2Block.batch.batchNumber)
txsEGPLog := []*state.EffectiveGasPriceLog{}
for _, tx := range l2Block.transactions {
@@ -356,16 +418,16 @@ func (f *finalizer) storeL2Block(ctx context.Context, l2Block *L2Block) error {
}
// Store L2 block in the state
- err = f.stateIntf.StoreL2Block(ctx, f.wipBatch.batchNumber, blockResponse, txsEGPLog, dbTx)
+ err = f.stateIntf.StoreL2Block(ctx, l2Block.batch.batchNumber, blockResponse, txsEGPLog, dbTx)
if err != nil {
return rollbackOnError(fmt.Errorf("database error on storing L2 block %d [%d], error: %v", blockResponse.BlockNumber, l2Block.trackingNum, err))
}
// Now we need to update de BatchL2Data of the wip batch and also update the status of the L2 block txs in the pool
- batch, err := f.stateIntf.GetBatchByNumber(ctx, f.wipBatch.batchNumber, dbTx)
+ batch, err := f.stateIntf.GetBatchByNumber(ctx, l2Block.batch.batchNumber, dbTx)
if err != nil {
- return rollbackOnError(fmt.Errorf("error when getting batch %d from the state, error: %v", f.wipBatch.batchNumber, err))
+ return rollbackOnError(fmt.Errorf("error when getting batch %d from the state, error: %v", l2Block.batch.batchNumber, err))
}
// Add changeL2Block to batch.BatchL2Data
@@ -384,13 +446,15 @@ func (f *finalizer) storeL2Block(ctx context.Context, l2Block *L2Block) error {
batch.BatchL2Data = append(batch.BatchL2Data, blockL2Data...)
batch.Resources.SumUp(state.BatchResources{ZKCounters: l2Block.batchResponse.UsedZkCounters, Bytes: uint64(len(blockL2Data))})
+ batch.HighReservedZKCounters = l2Block.highReservedZKCounters
receipt := state.ProcessingReceipt{
- BatchNumber: f.wipBatch.batchNumber,
- StateRoot: l2Block.batchResponse.NewStateRoot,
- LocalExitRoot: l2Block.batchResponse.NewLocalExitRoot,
- BatchL2Data: batch.BatchL2Data,
- BatchResources: batch.Resources,
+ BatchNumber: l2Block.batch.batchNumber,
+ StateRoot: l2Block.batchResponse.NewStateRoot,
+ LocalExitRoot: l2Block.batchResponse.NewLocalExitRoot,
+ BatchL2Data: batch.BatchL2Data,
+ BatchResources: batch.Resources,
+ HighReservedZKCounters: batch.HighReservedZKCounters,
}
// We need to update the batch GER only in the GER of the block (response) is not zero, since the final GER stored in the batch
@@ -403,7 +467,7 @@ func (f *finalizer) storeL2Block(ctx context.Context, l2Block *L2Block) error {
err = f.stateIntf.UpdateWIPBatch(ctx, receipt, dbTx)
if err != nil {
- return rollbackOnError(fmt.Errorf("error when updating wip batch %d, error: %v", f.wipBatch.batchNumber, err))
+ return rollbackOnError(fmt.Errorf("error when updating wip batch %d, error: %v", l2Block.batch.batchNumber, err))
}
err = dbTx.Commit(ctx)
@@ -411,8 +475,8 @@ func (f *finalizer) storeL2Block(ctx context.Context, l2Block *L2Block) error {
return err
}
- //TODO: remove this log
- log.Infof("l2 block %d [%d] stored in statedb", blockResponse.BlockNumber, l2Block.trackingNum)
+ //TODO: remove this Log
+ log.Infof("[ds-debug] l2 block %d [%d] stored in statedb", blockResponse.BlockNumber, l2Block.trackingNum)
// Update txs status in the pool
for _, txResponse := range blockResponse.TransactionResponses {
@@ -424,7 +488,7 @@ func (f *finalizer) storeL2Block(ctx context.Context, l2Block *L2Block) error {
}
//TODO: remove this log
- log.Infof("l2 block %d [%d] transactions updated as selected in the pooldb", blockResponse.BlockNumber, l2Block.trackingNum)
+ log.Infof("[ds-debug] l2 block %d [%d] transactions updated as selected in the pooldb", blockResponse.BlockNumber, l2Block.trackingNum)
// Send L2 block to data streamer
err = f.DSSendL2Block(f.wipBatch.batchNumber, blockResponse, l2Block.getL1InfoTreeIndex())
@@ -434,17 +498,17 @@ func (f *finalizer) storeL2Block(ctx context.Context, l2Block *L2Block) error {
}
//TODO: remove this log
- log.Infof("l2 block %d [%d] sent to datastream", blockResponse.BlockNumber, l2Block.trackingNum)
+ log.Infof("[ds-debug] l2 block %d [%d] sent to datastream", blockResponse.BlockNumber, l2Block.trackingNum)
for _, tx := range l2Block.transactions {
// Delete the tx from the pending list in the worker (addrQueue)
- f.workerIntf.DeletePendingTxToStore(tx.Hash, tx.From)
+ f.workerIntf.DeleteTxPendingToStore(tx.Hash, tx.From)
}
endStoring := time.Now()
log.Infof("stored L2 block %d [%d], batch: %d, deltaTimestamp: %d, timestamp: %d, l1InfoTreeIndex: %d, l1InfoTreeIndexChanged: %v, txs: %d/%d, blockHash: %s, infoRoot: %s, time: %v",
- blockResponse.BlockNumber, l2Block.trackingNum, f.wipBatch.batchNumber, l2Block.deltaTimestamp, l2Block.timestamp, l2Block.l1InfoTreeExitRoot.L1InfoTreeIndex,
+ blockResponse.BlockNumber, l2Block.trackingNum, l2Block.batch.batchNumber, l2Block.deltaTimestamp, l2Block.timestamp, l2Block.l1InfoTreeExitRoot.L1InfoTreeIndex,
l2Block.l1InfoTreeExitRootChanged, len(l2Block.transactions), len(blockResponse.TransactionResponses), blockResponse.BlockHash, blockResponse.BlockInfoRoot.String(), endStoring.Sub(startStoring))
return nil
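The ProcessingReceipt gains a HighReservedZKCounters field, so the batch's high watermark is persisted together with its other resources and can be reused later. Assuming the counters are serialized as JSON when stored alongside the batch (the storage details live in the state layer, outside this excerpt), a simplified round-trip with a hypothetical two-field struct would look like:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// highReservedCounters is a hypothetical, simplified stand-in for the persisted counters.
type highReservedCounters struct {
	GasUsed uint64 `json:"GasUsed,omitempty"`
	Steps   uint64 `json:"Steps,omitempty"`
}

func main() {
	in := highReservedCounters{Steps: 123456}

	// Serialize as it might be written next to the batch.
	raw, err := json.Marshal(in)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(raw)) // {"Steps":123456}

	// Deserialize when the batch is reloaded.
	var out highReservedCounters
	if err := json.Unmarshal(raw, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Steps) // 123456
}
```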
@@ -452,7 +516,7 @@ func (f *finalizer) storeL2Block(ctx context.Context, l2Block *L2Block) error {
// finalizeWIPL2Block closes the wip L2 block and opens a new one
func (f *finalizer) finalizeWIPL2Block(ctx context.Context) {
- log.Debugf("finalizing WIP L2 block [%d]", f.wipL2Block.trackingNum)
+ log.Debugf("finalizing wip L2 block [%d]", f.wipL2Block.trackingNum)
prevTimestamp := f.wipL2Block.timestamp
prevL1InfoTreeIndex := f.wipL2Block.l1InfoTreeExitRoot.L1InfoTreeIndex
@@ -464,7 +528,7 @@ func (f *finalizer) finalizeWIPL2Block(ctx context.Context) {
// closeWIPL2Block closes the wip L2 block
func (f *finalizer) closeWIPL2Block(ctx context.Context) {
- log.Debugf("closing WIP L2 block [%d]", f.wipL2Block.trackingNum)
+ log.Debugf("closing wip L2 block [%d]", f.wipL2Block.trackingNum)
f.wipBatch.countOfL2Blocks++
@@ -478,7 +542,46 @@ func (f *finalizer) closeWIPL2Block(ctx context.Context) {
// We update imStateRoot (used in tx-by-tx execution) to the finalStateRoot that has been updated after process the WIP L2 Block
f.wipBatch.imStateRoot = f.wipBatch.finalStateRoot
} else {
+ if f.pendingL2BlocksToProcessWG.Count() > 0 {
+ startWait := time.Now()
+ f.pendingL2BlocksToProcessWG.Wait()
+ waitTime := time.Since(startWait)
+ log.Debugf("waiting for previous L2 block to be processed took: %v", waitTime)
+ f.wipL2Block.metrics.waitl2BlockTime = waitTime
+ }
+
f.addPendingL2BlockToProcess(ctx, f.wipL2Block)
+
+ f.wipL2Block.metrics.close(f.wipL2Block.createdAt, int64(len(f.wipL2Block.transactions)), f.cfg.SequentialProcessL2Block)
+
+ l2BlockResourcesUsed := state.BatchResources{}
+ l2BlockResourcesReserved := state.BatchResources{}
+
+ for _, tx := range f.wipL2Block.transactions {
+ l2BlockResourcesUsed.ZKCounters.SumUp(tx.UsedZKCounters)
+ l2BlockResourcesReserved.ZKCounters.SumUp(tx.ReservedZKCounters)
+ }
+ l2BlockResourcesUsed.ZKCounters.SumUp(f.wipL2Block.usedZKCountersOnNew)
+ l2BlockResourcesReserved.ZKCounters.SumUp(f.wipL2Block.reservedZKCountersOnNew)
+
+ log.Infof("closed wip L2 block [%d], batch: %d, deltaTimestamp: %d, timestamp: %d, l1InfoTreeIndex: %d, l1InfoTreeIndexChanged: %v, txs: %d, used counters: %s, reserved counters: %s",
+ f.wipL2Block.trackingNum, f.wipL2Block.batch.batchNumber, f.wipL2Block.deltaTimestamp, f.wipL2Block.timestamp, f.wipL2Block.l1InfoTreeExitRoot.L1InfoTreeIndex,
+ f.wipL2Block.l1InfoTreeExitRootChanged, len(f.wipL2Block.transactions), f.logZKCounters(l2BlockResourcesUsed.ZKCounters), f.logZKCounters(l2BlockResourcesReserved.ZKCounters))
+
+ if f.nextStateRootSync.Before(time.Now()) {
+ log.Debug("sync stateroot time reached")
+ f.waitPendingL2Blocks()
+
+ // Sanity check: at this point f.sipBatch should be the same as the batch of the last processed L2 block
+ // (unless we just had an L2 block reorg in the last block and it's the first one of the wipBatch)
+ if f.wipBatch.batchNumber != f.sipBatch.batchNumber && !(f.l2BlockReorg.Load() && f.wipBatch.countOfL2Blocks <= 2) {
+ f.Halt(ctx, fmt.Errorf("wipBatch %d doesn't match sipBatch %d after all pending L2 blocks has been processed/stored", f.wipBatch.batchNumber, f.sipBatch.batchNumber), false)
+ }
+
+ f.wipBatch.imStateRoot = f.wipBatch.finalStateRoot
+ f.scheduleNextStateRootSync()
+ log.Infof("stateroot synced on L2 block [%d] to %s, next sync at %v", f.wipL2Block.trackingNum, f.wipBatch.imStateRoot, f.nextStateRootSync)
+ }
}
f.wipL2Block = nil
@@ -489,15 +592,15 @@ func (f *finalizer) openNewWIPL2Block(ctx context.Context, prevTimestamp uint64,
processStart := time.Now()
newL2Block := &L2Block{}
- newL2Block.createdAt = time.Now()
+ now := time.Now()
+ newL2Block.createdAt = now
+ newL2Block.deltaTimestamp = uint32(uint64(now.Unix()) - prevTimestamp)
+ newL2Block.timestamp = prevTimestamp + uint64(newL2Block.deltaTimestamp)
// Tracking number
f.l2BlockCounter++
newL2Block.trackingNum = f.l2BlockCounter
- newL2Block.deltaTimestamp = uint32(uint64(now().Unix()) - prevTimestamp)
- newL2Block.timestamp = prevTimestamp + uint64(newL2Block.deltaTimestamp)
-
newL2Block.transactions = []*TxTracker{}
f.lastL1InfoTreeMux.Lock()
@@ -521,13 +624,13 @@ func (f *finalizer) openNewWIPL2Block(ctx context.Context, prevTimestamp uint64,
f.wipL2Block = newL2Block
- log.Debugf("creating new WIP L2 block [%d], batch: %d, deltaTimestamp: %d, timestamp: %d, l1InfoTreeIndex: %d, l1InfoTreeIndexChanged: %v",
+ log.Debugf("creating new wip L2 block [%d], batch: %d, deltaTimestamp: %d, timestamp: %d, l1InfoTreeIndex: %d, l1InfoTreeIndexChanged: %v",
f.wipL2Block.trackingNum, f.wipBatch.batchNumber, f.wipL2Block.deltaTimestamp, f.wipL2Block.timestamp, f.wipL2Block.l1InfoTreeExitRoot.L1InfoTreeIndex, f.wipL2Block.l1InfoTreeExitRootChanged)
// We process (execute) the new wip L2 block to update the imStateRoot and also get the counters used by the wip l2block
- batchResponse, err := f.executeNewWIPL2Block(ctx)
+ batchResponse, contextId, err := f.executeNewWIPL2Block(ctx)
if err != nil {
- f.Halt(ctx, fmt.Errorf("failed to execute new WIP L2 block [%d], error: %v ", f.wipL2Block.trackingNum, err), false)
+ f.Halt(ctx, fmt.Errorf("failed to execute new wip L2 block [%d], error: %v ", f.wipL2Block.trackingNum, err), false)
}
if len(batchResponse.BlockResponses) != 1 {
@@ -541,23 +644,28 @@ func (f *finalizer) openNewWIPL2Block(ctx context.Context, prevTimestamp uint64,
// Save the resources used/reserved and subtract the ZKCounters reserved by the new WIP L2 block from the WIP batch
// We need to increase the poseidon hashes to reserve in the batch the hashes needed to write the L1InfoRoot when processing the final L2 Block (SkipWriteBlockInfoRoot_V2=false)
- f.wipL2Block.usedZKCounters = batchResponse.UsedZkCounters
- f.wipL2Block.usedZKCounters.PoseidonHashes = (batchResponse.UsedZkCounters.PoseidonHashes * 2) + 2 // nolint:gomnd
- f.wipL2Block.reservedZKCounters = batchResponse.ReservedZkCounters
- f.wipL2Block.reservedZKCounters.PoseidonHashes = (batchResponse.ReservedZkCounters.PoseidonHashes * 2) + 2 // nolint:gomnd
+ f.wipL2Block.usedZKCountersOnNew = batchResponse.UsedZkCounters
+ f.wipL2Block.usedZKCountersOnNew.PoseidonHashes = (batchResponse.UsedZkCounters.PoseidonHashes * 2) + 2 // nolint:gomnd
+ f.wipL2Block.reservedZKCountersOnNew = batchResponse.ReservedZkCounters
+ f.wipL2Block.reservedZKCountersOnNew.PoseidonHashes = (batchResponse.ReservedZkCounters.PoseidonHashes * 2) + 2 // nolint:gomnd
f.wipL2Block.bytes = changeL2BlockSize
+ neededZKCounters, newHighZKCounters := getNeededZKCounters(f.wipBatch.imHighReservedZKCounters, f.wipL2Block.usedZKCountersOnNew, f.wipL2Block.reservedZKCountersOnNew)
subOverflow := false
- fits, overflowResource := f.wipBatch.imRemainingResources.Fits(state.BatchResources{ZKCounters: f.wipL2Block.reservedZKCounters, Bytes: f.wipL2Block.bytes})
+ fits, overflowResource := f.wipBatch.imRemainingResources.Fits(state.BatchResources{ZKCounters: neededZKCounters, Bytes: f.wipL2Block.bytes})
if fits {
- subOverflow, overflowResource = f.wipBatch.imRemainingResources.Sub(state.BatchResources{ZKCounters: f.wipL2Block.usedZKCounters, Bytes: f.wipL2Block.bytes})
+ subOverflow, overflowResource = f.wipBatch.imRemainingResources.Sub(state.BatchResources{ZKCounters: f.wipL2Block.usedZKCountersOnNew, Bytes: f.wipL2Block.bytes})
if subOverflow { // Sanity check, this cannot happen as reservedZKCounters should be >= that usedZKCounters
- log.Infof("new WIP L2 block [%d] used resources exceeds the remaining batch resources, overflow resource: %s, closing WIP batch and creating new one. Batch counters: %s, L2 block used counters: %s",
- f.wipL2Block.trackingNum, overflowResource, f.logZKCounters(f.wipBatch.imRemainingResources.ZKCounters), f.logZKCounters(f.wipL2Block.usedZKCounters))
+ log.Infof("new wip L2 block [%d] used resources exceeds the remaining batch resources, overflow resource: %s, closing WIP batch and creating new one. counters: {batch: %s, used: %s, reserved: %s, needed: %s, high: %s}",
+ f.wipL2Block.trackingNum, overflowResource,
+ f.logZKCounters(f.wipBatch.imRemainingResources.ZKCounters), f.logZKCounters(f.wipL2Block.usedZKCountersOnNew), f.logZKCounters(f.wipL2Block.reservedZKCountersOnNew), f.logZKCounters(neededZKCounters), f.logZKCounters(f.wipBatch.imHighReservedZKCounters))
}
+
+ f.wipBatch.imHighReservedZKCounters = newHighZKCounters
} else {
- log.Infof("new WIP L2 block [%d] reserved resources exceeds the remaining batch resources, overflow resource: %s, closing WIP batch and creating new one. Batch counters: %s, L2 block reserved counters: %s",
- f.wipL2Block.trackingNum, overflowResource, f.logZKCounters(f.wipBatch.imRemainingResources.ZKCounters), f.logZKCounters(f.wipL2Block.reservedZKCounters))
+ log.Infof("new wip L2 block [%d] reserved resources exceeds the remaining batch resources, overflow resource: %s, closing WIP batch and creating new one. counters: {batch: %s, used: %s, reserved: %s, needed: %s, high: %s}",
+ f.wipL2Block.trackingNum, overflowResource,
+ f.logZKCounters(f.wipBatch.imRemainingResources.ZKCounters), f.logZKCounters(f.wipL2Block.usedZKCountersOnNew), f.logZKCounters(f.wipL2Block.reservedZKCountersOnNew), f.logZKCounters(neededZKCounters), f.logZKCounters(f.wipBatch.imHighReservedZKCounters))
}
// If reserved WIP L2 block resources don't fit in the remaining batch resources (or we got an overflow when trying to subtract the used resources)
@@ -565,19 +673,22 @@ func (f *finalizer) openNewWIPL2Block(ctx context.Context, prevTimestamp uint64,
if !fits || subOverflow {
err := f.closeAndOpenNewWIPBatch(ctx, state.ResourceExhaustedClosingReason)
if err != nil {
- f.Halt(ctx, fmt.Errorf("failed to create new WIP batch [%d], error: %v", f.wipL2Block.trackingNum, err), true)
+ f.Halt(ctx, fmt.Errorf("failed to create new wip batch [%d], error: %v", f.wipL2Block.trackingNum, err), true)
}
}
+ // We assign the wipBatch as the batch where this wipL2Block belongs
+ f.wipL2Block.batch = f.wipBatch
+
f.wipL2Block.metrics.newL2BlockTimes.sequencer = time.Since(processStart) - f.wipL2Block.metrics.newL2BlockTimes.executor
- log.Infof("created new WIP L2 block [%d], batch: %d, deltaTimestamp: %d, timestamp: %d, l1InfoTreeIndex: %d, l1InfoTreeIndexChanged: %v, oldStateRoot: %s, imStateRoot: %s, used counters: %s, reserved counters: %s",
- f.wipL2Block.trackingNum, f.wipBatch.batchNumber, f.wipL2Block.deltaTimestamp, f.wipL2Block.timestamp, f.wipL2Block.l1InfoTreeExitRoot.L1InfoTreeIndex,
- f.wipL2Block.l1InfoTreeExitRootChanged, oldIMStateRoot, f.wipL2Block.imStateRoot, f.logZKCounters(f.wipL2Block.usedZKCounters), f.logZKCounters(f.wipL2Block.reservedZKCounters))
+ log.Infof("created new wip L2 block [%d], batch: %d, deltaTimestamp: %d, timestamp: %d, l1InfoTreeIndex: %d, l1InfoTreeIndexChanged: %v, oldStateRoot: %s, imStateRoot: %s, counters: {used: %s, reserved: %s, needed: %s, high: %s}, contextId: %s",
+ f.wipL2Block.trackingNum, f.wipBatch.batchNumber, f.wipL2Block.deltaTimestamp, f.wipL2Block.timestamp, f.wipL2Block.l1InfoTreeExitRoot.L1InfoTreeIndex, f.wipL2Block.l1InfoTreeExitRootChanged, oldIMStateRoot, f.wipL2Block.imStateRoot,
+ f.logZKCounters(f.wipL2Block.usedZKCountersOnNew), f.logZKCounters(f.wipL2Block.reservedZKCountersOnNew), f.logZKCounters(neededZKCounters), f.logZKCounters(f.wipBatch.imHighReservedZKCounters), contextId)
}
// executeNewWIPL2Block executes an empty L2 Block in the executor and returns the batch response from the executor
-func (f *finalizer) executeNewWIPL2Block(ctx context.Context) (*state.ProcessBatchResponse, error) {
+func (f *finalizer) executeNewWIPL2Block(ctx context.Context) (*state.ProcessBatchResponse, string, error) {
batchRequest := state.ProcessRequest{
BatchNumber: f.wipBatch.batchNumber,
OldStateRoot: f.wipBatch.imStateRoot,
@@ -600,22 +711,38 @@ func (f *finalizer) executeNewWIPL2Block(ctx context.Context) (*state.ProcessBat
}
executorTime := time.Now()
- batchResponse, err := f.stateIntf.ProcessBatchV2(ctx, batchRequest, false)
+ batchResponse, contextId, err := f.stateIntf.ProcessBatchV2(ctx, batchRequest, false)
f.wipL2Block.metrics.newL2BlockTimes.executor = time.Since(executorTime)
if err != nil {
- return nil, err
+ return nil, contextId, err
}
if batchResponse.ExecutorError != nil {
- return nil, ErrExecutorError
+ return nil, contextId, ErrExecutorError
}
if batchResponse.IsRomOOCError {
- return nil, ErrProcessBatchOOC
+ return nil, contextId, ErrProcessBatchOOC
}
- return batchResponse, nil
+ return batchResponse, contextId, nil
+}
+
+func (f *finalizer) scheduleNextStateRootSync() {
+ f.nextStateRootSync = time.Now().Add(f.cfg.StateRootSyncInterval.Duration)
+}
+
+func (f *finalizer) waitPendingL2Blocks() {
+ // Wait until all L2 blocks are processed/discarded
+ startWait := time.Now()
+ f.pendingL2BlocksToProcessWG.Wait()
+ log.Debugf("waiting for pending L2 blocks to be processed took: %v", time.Since(startWait))
+
+ // Wait until all L2 blocks are stored
+ startWait = time.Now()
+ f.pendingL2BlocksToStoreWG.Wait()
+ log.Debugf("waiting for pending L2 blocks to be stored took: %v", time.Since(startWait))
}
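scheduleNextStateRootSync and waitPendingL2Blocks together implement a periodic drain-and-sync: once the StateRootSyncInterval deadline passes, the finalizer waits for every in-flight L2 block, resets imStateRoot from finalStateRoot and schedules the next sync. A standalone sketch of that pattern with an assumed one-hour interval; it does not use the finalizer's actual fields:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// syncer is a minimal sketch of the "drain then sync" pattern driven by a configurable interval.
type syncer struct {
	interval time.Duration
	nextSync time.Time
	pending  sync.WaitGroup // stands in for the pending L2 block wait groups
}

func (s *syncer) scheduleNext() { s.nextSync = time.Now().Add(s.interval) }

func (s *syncer) maybeSync(syncStateRoot func()) {
	if !s.nextSync.Before(time.Now()) {
		return // deadline not reached yet
	}
	s.pending.Wait() // drain pending work before syncing
	syncStateRoot()
	s.scheduleNext()
}

func main() {
	s := &syncer{interval: 3600 * time.Second}
	s.scheduleNext()
	s.maybeSync(func() { fmt.Println("state root synced") }) // no-op until the deadline passes
}
```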
func (f *finalizer) dumpL2Block(l2Block *L2Block) {
@@ -628,12 +755,12 @@ func (f *finalizer) dumpL2Block(l2Block *L2Block) {
sLog := ""
for i, tx := range l2Block.transactions {
- sLog += fmt.Sprintf(" tx[%d] hash: %s, from: %s, nonce: %d, gas: %d, gasPrice: %d, bytes: %d, egpPct: %d, used counters: %s, reserved counters: %s\n",
+ sLog += fmt.Sprintf(" tx[%d] hash: %s, from: %s, nonce: %d, gas: %d, gasPrice: %d, bytes: %d, egpPct: %d, countersOnNew: {used: %s, reserved: %s}\n",
i, tx.HashStr, tx.FromStr, tx.Nonce, tx.Gas, tx.GasPrice, tx.Bytes, tx.EGPPercentage, f.logZKCounters(tx.UsedZKCounters), f.logZKCounters(tx.ReservedZKCounters))
}
- log.Infof("DUMP L2 block [%d], timestamp: %d, deltaTimestamp: %d, imStateRoot: %s, l1InfoTreeIndex: %d, bytes: %d, used counters: %s, reserved counters: %s\n%s",
+ log.Infof("dump L2 block [%d], timestamp: %d, deltaTimestamp: %d, imStateRoot: %s, l1InfoTreeIndex: %d, bytes: %d, used counters: %s, reserved counters: %s\n%s",
l2Block.trackingNum, l2Block.timestamp, l2Block.deltaTimestamp, l2Block.imStateRoot, l2Block.l1InfoTreeExitRoot.L1InfoTreeIndex, l2Block.bytes,
- f.logZKCounters(l2Block.usedZKCounters), f.logZKCounters(l2Block.reservedZKCounters), sLog)
+ f.logZKCounters(l2Block.usedZKCountersOnNew), f.logZKCounters(l2Block.reservedZKCountersOnNew), sLog)
sLog = ""
if blockResp != nil {
@@ -643,7 +770,7 @@ func (f *finalizer) dumpL2Block(l2Block *L2Block) {
txResp.EffectivePercentage, txResp.HasGaspriceOpcode, txResp.HasBalanceOpcode)
}
- log.Infof("DUMP L2 block %d [%d] response, timestamp: %d, parentHash: %s, coinbase: %s, ger: %s, blockHashL1: %s, gasUsed: %d, blockInfoRoot: %s, blockHash: %s, used counters: %s, reserved counters: %s\n%s",
+ log.Infof("dump L2 block %d [%d] response, timestamp: %d, parentHash: %s, coinbase: %s, ger: %s, blockHashL1: %s, gasUsed: %d, blockInfoRoot: %s, blockHash: %s, counters: {used: %s, reserved: %s}\n%s",
blockResp.BlockNumber, l2Block.trackingNum, blockResp.Timestamp, blockResp.ParentHash, blockResp.Coinbase, blockResp.GlobalExitRoot, blockResp.BlockHashL1,
blockResp.GasUsed, blockResp.BlockInfoRoot, blockResp.BlockHash, f.logZKCounters(l2Block.batchResponse.UsedZkCounters), f.logZKCounters(l2Block.batchResponse.ReservedZkCounters), sLog)
}
diff --git a/sequencer/metrics.go b/sequencer/metrics.go
index 2be977e23e..5481587399 100644
--- a/sequencer/metrics.go
+++ b/sequencer/metrics.go
@@ -6,6 +6,7 @@ import (
"time"
)
+// SEQUENTIAL L2 BLOCK PROCESSING
// |-----------------------------------------------------------------------------| -> totalTime
// |------------| |-------------------------| -> transactionsTime
// |-newL2Block-|----tx 1----| |---tx 2---|-----tx 3-----| |-----l2Block-----|
@@ -14,6 +15,20 @@ import (
// idle | |iiii| | |ii| | -> idleTime
//
+// PARALLEL L2 BLOCK PROCESSING
+// |---------------------------------------------------------------------------------------------| -> totalTime
+// |-----------------------L2 block 1-----------------------| |-----------L2 block 2------------|
+// |------------| |-------------------------| |--------------------| -> transactionsTime
+// |-newL2Block-|----tx 1----| |---tx 2---|-----tx 3-----| |-newL2Block-|--tx 4---|---tx 5---|
+// sequencer |sssss ss|sss ss| |sss ss|sss ss| |sssss ss|ss ss|sss ss| -> sequencerTime
+// executor | xxxxx | xxxxxxx | | xxxxx | xxxxxxxxx | | xxxxx | xxxxxx | xxxxx | -> executorTime
+// idle | |iiii| | |ii| | -> idleTime
+
+// | -> L2 block 1 |
+// seq-l2block | |ssss ss|
+// exe-l2block | | xxxxxxxxxxx |
+//
+
type processTimes struct {
sequencer time.Duration
executor time.Duration
@@ -41,9 +56,11 @@ type metrics struct {
newL2BlockTimes processTimes
transactionsTimes processTimes
l2BlockTimes processTimes
+ waitl2BlockTime time.Duration
gas uint64
estimatedTxsPerSec float64
estimatedGasPerSec uint64
+ sequential bool
}
func (m *metrics) sub(mSub metrics) {
@@ -53,6 +70,7 @@ func (m *metrics) sub(mSub metrics) {
m.newL2BlockTimes.sub(mSub.newL2BlockTimes)
m.transactionsTimes.sub(mSub.transactionsTimes)
m.l2BlockTimes.sub(mSub.l2BlockTimes)
+ m.waitl2BlockTime -= mSub.waitl2BlockTime
m.gas -= mSub.gas
}
@@ -63,32 +81,57 @@ func (m *metrics) sumUp(mSumUp metrics) {
m.newL2BlockTimes.sumUp(mSumUp.newL2BlockTimes)
m.transactionsTimes.sumUp(mSumUp.transactionsTimes)
m.l2BlockTimes.sumUp(mSumUp.l2BlockTimes)
+ m.waitl2BlockTime += mSumUp.waitl2BlockTime
m.gas += mSumUp.gas
}
func (m *metrics) executorTime() time.Duration {
- return m.newL2BlockTimes.executor + m.transactionsTimes.executor + m.l2BlockTimes.executor
+ if m.sequential {
+ return m.newL2BlockTimes.executor + m.transactionsTimes.executor + m.l2BlockTimes.executor
+ } else {
+ return m.newL2BlockTimes.executor + m.transactionsTimes.executor + m.waitl2BlockTime
+ }
}
func (m *metrics) sequencerTime() time.Duration {
- return m.newL2BlockTimes.sequencer + m.transactionsTimes.sequencer + m.l2BlockTimes.sequencer
+ if m.sequential {
+ return m.newL2BlockTimes.sequencer + m.transactionsTimes.sequencer + m.l2BlockTimes.sequencer
+ } else {
+ return m.newL2BlockTimes.sequencer + m.transactionsTimes.sequencer
+ }
}
func (m *metrics) totalTime() time.Duration {
- return m.newL2BlockTimes.total() + m.transactionsTimes.total() + m.l2BlockTimes.total() + m.idleTime
+ if m.sequential {
+ return m.newL2BlockTimes.total() + m.transactionsTimes.total() + m.l2BlockTimes.total() + m.idleTime
+ } else {
+ return m.newL2BlockTimes.total() + m.transactionsTimes.total() + m.waitl2BlockTime + m.idleTime
+ }
}
-func (m *metrics) close(createdAt time.Time, l2BlockTxsCount int64) {
+func (m *metrics) close(createdAt time.Time, l2BlockTxsCount int64, sequential bool) {
// Compute pending fields
m.closedAt = time.Now()
totalTime := time.Since(createdAt)
+ m.sequential = sequential
m.l2BlockTxsCount = l2BlockTxsCount
- m.transactionsTimes.sequencer = totalTime - m.idleTime - m.newL2BlockTimes.total() - m.transactionsTimes.executor - m.l2BlockTimes.total()
+
+ if m.sequential {
+ m.transactionsTimes.sequencer = totalTime - m.idleTime - m.newL2BlockTimes.total() - m.transactionsTimes.executor - m.l2BlockTimes.total()
+ } else {
+ m.transactionsTimes.sequencer = totalTime - m.idleTime - m.newL2BlockTimes.total() - m.transactionsTimes.executor - m.waitl2BlockTime
+ }
// Compute performance
if m.processedTxsCount > 0 {
- // timePerTxuS is the average time spent per tx. This includes the l2Block time since the processing time of this section is proportional to the number of txs
- timePerTxuS := (m.transactionsTimes.total() + m.l2BlockTimes.total()).Microseconds() / m.processedTxsCount
+ var timePerTxuS int64
+ if m.sequential {
+ // timePerTxuS is the average time spent per tx. This includes the l2Block time since the processing time of this section is proportional to the number of txs
+ timePerTxuS = (m.transactionsTimes.total() + m.l2BlockTimes.total()).Microseconds() / m.processedTxsCount
+ } else {
+ // timePerTxuS is the average time spent per tx. This includes the waitl2Block time
+ timePerTxuS = (m.transactionsTimes.total() + m.waitl2BlockTime).Microseconds() / m.processedTxsCount
+ }
// estimatedTxs is the number of transactions that we estimate could have been processed in the block
estimatedTxs := float64(totalTime.Microseconds()-m.newL2BlockTimes.total().Microseconds()) / float64(timePerTxuS)
// estimatedTxxPerSec is the estimated transactions per second (rounded to 2 decimal digits)
@@ -102,8 +145,8 @@ func (m *metrics) close(createdAt time.Time, l2BlockTxsCount int64) {
}
func (m *metrics) log() string {
- return fmt.Sprintf("blockTxs: %d, txs: %d, gas: %d, txsSec: %.2f, gasSec: %d, time: {total: %d, idle: %d, sequencer: {total: %d, newL2Block: %d, txs: %d, l2Block: %d}, executor: {total: %d, newL2Block: %d, txs: %d, l2Block: %d}",
- m.l2BlockTxsCount, m.processedTxsCount, m.gas, m.estimatedTxsPerSec, m.estimatedGasPerSec, m.totalTime().Microseconds(), m.idleTime.Microseconds(),
+ return fmt.Sprintf("blockTxs: %d, txs: %d, gas: %d, txsSec: %.2f, gasSec: %d, time: {total: %d, idle: %d, waitL2Block: %d, sequencer: {total: %d, newL2Block: %d, txs: %d, l2Block: %d}, executor: {total: %d, newL2Block: %d, txs: %d, l2Block: %d}",
+ m.l2BlockTxsCount, m.processedTxsCount, m.gas, m.estimatedTxsPerSec, m.estimatedGasPerSec, m.totalTime().Microseconds(), m.idleTime.Microseconds(), m.waitl2BlockTime.Microseconds(),
m.sequencerTime().Microseconds(), m.newL2BlockTimes.sequencer.Microseconds(), m.transactionsTimes.sequencer.Microseconds(), m.l2BlockTimes.sequencer.Microseconds(),
m.executorTime().Microseconds(), m.newL2BlockTimes.executor.Microseconds(), m.transactionsTimes.executor.Microseconds(), m.l2BlockTimes.executor.Microseconds())
}
diff --git a/sequencer/mock_state.go b/sequencer/mock_state.go
index 2a53e28c25..f7f3861d4b 100644
--- a/sequencer/mock_state.go
+++ b/sequencer/mock_state.go
@@ -982,7 +982,7 @@ func (_m *StateMock) OpenWIPBatch(ctx context.Context, batch state.Batch, dbTx p
}
// ProcessBatchV2 provides a mock function with given fields: ctx, request, updateMerkleTree
-func (_m *StateMock) ProcessBatchV2(ctx context.Context, request state.ProcessRequest, updateMerkleTree bool) (*state.ProcessBatchResponse, error) {
+func (_m *StateMock) ProcessBatchV2(ctx context.Context, request state.ProcessRequest, updateMerkleTree bool) (*state.ProcessBatchResponse, string, error) {
ret := _m.Called(ctx, request, updateMerkleTree)
if len(ret) == 0 {
@@ -990,8 +990,9 @@ func (_m *StateMock) ProcessBatchV2(ctx context.Context, request state.ProcessRe
}
var r0 *state.ProcessBatchResponse
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, state.ProcessRequest, bool) (*state.ProcessBatchResponse, error)); ok {
+ var r1 string
+ var r2 error
+ if rf, ok := ret.Get(0).(func(context.Context, state.ProcessRequest, bool) (*state.ProcessBatchResponse, string, error)); ok {
return rf(ctx, request, updateMerkleTree)
}
if rf, ok := ret.Get(0).(func(context.Context, state.ProcessRequest, bool) *state.ProcessBatchResponse); ok {
@@ -1002,13 +1003,19 @@ func (_m *StateMock) ProcessBatchV2(ctx context.Context, request state.ProcessRe
}
}
- if rf, ok := ret.Get(1).(func(context.Context, state.ProcessRequest, bool) error); ok {
+ if rf, ok := ret.Get(1).(func(context.Context, state.ProcessRequest, bool) string); ok {
r1 = rf(ctx, request, updateMerkleTree)
} else {
- r1 = ret.Error(1)
+ r1 = ret.Get(1).(string)
}
- return r0, r1
+ if rf, ok := ret.Get(2).(func(context.Context, state.ProcessRequest, bool) error); ok {
+ r2 = rf(ctx, request, updateMerkleTree)
+ } else {
+ r2 = ret.Error(2)
+ }
+
+ return r0, r1, r2
}
// StoreL2Block provides a mock function with given fields: ctx, batchNumber, l2Block, txsEGPLog, dbTx
diff --git a/sequencer/mock_worker.go b/sequencer/mock_worker.go
index 215cd08c8e..3ff546f724 100644
--- a/sequencer/mock_worker.go
+++ b/sequencer/mock_worker.go
@@ -25,11 +25,6 @@ func (_m *WorkerMock) AddForcedTx(txHash common.Hash, addr common.Address) {
_m.Called(txHash, addr)
}
-// AddPendingTxToStore provides a mock function with given fields: txHash, addr
-func (_m *WorkerMock) AddPendingTxToStore(txHash common.Hash, addr common.Address) {
- _m.Called(txHash, addr)
-}
-
// AddTxTracker provides a mock function with given fields: ctx, txTracker
func (_m *WorkerMock) AddTxTracker(ctx context.Context, txTracker *TxTracker) (*TxTracker, error) {
ret := _m.Called(ctx, txTracker)
@@ -65,19 +60,19 @@ func (_m *WorkerMock) DeleteForcedTx(txHash common.Hash, addr common.Address) {
_m.Called(txHash, addr)
}
-// DeletePendingTxToStore provides a mock function with given fields: txHash, addr
-func (_m *WorkerMock) DeletePendingTxToStore(txHash common.Hash, addr common.Address) {
- _m.Called(txHash, addr)
-}
-
// DeleteTx provides a mock function with given fields: txHash, from
func (_m *WorkerMock) DeleteTx(txHash common.Hash, from common.Address) {
_m.Called(txHash, from)
}
-// GetBestFittingTx provides a mock function with given fields: resources
-func (_m *WorkerMock) GetBestFittingTx(resources state.BatchResources) (*TxTracker, error) {
- ret := _m.Called(resources)
+// DeleteTxPendingToStore provides a mock function with given fields: txHash, addr
+func (_m *WorkerMock) DeleteTxPendingToStore(txHash common.Hash, addr common.Address) {
+ _m.Called(txHash, addr)
+}
+
+// GetBestFittingTx provides a mock function with given fields: remainingResources, highReservedCounters
+func (_m *WorkerMock) GetBestFittingTx(remainingResources state.BatchResources, highReservedCounters state.ZKCounters) (*TxTracker, error) {
+ ret := _m.Called(remainingResources, highReservedCounters)
if len(ret) == 0 {
panic("no return value specified for GetBestFittingTx")
@@ -85,19 +80,19 @@ func (_m *WorkerMock) GetBestFittingTx(resources state.BatchResources) (*TxTrack
var r0 *TxTracker
var r1 error
- if rf, ok := ret.Get(0).(func(state.BatchResources) (*TxTracker, error)); ok {
- return rf(resources)
+ if rf, ok := ret.Get(0).(func(state.BatchResources, state.ZKCounters) (*TxTracker, error)); ok {
+ return rf(remainingResources, highReservedCounters)
}
- if rf, ok := ret.Get(0).(func(state.BatchResources) *TxTracker); ok {
- r0 = rf(resources)
+ if rf, ok := ret.Get(0).(func(state.BatchResources, state.ZKCounters) *TxTracker); ok {
+ r0 = rf(remainingResources, highReservedCounters)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*TxTracker)
}
}
- if rf, ok := ret.Get(1).(func(state.BatchResources) error); ok {
- r1 = rf(resources)
+ if rf, ok := ret.Get(1).(func(state.BatchResources, state.ZKCounters) error); ok {
+ r1 = rf(remainingResources, highReservedCounters)
} else {
r1 = ret.Error(1)
}
@@ -105,6 +100,11 @@ func (_m *WorkerMock) GetBestFittingTx(resources state.BatchResources) (*TxTrack
return r0, r1
}
+// MoveTxPendingToStore provides a mock function with given fields: txHash, addr
+func (_m *WorkerMock) MoveTxPendingToStore(txHash common.Hash, addr common.Address) {
+ _m.Called(txHash, addr)
+}
+
// MoveTxToNotReady provides a mock function with given fields: txHash, from, actualNonce, actualBalance
func (_m *WorkerMock) MoveTxToNotReady(txHash common.Hash, from common.Address, actualNonce *uint64, actualBalance *big.Int) []*TxTracker {
ret := _m.Called(txHash, from, actualNonce, actualBalance)
@@ -155,6 +155,38 @@ func (_m *WorkerMock) NewTxTracker(tx types.Transaction, usedZKcounters state.ZK
return r0, r1
}
+// RestoreTxsPendingToStore provides a mock function with given fields: ctx
+func (_m *WorkerMock) RestoreTxsPendingToStore(ctx context.Context) ([]*TxTracker, []*TxTracker) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for RestoreTxsPendingToStore")
+ }
+
+ var r0 []*TxTracker
+ var r1 []*TxTracker
+ if rf, ok := ret.Get(0).(func(context.Context) ([]*TxTracker, []*TxTracker)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) []*TxTracker); ok {
+ r0 = rf(ctx)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]*TxTracker)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) []*TxTracker); ok {
+ r1 = rf(ctx)
+ } else {
+ if ret.Get(1) != nil {
+ r1 = ret.Get(1).([]*TxTracker)
+ }
+ }
+
+ return r0, r1
+}
+
// UpdateAfterSingleSuccessfulTxExecution provides a mock function with given fields: from, touchedAddresses
func (_m *WorkerMock) UpdateAfterSingleSuccessfulTxExecution(from common.Address, touchedAddresses map[common.Address]*state.InfoReadWrite) []*TxTracker {
ret := _m.Called(from, touchedAddresses)
diff --git a/sequencer/sequencer.go b/sequencer/sequencer.go
index 22201776ce..b79ad26c17 100644
--- a/sequencer/sequencer.go
+++ b/sequencer/sequencer.go
@@ -75,7 +75,7 @@ func (s *Sequencer) Start(ctx context.Context) {
err := s.pool.MarkWIPTxsAsPending(ctx)
if err != nil {
- log.Fatalf("failed to mark WIP txs as pending, error: %v", err)
+ log.Fatalf("failed to mark wip txs as pending, error: %v", err)
}
// Start stream server if enabled
@@ -93,8 +93,6 @@ func (s *Sequencer) Start(ctx context.Context) {
s.updateDataStreamerFile(ctx, s.cfg.StreamServer.ChainID)
}
- go s.loadFromPool(ctx)
-
if s.streamServer != nil {
go s.sendDataToStreamer(s.cfg.StreamServer.ChainID)
}
@@ -104,6 +102,8 @@ func (s *Sequencer) Start(ctx context.Context) {
s.finalizer = newFinalizer(s.cfg.Finalizer, s.poolCfg, s.worker, s.pool, s.stateIntf, s.etherman, s.address, s.isSynced, s.batchCfg.Constraints, s.eventLog, s.streamServer, s.workerReadyTxsCond, s.dataToStream)
go s.finalizer.Start(ctx)
+ go s.loadFromPool(ctx)
+
go s.deleteOldPoolTxs(ctx)
go s.expireOldWorkerTxs(ctx)
@@ -147,6 +147,11 @@ func (s *Sequencer) updateDataStreamerFile(ctx context.Context, chainID uint64)
func (s *Sequencer) deleteOldPoolTxs(ctx context.Context) {
for {
time.Sleep(s.cfg.DeletePoolTxsCheckInterval.Duration)
+
+ if s.finalizer.haltFinalizer.Load() {
+ return
+ }
+
log.Infof("trying to get txs to delete from the pool...")
earliestTxHash, err := s.pool.GetEarliestProcessedTx(ctx)
if err != nil {
@@ -181,6 +186,11 @@ func (s *Sequencer) deleteOldPoolTxs(ctx context.Context) {
func (s *Sequencer) expireOldWorkerTxs(ctx context.Context) {
for {
time.Sleep(s.cfg.TxLifetimeCheckInterval.Duration)
+
+ if s.finalizer.haltFinalizer.Load() {
+ return
+ }
+
txTrackers := s.worker.ExpireTransactions(s.cfg.TxLifetimeMax.Duration)
failedReason := ErrExpiredTransaction.Error()
for _, txTracker := range txTrackers {
@@ -195,6 +205,10 @@ func (s *Sequencer) expireOldWorkerTxs(ctx context.Context) {
// loadFromPool keeps loading transactions from the pool
func (s *Sequencer) loadFromPool(ctx context.Context) {
for {
+ if s.finalizer.haltFinalizer.Load() {
+ return
+ }
+
poolTransactions, err := s.pool.GetNonWIPPendingTxs(ctx)
if err != nil && err != pool.ErrNotFound {
log.Errorf("error loading txs from pool, error: %v", err)
@@ -257,7 +271,7 @@ func (s *Sequencer) sendDataToStreamer(chainID uint64) {
l2Block := data
//TODO: remove this log
- log.Infof("start atomic op for l2block %d", l2Block.L2BlockNumber)
+ log.Infof("[ds-debug] start atomic op for l2block %d", l2Block.L2BlockNumber)
err = s.streamServer.StartAtomicOp()
if err != nil {
log.Errorf("failed to start atomic op for l2block %d, error: %v ", l2Block.L2BlockNumber, err)
@@ -270,7 +284,7 @@ func (s *Sequencer) sendDataToStreamer(chainID uint64) {
}
//TODO: remove this log
- log.Infof("add stream bookmark for l2block %d", l2Block.L2BlockNumber)
+ log.Infof("[ds-debug] add stream bookmark for l2block %d", l2Block.L2BlockNumber)
_, err = s.streamServer.AddStreamBookmark(bookMark.Encode())
if err != nil {
log.Errorf("failed to add stream bookmark for l2block %d, error: %v", l2Block.L2BlockNumber, err)
@@ -286,7 +300,7 @@ func (s *Sequencer) sendDataToStreamer(chainID uint64) {
}
//TODO: remove this log
- log.Infof("get previous l2block %d", l2Block.L2BlockNumber-1)
+ log.Infof("[ds-debug] get previous l2block %d", l2Block.L2BlockNumber-1)
previousL2BlockEntry, err := s.streamServer.GetFirstEventAfterBookmark(bookMark.Encode())
if err != nil {
log.Errorf("failed to get previous l2block %d, error: %v", l2Block.L2BlockNumber-1, err)
@@ -310,7 +324,7 @@ func (s *Sequencer) sendDataToStreamer(chainID uint64) {
}
//TODO: remove this log
- log.Infof("add l2blockStart stream entry for l2block %d", l2Block.L2BlockNumber)
+ log.Infof("[ds-debug] add l2blockStart stream entry for l2block %d", l2Block.L2BlockNumber)
_, err = s.streamServer.AddStreamEntry(state.EntryTypeL2BlockStart, blockStart.Encode())
if err != nil {
log.Errorf("failed to add stream entry for l2block %d, error: %v", l2Block.L2BlockNumber, err)
@@ -318,7 +332,7 @@ func (s *Sequencer) sendDataToStreamer(chainID uint64) {
}
//TODO: remove this log
- log.Infof("adding l2tx stream entries for l2block %d", l2Block.L2BlockNumber)
+ log.Infof("[ds-debug] adding l2tx stream entries for l2block %d", l2Block.L2BlockNumber)
for _, l2Transaction := range l2Block.Txs {
_, err = s.streamServer.AddStreamEntry(state.EntryTypeL2Tx, l2Transaction.Encode())
if err != nil {
@@ -334,7 +348,7 @@ func (s *Sequencer) sendDataToStreamer(chainID uint64) {
}
//TODO: remove this log
- log.Infof("add l2blockEnd stream entry for l2block %d", l2Block.L2BlockNumber)
+ log.Infof("[ds-debug] add l2blockEnd stream entry for l2block %d", l2Block.L2BlockNumber)
_, err = s.streamServer.AddStreamEntry(state.EntryTypeL2BlockEnd, blockEnd.Encode())
if err != nil {
log.Errorf("failed to add stream entry for l2block %d, error: %v", l2Block.L2BlockNumber, err)
@@ -342,7 +356,7 @@ func (s *Sequencer) sendDataToStreamer(chainID uint64) {
}
//TODO: remove this log
- log.Infof("commit atomic op for l2block %d", l2Block.L2BlockNumber)
+ log.Infof("[ds-debug] commit atomic op for l2block %d", l2Block.L2BlockNumber)
err = s.streamServer.CommitAtomicOp()
if err != nil {
log.Errorf("failed to commit atomic op for l2block %d, error: %v ", l2Block.L2BlockNumber, err)
@@ -350,7 +364,7 @@ func (s *Sequencer) sendDataToStreamer(chainID uint64) {
}
//TODO: remove this log
- log.Infof("l2block %d sent to datastream", l2Block.L2BlockNumber)
+ log.Infof("[ds-debug] l2block %d sent to datastream", l2Block.L2BlockNumber)
// Stream a bookmark
case state.DSBookMark:
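
Note: the background loops above (deleteOldPoolTxs, expireOldWorkerTxs, loadFromPool) now exit once the finalizer halts. A minimal self-contained sketch of that guard pattern, assuming the halt flag is an atomic.Bool (names below are illustrative, not the node's types):

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// backgroundLoop does periodic work and returns as soon as the halt flag is set,
// mirroring the checks added to the sequencer's background goroutines.
func backgroundLoop(halt *atomic.Bool, interval time.Duration) {
	for {
		time.Sleep(interval)
		if halt.Load() {
			return // finalizer halted, stop this goroutine
		}
		fmt.Println("periodic work")
	}
}

func main() {
	var halt atomic.Bool
	go backgroundLoop(&halt, 10*time.Millisecond)
	time.Sleep(35 * time.Millisecond)
	halt.Store(true) // simulate the finalizer halting
	time.Sleep(20 * time.Millisecond)
}
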
diff --git a/sequencer/waitgroupcount.go b/sequencer/waitgroupcount.go
new file mode 100644
index 0000000000..436f088514
--- /dev/null
+++ b/sequencer/waitgroupcount.go
@@ -0,0 +1,29 @@
+package sequencer
+
+import (
+ "sync"
+ "sync/atomic"
+)
+
+// WaitGroupCount implements a sync.WaitGroup that also has a field to get the WaitGroup counter
+type WaitGroupCount struct {
+ sync.WaitGroup
+ count atomic.Int32
+}
+
+// Add adds delta to the WaitGroup and increases the counter
+func (wg *WaitGroupCount) Add(delta int) {
+ wg.count.Add(int32(delta))
+ wg.WaitGroup.Add(delta)
+}
+
+// Done decrements the WaitGroup and counter by one
+func (wg *WaitGroupCount) Done() {
+ wg.count.Add(-1)
+ wg.WaitGroup.Done()
+}
+
+// Count returns the counter of the WaitGroup
+func (wg *WaitGroupCount) Count() int {
+ return int(wg.count.Load())
+}
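
Note: a short usage sketch for the new WaitGroupCount (an illustrative caller in the same package, assuming "fmt" is imported; not part of the node): it behaves like sync.WaitGroup, but the live counter can be read at any time, e.g. to log how many background stores are still pending.

// exampleWaitGroupCount shows the intended usage of WaitGroupCount.
func exampleWaitGroupCount() {
	var wgc WaitGroupCount
	for i := 0; i < 3; i++ {
		wgc.Add(1)
		go func() {
			defer wgc.Done()
			// ... background work, e.g. storing an L2 block
		}()
	}
	fmt.Printf("pending goroutines: %d\n", wgc.Count()) // 0..3 depending on timing
	wgc.Wait() // Count() is 0 once Wait returns
}
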
diff --git a/sequencer/worker.go b/sequencer/worker.go
index 0d0b378872..c6be5ed5ab 100644
--- a/sequencer/worker.go
+++ b/sequencer/worker.go
@@ -19,7 +19,9 @@ import (
type Worker struct {
pool map[string]*addrQueue
txSortedList *txSortedList
- workerMutex sync.Mutex
+ pendingToStore []*TxTracker
+ reorgedTxs []*TxTracker
+ workerMutex *sync.Mutex
state stateInterface
batchConstraints state.BatchConstraintsCfg
readyTxsCond *timeoutCond
@@ -30,7 +32,9 @@ type Worker struct {
func NewWorker(state stateInterface, constraints state.BatchConstraintsCfg, readyTxsCond *timeoutCond) *Worker {
w := Worker{
pool: make(map[string]*addrQueue),
+ workerMutex: new(sync.Mutex),
txSortedList: newTxSortedList(),
+ pendingToStore: []*TxTracker{},
state: state,
batchConstraints: constraints,
readyTxsCond: readyTxsCond,
@@ -46,31 +50,36 @@ func (w *Worker) NewTxTracker(tx types.Transaction, usedZKCounters state.ZKCount
// AddTxTracker adds a new Tx to the Worker
func (w *Worker) AddTxTracker(ctx context.Context, tx *TxTracker) (replacedTx *TxTracker, dropReason error) {
- w.workerMutex.Lock()
+ return w.addTxTracker(ctx, tx, w.workerMutex)
+}
+
+// addTxTracker adds a new Tx to the Worker
+func (w *Worker) addTxTracker(ctx context.Context, tx *TxTracker, mutex *sync.Mutex) (replacedTx *TxTracker, dropReason error) {
+ mutexLock(mutex)
// Make sure the IP is valid.
if tx.IP != "" && !pool.IsValidIP(tx.IP) {
- w.workerMutex.Unlock()
+ mutexUnlock(mutex)
return nil, pool.ErrInvalidIP
}
// Make sure the transaction's reserved ZKCounters are within the constraints.
if !w.batchConstraints.IsWithinConstraints(tx.ReservedZKCounters) {
log.Errorf("outOfCounters error (node level) for tx %s", tx.Hash.String())
- w.workerMutex.Unlock()
+ mutexUnlock(mutex)
return nil, pool.ErrOutOfCounters
}
if (w.wipTx != nil) && (w.wipTx.FromStr == tx.FromStr) && (w.wipTx.Nonce == tx.Nonce) {
log.Infof("adding tx %s (nonce %d) from address %s that matches current processing tx %s (nonce %d), rejecting it as duplicated nonce", tx.Hash, tx.Nonce, tx.From, w.wipTx.Hash, w.wipTx.Nonce)
- w.workerMutex.Unlock()
+ mutexUnlock(mutex)
return nil, ErrDuplicatedNonce
}
addr, found := w.pool[tx.FromStr]
if !found {
// Unlock the worker to let execute other worker functions while creating the new AddrQueue
- w.workerMutex.Unlock()
+ mutexUnlock(mutex)
root, err := w.state.GetLastStateRoot(ctx, nil)
if err != nil {
@@ -94,7 +103,7 @@ func (w *Worker) AddTxTracker(ctx context.Context, tx *TxTracker) (replacedTx *T
addr = newAddrQueue(tx.From, nonce.Uint64(), balance)
// Lock again the worker
- w.workerMutex.Lock()
+ mutexLock(mutex)
w.pool[tx.FromStr] = addr
log.Debugf("new addrQueue %s created (nonce: %d, balance: %s)", tx.FromStr, nonce.Uint64(), balance.String())
@@ -106,7 +115,7 @@ func (w *Worker) AddTxTracker(ctx context.Context, tx *TxTracker) (replacedTx *T
newReadyTx, prevReadyTx, repTx, dropReason = addr.addTx(tx)
if dropReason != nil {
log.Infof("dropped tx %s from addrQueue %s, reason: %s", tx.HashStr, tx.FromStr, dropReason.Error())
- w.workerMutex.Unlock()
+ mutexUnlock(mutex)
return repTx, dropReason
}
@@ -124,7 +133,7 @@ func (w *Worker) AddTxTracker(ctx context.Context, tx *TxTracker) (replacedTx *T
log.Debugf("tx %s (nonce: %d, gasPrice: %d, addr: %s) has been replaced", repTx.HashStr, repTx.Nonce, repTx.GasPrice, tx.FromStr)
}
- w.workerMutex.Unlock()
+ mutexUnlock(mutex)
return repTx, nil
}
@@ -199,6 +208,28 @@ func (w *Worker) MoveTxToNotReady(txHash common.Hash, from common.Address, actua
return txsToDelete
}
+// deleteTx deletes a regular tx from the addrQueue
+func (w *Worker) deleteTx(txHash common.Hash, addr common.Address) *TxTracker {
+ addrQueue, found := w.pool[addr.String()]
+ if found {
+ deletedTx, isReady := addrQueue.deleteTx(txHash)
+ if deletedTx != nil {
+ if isReady {
+ log.Debugf("tx %s deleted from TxSortedList", deletedTx.Hash)
+ w.txSortedList.delete(deletedTx)
+ }
+ } else {
+ log.Warnf("tx %s not found in addrQueue %s", txHash, addr)
+ }
+
+ return deletedTx
+ } else {
+ log.Warnf("addrQueue %s not found", addr)
+
+ return nil
+ }
+}
+
// DeleteTx deletes a regular tx from the addrQueue
func (w *Worker) DeleteTx(txHash common.Hash, addr common.Address) {
w.workerMutex.Lock()
@@ -206,16 +237,7 @@ func (w *Worker) DeleteTx(txHash common.Hash, addr common.Address) {
w.resetWipTx(txHash)
- addrQueue, found := w.pool[addr.String()]
- if found {
- deletedReadyTx := addrQueue.deleteTx(txHash)
- if deletedReadyTx != nil {
- log.Debugf("tx %s deleted from TxSortedList", deletedReadyTx.Hash.String())
- w.txSortedList.delete(deletedReadyTx)
- }
- } else {
- log.Warnf("addrQueue %s not found", addr.String())
- }
+ w.deleteTx(txHash, addr)
}
// DeleteForcedTx deletes a forced tx from the addrQueue
@@ -257,55 +279,153 @@ func (w *Worker) UpdateTxZKCounters(txHash common.Hash, addr common.Address, use
}
}
-// AddPendingTxToStore adds a tx to the addrQueue list of pending txs to store in the DB (trusted state)
-func (w *Worker) AddPendingTxToStore(txHash common.Hash, addr common.Address) {
+// MoveTxPendingToStore moves a tx to pending to store list
+func (w *Worker) MoveTxPendingToStore(txHash common.Hash, addr common.Address) {
+ // TODO: Add test for this function
+
w.workerMutex.Lock()
defer w.workerMutex.Unlock()
- addrQueue, found := w.pool[addr.String()]
+ // Delete from worker pool and addrQueue
+ deletedTx := w.deleteTx(txHash, addr)
- if found {
+ // Add tx to pending to store list in worker
+ if deletedTx != nil {
+ w.pendingToStore = append(w.pendingToStore, deletedTx)
+ log.Debugf("tx %s add to pendingToStore, order: %d", deletedTx.Hash, len(w.pendingToStore))
+ } else {
+ log.Warnf("tx %s not found when moving it to pending to store, address: %s", txHash, addr)
+ }
+
+ // Add tx to pending to store list in addrQueue
+ if addrQueue, found := w.pool[addr.String()]; found {
addrQueue.addPendingTxToStore(txHash)
} else {
- log.Warnf("addrQueue %s not found", addr.String())
+ log.Warnf("addrQueue %s not found when moving tx %s to pending to store", addr, txHash)
}
}
+// RestoreTxsPendingToStore restores the txs pending to store and moves them to the worker pool to be processed again
+func (w *Worker) RestoreTxsPendingToStore(ctx context.Context) ([]*TxTracker, []*TxTracker) {
+ // TODO: Add test for this function
+ // TODO: We need to process restored txs in the same order we processed them initially
+
+ w.workerMutex.Lock()
+
+ addrList := make(map[common.Address]struct{})
+ txsList := []*TxTracker{}
+ w.reorgedTxs = []*TxTracker{}
+
+ // Add txs pending to store to the list that will include all the txs to reprocess again
+ // Add txs to the reorgedTxs list to get them in the order in which they were processed before the L2 block reorg
+ // Also get the addresses of these txs since we will need to recreate them
+ for _, txToStore := range w.pendingToStore {
+ txsList = append(txsList, txToStore)
+ w.reorgedTxs = append(w.reorgedTxs, txToStore)
+ addrList[txToStore.From] = struct{}{}
+ }
+
+ // Add txs from addrQueues that will be recreated and delete addrQueues from the pool list
+ for addr := range addrList {
+ addrQueue, found := w.pool[addr.String()]
+ if found {
+ txsList = append(txsList, addrQueue.getTransactions()...)
+ if addrQueue.readyTx != nil {
+ // Delete readyTx from the txSortedList
+ w.txSortedList.delete(addrQueue.readyTx)
+ }
+ // Delete the addrQueue to recreate it later
+ delete(w.pool, addr.String())
+ }
+ }
+
+ // Clear pendingToStore list
+ w.pendingToStore = []*TxTracker{}
+ // Clear wip tx
+ w.wipTx = nil
+
+ for _, tx := range w.reorgedTxs {
+ log.Infof("reorged tx %s, nonce %d, from: %s", tx.Hash, tx.Nonce, tx.From)
+ }
+
+ replacedTxs := []*TxTracker{}
+ droppedTxs := []*TxTracker{}
+ // Add again in the worker the txs to restore (this will recreate addrQueues)
+ for _, restoredTx := range txsList {
+ replacedTx, dropReason := w.addTxTracker(ctx, restoredTx, nil)
+ if dropReason != nil {
+ droppedTxs = append(droppedTxs, restoredTx)
+ }
+ if replacedTx != nil {
+ replacedTxs = append(replacedTxs, restoredTx)
+ }
+ }
+
+ w.workerMutex.Unlock()
+
+ // In this scenario we shouldn't have dropped or replaced txs, but we return them just in case
+ return droppedTxs, replacedTxs
+}
+
// AddForcedTx adds a forced tx to the addrQueue
func (w *Worker) AddForcedTx(txHash common.Hash, addr common.Address) {
w.workerMutex.Lock()
defer w.workerMutex.Unlock()
- addrQueue, found := w.pool[addr.String()]
-
- if found {
+ if addrQueue, found := w.pool[addr.String()]; found {
addrQueue.addForcedTx(txHash)
} else {
log.Warnf("addrQueue %s not found", addr.String())
}
}
-// DeletePendingTxToStore delete a tx from the addrQueue list of pending txs to store in the DB (trusted state)
-func (w *Worker) DeletePendingTxToStore(txHash common.Hash, addr common.Address) {
+// DeleteTxPendingToStore deletes a tx from the worker and addrQueue lists of pending txs to store in the DB (trusted state)
+func (w *Worker) DeleteTxPendingToStore(txHash common.Hash, addr common.Address) {
w.workerMutex.Lock()
defer w.workerMutex.Unlock()
- addrQueue, found := w.pool[addr.String()]
+ // Delete tx from pending to store list in worker
+ found := false
+ for i, txToStore := range w.pendingToStore {
+ if txToStore.Hash == txHash {
+ found = true
+ w.pendingToStore = append(w.pendingToStore[:i], w.pendingToStore[i+1:]...)
+ }
+ }
+ if !found {
+ log.Warnf("tx %s not found when deleting it from worker pool", txHash)
+ }
- if found {
+ // Delete tx from pending to store list in addrQueue
+ if addrQueue, found := w.pool[addr.String()]; found {
addrQueue.deletePendingTxToStore(txHash)
} else {
- log.Warnf("addrQueue %s not found", addr.String())
+ log.Warnf("addrQueue %s not found when deleting pending to store tx %s", addr, txHash)
}
}
// GetBestFittingTx gets the most efficient tx that fits in the available batch resources
-func (w *Worker) GetBestFittingTx(resources state.BatchResources) (*TxTracker, error) {
+func (w *Worker) GetBestFittingTx(remainingResources state.BatchResources, highReservedCounters state.ZKCounters) (*TxTracker, error) {
w.workerMutex.Lock()
defer w.workerMutex.Unlock()
w.wipTx = nil
+ // If we are processing an L2 block reorg we return the next tx in the reorg list
+ for len(w.reorgedTxs) > 0 {
+ reorgedTx := w.reorgedTxs[0]
+ w.reorgedTxs = w.reorgedTxs[1:]
+ if addrQueue, found := w.pool[reorgedTx.FromStr]; found {
+ if addrQueue.readyTx != nil && addrQueue.readyTx.Hash == reorgedTx.Hash {
+ return reorgedTx, nil
+ } else {
+ log.Warnf("reorged tx %s is not the ready tx for addrQueue %s, this shouldn't happen", reorgedTx.Hash, reorgedTx.From)
+ }
+ } else {
+ log.Warnf("addrQueue %s for reorged tx %s not found, this shouldn't happen", reorgedTx.From, reorgedTx.Hash)
+ }
+ }
+
if w.txSortedList.len() == 0 {
return nil, ErrTransactionsListEmpty
}
@@ -334,8 +454,9 @@ func (w *Worker) GetBestFittingTx(resources state.BatchResources) (*TxTracker, e
foundMutex.RUnlock()
txCandidate := w.txSortedList.getByIndex(i)
- overflow, _ := bresources.Sub(state.BatchResources{ZKCounters: txCandidate.ReservedZKCounters, Bytes: txCandidate.Bytes})
- if overflow {
+ needed, _ := getNeededZKCounters(highReservedCounters, txCandidate.UsedZKCounters, txCandidate.ReservedZKCounters)
+ fits, _ := bresources.Fits(state.BatchResources{ZKCounters: needed, Bytes: txCandidate.Bytes})
+ if !fits {
// We don't add this Tx
continue
}
@@ -349,7 +470,7 @@ func (w *Worker) GetBestFittingTx(resources state.BatchResources) (*TxTracker, e
return
}
- }(i, resources)
+ }(i, remainingResources)
}
wg.Wait()
@@ -402,3 +523,15 @@ func (w *Worker) resetWipTx(txHash common.Hash) {
w.wipTx = nil
}
}
+
+func mutexLock(mutex *sync.Mutex) {
+ if mutex != nil {
+ mutex.Lock()
+ }
+}
+
+func mutexUnlock(mutex *sync.Mutex) {
+ if mutex != nil {
+ mutex.Unlock()
+ }
+}
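
Note: mutexLock/mutexUnlock let addTxTracker run either with the worker mutex (the public AddTxTracker path) or with a nil mutex when the caller already holds the lock (RestoreTxsPendingToStore). A self-contained sketch of that optional-mutex pattern, with hypothetical names, not the node's code:

package main

import (
	"fmt"
	"sync"
)

// store shows the pattern: the inner helper locks only when given a non-nil
// mutex, so a caller that already holds the lock reuses the same code path
// by passing nil instead of re-locking (which would deadlock).
type store struct {
	mu    sync.Mutex
	items []string
}

func (s *store) Add(item string) { // public entry point: takes the lock
	s.add(item, &s.mu)
}

func (s *store) add(item string, mu *sync.Mutex) {
	if mu != nil {
		mu.Lock()
		defer mu.Unlock()
	}
	s.items = append(s.items, item)
}

func (s *store) Restore(items []string) { // already holds the lock
	s.mu.Lock()
	defer s.mu.Unlock()
	for _, it := range items {
		s.add(it, nil) // nil mutex: don't lock again
	}
}

func main() {
	s := &store{}
	s.Add("a")
	s.Restore([]string{"b", "c"})
	fmt.Println(s.items) // [a b c]
}
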
diff --git a/sequencer/worker_test.go b/sequencer/worker_test.go
index a86d7a2f3f..0e2375ad37 100644
--- a/sequencer/worker_test.go
+++ b/sequencer/worker_test.go
@@ -258,7 +258,7 @@ func TestWorkerGetBestTx(t *testing.T) {
ct := 0
for {
- tx, _ := worker.GetBestFittingTx(rc)
+ tx, _ := worker.GetBestFittingTx(rc, state.ZKCounters{})
if tx != nil {
if ct >= len(expectedGetBestTx) {
t.Fatalf("Error getting more best tx than expected. Expected=%d, Actual=%d", len(expectedGetBestTx), ct+1)
diff --git a/state/batch.go b/state/batch.go
index 7cf10ebeab..f5325a08c7 100644
--- a/state/batch.go
+++ b/state/batch.go
@@ -42,11 +42,12 @@ type Batch struct {
AccInputHash common.Hash
// Timestamp (<=incaberry) -> batch time
// (>incaberry) -> minTimestamp used in batch creation, real timestamp is in virtual_batch.batch_timestamp
- Timestamp time.Time
- Transactions []types.Transaction
- GlobalExitRoot common.Hash
- ForcedBatchNum *uint64
- Resources BatchResources
+ Timestamp time.Time
+ Transactions []types.Transaction
+ GlobalExitRoot common.Hash
+ ForcedBatchNum *uint64
+ Resources BatchResources
+ HighReservedZKCounters ZKCounters
// WIP: if WIP == true is a openBatch
WIP bool
}
@@ -83,6 +84,8 @@ const (
MaxDeltaTimestampClosingReason ClosingReason = "Max delta timestamp"
// NoTxFitsClosingReason is the closing reason used when any of the txs in the pool (worker) fits in the remaining resources of the batch
NoTxFitsClosingReason ClosingReason = "No transaction fits"
+ // L2BlockReorgClonsingReason is the closing reason used when we have an L2 block reorg (unexpected error, like OOC, when processing an L2 block)
+ L2BlockReorgClonsingReason ClosingReason = "L2 block reorg"
// Reason due Synchronizer
// ------------------------------------------------------------------------------------------
@@ -109,9 +112,10 @@ type ProcessingReceipt struct {
GlobalExitRoot common.Hash
AccInputHash common.Hash
// Txs []types.Transaction
- BatchL2Data []byte
- ClosingReason ClosingReason
- BatchResources BatchResources
+ BatchL2Data []byte
+ ClosingReason ClosingReason
+ BatchResources BatchResources
+ HighReservedZKCounters ZKCounters
}
// VerifiedBatch represents a VerifiedBatch
diff --git a/state/batchV2.go b/state/batchV2.go
index 9de0f39949..32f6fd856b 100644
--- a/state/batchV2.go
+++ b/state/batchV2.go
@@ -38,7 +38,7 @@ type ProcessingContextV2 struct {
}
// ProcessBatchV2 processes a batch for forkID >= ETROG
-func (s *State) ProcessBatchV2(ctx context.Context, request ProcessRequest, updateMerkleTree bool) (*ProcessBatchResponse, error) {
+func (s *State) ProcessBatchV2(ctx context.Context, request ProcessRequest, updateMerkleTree bool) (*ProcessBatchResponse, string, error) {
updateMT := uint32(cFalse)
if updateMerkleTree {
updateMT = cTrue
@@ -85,16 +85,16 @@ func (s *State) ProcessBatchV2(ctx context.Context, request ProcessRequest, upda
res, err := s.sendBatchRequestToExecutorV2(ctx, processBatchRequest, request.Caller)
if err != nil {
- return nil, err
+ return nil, "", err
}
var result *ProcessBatchResponse
result, err = s.convertToProcessBatchResponseV2(res)
if err != nil {
- return nil, err
+ return nil, "", err
}
- return result, nil
+ return result, processBatchRequest.ContextId, nil
}
// ExecuteBatchV2 is used by the synchronizer to reprocess batches to compare generated state root vs stored one
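
Note: ProcessBatchV2 now also returns the executor context id (processBatchRequest.ContextId), which callers can use e.g. to correlate a request with executor logs. A minimal sketch of the new three-value call-site shape, using hypothetical stand-in types rather than the real state package:

package main

import (
	"context"
	"errors"
	"fmt"
)

// Stand-ins for the real request/response types, just to show the signature shape.
type processRequest struct{ BatchNumber uint64 }
type processBatchResponse struct{ NewStateRootHex string }

// processBatchV2 mirrors the new (response, contextID, error) return shape.
func processBatchV2(ctx context.Context, req processRequest, updateMerkleTree bool) (*processBatchResponse, string, error) {
	if req.BatchNumber == 0 {
		return nil, "", errors.New("empty batch")
	}
	return &processBatchResponse{NewStateRootHex: "0x01"}, "ctx-1234", nil
}

func main() {
	resp, contextID, err := processBatchV2(context.Background(), processRequest{BatchNumber: 7}, true)
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	// The context id is only informational; the response carries the state change.
	fmt.Println(resp.NewStateRootHex, contextID)
}
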
diff --git a/state/pgstatestorage/batch.go b/state/pgstatestorage/batch.go
index 5b1d1f57fd..843c725b12 100644
--- a/state/pgstatestorage/batch.go
+++ b/state/pgstatestorage/batch.go
@@ -84,7 +84,7 @@ func (p *PostgresStorage) GetVerifiedBatch(ctx context.Context, batchNumber uint
// GetLastNBatches returns the last numBatches batches.
func (p *PostgresStorage) GetLastNBatches(ctx context.Context, numBatches uint, dbTx pgx.Tx) ([]*state.Batch, error) {
- const getLastNBatchesSQL = "SELECT batch_num, global_exit_root, local_exit_root, acc_input_hash, state_root, timestamp, coinbase, raw_txs_data, forced_batch_num, batch_resources, wip from state.batch ORDER BY batch_num DESC LIMIT $1"
+ const getLastNBatchesSQL = "SELECT batch_num, global_exit_root, local_exit_root, acc_input_hash, state_root, timestamp, coinbase, raw_txs_data, forced_batch_num, batch_resources, high_reserved_counters, wip from state.batch ORDER BY batch_num DESC LIMIT $1"
e := p.getExecQuerier(dbTx)
rows, err := e.Query(ctx, getLastNBatchesSQL, numBatches)
@@ -257,7 +257,7 @@ func (p *PostgresStorage) SetInitSyncBatch(ctx context.Context, batchNumber uint
// GetBatchByNumber returns the batch with the given number.
func (p *PostgresStorage) GetBatchByNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (*state.Batch, error) {
const getBatchByNumberSQL = `
- SELECT batch_num, global_exit_root, local_exit_root, acc_input_hash, state_root, timestamp, coinbase, raw_txs_data, forced_batch_num, batch_resources, wip
+ SELECT batch_num, global_exit_root, local_exit_root, acc_input_hash, state_root, timestamp, coinbase, raw_txs_data, forced_batch_num, batch_resources, high_reserved_counters, wip
FROM state.batch
WHERE batch_num = $1`
@@ -277,7 +277,7 @@ func (p *PostgresStorage) GetBatchByNumber(ctx context.Context, batchNumber uint
// GetBatchByTxHash returns the batch including the given tx
func (p *PostgresStorage) GetBatchByTxHash(ctx context.Context, transactionHash common.Hash, dbTx pgx.Tx) (*state.Batch, error) {
const getBatchByTxHashSQL = `
- SELECT b.batch_num, b.global_exit_root, b.local_exit_root, b.acc_input_hash, b.state_root, b.timestamp, b.coinbase, b.raw_txs_data, b.forced_batch_num, b.batch_resources, b.wip
+ SELECT b.batch_num, b.global_exit_root, b.local_exit_root, b.acc_input_hash, b.state_root, b.timestamp, b.coinbase, b.raw_txs_data, b.forced_batch_num, b.batch_resources, b.high_reserved_counters, b.wip
FROM state.transaction t, state.batch b, state.l2block l
WHERE t.hash = $1 AND l.block_num = t.l2_block_num AND b.batch_num = l.batch_num`
@@ -296,7 +296,7 @@ func (p *PostgresStorage) GetBatchByTxHash(ctx context.Context, transactionHash
// GetBatchByL2BlockNumber returns the batch related to the l2 block accordingly to the provided l2 block number.
func (p *PostgresStorage) GetBatchByL2BlockNumber(ctx context.Context, l2BlockNumber uint64, dbTx pgx.Tx) (*state.Batch, error) {
const getBatchByL2BlockNumberSQL = `
- SELECT bt.batch_num, bt.global_exit_root, bt.local_exit_root, bt.acc_input_hash, bt.state_root, bt.timestamp, bt.coinbase, bt.raw_txs_data, bt.forced_batch_num, bt.batch_resources, bt.wip
+ SELECT bt.batch_num, bt.global_exit_root, bt.local_exit_root, bt.acc_input_hash, bt.state_root, bt.timestamp, bt.coinbase, bt.raw_txs_data, bt.forced_batch_num, bt.batch_resources, bt.high_reserved_counters, bt.wip
FROM state.batch bt
INNER JOIN state.l2block bl
ON bt.batch_num = bl.batch_num
@@ -329,6 +329,7 @@ func (p *PostgresStorage) GetVirtualBatchByNumber(ctx context.Context, batchNumb
raw_txs_data,
forced_batch_num,
batch_resources,
+ high_reserved_counters,
wip
FROM
state.batch
@@ -386,13 +387,14 @@ func (p *PostgresStorage) IsSequencingTXSynced(ctx context.Context, transactionH
func scanBatch(row pgx.Row) (state.Batch, error) {
batch := state.Batch{}
var (
- gerStr string
- lerStr *string
- aihStr *string
- stateStr *string
- coinbaseStr string
- resourcesData []byte
- wip bool
+ gerStr string
+ lerStr *string
+ aihStr *string
+ stateStr *string
+ coinbaseStr string
+ resourcesData []byte
+ highReservedCounters []byte
+ wip bool
)
err := row.Scan(
&batch.BatchNumber,
@@ -405,6 +407,7 @@ func scanBatch(row pgx.Row) (state.Batch, error) {
&batch.BatchL2Data,
&batch.ForcedBatchNum,
&resourcesData,
+ &highReservedCounters,
&wip,
)
if err != nil {
@@ -427,6 +430,14 @@ func scanBatch(row pgx.Row) (state.Batch, error) {
return batch, err
}
}
+
+ if highReservedCounters != nil {
+ err = json.Unmarshal(highReservedCounters, &batch.HighReservedZKCounters)
+ if err != nil {
+ return batch, err
+ }
+ }
+
batch.WIP = wip
batch.Coinbase = common.HexToAddress(coinbaseStr)
@@ -663,7 +674,7 @@ func (p *PostgresStorage) CloseWIPBatchInStorage(ctx context.Context, receipt st
// GetWIPBatchInStorage returns the wip batch in the state
func (p *PostgresStorage) GetWIPBatchInStorage(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (*state.Batch, error) {
const getWIPBatchByNumberSQL = `
- SELECT batch_num, global_exit_root, local_exit_root, acc_input_hash, state_root, timestamp, coinbase, raw_txs_data, forced_batch_num, batch_resources, wip
+ SELECT batch_num, global_exit_root, local_exit_root, acc_input_hash, state_root, timestamp, coinbase, raw_txs_data, forced_batch_num, batch_resources, high_reserved_counters, wip
FROM state.batch
WHERE batch_num = $1 AND wip = TRUE`
@@ -778,6 +789,7 @@ func (p *PostgresStorage) GetVirtualBatchToProve(ctx context.Context, lastVerfie
b.raw_txs_data,
b.forced_batch_num,
b.batch_resources,
+ b.high_reserved_counters,
b.wip
FROM
state.batch b,
@@ -842,7 +854,7 @@ func (p *PostgresStorage) GetSequences(ctx context.Context, lastVerifiedBatchNum
// GetLastClosedBatch returns the latest closed batch
func (p *PostgresStorage) GetLastClosedBatch(ctx context.Context, dbTx pgx.Tx) (*state.Batch, error) {
const getLastClosedBatchSQL = `
- SELECT bt.batch_num, bt.global_exit_root, bt.local_exit_root, bt.acc_input_hash, bt.state_root, bt.timestamp, bt.coinbase, bt.raw_txs_data, bt.forced_batch_num, bt.batch_resources, bt.wip
+ SELECT bt.batch_num, bt.global_exit_root, bt.local_exit_root, bt.acc_input_hash, bt.state_root, bt.timestamp, bt.coinbase, bt.raw_txs_data, bt.forced_batch_num, bt.batch_resources, bt.high_reserved_counters, bt.wip
FROM state.batch bt
WHERE wip = FALSE
ORDER BY bt.batch_num DESC
@@ -891,14 +903,20 @@ func (p *PostgresStorage) UpdateBatchL2Data(ctx context.Context, batchNumber uin
// UpdateWIPBatch updates the data in a batch
func (p *PostgresStorage) UpdateWIPBatch(ctx context.Context, receipt state.ProcessingReceipt, dbTx pgx.Tx) error {
- const updateL2DataSQL = "UPDATE state.batch SET raw_txs_data = $2, global_exit_root = $3, state_root = $4, local_exit_root = $5, batch_resources = $6 WHERE batch_num = $1"
+ const updateL2DataSQL = "UPDATE state.batch SET raw_txs_data = $2, global_exit_root = $3, state_root = $4, local_exit_root = $5, batch_resources = $6, high_reserved_counters = $7 WHERE batch_num = $1"
e := p.getExecQuerier(dbTx)
batchResourcesJsonBytes, err := json.Marshal(receipt.BatchResources)
if err != nil {
return err
}
- _, err = e.Exec(ctx, updateL2DataSQL, receipt.BatchNumber, receipt.BatchL2Data, receipt.GlobalExitRoot.String(), receipt.StateRoot.String(), receipt.LocalExitRoot.String(), string(batchResourcesJsonBytes))
+
+ highReservedCounters, err := json.Marshal(receipt.HighReservedZKCounters)
+ if err != nil {
+ return err
+ }
+
+ _, err = e.Exec(ctx, updateL2DataSQL, receipt.BatchNumber, receipt.BatchL2Data, receipt.GlobalExitRoot.String(), receipt.StateRoot.String(), receipt.LocalExitRoot.String(), string(batchResourcesJsonBytes), string(highReservedCounters))
return err
}
@@ -1050,7 +1068,7 @@ func (p *PostgresStorage) GetLatestBatchGlobalExitRoot(ctx context.Context, dbTx
// GetNotCheckedBatches returns the batches that are closed but not checked
func (p *PostgresStorage) GetNotCheckedBatches(ctx context.Context, dbTx pgx.Tx) ([]*state.Batch, error) {
const getBatchesNotCheckedSQL = `
- SELECT batch_num, global_exit_root, local_exit_root, acc_input_hash, state_root, timestamp, coinbase, raw_txs_data, forced_batch_num, batch_resources, wip
+ SELECT batch_num, global_exit_root, local_exit_root, acc_input_hash, state_root, timestamp, coinbase, raw_txs_data, forced_batch_num, batch_resources, high_reserved_counters, wip
from state.batch WHERE wip IS FALSE AND checked IS FALSE ORDER BY batch_num ASC`
e := p.getExecQuerier(dbTx)
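
Note: the new high_reserved_counters column stores the batch's highest reserved ZK counters as JSON; UpdateWIPBatch marshals them before writing and scanBatch unmarshals them only when the column is not NULL (rows created before migration 0021 stay NULL). A minimal sketch of that round trip, using a simplified counters struct whose fields are illustrative rather than the exact state.ZKCounters definition:

package main

import (
	"encoding/json"
	"fmt"
)

// zkCounters is a simplified stand-in for state.ZKCounters.
type zkCounters struct {
	GasUsed uint64 `json:"GasUsed,omitempty"`
	Steps   uint32 `json:"Steps,omitempty"`
}

func main() {
	high := zkCounters{Steps: 1890125}

	// Roughly what UpdateWIPBatch does before writing high_reserved_counters.
	raw, err := json.Marshal(high)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(raw)) // {"Steps":1890125}

	// Roughly what scanBatch does when the column is not NULL.
	var restored zkCounters
	if err := json.Unmarshal(raw, &restored); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", restored)
}
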
diff --git a/state/test/forkid_etrog/etrog_test.go b/state/test/forkid_etrog/etrog_test.go
index d8dfd77398..55f7ef48fb 100644
--- a/state/test/forkid_etrog/etrog_test.go
+++ b/state/test/forkid_etrog/etrog_test.go
@@ -125,7 +125,7 @@ func TestStateTransition(t *testing.T) {
SkipVerifyL1InfoRoot_V2: testCase.L1InfoTree.SkipVerifyL1InfoRoot,
}
- processResponse, _ := testState.ProcessBatchV2(ctx, processRequest, true)
+ processResponse, _, _ := testState.ProcessBatchV2(ctx, processRequest, true)
require.Nil(t, processResponse.ExecutorError)
require.Equal(t, testCase.ExpectedNewStateRoot, processResponse.NewStateRoot.String())
}
diff --git a/synchronizer/common/syncinterfaces/mocks/state_full_interface.go b/synchronizer/common/syncinterfaces/mocks/state_full_interface.go
index fa570dbe7f..81fe9a430e 100644
--- a/synchronizer/common/syncinterfaces/mocks/state_full_interface.go
+++ b/synchronizer/common/syncinterfaces/mocks/state_full_interface.go
@@ -2429,7 +2429,7 @@ func (_c *StateFullInterface_ProcessBatch_Call) RunAndReturn(run func(context.Co
}
// ProcessBatchV2 provides a mock function with given fields: ctx, request, updateMerkleTree
-func (_m *StateFullInterface) ProcessBatchV2(ctx context.Context, request state.ProcessRequest, updateMerkleTree bool) (*state.ProcessBatchResponse, error) {
+func (_m *StateFullInterface) ProcessBatchV2(ctx context.Context, request state.ProcessRequest, updateMerkleTree bool) (*state.ProcessBatchResponse, string, error) {
ret := _m.Called(ctx, request, updateMerkleTree)
if len(ret) == 0 {
@@ -2437,8 +2437,9 @@ func (_m *StateFullInterface) ProcessBatchV2(ctx context.Context, request state.
}
var r0 *state.ProcessBatchResponse
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, state.ProcessRequest, bool) (*state.ProcessBatchResponse, error)); ok {
+ var r1 string
+ var r2 error
+ if rf, ok := ret.Get(0).(func(context.Context, state.ProcessRequest, bool) (*state.ProcessBatchResponse, string, error)); ok {
return rf(ctx, request, updateMerkleTree)
}
if rf, ok := ret.Get(0).(func(context.Context, state.ProcessRequest, bool) *state.ProcessBatchResponse); ok {
@@ -2449,13 +2450,19 @@ func (_m *StateFullInterface) ProcessBatchV2(ctx context.Context, request state.
}
}
- if rf, ok := ret.Get(1).(func(context.Context, state.ProcessRequest, bool) error); ok {
+ if rf, ok := ret.Get(1).(func(context.Context, state.ProcessRequest, bool) string); ok {
r1 = rf(ctx, request, updateMerkleTree)
} else {
- r1 = ret.Error(1)
+ r1 = ret.Get(1).(string)
}
- return r0, r1
+ if rf, ok := ret.Get(2).(func(context.Context, state.ProcessRequest, bool) error); ok {
+ r2 = rf(ctx, request, updateMerkleTree)
+ } else {
+ r2 = ret.Error(2)
+ }
+
+ return r0, r1, r2
}
// StateFullInterface_ProcessBatchV2_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ProcessBatchV2'
@@ -2478,12 +2485,12 @@ func (_c *StateFullInterface_ProcessBatchV2_Call) Run(run func(ctx context.Conte
return _c
}
-func (_c *StateFullInterface_ProcessBatchV2_Call) Return(_a0 *state.ProcessBatchResponse, _a1 error) *StateFullInterface_ProcessBatchV2_Call {
- _c.Call.Return(_a0, _a1)
+func (_c *StateFullInterface_ProcessBatchV2_Call) Return(_a0 *state.ProcessBatchResponse, _a1 string, _a2 error) *StateFullInterface_ProcessBatchV2_Call {
+ _c.Call.Return(_a0, _a1, _a2)
return _c
}
-func (_c *StateFullInterface_ProcessBatchV2_Call) RunAndReturn(run func(context.Context, state.ProcessRequest, bool) (*state.ProcessBatchResponse, error)) *StateFullInterface_ProcessBatchV2_Call {
+func (_c *StateFullInterface_ProcessBatchV2_Call) RunAndReturn(run func(context.Context, state.ProcessRequest, bool) (*state.ProcessBatchResponse, string, error)) *StateFullInterface_ProcessBatchV2_Call {
_c.Call.Return(run)
return _c
}
diff --git a/synchronizer/common/syncinterfaces/state.go b/synchronizer/common/syncinterfaces/state.go
index cafae4104e..0b3e248cbf 100644
--- a/synchronizer/common/syncinterfaces/state.go
+++ b/synchronizer/common/syncinterfaces/state.go
@@ -44,7 +44,7 @@ type StateFullInterface interface {
OpenBatch(ctx context.Context, processingContext state.ProcessingContext, dbTx pgx.Tx) error
CloseBatch(ctx context.Context, receipt state.ProcessingReceipt, dbTx pgx.Tx) error
ProcessBatch(ctx context.Context, request state.ProcessRequest, updateMerkleTree bool) (*state.ProcessBatchResponse, error)
- ProcessBatchV2(ctx context.Context, request state.ProcessRequest, updateMerkleTree bool) (*state.ProcessBatchResponse, error)
+ ProcessBatchV2(ctx context.Context, request state.ProcessRequest, updateMerkleTree bool) (*state.ProcessBatchResponse, string, error)
StoreTransaction(ctx context.Context, batchNumber uint64, processedTx *state.ProcessTransactionResponse, coinbase common.Address, timestamp uint64, egpLog *state.EffectiveGasPriceLog, globalExitRoot, blockInfoRoot common.Hash, dbTx pgx.Tx) (*state.L2Header, error)
GetStateRootByBatchNumber(ctx context.Context, batchNum uint64, dbTx pgx.Tx) (common.Hash, error)
ExecuteBatch(ctx context.Context, batch state.Batch, updateMerkleTree bool, dbTx pgx.Tx) (*executor.ProcessBatchResponse, error)
diff --git a/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync.go b/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync.go
index ea8bbd7fa8..75aebc942c 100644
--- a/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync.go
+++ b/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync.go
@@ -36,7 +36,7 @@ type StateInterface interface {
UpdateWIPBatch(ctx context.Context, receipt state.ProcessingReceipt, dbTx pgx.Tx) error
ResetTrustedState(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error
OpenBatch(ctx context.Context, processingContext state.ProcessingContext, dbTx pgx.Tx) error
- ProcessBatchV2(ctx context.Context, request state.ProcessRequest, updateMerkleTree bool) (*state.ProcessBatchResponse, error)
+ ProcessBatchV2(ctx context.Context, request state.ProcessRequest, updateMerkleTree bool) (*state.ProcessBatchResponse, string, error)
StoreL2Block(ctx context.Context, batchNumber uint64, l2Block *state.ProcessBlockResponse, txsEGPLog []*state.EffectiveGasPriceLog, dbTx pgx.Tx) error
GetL1InfoTreeDataFromBatchL2Data(ctx context.Context, batchL2Data []byte, dbTx pgx.Tx) (map[uint32]state.L1DataV2, common.Hash, common.Hash, error)
GetLastVirtualBatchNum(ctx context.Context, dbTx pgx.Tx) (uint64, error)
@@ -375,7 +375,7 @@ func (b *SyncTrustedBatchExecutorForEtrog) processAndStoreTxs(ctx context.Contex
if request.OldStateRoot == state.ZeroHash {
log.Warnf("%s Processing batch with oldStateRoot == zero....", debugPrefix)
}
- processBatchResp, err := b.state.ProcessBatchV2(ctx, request, true)
+ processBatchResp, _, err := b.state.ProcessBatchV2(ctx, request, true)
if err != nil {
log.Errorf("%s error processing sequencer batch for batch: %v error:%v ", debugPrefix, request.BatchNumber, err)
return nil, err
diff --git a/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync_test.go b/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync_test.go
index 1fc1b46afa..ce09f0ba14 100644
--- a/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync_test.go
+++ b/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync_test.go
@@ -76,7 +76,7 @@ func TestIncrementalProcessUpdateBatchL2DataOnCache(t *testing.T) {
processBatchResp := &state.ProcessBatchResponse{
NewStateRoot: expectedStateRoot,
}
- stateMock.EXPECT().ProcessBatchV2(ctx, mock.Anything, true).Return(processBatchResp, nil).Once()
+ stateMock.EXPECT().ProcessBatchV2(ctx, mock.Anything, true).Return(processBatchResp, "", nil).Once()
syncMock.EXPECT().PendingFlushID(mock.Anything, mock.Anything).Once()
syncMock.EXPECT().CheckFlushID(mock.Anything).Return(nil).Maybe()
@@ -140,7 +140,7 @@ func TestIncrementalProcessUpdateBatchL2DataAndGER(t *testing.T) {
processBatchResp := &state.ProcessBatchResponse{
NewStateRoot: expectedStateRoot,
}
- stateMock.EXPECT().ProcessBatchV2(ctx, mock.Anything, true).Return(processBatchResp, nil).Once()
+ stateMock.EXPECT().ProcessBatchV2(ctx, mock.Anything, true).Return(processBatchResp, "", nil).Once()
syncMock.EXPECT().PendingFlushID(mock.Anything, mock.Anything).Once()
syncMock.EXPECT().CheckFlushID(mock.Anything).Return(nil).Maybe()
@@ -274,7 +274,7 @@ func TestNothingProcessDoesntMatchBatchReprocess(t *testing.T) {
processBatchResp := &state.ProcessBatchResponse{
NewStateRoot: data.TrustedBatch.StateRoot,
}
- testData.stateMock.EXPECT().ProcessBatchV2(testData.ctx, mock.Anything, true).Return(processBatchResp, nil).Once()
+ testData.stateMock.EXPECT().ProcessBatchV2(testData.ctx, mock.Anything, true).Return(processBatchResp, "", nil).Once()
testData.stateMock.EXPECT().GetBatchByNumber(testData.ctx, data.BatchNumber, mock.Anything).Return(&state.Batch{}, nil).Once()
_, err := testData.sut.NothingProcess(testData.ctx, &data, nil)
require.NoError(t, err)
diff --git a/synchronizer/l2_sync/l2_sync_etrog/mocks/state_interface.go b/synchronizer/l2_sync/l2_sync_etrog/mocks/state_interface.go
index 5101bb4b6a..43e84ffba5 100644
--- a/synchronizer/l2_sync/l2_sync_etrog/mocks/state_interface.go
+++ b/synchronizer/l2_sync/l2_sync_etrog/mocks/state_interface.go
@@ -423,7 +423,7 @@ func (_c *StateInterface_OpenBatch_Call) RunAndReturn(run func(context.Context,
}
// ProcessBatchV2 provides a mock function with given fields: ctx, request, updateMerkleTree
-func (_m *StateInterface) ProcessBatchV2(ctx context.Context, request state.ProcessRequest, updateMerkleTree bool) (*state.ProcessBatchResponse, error) {
+func (_m *StateInterface) ProcessBatchV2(ctx context.Context, request state.ProcessRequest, updateMerkleTree bool) (*state.ProcessBatchResponse, string, error) {
ret := _m.Called(ctx, request, updateMerkleTree)
if len(ret) == 0 {
@@ -431,8 +431,9 @@ func (_m *StateInterface) ProcessBatchV2(ctx context.Context, request state.Proc
}
var r0 *state.ProcessBatchResponse
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, state.ProcessRequest, bool) (*state.ProcessBatchResponse, error)); ok {
+ var r1 string
+ var r2 error
+ if rf, ok := ret.Get(0).(func(context.Context, state.ProcessRequest, bool) (*state.ProcessBatchResponse, string, error)); ok {
return rf(ctx, request, updateMerkleTree)
}
if rf, ok := ret.Get(0).(func(context.Context, state.ProcessRequest, bool) *state.ProcessBatchResponse); ok {
@@ -443,13 +444,19 @@ func (_m *StateInterface) ProcessBatchV2(ctx context.Context, request state.Proc
}
}
- if rf, ok := ret.Get(1).(func(context.Context, state.ProcessRequest, bool) error); ok {
+ if rf, ok := ret.Get(1).(func(context.Context, state.ProcessRequest, bool) string); ok {
r1 = rf(ctx, request, updateMerkleTree)
} else {
- r1 = ret.Error(1)
+ r1 = ret.Get(1).(string)
}
- return r0, r1
+ if rf, ok := ret.Get(2).(func(context.Context, state.ProcessRequest, bool) error); ok {
+ r2 = rf(ctx, request, updateMerkleTree)
+ } else {
+ r2 = ret.Error(2)
+ }
+
+ return r0, r1, r2
}
// StateInterface_ProcessBatchV2_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ProcessBatchV2'
@@ -472,12 +479,12 @@ func (_c *StateInterface_ProcessBatchV2_Call) Run(run func(ctx context.Context,
return _c
}
-func (_c *StateInterface_ProcessBatchV2_Call) Return(_a0 *state.ProcessBatchResponse, _a1 error) *StateInterface_ProcessBatchV2_Call {
- _c.Call.Return(_a0, _a1)
+func (_c *StateInterface_ProcessBatchV2_Call) Return(_a0 *state.ProcessBatchResponse, _a1 string, _a2 error) *StateInterface_ProcessBatchV2_Call {
+ _c.Call.Return(_a0, _a1, _a2)
return _c
}
-func (_c *StateInterface_ProcessBatchV2_Call) RunAndReturn(run func(context.Context, state.ProcessRequest, bool) (*state.ProcessBatchResponse, error)) *StateInterface_ProcessBatchV2_Call {
+func (_c *StateInterface_ProcessBatchV2_Call) RunAndReturn(run func(context.Context, state.ProcessRequest, bool) (*state.ProcessBatchResponse, string, error)) *StateInterface_ProcessBatchV2_Call {
_c.Call.Return(run)
return _c
}
diff --git a/synchronizer/synchronizer_test.go b/synchronizer/synchronizer_test.go
index d136d6fef0..73bebe5a12 100644
--- a/synchronizer/synchronizer_test.go
+++ b/synchronizer/synchronizer_test.go
@@ -890,7 +890,7 @@ func expectedCallsForsyncTrustedState(t *testing.T, m *mocks, sync *ClientSynchr
if etrogMode {
m.State.EXPECT().GetL1InfoTreeDataFromBatchL2Data(mock.Anything, mock.Anything, mock.Anything).Return(map[uint32]state.L1DataV2{}, common.Hash{}, common.Hash{}, nil).Times(1)
m.State.EXPECT().ProcessBatchV2(mock.Anything, mock.Anything, mock.Anything).
- Return(&processedBatch, nil).Times(1)
+ Return(&processedBatch, "", nil).Times(1)
m.State.EXPECT().StoreL2Block(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).
Return(nil).Times(1)
m.State.EXPECT().UpdateWIPBatch(mock.Anything, mock.Anything, mock.Anything).
diff --git a/test/config/debug.node.config.toml b/test/config/debug.node.config.toml
index 5072e9f5a1..ccd423daee 100644
--- a/test/config/debug.node.config.toml
+++ b/test/config/debug.node.config.toml
@@ -100,9 +100,10 @@ StateConsistencyCheckInterval = "5s"
BatchMaxDeltaTimestamp = "60s"
L2BlockMaxDeltaTimestamp = "3s"
ResourceExhaustedMarginPct = 10
+ StateRootSyncInterval = "120s"
HaltOnBatchNumber = 0
SequentialBatchSanityCheck = false
- SequentialProcessL2Block = true
+ SequentialProcessL2Block = false
[Sequencer.Finalizer.Metrics]
Interval = "60m"
EnableLog = true
diff --git a/test/config/test.node.config.toml b/test/config/test.node.config.toml
index 6a7a7efd0b..506dde2466 100644
--- a/test/config/test.node.config.toml
+++ b/test/config/test.node.config.toml
@@ -37,8 +37,8 @@ MaxTxDataBytesSize=100000
DefaultMinGasPriceAllowed = 1000000000
MinAllowedGasPriceInterval = "5m"
PollMinAllowedGasPriceInterval = "15s"
-AccountQueue = 64
-GlobalQueue = 1024
+AccountQueue = 0
+GlobalQueue = 0
[Pool.EffectiveGasPrice]
Enabled = false
L1GasPriceFactor = 0.25
@@ -115,9 +115,10 @@ StateConsistencyCheckInterval = "5s"
BatchMaxDeltaTimestamp = "20s"
L2BlockMaxDeltaTimestamp = "4s"
ResourceExhaustedMarginPct = 10
+ StateRootSyncInterval = "60s"
HaltOnBatchNumber = 0
SequentialBatchSanityCheck = false
- SequentialProcessL2Block = true
+ SequentialProcessL2Block = false
[Sequencer.Finalizer.Metrics]
Interval = "60m"
EnableLog = true
diff --git a/test/docker-compose.yml b/test/docker-compose.yml
index 08dbc93431..feead65a40 100644
--- a/test/docker-compose.yml
+++ b/test/docker-compose.yml
@@ -1,4 +1,3 @@
-version: "3.5"
networks:
default:
name: zkevm
@@ -26,7 +25,7 @@ services:
volumes:
- ./config/telegraf.conf:/etc/telegraf/telegraf.conf:ro
- /var/run/docker.sock:/var/run/docker.sock:ro
- user: telegraf:${DOCKERGID}
+ user: telegraf:${DOCKERGID:-}
environment:
- POSTGRES_HOST=grafana-db
- POSTGRES_USER=user
@@ -56,8 +55,8 @@ services:
environment:
- ZKEVM_NODE_STATE_DB_HOST=zkevm-state-db
- ZKEVM_NODE_POOL_DB_HOST=zkevm-pool-db
- - ZKEVM_NODE_MTCLIENT_URI=${ZKEVM_NODE_MTCLIENT_URI}
- - ZKEVM_NODE_EXECUTOR_URI=${ZKEVM_NODE_EXECUTOR_URI}
+ - ZKEVM_NODE_MTCLIENT_URI=${ZKEVM_NODE_MTCLIENT_URI:-}
+ - ZKEVM_NODE_EXECUTOR_URI=${ZKEVM_NODE_EXECUTOR_URI:-}
volumes:
- ./config/test.node.config.toml:/app/config.toml
- ./config/test.genesis.config.json:/app/genesis.json
@@ -77,8 +76,8 @@ services:
environment:
- ZKEVM_NODE_STATE_DB_HOST=zkevm-state-db
- ZKEVM_NODE_POOL_DB_HOST=zkevm-pool-db
- - ZKEVM_NODE_MTCLIENT_URI=${ZKEVM_NODE_MTCLIENT_URI}
- - ZKEVM_NODE_EXECUTOR_URI=${ZKEVM_NODE_EXECUTOR_URI}
+ - ZKEVM_NODE_MTCLIENT_URI=${ZKEVM_NODE_MTCLIENT_URI:-}
+ - ZKEVM_NODE_EXECUTOR_URI=${ZKEVM_NODE_EXECUTOR_URI:-}
- ZKEVM_NODE_ETHERMAN_URL=http://zkevm-v1tov2-l1-network:8545
volumes:
- ./config/test.node.config.toml:/app/config.toml
@@ -96,8 +95,8 @@ services:
- ZKEVM_NODE_STATE_DB_HOST=zkevm-state-db
- ZKEVM_NODE_POOL_DB_HOST=zkevm-pool-db
- ZKEVM_NODE_SEQUENCER_SENDER_ADDRESS=0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266
- - ZKEVM_NODE_MTCLIENT_URI=${ZKEVM_NODE_MTCLIENT_URI}
- - ZKEVM_NODE_EXECUTOR_URI=${ZKEVM_NODE_EXECUTOR_URI}
+ - ZKEVM_NODE_MTCLIENT_URI=${ZKEVM_NODE_MTCLIENT_URI:-}
+ - ZKEVM_NODE_EXECUTOR_URI=${ZKEVM_NODE_EXECUTOR_URI:-}
volumes:
- ./sequencer.keystore:/pk/sequencer.keystore
- ./config/test.node.config.toml:/app/config.toml
@@ -114,8 +113,8 @@ services:
- ZKEVM_NODE_STATE_DB_HOST=zkevm-state-db
- ZKEVM_NODE_POOL_DB_HOST=zkevm-pool-db
- ZKEVM_NODE_SEQUENCER_SENDER_ADDRESS=0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266
- - ZKEVM_NODE_MTCLIENT_URI=${ZKEVM_NODE_MTCLIENT_URI}
- - ZKEVM_NODE_EXECUTOR_URI=${ZKEVM_NODE_EXECUTOR_URI}
+ - ZKEVM_NODE_MTCLIENT_URI=${ZKEVM_NODE_MTCLIENT_URI:-}
+ - ZKEVM_NODE_EXECUTOR_URI=${ZKEVM_NODE_EXECUTOR_URI:-}
- ZKEVM_NODE_ETHERMAN_URL=http://zkevm-v1tov2-l1-network:8545
volumes:
- ./sequencer.keystore:/pk/sequencer.keystore
@@ -136,8 +135,8 @@ services:
environment:
- ZKEVM_NODE_STATE_DB_HOST=zkevm-state-db
- ZKEVM_NODE_POOL_DB_HOST=zkevm-pool-db
- - ZKEVM_NODE_MTCLIENT_URI=${ZKEVM_NODE_MTCLIENT_URI}
- - ZKEVM_NODE_EXECUTOR_URI=${ZKEVM_NODE_EXECUTOR_URI}
+ - ZKEVM_NODE_MTCLIENT_URI=${ZKEVM_NODE_MTCLIENT_URI:-}
+ - ZKEVM_NODE_EXECUTOR_URI=${ZKEVM_NODE_EXECUTOR_URI:-}
volumes:
- ./config/test.node.config.toml:/app/config.toml
- ./config/test.genesis.config.json:/app/genesis.json
@@ -156,8 +155,8 @@ services:
environment:
- ZKEVM_NODE_STATE_DB_HOST=zkevm-state-db
- ZKEVM_NODE_POOL_DB_HOST=zkevm-pool-db
- - ZKEVM_NODE_MTCLIENT_URI=${ZKEVM_NODE_MTCLIENT_URI}
- - ZKEVM_NODE_EXECUTOR_URI=${ZKEVM_NODE_EXECUTOR_URI}
+ - ZKEVM_NODE_MTCLIENT_URI=${ZKEVM_NODE_MTCLIENT_URI:-}
+ - ZKEVM_NODE_EXECUTOR_URI=${ZKEVM_NODE_EXECUTOR_URI:-}
- ZKEVM_NODE_ETHERMAN_URL=http://zkevm-v1tov2-l1-network:8545
volumes:
- ./config/test.node.config.toml:/app/config.toml
@@ -210,8 +209,8 @@ services:
- 9095:9091 # needed if metrics enabled
environment:
- ZKEVM_NODE_STATE_DB_HOST=zkevm-state-db
- - ZKEVM_NODE_MTCLIENT_URI=${ZKEVM_NODE_MTCLIENT_URI}
- - ZKEVM_NODE_EXECUTOR_URI=${ZKEVM_NODE_EXECUTOR_URI}
+ - ZKEVM_NODE_MTCLIENT_URI=${ZKEVM_NODE_MTCLIENT_URI:-}
+ - ZKEVM_NODE_EXECUTOR_URI=${ZKEVM_NODE_EXECUTOR_URI:-}
volumes:
- ./config/test.node.config.toml:/app/config.toml
- ./config/test.genesis.config.json:/app/genesis.json
@@ -227,8 +226,8 @@ services:
- 9095:9091 # needed if metrics enabled
environment:
- ZKEVM_NODE_STATE_DB_HOST=zkevm-state-db
- - ZKEVM_NODE_MTCLIENT_URI=${ZKEVM_NODE_MTCLIENT_URI}
- - ZKEVM_NODE_EXECUTOR_URI=${ZKEVM_NODE_EXECUTOR_URI}
+ - ZKEVM_NODE_MTCLIENT_URI=${ZKEVM_NODE_MTCLIENT_URI:-}
+ - ZKEVM_NODE_EXECUTOR_URI=${ZKEVM_NODE_EXECUTOR_URI:-}
- ZKEVM_NODE_ETHERMAN_URL=http://zkevm-v1tov2-l1-network:8545
volumes:
- ./config/test.node.config.toml:/app/config.toml
@@ -430,8 +429,8 @@ services:
- ZKEVM_NODE_POOL_DB_HOST=zkevm-pool-db
- ZKEVM_NODE_RPC_PORT=8124
- ZKEVM_NODE_RPC_WEBSOCKETS_PORT=8134
- - ZKEVM_NODE_MTCLIENT_URI=${ZKEVM_NODE_MTCLIENT_URI}
- - ZKEVM_NODE_EXECUTOR_URI=${ZKEVM_NODE_EXECUTOR_URI}
+ - ZKEVM_NODE_MTCLIENT_URI=${ZKEVM_NODE_MTCLIENT_URI:-}
+ - ZKEVM_NODE_EXECUTOR_URI=${ZKEVM_NODE_EXECUTOR_URI:-}
volumes:
- ./config/test.node.config.toml:/app/config.toml
- ./config/test.genesis.config.json:/app/genesis.json